# This file was auto-generated by Fern from our API Definition.

from __future__ import annotations

import datetime as dt
import typing

from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
from ..types.agreement_methodology_enum import AgreementMethodologyEnum
from ..types.all_roles_project_list import AllRolesProjectList
from ..types.assignment_settings_request import AssignmentSettingsRequest
from ..types.import_api_request import ImportApiRequest
from ..types.lse_project_create import LseProjectCreate
from ..types.lse_project_response import LseProjectResponse
from ..types.lse_project_update import LseProjectUpdate
from ..types.mode_enum import ModeEnum
from ..types.paginated_all_roles_project_list_list import PaginatedAllRolesProjectListList
from ..types.paginated_lse_project_counts_list import PaginatedLseProjectCountsList
from ..types.prediction_request import PredictionRequest
from ..types.project_label_config import ProjectLabelConfig
from ..types.review_settings_request import ReviewSettingsRequest
from ..types.sampling_de5enum import SamplingDe5Enum
from ..types.skip_queue_enum import SkipQueueEnum
from ..types.user_simple import UserSimple
from ..types.user_simple_request import UserSimpleRequest
from .raw_client import AsyncRawProjectsClient, RawProjectsClient
from .types.duplicate_projects_response import DuplicateProjectsResponse
from .types.import_predictions_projects_response import ImportPredictionsProjectsResponse
from .types.import_tasks_projects_response import ImportTasksProjectsResponse

if typing.TYPE_CHECKING:
    from .assignments.client import AssignmentsClient, AsyncAssignmentsClient
    from .exports.client import AsyncExportsClient, ExportsClient
    from .members.client import AsyncMembersClient, MembersClient
    from .metrics.client import AsyncMetricsClient, MetricsClient
    from .pauses.client import AsyncPausesClient, PausesClient
    from .roles.client import AsyncRolesClient, RolesClient
    from .stats.client import AsyncStatsClient, StatsClient
# Sentinel default for optional request parameters. `typing.cast(typing.Any, ...)`
# evaluates to the Ellipsis object, so a caller-supplied explicit ``None`` can be
# distinguished from an argument that was never passed at all.
OMIT = typing.cast(typing.Any, ...)


class ProjectsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        """Build a projects client on top of the shared sync client wrapper."""
        self._client_wrapper = client_wrapper
        self._raw_client = RawProjectsClient(client_wrapper=client_wrapper)
        # Sub-resource clients start out uninstantiated; ``None`` marks
        # "not created yet" so accessors can build them lazily on demand.
        self._assignments: typing.Optional[AssignmentsClient] = None
        self._exports: typing.Optional[ExportsClient] = None
        self._members: typing.Optional[MembersClient] = None
        self._metrics: typing.Optional[MetricsClient] = None
        self._pauses: typing.Optional[PausesClient] = None
        self._roles: typing.Optional[RolesClient] = None
        self._stats: typing.Optional[StatsClient] = None

    @property
    def with_raw_response(self) -> RawProjectsClient:
        """
        Access the raw-response variant of this client.

        Returns
        -------
        RawProjectsClient
            A client whose methods expose the raw responses instead of parsed data.
        """
        return self._raw_client

    def list(
        self,
        *,
        filter: typing.Optional[str] = None,
        ids: typing.Optional[str] = None,
        include: typing.Optional[str] = None,
        members_limit: typing.Optional[int] = None,
        ordering: typing.Optional[str] = None,
        page: typing.Optional[int] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        title: typing.Optional[str] = None,
        workspaces: typing.Optional[float] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> SyncPager[AllRolesProjectList, PaginatedAllRolesProjectListList]:
        """
        Retrieve a list of projects.

        Parameters
        ----------
        filter : typing.Optional[str]
            Filter projects by pinned status. Use 'pinned_only' to return only pinned projects, 'exclude_pinned' to return only non-pinned projects, or 'all' to return all projects.

        ids : typing.Optional[str]
            Filter id by in list

        include : typing.Optional[str]
            Comma-separated list of count fields to include in the response to optimize performance. Available fields: task_number, finished_task_number, total_predictions_number, total_annotations_number, num_tasks_with_annotations, useful_annotation_number, ground_truth_number, skipped_annotations_number. If not specified, all count fields are included.

        members_limit : typing.Optional[int]
            Maximum number of members to return

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        page : typing.Optional[int]
            A page number within the paginated result set.

        page_size : typing.Optional[int]
            Number of results to return per page.

        search : typing.Optional[str]
            Search term for project title and description

        state : typing.Optional[str]
            Filter current_state by exact match

        title : typing.Optional[str]
            Filter title by contains (case-insensitive)

        workspaces : typing.Optional[float]
            Filter workspaces by exact match

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SyncPager[AllRolesProjectList, PaginatedAllRolesProjectListList]


        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        response = client.projects.list()
        for item in response:
            print(item)
        # alternatively, you can paginate page-by-page
        for page in response.iter_pages():
            print(page)
        """
        # The raw client already returns the pager object, so it is handed
        # straight back to the caller — no ``.data`` unwrapping here, unlike
        # the other methods in this client.
        return self._raw_client.list(
            filter=filter,
            ids=ids,
            include=include,
            members_limit=members_limit,
            ordering=ordering,
            page=page,
            page_size=page_size,
            search=search,
            state=state,
            title=title,
            workspaces=workspaces,
            request_options=request_options,
        )

    def create(
        self,
        *,
        annotator_evaluation_enabled: typing.Optional[bool] = OMIT,
        color: typing.Optional[str] = OMIT,
        control_weights: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        created_by: typing.Optional[UserSimpleRequest] = OMIT,
        description: typing.Optional[str] = OMIT,
        enable_empty_annotation: typing.Optional[bool] = OMIT,
        evaluate_predictions_automatically: typing.Optional[bool] = OMIT,
        expert_instruction: typing.Optional[str] = OMIT,
        is_draft: typing.Optional[bool] = OMIT,
        is_published: typing.Optional[bool] = OMIT,
        label_config: typing.Optional[str] = OMIT,
        maximum_annotations: typing.Optional[int] = OMIT,
        min_annotations_to_start_training: typing.Optional[int] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        organization: typing.Optional[int] = OMIT,
        overlap_cohort_percentage: typing.Optional[int] = OMIT,
        pinned_at: typing.Optional[dt.datetime] = OMIT,
        reveal_preannotations_interactively: typing.Optional[bool] = OMIT,
        sampling: typing.Optional[SamplingDe5Enum] = OMIT,
        show_annotation_history: typing.Optional[bool] = OMIT,
        show_collab_predictions: typing.Optional[bool] = OMIT,
        show_ground_truth_first: typing.Optional[bool] = OMIT,
        show_instruction: typing.Optional[bool] = OMIT,
        show_overlap_first: typing.Optional[bool] = OMIT,
        show_skip_button: typing.Optional[bool] = OMIT,
        skip_queue: typing.Optional[SkipQueueEnum] = OMIT,
        task_data_login: typing.Optional[str] = OMIT,
        task_data_password: typing.Optional[str] = OMIT,
        title: typing.Optional[str] = OMIT,
        workspace: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectCreate:
        """
        Create a project for a specific organization.

        Parameters
        ----------
        annotator_evaluation_enabled : typing.Optional[bool]
            Enable annotator evaluation for the project

        color : typing.Optional[str]

        control_weights : typing.Optional[typing.Dict[str, typing.Any]]
            Dict of weights for each control tag in metric calculation.

        created_by : typing.Optional[UserSimpleRequest]
            Project owner

        description : typing.Optional[str]
            Project Description

        enable_empty_annotation : typing.Optional[bool]
            Allow annotators to submit empty annotations

        evaluate_predictions_automatically : typing.Optional[bool]
            Retrieve and display predictions when loading a task

        expert_instruction : typing.Optional[str]
            Labeling instructions in HTML format

        is_draft : typing.Optional[bool]
            Whether or not the project is in the middle of being created

        is_published : typing.Optional[bool]
            Whether or not the project is published to annotators

        label_config : typing.Optional[str]
            Label config in XML format. See more about it in documentation

        maximum_annotations : typing.Optional[int]
            Maximum number of annotations for one task. If the number of annotations per task is equal or greater to this value, the task is completed (is_labeled=True)

        min_annotations_to_start_training : typing.Optional[int]
            Minimum number of completed tasks after which model training is started

        model_version : typing.Optional[str]
            Machine learning model version

        organization : typing.Optional[int]

        overlap_cohort_percentage : typing.Optional[int]

        pinned_at : typing.Optional[dt.datetime]
            Pinned date and time

        reveal_preannotations_interactively : typing.Optional[bool]
            Reveal pre-annotations interactively

        sampling : typing.Optional[SamplingDe5Enum]

        show_annotation_history : typing.Optional[bool]
            Show annotation history to annotator

        show_collab_predictions : typing.Optional[bool]
            If set, the annotator can view model predictions

        show_ground_truth_first : typing.Optional[bool]
            Onboarding mode (true): show ground truth tasks first in the labeling stream

        show_instruction : typing.Optional[bool]
            Show instructions to the annotator before they start

        show_overlap_first : typing.Optional[bool]

        show_skip_button : typing.Optional[bool]
            Show a skip button in interface and allow annotators to skip the task

        skip_queue : typing.Optional[SkipQueueEnum]

        task_data_login : typing.Optional[str]
            Task data credentials: login

        task_data_password : typing.Optional[str]
            Task data credentials: password

        title : typing.Optional[str]
            Project Title

        workspace : typing.Optional[int]
            In Workspace

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectCreate


        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.create()
        """
        # Every parameter is forwarded verbatim. Parameters left at the OMIT
        # sentinel are presumably excluded from the request body by the raw
        # client (as opposed to being serialized as null) — TODO confirm in
        # raw_client.
        _response = self._raw_client.create(
            annotator_evaluation_enabled=annotator_evaluation_enabled,
            color=color,
            control_weights=control_weights,
            created_by=created_by,
            description=description,
            enable_empty_annotation=enable_empty_annotation,
            evaluate_predictions_automatically=evaluate_predictions_automatically,
            expert_instruction=expert_instruction,
            is_draft=is_draft,
            is_published=is_published,
            label_config=label_config,
            maximum_annotations=maximum_annotations,
            min_annotations_to_start_training=min_annotations_to_start_training,
            model_version=model_version,
            organization=organization,
            overlap_cohort_percentage=overlap_cohort_percentage,
            pinned_at=pinned_at,
            reveal_preannotations_interactively=reveal_preannotations_interactively,
            sampling=sampling,
            show_annotation_history=show_annotation_history,
            show_collab_predictions=show_collab_predictions,
            show_ground_truth_first=show_ground_truth_first,
            show_instruction=show_instruction,
            show_overlap_first=show_overlap_first,
            show_skip_button=show_skip_button,
            skip_queue=skip_queue,
            task_data_login=task_data_login,
            task_data_password=task_data_password,
            title=title,
            workspace=workspace,
            request_options=request_options,
        )
        # Unwrap the parsed payload from the raw response wrapper.
        return _response.data

    def list_counts(
        self,
        *,
        filter: typing.Optional[str] = None,
        ids: typing.Optional[str] = None,
        include: typing.Optional[str] = None,
        ordering: typing.Optional[str] = None,
        page: typing.Optional[int] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        title: typing.Optional[str] = None,
        workspaces: typing.Optional[float] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> PaginatedLseProjectCountsList:
        """
        Returns a list of projects with their counts. For example, task_number which is the total task number in project

        Parameters
        ----------
        filter : typing.Optional[str]
            Filter projects by pinned status. Use 'pinned_only' to return only pinned projects, 'exclude_pinned' to return only non-pinned projects, or 'all' to return all projects.

        ids : typing.Optional[str]
            Filter id by in list

        include : typing.Optional[str]
            Comma-separated list of count fields to include in the response to optimize performance. Available fields: task_number, finished_task_number, total_predictions_number, total_annotations_number, num_tasks_with_annotations, useful_annotation_number, ground_truth_number, skipped_annotations_number. If not specified, all count fields are included.

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        page : typing.Optional[int]
            A page number within the paginated result set.

        page_size : typing.Optional[int]
            Number of results to return per page.

        search : typing.Optional[str]
            Search term for project title and description

        state : typing.Optional[str]
            Filter current_state by exact match

        title : typing.Optional[str]
            Filter title by contains (case-insensitive)

        workspaces : typing.Optional[float]
            Filter workspaces by exact match

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        PaginatedLseProjectCountsList


        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.list_counts()
        """
        # Same filters as ``list`` (minus ``members_limit``), forwarded
        # verbatim to the raw client.
        _response = self._raw_client.list_counts(
            filter=filter,
            ids=ids,
            include=include,
            ordering=ordering,
            page=page,
            page_size=page_size,
            search=search,
            state=state,
            title=title,
            workspaces=workspaces,
            request_options=request_options,
        )
        # Unwrap the parsed payload from the raw response wrapper.
        return _response.data

    def get(
        self,
        id: int,
        *,
        members_limit: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectResponse:
        """
        Fetch a single project by its ID.

        Parameters
        ----------
        id : int
            ID of the project to retrieve.

        members_limit : typing.Optional[int]
            Maximum number of members to return

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectResponse
            Project information. Not all fields are available for all roles.

        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.get(
            id=1,
        )
        """
        # Delegate to the raw client, then hand back only the parsed payload.
        raw_response = self._raw_client.get(id, members_limit=members_limit, request_options=request_options)
        return raw_response.data

    def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Remove the project identified by ``id``.

        Parameters
        ----------
        id : int
            ID of the project to delete.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None

        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.delete(
            id=1,
        )
        """
        # Delegate to the raw client; the unwrapped payload is None for deletes.
        return self._raw_client.delete(id, request_options=request_options).data

    def update(
        self,
        id: int,
        *,
        members_limit: typing.Optional[int] = None,
        agreement_methodology: typing.Optional[AgreementMethodologyEnum] = OMIT,
        agreement_threshold: typing.Optional[str] = OMIT,
        annotation_limit_count: typing.Optional[int] = OMIT,
        annotation_limit_percent: typing.Optional[str] = OMIT,
        annotator_evaluation_continuous_tasks: typing.Optional[int] = OMIT,
        annotator_evaluation_enabled: typing.Optional[bool] = OMIT,
        annotator_evaluation_minimum_score: typing.Optional[str] = OMIT,
        annotator_evaluation_minimum_tasks: typing.Optional[int] = OMIT,
        annotator_evaluation_onboarding_tasks: typing.Optional[int] = OMIT,
        assignment_settings: typing.Optional[AssignmentSettingsRequest] = OMIT,
        color: typing.Optional[str] = OMIT,
        comment_classification_config: typing.Optional[str] = OMIT,
        control_weights: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        created_by: typing.Optional[UserSimpleRequest] = OMIT,
        custom_script: typing.Optional[str] = OMIT,
        custom_task_lock_ttl: typing.Optional[int] = OMIT,
        description: typing.Optional[str] = OMIT,
        enable_empty_annotation: typing.Optional[bool] = OMIT,
        evaluate_predictions_automatically: typing.Optional[bool] = OMIT,
        expert_instruction: typing.Optional[str] = OMIT,
        is_draft: typing.Optional[bool] = OMIT,
        is_published: typing.Optional[bool] = OMIT,
        label_config: typing.Optional[str] = OMIT,
        max_additional_annotators_assignable: typing.Optional[int] = OMIT,
        maximum_annotations: typing.Optional[int] = OMIT,
        min_annotations_to_start_training: typing.Optional[int] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        organization: typing.Optional[int] = OMIT,
        overlap_cohort_percentage: typing.Optional[int] = OMIT,
        pause_on_failed_annotator_evaluation: typing.Optional[bool] = OMIT,
        pinned_at: typing.Optional[dt.datetime] = OMIT,
        require_comment_on_skip: typing.Optional[bool] = OMIT,
        reveal_preannotations_interactively: typing.Optional[bool] = OMIT,
        review_settings: typing.Optional[ReviewSettingsRequest] = OMIT,
        sampling: typing.Optional[SamplingDe5Enum] = OMIT,
        show_annotation_history: typing.Optional[bool] = OMIT,
        show_collab_predictions: typing.Optional[bool] = OMIT,
        show_ground_truth_first: typing.Optional[bool] = OMIT,
        show_instruction: typing.Optional[bool] = OMIT,
        show_overlap_first: typing.Optional[bool] = OMIT,
        show_skip_button: typing.Optional[bool] = OMIT,
        show_unused_data_columns_to_annotators: typing.Optional[bool] = OMIT,
        skip_queue: typing.Optional[SkipQueueEnum] = OMIT,
        strict_task_overlap: typing.Optional[bool] = OMIT,
        task_data_login: typing.Optional[str] = OMIT,
        task_data_password: typing.Optional[str] = OMIT,
        title: typing.Optional[str] = OMIT,
        workspace: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectUpdate:
        """
        Update the details of a specific project.

        Parameters
        ----------
        id : int

        members_limit : typing.Optional[int]
            Maximum number of members to return

        agreement_methodology : typing.Optional[AgreementMethodologyEnum]
            Methodology (Consensus / Pairwise Averaging)

            * `consensus` - Consensus
            * `pairwise` - Pairwise Averaging

        agreement_threshold : typing.Optional[str]
            Agreement threshold

        annotation_limit_count : typing.Optional[int]
            Limit by number of tasks

        annotation_limit_percent : typing.Optional[str]
            Limit by percentage of tasks

        annotator_evaluation_continuous_tasks : typing.Optional[int]
            Continuous Evaluation: Required tasks

        annotator_evaluation_enabled : typing.Optional[bool]
            Evaluate all annotators against ground truth

        annotator_evaluation_minimum_score : typing.Optional[str]
            Score required to pass evaluation

        annotator_evaluation_minimum_tasks : typing.Optional[int]
            Number of tasks for evaluation

        annotator_evaluation_onboarding_tasks : typing.Optional[int]
            Onboarding Evaluation: Required tasks

        assignment_settings : typing.Optional[AssignmentSettingsRequest]

        color : typing.Optional[str]
            Color

        comment_classification_config : typing.Optional[str]

        control_weights : typing.Optional[typing.Dict[str, typing.Any]]
            Dict of weights for each control tag in metric calculation.

        created_by : typing.Optional[UserSimpleRequest]
            Project owner

        custom_script : typing.Optional[str]
            Plugins

        custom_task_lock_ttl : typing.Optional[int]
            Task reservation time. TTL in seconds (UI displays and edits this value in minutes).

        description : typing.Optional[str]
            Description

        enable_empty_annotation : typing.Optional[bool]
            Allow empty annotations

        evaluate_predictions_automatically : typing.Optional[bool]
            Retrieve and display predictions when loading a task

        expert_instruction : typing.Optional[str]
            Instructions

        is_draft : typing.Optional[bool]
            Whether or not the project is in the middle of being created

        is_published : typing.Optional[bool]
            Whether or not the project is published to annotators

        label_config : typing.Optional[str]
            Labeling Configuration

        max_additional_annotators_assignable : typing.Optional[int]
            Maximum additional annotators

        maximum_annotations : typing.Optional[int]
            Annotations per task

        min_annotations_to_start_training : typing.Optional[int]
            Minimum number of completed tasks after which model training is started

        model_version : typing.Optional[str]
            Machine learning model version

        organization : typing.Optional[int]

        overlap_cohort_percentage : typing.Optional[int]
            Annotations per task coverage

        pause_on_failed_annotator_evaluation : typing.Optional[bool]
            Pause annotator on failed evaluation

        pinned_at : typing.Optional[dt.datetime]
            Pinned date and time

        require_comment_on_skip : typing.Optional[bool]
            Require comment to skip

        reveal_preannotations_interactively : typing.Optional[bool]
            Reveal pre-annotations interactively

        review_settings : typing.Optional[ReviewSettingsRequest]

        sampling : typing.Optional[SamplingDe5Enum]

        show_annotation_history : typing.Optional[bool]
            Show Data Manager to Annotators

        show_collab_predictions : typing.Optional[bool]
            Use predictions to pre-label Tasks

        show_ground_truth_first : typing.Optional[bool]
            Onboarding mode (true): show ground truth tasks first in the labeling stream

        show_instruction : typing.Optional[bool]
            Show instructions before labeling

        show_overlap_first : typing.Optional[bool]
            Show tasks with overlap first

        show_skip_button : typing.Optional[bool]
            Allow skipping tasks

        show_unused_data_columns_to_annotators : typing.Optional[bool]
            Show only columns used in labeling configuration to Annotators. API uses inverse field semantics here: set false to show only used columns, set true to show all task.data columns.

        skip_queue : typing.Optional[SkipQueueEnum]

        strict_task_overlap : typing.Optional[bool]
            Enforce strict overlap limit

        task_data_login : typing.Optional[str]
            Login

        task_data_password : typing.Optional[str]
            Password

        title : typing.Optional[str]
            Project Name

        workspace : typing.Optional[int]
            Workspace

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectUpdate


        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.update(
            id=1,
        )
        """
        # Every field is forwarded verbatim. Fields left at the OMIT sentinel
        # are presumably excluded from the request body by the raw client
        # (rather than serialized as null), making this a partial update —
        # TODO confirm in raw_client.
        _response = self._raw_client.update(
            id,
            members_limit=members_limit,
            agreement_methodology=agreement_methodology,
            agreement_threshold=agreement_threshold,
            annotation_limit_count=annotation_limit_count,
            annotation_limit_percent=annotation_limit_percent,
            annotator_evaluation_continuous_tasks=annotator_evaluation_continuous_tasks,
            annotator_evaluation_enabled=annotator_evaluation_enabled,
            annotator_evaluation_minimum_score=annotator_evaluation_minimum_score,
            annotator_evaluation_minimum_tasks=annotator_evaluation_minimum_tasks,
            annotator_evaluation_onboarding_tasks=annotator_evaluation_onboarding_tasks,
            assignment_settings=assignment_settings,
            color=color,
            comment_classification_config=comment_classification_config,
            control_weights=control_weights,
            created_by=created_by,
            custom_script=custom_script,
            custom_task_lock_ttl=custom_task_lock_ttl,
            description=description,
            enable_empty_annotation=enable_empty_annotation,
            evaluate_predictions_automatically=evaluate_predictions_automatically,
            expert_instruction=expert_instruction,
            is_draft=is_draft,
            is_published=is_published,
            label_config=label_config,
            max_additional_annotators_assignable=max_additional_annotators_assignable,
            maximum_annotations=maximum_annotations,
            min_annotations_to_start_training=min_annotations_to_start_training,
            model_version=model_version,
            organization=organization,
            overlap_cohort_percentage=overlap_cohort_percentage,
            pause_on_failed_annotator_evaluation=pause_on_failed_annotator_evaluation,
            pinned_at=pinned_at,
            require_comment_on_skip=require_comment_on_skip,
            reveal_preannotations_interactively=reveal_preannotations_interactively,
            review_settings=review_settings,
            sampling=sampling,
            show_annotation_history=show_annotation_history,
            show_collab_predictions=show_collab_predictions,
            show_ground_truth_first=show_ground_truth_first,
            show_instruction=show_instruction,
            show_overlap_first=show_overlap_first,
            show_skip_button=show_skip_button,
            show_unused_data_columns_to_annotators=show_unused_data_columns_to_annotators,
            skip_queue=skip_queue,
            strict_task_overlap=strict_task_overlap,
            task_data_login=task_data_login,
            task_data_password=task_data_password,
            title=title,
            workspace=workspace,
            request_options=request_options,
        )
        # Unwrap the parsed payload from the raw response wrapper.
        return _response.data

    def list_unique_annotators(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.List[UserSimple]:
        """
        List the distinct users who have submitted annotations in the given project.

        Parameters
        ----------
        id : int
            ID of the project to inspect.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[UserSimple]
            List of annotator users

        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.list_unique_annotators(
            id=1,
        )
        """
        # Delegate to the raw client and return only the parsed payload.
        return self._raw_client.list_unique_annotators(id, request_options=request_options).data

    def duplicate(
        self,
        id: int,
        *,
        mode: ModeEnum,
        title: str,
        workspace: int,
        description: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> DuplicateProjectsResponse:
        """
        <Card href="https://humansignal.com/goenterprise">
                <img style="pointer-events: none; margin-left: 0px; margin-right: 0px;" src="https://docs.humansignal.com/images/badge.svg" alt="Label Studio Enterprise badge"/>
                <p style="margin-top: 10px; font-size: 14px;">
                    This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
                </p>
            </Card>
        Create a copy of an existing project.

        Parameters
        ----------
        id : int
            ID of the project to copy.

        mode : ModeEnum
            What to Duplicate (Project configuration only / Project configuration and tasks)

            * `settings` - Only settings
            * `settings,data` - Settings and tasks

        title : str
            Project Name

        workspace : int
            Destination Workspace

        description : typing.Optional[str]
            Project Description

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DuplicateProjectsResponse
            Project duplicated

        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.duplicate(
            id=1,
            mode="settings",
            title="title",
            workspace=1,
        )
        """
        # Hand the call off to the raw client, then unwrap the parsed payload.
        raw_response = self._raw_client.duplicate(
            id,
            mode=mode,
            title=title,
            workspace=workspace,
            description=description,
            request_options=request_options,
        )
        return raw_response.data

    def import_tasks(
        self,
        id: int,
        *,
        request: typing.Sequence[ImportApiRequest],
        commit_to_project: typing.Optional[bool] = None,
        preannotated_from_fields: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
        return_task_ids: typing.Optional[bool] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> ImportTasksProjectsResponse:
        """
        Import data as labeling tasks in bulk using this API endpoint. You can use this API endpoint to import multiple tasks.
        One POST request is limited at 250K tasks and 200 MB.

        **Note:** Imported data is verified against a project *label_config* and must
        include all variables that were used in the *label_config*. For example,
        if the label configuration has a *$text* variable, then each item in a data object
        must include a "text" field.
        <br>

        ## Async Import Behavior
        <hr style="opacity:0.3">

        **For non-Community editions, this endpoint processes imports asynchronously.**

        - The POST request **can fail** for invalid parameters, malformed request body, or other request-level validation errors.
        - However, **data validation errors** that occur during import processing are handled asynchronously and will not cause the POST request to fail.
        - Upon successful request validation, a response is returned: `{"import": <import_id>}`
        - Use the returned `import_id` to poll the GET `/api/projects/{project_id}/imports/{import_id}` endpoint to check the import status and see any data validation errors.
        - Data-level errors and import failures will only be visible in the GET request response.

        For Community edition, imports are processed synchronously and return task counts immediately.
        <br>

        ## POST requests
        <hr style="opacity:0.3">

        There are three possible ways to import tasks with this endpoint:

        ### 1. **POST with data**
        Send JSON tasks as POST data. Only JSON is supported for POSTing files directly.
        Update this example to specify your authorization token and Label Studio instance host, then run the following from
        the command line.

        ```bash
        curl -H 'Content-Type: application/json' -H 'Authorization: Token abc123' \\
        -X POST 'http://localhost:8000/api/projects/1/import' --data '[{"text": "Some text 1"}, {"text": "Some text 2"}]'
        ```

        ### 2. **POST with files**
        Send tasks as files. You can attach multiple files with different names.

        - **JSON**: text files in JavaScript object notation format
        - **CSV**: text files with tables in Comma Separated Values format
        - **TSV**: text files with tables in Tab Separated Value format
        - **TXT**: simple text files are similar to CSV with one column and no header, supported for projects with one source only

        Update this example to specify your authorization token, Label Studio instance host, and file name and path,
        then run the following from the command line:

        ```bash
        curl -H 'Authorization: Token abc123' \\
        -X POST 'http://localhost:8000/api/projects/1/import' -F 'file=@path/to/my_file.csv'
        ```

        ### 3. **POST with URL**
        You can also provide a URL to a file with labeling tasks. Supported file formats are the same as in option 2.

        ```bash
        curl -H 'Content-Type: application/json' -H 'Authorization: Token abc123' \\
        -X POST 'http://localhost:8000/api/projects/1/import' \\
        --data '[{"url": "http://example.com/test1.csv"}, {"url": "http://example.com/test2.csv"}]'
        ```

        <br>

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        request : typing.Sequence[ImportApiRequest]
            Tasks to import.

        commit_to_project : typing.Optional[bool]
            Set to "true" to immediately commit tasks to the project.

        preannotated_from_fields : typing.Optional[typing.Union[str, typing.Sequence[str]]]
            List of fields to preannotate from the task data. For example, if you provide a list of `{"text": "text", "prediction": "label"}` items in the request, the system will create a task with the `text` field and a prediction with the `label` field when `preannotated_from_fields=["prediction"]`.

        return_task_ids : typing.Optional[bool]
            Set to "true" to return task IDs in the response.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ImportTasksProjectsResponse
            Tasks successfully imported or import queued. **For non-Community editions**, the response will be `{"import": <import_id>}` which you can use to poll the import status. **For Community edition**, the response contains task counts and is processed synchronously.

        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.import_tasks(
            id=1,
            request=[],
        )
        """
        # Delegate to the raw client and return the parsed response body.
        _response = self._raw_client.import_tasks(
            id,
            request=request,
            commit_to_project=commit_to_project,
            preannotated_from_fields=preannotated_from_fields,
            return_task_ids=return_task_ids,
            request_options=request_options,
        )
        return _response.data

    def import_predictions(
        self,
        id: int,
        *,
        request: typing.Sequence[PredictionRequest],
        request_options: typing.Optional[RequestOptions] = None,
    ) -> ImportPredictionsProjectsResponse:
        """
        Import model predictions for tasks in the specified project.

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        request : typing.Sequence[PredictionRequest]
            Predictions to import.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ImportPredictionsProjectsResponse
            Predictions successfully imported

        Examples
        --------
        from label_studio_sdk import LabelStudio, PredictionRequest

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.import_predictions(
            id=1,
            request=[
                PredictionRequest(
                    result=[{"key": "value"}],
                    task=1,
                )
            ],
        )
        """
        # Delegate to the raw client, then unwrap the parsed payload.
        raw_response = self._raw_client.import_predictions(
            id,
            request=request,
            request_options=request_options,
        )
        return raw_response.data

    def validate_label_config(
        self, id: int, *, label_config: str, request_options: typing.Optional[RequestOptions] = None
    ) -> ProjectLabelConfig:
        """
        Determine whether the label configuration for a specific project is valid.

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        label_config : str
            Label config in XML format. See more about it in documentation

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ProjectLabelConfig


        Examples
        --------
        from label_studio_sdk import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.projects.validate_label_config(
            id=1,
            label_config="label_config",
        )
        """
        # Delegate to the raw client, then unwrap the parsed payload.
        raw_response = self._raw_client.validate_label_config(
            id,
            label_config=label_config,
            request_options=request_options,
        )
        return raw_response.data

    @property
    def roles(self):
        """Roles sub-client, imported and constructed on first access, then cached."""
        if self._roles is not None:
            return self._roles
        from .roles.client import RolesClient  # noqa: E402

        self._roles = RolesClient(client_wrapper=self._client_wrapper)
        return self._roles

    @property
    def exports(self):
        """Exports sub-client, imported and constructed on first access, then cached."""
        if self._exports is not None:
            return self._exports
        from .exports.client import ExportsClient  # noqa: E402

        self._exports = ExportsClient(client_wrapper=self._client_wrapper)
        return self._exports

    @property
    def members(self):
        """Members sub-client, imported and constructed on first access, then cached."""
        if self._members is not None:
            return self._members
        from .members.client import MembersClient  # noqa: E402

        self._members = MembersClient(client_wrapper=self._client_wrapper)
        return self._members

    @property
    def metrics(self):
        """Metrics sub-client, imported and constructed on first access, then cached."""
        if self._metrics is not None:
            return self._metrics
        from .metrics.client import MetricsClient  # noqa: E402

        self._metrics = MetricsClient(client_wrapper=self._client_wrapper)
        return self._metrics

    @property
    def stats(self):
        """Stats sub-client, imported and constructed on first access, then cached."""
        if self._stats is not None:
            return self._stats
        from .stats.client import StatsClient  # noqa: E402

        self._stats = StatsClient(client_wrapper=self._client_wrapper)
        return self._stats

    @property
    def assignments(self):
        """Assignments sub-client, imported and constructed on first access, then cached."""
        if self._assignments is not None:
            return self._assignments
        from .assignments.client import AssignmentsClient  # noqa: E402

        self._assignments = AssignmentsClient(client_wrapper=self._client_wrapper)
        return self._assignments

    @property
    def pauses(self):
        """Pauses sub-client, imported and constructed on first access, then cached."""
        if self._pauses is not None:
            return self._pauses
        from .pauses.client import PausesClient  # noqa: E402

        self._pauses = PausesClient(client_wrapper=self._client_wrapper)
        return self._pauses


class AsyncProjectsClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        """Initialize the async projects client around the shared client wrapper."""
        self._client_wrapper = client_wrapper
        self._raw_client = AsyncRawProjectsClient(client_wrapper=client_wrapper)
        # Sub-client slots; each is instantiated lazily on first property access.
        self._roles: typing.Optional[AsyncRolesClient] = None
        self._exports: typing.Optional[AsyncExportsClient] = None
        self._members: typing.Optional[AsyncMembersClient] = None
        self._metrics: typing.Optional[AsyncMetricsClient] = None
        self._stats: typing.Optional[AsyncStatsClient] = None
        self._assignments: typing.Optional[AsyncAssignmentsClient] = None
        self._pauses: typing.Optional[AsyncPausesClient] = None

    @property
    def with_raw_response(self) -> AsyncRawProjectsClient:
        """
        Raw variant of this client whose methods return full HTTP responses
        instead of parsed payloads.

        Returns
        -------
        AsyncRawProjectsClient
        """
        return self._raw_client

    async def list(
        self,
        *,
        filter: typing.Optional[str] = None,
        ids: typing.Optional[str] = None,
        include: typing.Optional[str] = None,
        members_limit: typing.Optional[int] = None,
        ordering: typing.Optional[str] = None,
        page: typing.Optional[int] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        title: typing.Optional[str] = None,
        workspaces: typing.Optional[float] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncPager[AllRolesProjectList, PaginatedAllRolesProjectListList]:
        """
        Retrieve a list of projects.

        Parameters
        ----------
        filter : typing.Optional[str]
            Filter projects by pinned status. Use 'pinned_only' to return only pinned projects, 'exclude_pinned' to return only non-pinned projects, or 'all' to return all projects.

        ids : typing.Optional[str]
            Filter id by in list

        include : typing.Optional[str]
            Comma-separated list of count fields to include in the response to optimize performance. Available fields: task_number, finished_task_number, total_predictions_number, total_annotations_number, num_tasks_with_annotations, useful_annotation_number, ground_truth_number, skipped_annotations_number. If not specified, all count fields are included.

        members_limit : typing.Optional[int]
            Maximum number of members to return

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        page : typing.Optional[int]
            A page number within the paginated result set.

        page_size : typing.Optional[int]
            Number of results to return per page.

        search : typing.Optional[str]
            Search term for project title and description

        state : typing.Optional[str]
            Filter current_state by exact match

        title : typing.Optional[str]
            Filter title by contains (case-insensitive)

        workspaces : typing.Optional[float]
            Filter workspaces by exact match

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncPager[AllRolesProjectList, PaginatedAllRolesProjectListList]


        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = await client.projects.list()
            async for item in response:
                print(item)

            # alternatively, you can paginate page-by-page
            async for page in response.iter_pages():
                print(page)


        asyncio.run(main())
        """
        # Delegate to the raw client, which already returns the pager
        # (no `.data` unwrap here, unlike the other endpoint wrappers).
        return await self._raw_client.list(
            filter=filter,
            ids=ids,
            include=include,
            members_limit=members_limit,
            ordering=ordering,
            page=page,
            page_size=page_size,
            search=search,
            state=state,
            title=title,
            workspaces=workspaces,
            request_options=request_options,
        )

    async def create(
        self,
        *,
        annotator_evaluation_enabled: typing.Optional[bool] = OMIT,
        color: typing.Optional[str] = OMIT,
        control_weights: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        created_by: typing.Optional[UserSimpleRequest] = OMIT,
        description: typing.Optional[str] = OMIT,
        enable_empty_annotation: typing.Optional[bool] = OMIT,
        evaluate_predictions_automatically: typing.Optional[bool] = OMIT,
        expert_instruction: typing.Optional[str] = OMIT,
        is_draft: typing.Optional[bool] = OMIT,
        is_published: typing.Optional[bool] = OMIT,
        label_config: typing.Optional[str] = OMIT,
        maximum_annotations: typing.Optional[int] = OMIT,
        min_annotations_to_start_training: typing.Optional[int] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        organization: typing.Optional[int] = OMIT,
        overlap_cohort_percentage: typing.Optional[int] = OMIT,
        pinned_at: typing.Optional[dt.datetime] = OMIT,
        reveal_preannotations_interactively: typing.Optional[bool] = OMIT,
        sampling: typing.Optional[SamplingDe5Enum] = OMIT,
        show_annotation_history: typing.Optional[bool] = OMIT,
        show_collab_predictions: typing.Optional[bool] = OMIT,
        show_ground_truth_first: typing.Optional[bool] = OMIT,
        show_instruction: typing.Optional[bool] = OMIT,
        show_overlap_first: typing.Optional[bool] = OMIT,
        show_skip_button: typing.Optional[bool] = OMIT,
        skip_queue: typing.Optional[SkipQueueEnum] = OMIT,
        task_data_login: typing.Optional[str] = OMIT,
        task_data_password: typing.Optional[str] = OMIT,
        title: typing.Optional[str] = OMIT,
        workspace: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectCreate:
        """
        Create a project for a specific organization.

        Parameters
        ----------
        annotator_evaluation_enabled : typing.Optional[bool]
            Enable annotator evaluation for the project

        color : typing.Optional[str]

        control_weights : typing.Optional[typing.Dict[str, typing.Any]]
            Dict of weights for each control tag in metric calculation.

        created_by : typing.Optional[UserSimpleRequest]
            Project owner

        description : typing.Optional[str]
            Project Description

        enable_empty_annotation : typing.Optional[bool]
            Allow annotators to submit empty annotations

        evaluate_predictions_automatically : typing.Optional[bool]
            Retrieve and display predictions when loading a task

        expert_instruction : typing.Optional[str]
            Labeling instructions in HTML format

        is_draft : typing.Optional[bool]
            Whether or not the project is in the middle of being created

        is_published : typing.Optional[bool]
            Whether or not the project is published to annotators

        label_config : typing.Optional[str]
            Label config in XML format. See more about it in documentation

        maximum_annotations : typing.Optional[int]
            Maximum number of annotations for one task. If the number of annotations per task is equal or greater to this value, the task is completed (is_labeled=True)

        min_annotations_to_start_training : typing.Optional[int]
            Minimum number of completed tasks after which model training is started

        model_version : typing.Optional[str]
            Machine learning model version

        organization : typing.Optional[int]

        overlap_cohort_percentage : typing.Optional[int]

        pinned_at : typing.Optional[dt.datetime]
            Pinned date and time

        reveal_preannotations_interactively : typing.Optional[bool]
            Reveal pre-annotations interactively

        sampling : typing.Optional[SamplingDe5Enum]

        show_annotation_history : typing.Optional[bool]
            Show annotation history to annotator

        show_collab_predictions : typing.Optional[bool]
            If set, the annotator can view model predictions

        show_ground_truth_first : typing.Optional[bool]
            Onboarding mode (true): show ground truth tasks first in the labeling stream

        show_instruction : typing.Optional[bool]
            Show instructions to the annotator before they start

        show_overlap_first : typing.Optional[bool]

        show_skip_button : typing.Optional[bool]
            Show a skip button in interface and allow annotators to skip the task

        skip_queue : typing.Optional[SkipQueueEnum]

        task_data_login : typing.Optional[str]
            Task data credentials: login

        task_data_password : typing.Optional[str]
            Task data credentials: password

        title : typing.Optional[str]
            Project Title

        workspace : typing.Optional[int]
            In Workspace

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectCreate


        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.create()


        asyncio.run(main())
        """
        # Forward every field verbatim to the raw client (OMIT fields are
        # skipped from the request body), then unwrap the parsed payload.
        _response = await self._raw_client.create(
            annotator_evaluation_enabled=annotator_evaluation_enabled,
            color=color,
            control_weights=control_weights,
            created_by=created_by,
            description=description,
            enable_empty_annotation=enable_empty_annotation,
            evaluate_predictions_automatically=evaluate_predictions_automatically,
            expert_instruction=expert_instruction,
            is_draft=is_draft,
            is_published=is_published,
            label_config=label_config,
            maximum_annotations=maximum_annotations,
            min_annotations_to_start_training=min_annotations_to_start_training,
            model_version=model_version,
            organization=organization,
            overlap_cohort_percentage=overlap_cohort_percentage,
            pinned_at=pinned_at,
            reveal_preannotations_interactively=reveal_preannotations_interactively,
            sampling=sampling,
            show_annotation_history=show_annotation_history,
            show_collab_predictions=show_collab_predictions,
            show_ground_truth_first=show_ground_truth_first,
            show_instruction=show_instruction,
            show_overlap_first=show_overlap_first,
            show_skip_button=show_skip_button,
            skip_queue=skip_queue,
            task_data_login=task_data_login,
            task_data_password=task_data_password,
            title=title,
            workspace=workspace,
            request_options=request_options,
        )
        return _response.data

    async def list_counts(
        self,
        *,
        filter: typing.Optional[str] = None,
        ids: typing.Optional[str] = None,
        include: typing.Optional[str] = None,
        ordering: typing.Optional[str] = None,
        page: typing.Optional[int] = None,
        page_size: typing.Optional[int] = None,
        search: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        title: typing.Optional[str] = None,
        workspaces: typing.Optional[float] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> PaginatedLseProjectCountsList:
        """
        Return a paginated list of projects with their counts, e.g. task_number,
        which is the total task number in a project.

        Parameters
        ----------
        filter : typing.Optional[str]
            Filter projects by pinned status. Use 'pinned_only' to return only pinned projects, 'exclude_pinned' to return only non-pinned projects, or 'all' to return all projects.

        ids : typing.Optional[str]
            Filter id by in list

        include : typing.Optional[str]
            Comma-separated list of count fields to include in the response to optimize performance. Available fields: task_number, finished_task_number, total_predictions_number, total_annotations_number, num_tasks_with_annotations, useful_annotation_number, ground_truth_number, skipped_annotations_number. If not specified, all count fields are included.

        ordering : typing.Optional[str]
            Which field to use when ordering the results.

        page : typing.Optional[int]
            A page number within the paginated result set.

        page_size : typing.Optional[int]
            Number of results to return per page.

        search : typing.Optional[str]
            Search term for project title and description

        state : typing.Optional[str]
            Filter current_state by exact match

        title : typing.Optional[str]
            Filter title by contains (case-insensitive)

        workspaces : typing.Optional[float]
            Filter workspaces by exact match

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        PaginatedLseProjectCountsList


        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.list_counts()


        asyncio.run(main())
        """
        # Forward all filters to the raw client, then unwrap the parsed payload.
        raw_response = await self._raw_client.list_counts(
            filter=filter,
            ids=ids,
            include=include,
            ordering=ordering,
            page=page,
            page_size=page_size,
            search=search,
            state=state,
            title=title,
            workspaces=workspaces,
            request_options=request_options,
        )
        return raw_response.data

    async def get(
        self,
        id: int,
        *,
        members_limit: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectResponse:
        """
        Retrieve information about a project by project ID.

        Parameters
        ----------
        id : int
            ID of the project to fetch.

        members_limit : typing.Optional[int]
            Maximum number of members to return

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectResponse
            Project information. Not all fields are available for all roles.

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.get(
                id=1,
            )


        asyncio.run(main())
        """
        # Delegate to the raw client, then unwrap the parsed payload.
        raw_response = await self._raw_client.get(
            id,
            members_limit=members_limit,
            request_options=request_options,
        )
        return raw_response.data

    async def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Delete a project by specified project ID.

        Parameters
        ----------
        id : int
            ID of the project to delete.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.delete(
                id=1,
            )


        asyncio.run(main())
        """
        # Delegate to the raw client; the parsed payload for this endpoint is None.
        raw_response = await self._raw_client.delete(id, request_options=request_options)
        return raw_response.data

    async def update(
        self,
        id: int,
        *,
        members_limit: typing.Optional[int] = None,
        agreement_methodology: typing.Optional[AgreementMethodologyEnum] = OMIT,
        agreement_threshold: typing.Optional[str] = OMIT,
        annotation_limit_count: typing.Optional[int] = OMIT,
        annotation_limit_percent: typing.Optional[str] = OMIT,
        annotator_evaluation_continuous_tasks: typing.Optional[int] = OMIT,
        annotator_evaluation_enabled: typing.Optional[bool] = OMIT,
        annotator_evaluation_minimum_score: typing.Optional[str] = OMIT,
        annotator_evaluation_minimum_tasks: typing.Optional[int] = OMIT,
        annotator_evaluation_onboarding_tasks: typing.Optional[int] = OMIT,
        assignment_settings: typing.Optional[AssignmentSettingsRequest] = OMIT,
        color: typing.Optional[str] = OMIT,
        comment_classification_config: typing.Optional[str] = OMIT,
        control_weights: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        created_by: typing.Optional[UserSimpleRequest] = OMIT,
        custom_script: typing.Optional[str] = OMIT,
        custom_task_lock_ttl: typing.Optional[int] = OMIT,
        description: typing.Optional[str] = OMIT,
        enable_empty_annotation: typing.Optional[bool] = OMIT,
        evaluate_predictions_automatically: typing.Optional[bool] = OMIT,
        expert_instruction: typing.Optional[str] = OMIT,
        is_draft: typing.Optional[bool] = OMIT,
        is_published: typing.Optional[bool] = OMIT,
        label_config: typing.Optional[str] = OMIT,
        max_additional_annotators_assignable: typing.Optional[int] = OMIT,
        maximum_annotations: typing.Optional[int] = OMIT,
        min_annotations_to_start_training: typing.Optional[int] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        organization: typing.Optional[int] = OMIT,
        overlap_cohort_percentage: typing.Optional[int] = OMIT,
        pause_on_failed_annotator_evaluation: typing.Optional[bool] = OMIT,
        pinned_at: typing.Optional[dt.datetime] = OMIT,
        require_comment_on_skip: typing.Optional[bool] = OMIT,
        reveal_preannotations_interactively: typing.Optional[bool] = OMIT,
        review_settings: typing.Optional[ReviewSettingsRequest] = OMIT,
        sampling: typing.Optional[SamplingDe5Enum] = OMIT,
        show_annotation_history: typing.Optional[bool] = OMIT,
        show_collab_predictions: typing.Optional[bool] = OMIT,
        show_ground_truth_first: typing.Optional[bool] = OMIT,
        show_instruction: typing.Optional[bool] = OMIT,
        show_overlap_first: typing.Optional[bool] = OMIT,
        show_skip_button: typing.Optional[bool] = OMIT,
        show_unused_data_columns_to_annotators: typing.Optional[bool] = OMIT,
        skip_queue: typing.Optional[SkipQueueEnum] = OMIT,
        strict_task_overlap: typing.Optional[bool] = OMIT,
        task_data_login: typing.Optional[str] = OMIT,
        task_data_password: typing.Optional[str] = OMIT,
        title: typing.Optional[str] = OMIT,
        workspace: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LseProjectUpdate:
        """
        Update the details of a specific project.

        Parameters
        ----------
        id : int

        members_limit : typing.Optional[int]
            Maximum number of members to return

        agreement_methodology : typing.Optional[AgreementMethodologyEnum]
            Methodology (Consensus / Pairwise Averaging)

            * `consensus` - Consensus
            * `pairwise` - Pairwise Averaging

        agreement_threshold : typing.Optional[str]
            Agreement threshold

        annotation_limit_count : typing.Optional[int]
            Limit by number of tasks

        annotation_limit_percent : typing.Optional[str]
            Limit by percentage of tasks

        annotator_evaluation_continuous_tasks : typing.Optional[int]
            Continuous Evaluation: Required tasks

        annotator_evaluation_enabled : typing.Optional[bool]
            Evaluate all annotators against ground truth

        annotator_evaluation_minimum_score : typing.Optional[str]
            Score required to pass evaluation

        annotator_evaluation_minimum_tasks : typing.Optional[int]
            Number of tasks for evaluation

        annotator_evaluation_onboarding_tasks : typing.Optional[int]
            Onboarding Evaluation: Required tasks

        assignment_settings : typing.Optional[AssignmentSettingsRequest]

        color : typing.Optional[str]
            Color

        comment_classification_config : typing.Optional[str]

        control_weights : typing.Optional[typing.Dict[str, typing.Any]]
            Dict of weights for each control tag in metric calculation.

        created_by : typing.Optional[UserSimpleRequest]
            Project owner

        custom_script : typing.Optional[str]
            Plugins

        custom_task_lock_ttl : typing.Optional[int]
            Task reservation time. TTL in seconds (UI displays and edits this value in minutes).

        description : typing.Optional[str]
            Description

        enable_empty_annotation : typing.Optional[bool]
            Allow empty annotations

        evaluate_predictions_automatically : typing.Optional[bool]
            Retrieve and display predictions when loading a task

        expert_instruction : typing.Optional[str]
            Instructions

        is_draft : typing.Optional[bool]
            Whether or not the project is in the middle of being created

        is_published : typing.Optional[bool]
            Whether or not the project is published to annotators

        label_config : typing.Optional[str]
            Labeling Configuration

        max_additional_annotators_assignable : typing.Optional[int]
            Maximum additional annotators

        maximum_annotations : typing.Optional[int]
            Annotations per task

        min_annotations_to_start_training : typing.Optional[int]
            Minimum number of completed tasks after which model training is started

        model_version : typing.Optional[str]
            Machine learning model version

        organization : typing.Optional[int]

        overlap_cohort_percentage : typing.Optional[int]
            Annotations per task coverage

        pause_on_failed_annotator_evaluation : typing.Optional[bool]
            Pause annotator on failed evaluation

        pinned_at : typing.Optional[dt.datetime]
            Pinned date and time

        require_comment_on_skip : typing.Optional[bool]
            Require comment to skip

        reveal_preannotations_interactively : typing.Optional[bool]
            Reveal pre-annotations interactively

        review_settings : typing.Optional[ReviewSettingsRequest]

        sampling : typing.Optional[SamplingDe5Enum]

        show_annotation_history : typing.Optional[bool]
            Show Data Manager to Annotators

        show_collab_predictions : typing.Optional[bool]
            Use predictions to pre-label Tasks

        show_ground_truth_first : typing.Optional[bool]
            Onboarding mode (true): show ground truth tasks first in the labeling stream

        show_instruction : typing.Optional[bool]
            Show instructions before labeling

        show_overlap_first : typing.Optional[bool]
            Show tasks with overlap first

        show_skip_button : typing.Optional[bool]
            Allow skipping tasks

        show_unused_data_columns_to_annotators : typing.Optional[bool]
            Show only columns used in labeling configuration to Annotators. API uses inverse field semantics here: set false to show only used columns, set true to show all task.data columns.

        skip_queue : typing.Optional[SkipQueueEnum]

        strict_task_overlap : typing.Optional[bool]
            Enforce strict overlap limit

        task_data_login : typing.Optional[str]
            Login

        task_data_password : typing.Optional[str]
            Password

        title : typing.Optional[str]
            Project Name

        workspace : typing.Optional[int]
            Workspace

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LseProjectUpdate


        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.update(
                id=1,
            )


        asyncio.run(main())
        """
        _response = await self._raw_client.update(
            id,
            members_limit=members_limit,
            agreement_methodology=agreement_methodology,
            agreement_threshold=agreement_threshold,
            annotation_limit_count=annotation_limit_count,
            annotation_limit_percent=annotation_limit_percent,
            annotator_evaluation_continuous_tasks=annotator_evaluation_continuous_tasks,
            annotator_evaluation_enabled=annotator_evaluation_enabled,
            annotator_evaluation_minimum_score=annotator_evaluation_minimum_score,
            annotator_evaluation_minimum_tasks=annotator_evaluation_minimum_tasks,
            annotator_evaluation_onboarding_tasks=annotator_evaluation_onboarding_tasks,
            assignment_settings=assignment_settings,
            color=color,
            comment_classification_config=comment_classification_config,
            control_weights=control_weights,
            created_by=created_by,
            custom_script=custom_script,
            custom_task_lock_ttl=custom_task_lock_ttl,
            description=description,
            enable_empty_annotation=enable_empty_annotation,
            evaluate_predictions_automatically=evaluate_predictions_automatically,
            expert_instruction=expert_instruction,
            is_draft=is_draft,
            is_published=is_published,
            label_config=label_config,
            max_additional_annotators_assignable=max_additional_annotators_assignable,
            maximum_annotations=maximum_annotations,
            min_annotations_to_start_training=min_annotations_to_start_training,
            model_version=model_version,
            organization=organization,
            overlap_cohort_percentage=overlap_cohort_percentage,
            pause_on_failed_annotator_evaluation=pause_on_failed_annotator_evaluation,
            pinned_at=pinned_at,
            require_comment_on_skip=require_comment_on_skip,
            reveal_preannotations_interactively=reveal_preannotations_interactively,
            review_settings=review_settings,
            sampling=sampling,
            show_annotation_history=show_annotation_history,
            show_collab_predictions=show_collab_predictions,
            show_ground_truth_first=show_ground_truth_first,
            show_instruction=show_instruction,
            show_overlap_first=show_overlap_first,
            show_skip_button=show_skip_button,
            show_unused_data_columns_to_annotators=show_unused_data_columns_to_annotators,
            skip_queue=skip_queue,
            strict_task_overlap=strict_task_overlap,
            task_data_login=task_data_login,
            task_data_password=task_data_password,
            title=title,
            workspace=workspace,
            request_options=request_options,
        )
        return _response.data

    async def list_unique_annotators(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.List[UserSimple]:
        """Return the distinct users who have submitted annotations in the given project.

        Parameters
        ----------
        id : int
            ID of the project to inspect.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[UserSimple]
            List of annotator users.

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.list_unique_annotators(
                id=1,
            )


        asyncio.run(main())
        """
        raw_response = await self._raw_client.list_unique_annotators(id, request_options=request_options)
        return raw_response.data

    async def duplicate(
        self,
        id: int,
        *,
        mode: ModeEnum,
        title: str,
        workspace: int,
        description: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> DuplicateProjectsResponse:
        """Make a copy of a project.

        <Card href="https://humansignal.com/goenterprise">
                <img style="pointer-events: none; margin-left: 0px; margin-right: 0px;" src="https://docs.humansignal.com/images/badge.svg" alt="Label Studio Enterprise badge"/>
                <p style="margin-top: 10px; font-size: 14px;">
                    This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
                </p>
            </Card>

        Parameters
        ----------
        id : int
            ID of the project to copy.

        mode : ModeEnum
            What to Duplicate (Project configuration only / Project configuration and tasks)

            * `settings` - Only settings
            * `settings,data` - Settings and tasks

        title : str
            Project Name

        workspace : int
            Destination Workspace

        description : typing.Optional[str]
            Project Description

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        DuplicateProjectsResponse
            Project duplicated

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.duplicate(
                id=1,
                mode="settings",
                title="title",
                workspace=1,
            )


        asyncio.run(main())
        """
        raw_response = await self._raw_client.duplicate(
            id,
            mode=mode,
            title=title,
            workspace=workspace,
            description=description,
            request_options=request_options,
        )
        return raw_response.data

    async def import_tasks(
        self,
        id: int,
        *,
        request: typing.Sequence[ImportApiRequest],
        commit_to_project: typing.Optional[bool] = None,
        preannotated_from_fields: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
        return_task_ids: typing.Optional[bool] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> ImportTasksProjectsResponse:
        """Import data as labeling tasks in bulk.

        One POST request is limited to 250K tasks and 200 MB.

        **Note:** Imported data is verified against the project *label_config*
        and must include all variables that were used in the *label_config*.
        For example, if the label configuration has a *$text* variable, then
        each item in a data object must include a "text" field.

        ## Async Import Behavior

        **For non-Community editions, this endpoint processes imports asynchronously.**

        - The POST request **can fail** for invalid parameters, malformed request body,
          or other request-level validation errors.
        - However, **data validation errors** that occur during import processing are
          handled asynchronously and will not cause the POST request to fail.
        - Upon successful request validation, a response is returned:
          `{"import": <import_id>}`
        - Use the returned `import_id` to poll the GET
          `/api/projects/{project_id}/imports/{import_id}` endpoint to check the import
          status and see any data validation errors.
        - Data-level errors and import failures will only be visible in the GET request
          response.

        For Community edition, imports are processed synchronously and return task
        counts immediately.

        ## POST requests

        The underlying endpoint accepts three request shapes:

        1. **POST with data** — JSON tasks sent directly as the POST body (only JSON is
           supported for POSTing directly):

           ```bash
           curl -H 'Content-Type: application/json' -H 'Authorization: Token abc123' \\
           -X POST 'http://localhost:8000/api/projects/1/import' --data '[{"text": "Some text 1"}, {"text": "Some text 2"}]'
           ```

        2. **POST with files** — one or more attached files, in JSON, CSV, TSV, or TXT
           format (TXT behaves like a single-column, headerless CSV and is supported for
           projects with one source only):

           ```bash
           curl -H 'Authorization: Token abc123' \\
           -X POST 'http://localhost:8000/api/projects/1/import' -F 'file=@path/to/my_file.csv'
           ```

        3. **POST with URL** — URLs pointing to files in the same formats as option 2:

           ```bash
           curl -H 'Content-Type: application/json' -H 'Authorization: Token abc123' \\
           -X POST 'http://localhost:8000/api/projects/1/import' \\
           --data '[{"url": "http://example.com/test1.csv"}, {"url": "http://example.com/test2.csv"}]'
           ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        request : typing.Sequence[ImportApiRequest]

        commit_to_project : typing.Optional[bool]
            Set to "true" to immediately commit tasks to the project.

        preannotated_from_fields : typing.Optional[typing.Union[str, typing.Sequence[str]]]
            List of fields to preannotate from the task data. For example, if you
            provide a list of `{"text": "text", "prediction": "label"}` items in the
            request, the system will create a task with the `text` field and a
            prediction with the `label` field when
            `preannotated_from_fields=["prediction"]`.

        return_task_ids : typing.Optional[bool]
            Set to "true" to return task IDs in the response.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ImportTasksProjectsResponse
            Tasks successfully imported or import queued. **For non-Community
            editions**, the response will be `{"import": <import_id>}` which you can
            use to poll the import status. **For Community edition**, the response
            contains task counts and is processed synchronously.

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.import_tasks(
                id=1,
                request=[],
            )


        asyncio.run(main())
        """
        raw_response = await self._raw_client.import_tasks(
            id,
            request=request,
            commit_to_project=commit_to_project,
            preannotated_from_fields=preannotated_from_fields,
            return_task_ids=return_task_ids,
            request_options=request_options,
        )
        return raw_response.data

    async def import_predictions(
        self,
        id: int,
        *,
        request: typing.Sequence[PredictionRequest],
        request_options: typing.Optional[RequestOptions] = None,
    ) -> ImportPredictionsProjectsResponse:
        """Import model predictions for tasks in the specified project.

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        request : typing.Sequence[PredictionRequest]
            Predictions to import.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ImportPredictionsProjectsResponse
            Predictions successfully imported

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio, PredictionRequest

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.import_predictions(
                id=1,
                request=[
                    PredictionRequest(
                        result=[{"key": "value"}],
                        task=1,
                    )
                ],
            )


        asyncio.run(main())
        """
        raw_response = await self._raw_client.import_predictions(
            id, request=request, request_options=request_options
        )
        return raw_response.data

    async def validate_label_config(
        self, id: int, *, label_config: str, request_options: typing.Optional[RequestOptions] = None
    ) -> ProjectLabelConfig:
        """Determine whether the label configuration for a specific project is valid.

        Parameters
        ----------
        id : int
            A unique integer value identifying this project.

        label_config : str
            Label config in XML format. See more about it in documentation

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        ProjectLabelConfig
            Validated label configuration.

        Examples
        --------
        import asyncio

        from label_studio_sdk import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.projects.validate_label_config(
                id=1,
                label_config="label_config",
            )


        asyncio.run(main())
        """
        raw_response = await self._raw_client.validate_label_config(
            id,
            label_config=label_config,
            request_options=request_options,
        )
        return raw_response.data

    @property
    def roles(self):
        """Lazily construct and cache the roles sub-client."""
        if self._roles is not None:
            return self._roles
        from .roles.client import AsyncRolesClient  # noqa: E402

        self._roles = AsyncRolesClient(client_wrapper=self._client_wrapper)
        return self._roles

    @property
    def exports(self):
        """Lazily construct and cache the exports sub-client."""
        if self._exports is not None:
            return self._exports
        from .exports.client import AsyncExportsClient  # noqa: E402

        self._exports = AsyncExportsClient(client_wrapper=self._client_wrapper)
        return self._exports

    @property
    def members(self):
        """Lazily construct and cache the members sub-client."""
        if self._members is not None:
            return self._members
        from .members.client import AsyncMembersClient  # noqa: E402

        self._members = AsyncMembersClient(client_wrapper=self._client_wrapper)
        return self._members

    @property
    def metrics(self):
        """Lazily construct and cache the metrics sub-client."""
        if self._metrics is not None:
            return self._metrics
        from .metrics.client import AsyncMetricsClient  # noqa: E402

        self._metrics = AsyncMetricsClient(client_wrapper=self._client_wrapper)
        return self._metrics

    @property
    def stats(self):
        """Lazily construct and cache the stats sub-client."""
        if self._stats is not None:
            return self._stats
        from .stats.client import AsyncStatsClient  # noqa: E402

        self._stats = AsyncStatsClient(client_wrapper=self._client_wrapper)
        return self._stats

    @property
    def assignments(self):
        """Lazily construct and cache the assignments sub-client."""
        if self._assignments is not None:
            return self._assignments
        from .assignments.client import AsyncAssignmentsClient  # noqa: E402

        self._assignments = AsyncAssignmentsClient(client_wrapper=self._client_wrapper)
        return self._assignments

    @property
    def pauses(self):
        """Lazily construct and cache the pauses sub-client."""
        if self._pauses is not None:
            return self._pauses
        from .pauses.client import AsyncPausesClient  # noqa: E402

        self._pauses = AsyncPausesClient(client_wrapper=self._client_wrapper)
        return self._pauses
