"""Generated message classes for dataproc version v1.

Manages Hadoop-based clusters and jobs on Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.

from __future__ import absolute_import

from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types


package = 'dataproc'


class AcceleratorConfig(_messages.Message):
  r"""Specifies the type and number of accelerator cards attached to the
  instances of an instance. See GPUs on Compute Engine
  (https://cloud.google.com/compute/docs/gpus/).

  Fields:
    acceleratorCount: The number of the accelerator cards of this type exposed
      to this instance.
    acceleratorTypeUri: Full URL, partial URI, or short name of the
      accelerator type resource to expose to this instance. See Compute Engine
      AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acc
      eleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/
      [project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4
      projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4
      nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto
      Zone Placement
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/auto-zone#using_auto_zone_placement) feature, you must use the
      short name of the accelerator type resource, for example, nvidia-
      tesla-t4.
  """

  acceleratorCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  acceleratorTypeUri = _messages.StringField(2)


class AccessSessionSparkApplicationEnvironmentInfoResponse(_messages.Message):
  r"""Environment details of a Saprk Application.

  Fields:
    applicationEnvironmentInfo: Details about the Environment that the
      application is running in.
  """

  applicationEnvironmentInfo = _messages.MessageField('ApplicationEnvironmentInfo', 1)


class AccessSessionSparkApplicationJobResponse(_messages.Message):
  r"""Details of a particular job associated with Spark Application

  Fields:
    jobData: Output only. Data corresponding to a spark job.
  """

  jobData = _messages.MessageField('JobData', 1)


class AccessSessionSparkApplicationNativeBuildInfoResponse(_messages.Message):
  r"""Details of a native build info for a Spark Application

  Fields:
    executionData: Native SQL Execution Data
  """

  executionData = _messages.MessageField('NativeBuildInfoUiData', 1)


class AccessSessionSparkApplicationNativeSqlQueryResponse(_messages.Message):
  r"""Details of a native query for a Spark Application

  Fields:
    executionData: Native SQL Execution Data
  """

  executionData = _messages.MessageField('NativeSqlExecutionUiData', 1)


class AccessSessionSparkApplicationResponse(_messages.Message):
  r"""A summary of Spark Application

  Fields:
    application: Output only. High level information corresponding to an
      application.
  """

  application = _messages.MessageField('ApplicationInfo', 1)


class AccessSessionSparkApplicationSqlQueryResponse(_messages.Message):
  r"""Details of a query for a Spark Application

  Fields:
    executionData: SQL Execution Data
  """

  executionData = _messages.MessageField('SqlExecutionUiData', 1)


class AccessSessionSparkApplicationSqlSparkPlanGraphResponse(_messages.Message):
  r"""SparkPlanGraph for a Spark Application execution limited to maximum
  10000 clusters.

  Fields:
    sparkPlanGraph: SparkPlanGraph for a Spark Application execution.
  """

  sparkPlanGraph = _messages.MessageField('SparkPlanGraph', 1)


class AccessSessionSparkApplicationStageAttemptResponse(_messages.Message):
  r"""Stage Attempt for a Stage of a Spark Application

  Fields:
    stageData: Output only. Data corresponding to a stage.
  """

  stageData = _messages.MessageField('StageData', 1)


class AccessSessionSparkApplicationStageRddOperationGraphResponse(_messages.Message):
  r"""RDD operation graph for a Spark Application Stage limited to maximum
  10000 clusters.

  Fields:
    rddOperationGraph: RDD operation graph for a Spark Application Stage.
  """

  rddOperationGraph = _messages.MessageField('RddOperationGraph', 1)


class AccessSparkApplicationEnvironmentInfoResponse(_messages.Message):
  r"""Environment details of a Saprk Application.

  Fields:
    applicationEnvironmentInfo: Details about the Environment that the
      application is running in.
  """

  applicationEnvironmentInfo = _messages.MessageField('ApplicationEnvironmentInfo', 1)


class AccessSparkApplicationJobResponse(_messages.Message):
  r"""Details of a particular job associated with Spark Application

  Fields:
    jobData: Output only. Data corresponding to a spark job.
  """

  jobData = _messages.MessageField('JobData', 1)


class AccessSparkApplicationNativeBuildInfoResponse(_messages.Message):
  r"""Details of Native Build Info for a Spark Application

  Fields:
    buildInfo: Native Build Info Data
  """

  buildInfo = _messages.MessageField('NativeBuildInfoUiData', 1)


class AccessSparkApplicationNativeSqlQueryResponse(_messages.Message):
  r"""Details of a query for a Spark Application

  Fields:
    executionData: Native SQL Execution Data
  """

  executionData = _messages.MessageField('NativeSqlExecutionUiData', 1)


class AccessSparkApplicationResponse(_messages.Message):
  r"""A summary of Spark Application

  Fields:
    application: Output only. High level information corresponding to an
      application.
  """

  application = _messages.MessageField('ApplicationInfo', 1)


class AccessSparkApplicationSqlQueryResponse(_messages.Message):
  r"""Details of a query for a Spark Application

  Fields:
    executionData: SQL Execution Data
  """

  executionData = _messages.MessageField('SqlExecutionUiData', 1)


class AccessSparkApplicationSqlSparkPlanGraphResponse(_messages.Message):
  r"""SparkPlanGraph for a Spark Application execution limited to maximum
  10000 clusters.

  Fields:
    sparkPlanGraph: SparkPlanGraph for a Spark Application execution.
  """

  sparkPlanGraph = _messages.MessageField('SparkPlanGraph', 1)


class AccessSparkApplicationStageAttemptResponse(_messages.Message):
  r"""Stage Attempt for a Stage of a Spark Application

  Fields:
    stageData: Output only. Data corresponding to a stage.
  """

  stageData = _messages.MessageField('StageData', 1)


class AccessSparkApplicationStageRddOperationGraphResponse(_messages.Message):
  r"""RDD operation graph for a Spark Application Stage limited to maximum
  10000 clusters.

  Fields:
    rddOperationGraph: RDD operation graph for a Spark Application Stage.
  """

  rddOperationGraph = _messages.MessageField('RddOperationGraph', 1)


class AccumulableInfo(_messages.Message):
  r"""A AccumulableInfo object.

  Fields:
    accumullableInfoId: A string attribute.
    name: A string attribute.
    update: A string attribute.
    value: A string attribute.
  """

  accumullableInfoId = _messages.IntegerField(1)
  name = _messages.StringField(2)
  update = _messages.StringField(3)
  value = _messages.StringField(4)


class AnalyzeBatchRequest(_messages.Message):
  r"""A request to analyze a batch workload.

  Fields:
    requestId: Optional. A unique ID used to identify the request. If the
      service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/ref
      erence/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatc
      hRequest)s with the same request_id, the second request is ignored and
      the Operation that corresponds to the first request created and stored
      in the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    requestorId: Optional. The requestor ID is used to identify if the request
      comes from a GCA investigation or the old Ask Gemini Experience.
  """

  requestId = _messages.StringField(1)
  requestorId = _messages.StringField(2)


class AnalyzeOperationMetadata(_messages.Message):
  r"""Metadata describing the Analyze operation.

  Enums:
    AnalyzedWorkloadTypeValueValuesEnum: Output only. Type of the workload
      being analyzed.

  Messages:
    LabelsValue: Output only. Labels associated with the operation.

  Fields:
    analyzedWorkloadName: Output only. name of the workload being analyzed.
    analyzedWorkloadType: Output only. Type of the workload being analyzed.
    analyzedWorkloadUuid: Output only. unique identifier of the workload
      typically generated by control plane. E.g. batch uuid.
    createTime: Output only. The time when the operation was created.
    description: Output only. Short description of the operation.
    doneTime: Output only. The time when the operation finished.
    labels: Output only. Labels associated with the operation.
    warnings: Output only. Warnings encountered during operation execution.
  """

  class AnalyzedWorkloadTypeValueValuesEnum(_messages.Enum):
    r"""Output only. Type of the workload being analyzed.

    Values:
      WORKLOAD_TYPE_UNSPECIFIED: Undefined option
      BATCH: Serverless batch job
    """
    WORKLOAD_TYPE_UNSPECIFIED = 0
    BATCH = 1

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Output only. Labels associated with the operation.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  analyzedWorkloadName = _messages.StringField(1)
  analyzedWorkloadType = _messages.EnumField('AnalyzedWorkloadTypeValueValuesEnum', 2)
  analyzedWorkloadUuid = _messages.StringField(3)
  createTime = _messages.StringField(4)
  description = _messages.StringField(5)
  doneTime = _messages.StringField(6)
  labels = _messages.MessageField('LabelsValue', 7)
  warnings = _messages.StringField(8, repeated=True)


class AppSummary(_messages.Message):
  r"""A AppSummary object.

  Fields:
    numCompletedJobs: A integer attribute.
    numCompletedStages: A integer attribute.
  """

  numCompletedJobs = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  numCompletedStages = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class ApplicationAttemptInfo(_messages.Message):
  r"""Specific attempt of an application.

  Fields:
    appSparkVersion: A string attribute.
    attemptId: A string attribute.
    completed: A boolean attribute.
    durationMillis: A string attribute.
    endTime: A string attribute.
    lastUpdated: A string attribute.
    sparkUser: A string attribute.
    startTime: A string attribute.
  """

  appSparkVersion = _messages.StringField(1)
  attemptId = _messages.StringField(2)
  completed = _messages.BooleanField(3)
  durationMillis = _messages.IntegerField(4)
  endTime = _messages.StringField(5)
  lastUpdated = _messages.StringField(6)
  sparkUser = _messages.StringField(7)
  startTime = _messages.StringField(8)


class ApplicationEnvironmentInfo(_messages.Message):
  r"""Details about the Environment that the application is running in.

  Messages:
    ClasspathEntriesValue: A ClasspathEntriesValue object.
    HadoopPropertiesValue: A HadoopPropertiesValue object.
    MetricsPropertiesValue: A MetricsPropertiesValue object.
    SparkPropertiesValue: A SparkPropertiesValue object.
    SystemPropertiesValue: A SystemPropertiesValue object.

  Fields:
    classpathEntries: A ClasspathEntriesValue attribute.
    hadoopProperties: A HadoopPropertiesValue attribute.
    metricsProperties: A MetricsPropertiesValue attribute.
    resourceProfiles: A ResourceProfileInfo attribute.
    runtime: A SparkRuntimeInfo attribute.
    sparkProperties: A SparkPropertiesValue attribute.
    systemProperties: A SystemPropertiesValue attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ClasspathEntriesValue(_messages.Message):
    r"""A ClasspathEntriesValue object.

    Messages:
      AdditionalProperty: An additional property for a ClasspathEntriesValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        ClasspathEntriesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ClasspathEntriesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HadoopPropertiesValue(_messages.Message):
    r"""A HadoopPropertiesValue object.

    Messages:
      AdditionalProperty: An additional property for a HadoopPropertiesValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        HadoopPropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HadoopPropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetricsPropertiesValue(_messages.Message):
    r"""A MetricsPropertiesValue object.

    Messages:
      AdditionalProperty: An additional property for a MetricsPropertiesValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        MetricsPropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetricsPropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SparkPropertiesValue(_messages.Message):
    r"""A SparkPropertiesValue object.

    Messages:
      AdditionalProperty: An additional property for a SparkPropertiesValue
        object.

    Fields:
      additionalProperties: Additional properties of type SparkPropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SparkPropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SystemPropertiesValue(_messages.Message):
    r"""A SystemPropertiesValue object.

    Messages:
      AdditionalProperty: An additional property for a SystemPropertiesValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        SystemPropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SystemPropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  classpathEntries = _messages.MessageField('ClasspathEntriesValue', 1)
  hadoopProperties = _messages.MessageField('HadoopPropertiesValue', 2)
  metricsProperties = _messages.MessageField('MetricsPropertiesValue', 3)
  resourceProfiles = _messages.MessageField('ResourceProfileInfo', 4, repeated=True)
  runtime = _messages.MessageField('SparkRuntimeInfo', 5)
  sparkProperties = _messages.MessageField('SparkPropertiesValue', 6)
  systemProperties = _messages.MessageField('SystemPropertiesValue', 7)


class ApplicationInfo(_messages.Message):
  r"""High level information corresponding to an application.

  Enums:
    ApplicationContextIngestionStatusValueValuesEnum:
    QuantileDataStatusValueValuesEnum:

  Fields:
    applicationContextIngestionStatus: A
      ApplicationContextIngestionStatusValueValuesEnum attribute.
    applicationId: A string attribute.
    attempts: A ApplicationAttemptInfo attribute.
    coresGranted: A integer attribute.
    coresPerExecutor: A integer attribute.
    maxCores: A integer attribute.
    memoryPerExecutorMb: A integer attribute.
    name: A string attribute.
    quantileDataStatus: A QuantileDataStatusValueValuesEnum attribute.
  """

  class ApplicationContextIngestionStatusValueValuesEnum(_messages.Enum):
    r"""ApplicationContextIngestionStatusValueValuesEnum enum type.

    Values:
      APPLICATION_CONTEXT_INGESTION_STATUS_UNSPECIFIED: <no description>
      APPLICATION_CONTEXT_INGESTION_STATUS_COMPLETED: <no description>
    """
    APPLICATION_CONTEXT_INGESTION_STATUS_UNSPECIFIED = 0
    APPLICATION_CONTEXT_INGESTION_STATUS_COMPLETED = 1

  class QuantileDataStatusValueValuesEnum(_messages.Enum):
    r"""QuantileDataStatusValueValuesEnum enum type.

    Values:
      QUANTILE_DATA_STATUS_UNSPECIFIED: <no description>
      QUANTILE_DATA_STATUS_COMPLETED: <no description>
      QUANTILE_DATA_STATUS_FAILED: <no description>
    """
    QUANTILE_DATA_STATUS_UNSPECIFIED = 0
    QUANTILE_DATA_STATUS_COMPLETED = 1
    QUANTILE_DATA_STATUS_FAILED = 2

  applicationContextIngestionStatus = _messages.EnumField('ApplicationContextIngestionStatusValueValuesEnum', 1)
  applicationId = _messages.StringField(2)
  attempts = _messages.MessageField('ApplicationAttemptInfo', 3, repeated=True)
  coresGranted = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  coresPerExecutor = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  maxCores = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  memoryPerExecutorMb = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  name = _messages.StringField(8)
  quantileDataStatus = _messages.EnumField('QuantileDataStatusValueValuesEnum', 9)


class AttachedDiskConfig(_messages.Message):
  r"""Specifies the config of attached disk options for single VM instance.

  Enums:
    DiskTypeValueValuesEnum: Optional. Disk type.

  Fields:
    diskSizeGb: Optional. Disk size in GB.
    diskType: Optional. Disk type.
    provisionedIops: Optional. Indicates how many IOPS to provision for the
      attached disk. This sets the number of I/O operations per second that
      the disk can handle. See
      https://cloud.google.com/compute/docs/disks/hyperdisks#hyperdisk-
      features
    provisionedThroughput: Optional. Indicates how much throughput to
      provision for the attached disk. This sets the number of throughput mb
      per second that the disk can handle. See
      https://cloud.google.com/compute/docs/disks/hyperdisks#hyperdisk-
      features
  """

  class DiskTypeValueValuesEnum(_messages.Enum):
    r"""Optional. Disk type.

    Values:
      DISK_TYPE_UNSPECIFIED: Required unspecified disk type.
      HYPERDISK_BALANCED: Hyperdisk Balanced disk type.
      HYPERDISK_EXTREME: Hyperdisk Extreme disk type.
      HYPERDISK_ML: Hyperdisk ML disk type.
      HYPERDISK_THROUGHPUT: Hyperdisk Throughput disk type.
    """
    DISK_TYPE_UNSPECIFIED = 0
    HYPERDISK_BALANCED = 1
    HYPERDISK_EXTREME = 2
    HYPERDISK_ML = 3
    HYPERDISK_THROUGHPUT = 4

  diskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  diskType = _messages.EnumField('DiskTypeValueValuesEnum', 2)
  provisionedIops = _messages.IntegerField(3)
  provisionedThroughput = _messages.IntegerField(4)


class AuthenticationConfig(_messages.Message):
  r"""Authentication configuration for a workload is used to set the default
  identity for the workload execution. The config specifies the type of
  identity (service account or user) that will be used by workloads to access
  resources on the project(s).

  Enums:
    UserWorkloadAuthenticationTypeValueValuesEnum: Optional. Authentication
      type for the user workload running in containers.

  Fields:
    userWorkloadAuthenticationType: Optional. Authentication type for the user
      workload running in containers.
  """

  class UserWorkloadAuthenticationTypeValueValuesEnum(_messages.Enum):
    r"""Optional. Authentication type for the user workload running in
    containers.

    Values:
      AUTHENTICATION_TYPE_UNSPECIFIED: If AuthenticationType is unspecified
        then END_USER_CREDENTIALS is used for 3.0 and newer runtimes, and
        SERVICE_ACCOUNT is used for older runtimes.
      SERVICE_ACCOUNT: Use service account credentials for authenticating to
        other services.
      END_USER_CREDENTIALS: Use OAuth credentials associated with the workload
        creator/user for authenticating to other services.
    """
    AUTHENTICATION_TYPE_UNSPECIFIED = 0
    SERVICE_ACCOUNT = 1
    END_USER_CREDENTIALS = 2

  userWorkloadAuthenticationType = _messages.EnumField('UserWorkloadAuthenticationTypeValueValuesEnum', 1)


class AutoscalingConfig(_messages.Message):
  r"""Autoscaling Policy config associated with the cluster.

  Fields:
    policyUri: Optional. The autoscaling policy used by the cluster.Only
      resource names including projectid and location (region) are valid.
      Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/lo
      cations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[proj
      ect_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note
      that the policy must be in the same project and Dataproc region.
  """

  policyUri = _messages.StringField(1)


class AutoscalingPolicy(_messages.Message):
  r"""Describes an autoscaling policy for Dataproc cluster autoscaler.

  Enums:
    ClusterTypeValueValuesEnum: Optional. The type of the clusters for which
      this autoscaling policy is to be configured.

  Messages:
    LabelsValue: Optional. The labels to associate with this autoscaling
      policy. Label keys must contain 1 to 63 characters, and must conform to
      RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be
      empty, but, if present, must contain 1 to 63 characters, and must
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than
      32 labels can be associated with an autoscaling policy.

  Fields:
    basicAlgorithm: A BasicAutoscalingAlgorithm attribute.
    clusterType: Optional. The type of the clusters for which this autoscaling
      policy is to be configured.
    id: Required. The policy id.The id must contain only letters (a-z, A-Z),
      numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end
      with underscore or hyphen. Must consist of between 3 and 50 characters.
    labels: Optional. The labels to associate with this autoscaling policy.
      Label keys must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with an autoscaling policy.
    name: Output only. The "resource name" of the autoscaling policy, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies, the resource name of the policy
      has the following format:
      projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}
      For projects.locations.autoscalingPolicies, the resource name of the
      policy has the following format: projects/{project_id}/locations/{locati
      on}/autoscalingPolicies/{policy_id}
    secondaryWorkerConfig: Optional. Describes how the autoscaler will operate
      for secondary workers.
    workerConfig: Required. Describes how the autoscaler will operate for
      primary workers.
  """

  class ClusterTypeValueValuesEnum(_messages.Enum):
    r"""Optional. The type of the clusters for which this autoscaling policy
    is to be configured.

    Values:
      CLUSTER_TYPE_UNSPECIFIED: Not set.
      STANDARD: Standard dataproc cluster with a minimum of two primary
        workers.
      ZERO_SCALE: Clusters that can use only secondary workers and be scaled
        down to zero secondary worker nodes.
    """
    CLUSTER_TYPE_UNSPECIFIED = 0
    STANDARD = 1
    ZERO_SCALE = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this autoscaling policy. Label
    keys must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with an autoscaling policy.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  basicAlgorithm = _messages.MessageField('BasicAutoscalingAlgorithm', 1)
  clusterType = _messages.EnumField('ClusterTypeValueValuesEnum', 2)
  id = _messages.StringField(3)
  labels = _messages.MessageField('LabelsValue', 4)
  name = _messages.StringField(5)
  secondaryWorkerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 6)
  workerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 7)


class AutotuningConfig(_messages.Message):
  r"""Autotuning configuration of the workload.

  Enums:
    ScenariosValueListEntryValuesEnum:

  Fields:
    cohort: Optional. Autotuning cohort identifier. Identifies families of the
      workloads that have the same shape, for example, daily ETL jobs.
    scenarios: Optional. Scenarios for which tunings are applied.
  """

  class ScenariosValueListEntryValuesEnum(_messages.Enum):
    r"""ScenariosValueListEntryValuesEnum enum type.

    Values:
      SCENARIO_UNSPECIFIED: Default value.
      SCALING: Scaling recommendations such as initialExecutors.
      BHJ: Adding hints for potential relation broadcasts.
      BROADCAST_HASH_JOIN: Adding hints for potential relation broadcasts.
      MEMORY: Memory management for workloads.
      NONE: No autotuning.
      AUTO: Automatic selection of scenarios.
    """
    SCENARIO_UNSPECIFIED = 0
    SCALING = 1
    BHJ = 2
    BROADCAST_HASH_JOIN = 3
    MEMORY = 4
    NONE = 5
    AUTO = 6

  cohort = _messages.StringField(1)
  scenarios = _messages.EnumField('ScenariosValueListEntryValuesEnum', 2, repeated=True)


class AuxiliaryNodeGroup(_messages.Message):
  r"""Node group identification and configuration information.

  Fields:
    nodeGroup: Required. Node group configuration.
    nodeGroupId: Optional. A node group ID. Generated if not specified.The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). Cannot begin or end with underscore or hyphen. Must
      consist of from 3 to 33 characters.
  """

  nodeGroup = _messages.MessageField('NodeGroup', 1)
  nodeGroupId = _messages.StringField(2)


class AuxiliaryServicesConfig(_messages.Message):
  r"""Auxiliary services configuration for a Cluster.

  Fields:
    metastoreConfig: Optional. The Hive Metastore configuration for this
      workload.
    sparkHistoryServerConfig: Optional. The Spark History Server configuration
      for the workload.
  """

  metastoreConfig = _messages.MessageField('MetastoreConfig', 1)
  sparkHistoryServerConfig = _messages.MessageField('SparkHistoryServerConfig', 2)


class BasicAutoscalingAlgorithm(_messages.Message):
  r"""Basic algorithm for autoscaling.

  Fields:
    cooldownPeriod: Optional. Duration between scaling events. A scaling
      period starts after the update operation from the previous event has
      completed.Bounds: 2m, 1d. Default: 2m.
    sparkStandaloneConfig: Optional. Spark Standalone autoscaling
      configuration
    yarnConfig: Optional. YARN autoscaling configuration.
  """

  cooldownPeriod = _messages.StringField(1)
  sparkStandaloneConfig = _messages.MessageField('SparkStandaloneAutoscalingConfig', 2)
  yarnConfig = _messages.MessageField('BasicYarnAutoscalingConfig', 3)


class BasicYarnAutoscalingConfig(_messages.Message):
  r"""Basic autoscaling configurations for YARN.

  Fields:
    gracefulDecommissionTimeout: Required. Timeout for YARN graceful
      decommissioning of Node Managers. Specifies the duration to wait for
      jobs to complete before forcefully removing workers (and potentially
      interrupting jobs). Only applicable to downscaling operations.Bounds:
      0s, 1d.
    scaleDownFactor: Required. Fraction of average YARN pending memory in the
      last cooldown period for which to remove workers. A scale-down factor of
      1 will result in scaling down so that there is no available memory
      remaining after the update (more aggressive scaling). A scale-down
      factor of 0 disables removing workers, which can be beneficial for
      autoscaling a single job. See How autoscaling works
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/autoscaling#how_autoscaling_works) for more information.Bounds:
      0.0, 1.0.
    scaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a
      fraction of total cluster size before scaling occurs. For example, in a
      20-worker cluster, a threshold of 0.1 means the autoscaler must
      recommend at least a 2 worker scale-down for the cluster to scale. A
      threshold of 0 means the autoscaler will scale down on any recommended
      change.Bounds: 0.0, 1.0. Default: 0.0.
    scaleUpFactor: Required. Fraction of average YARN pending memory in the
      last cooldown period for which to add workers. A scale-up factor of 1.0
      will result in scaling up so that there is no pending memory remaining
      after the update (more aggressive scaling). A scale-up factor closer to
      0 will result in a smaller magnitude of scaling up (less aggressive
      scaling). See How autoscaling works
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/autoscaling#how_autoscaling_works) for more information.Bounds:
      0.0, 1.0.
    scaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a
      fraction of total cluster size before scaling occurs. For example, in a
      20-worker cluster, a threshold of 0.1 means the autoscaler must
      recommend at least a 2-worker scale-up for the cluster to scale. A
      threshold of 0 means the autoscaler will scale up on any recommended
      change.Bounds: 0.0, 1.0. Default: 0.0.
  """

  gracefulDecommissionTimeout = _messages.StringField(1)
  scaleDownFactor = _messages.FloatField(2)
  scaleDownMinWorkerFraction = _messages.FloatField(3)
  scaleUpFactor = _messages.FloatField(4)
  scaleUpMinWorkerFraction = _messages.FloatField(5)


class Batch(_messages.Message):
  r"""A representation of a batch workload in the service.

  Enums:
    StateValueValuesEnum: Output only. The state of the batch.

  Messages:
    LabelsValue: Optional. The labels to associate with this batch. Label keys
      must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a batch.

  Fields:
    createTime: Output only. The time when the batch was created.
    creator: Output only. The email address of the user who created the batch.
    environmentConfig: Optional. Environment configuration for the batch
      execution.
    labels: Optional. The labels to associate with this batch. Label keys must
      contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a batch.
    name: Output only. The resource name of the batch.
    operation: Output only. The resource name of the operation associated with
      this batch.
    pysparkBatch: Optional. PySpark batch config.
    rayBatch: Optional. Ray batch config.
    runtimeConfig: Optional. Runtime configuration for the batch execution.
    runtimeInfo: Output only. Runtime information about batch execution.
    sparkBatch: Optional. Spark batch config.
    sparkRBatch: Optional. SparkR batch config.
    sparkSqlBatch: Optional. SparkSql batch config.
    state: Output only. The state of the batch.
    stateHistory: Output only. Historical state information for the batch.
    stateMessage: Output only. Batch state details, such as a failure
      description if the state is FAILED.
    stateTime: Output only. The time when the batch entered a current state.
    uuid: Output only. A batch UUID (Unique Universal Identifier). The service
      generates this value when it creates the batch.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The state of the batch.

    Values:
      STATE_UNSPECIFIED: The batch state is unknown.
      PENDING: The batch is created before running.
      RUNNING: The batch is running.
      CANCELLING: The batch is cancelling.
      CANCELLED: The batch cancellation was successful.
      SUCCEEDED: The batch completed successfully.
      FAILED: The batch is no longer running due to an error.
    """
    STATE_UNSPECIFIED = 0
    PENDING = 1
    RUNNING = 2
    CANCELLING = 3
    CANCELLED = 4
    SUCCEEDED = 5
    FAILED = 6

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this batch. Label keys must
    contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a batch.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  creator = _messages.StringField(2)
  environmentConfig = _messages.MessageField('EnvironmentConfig', 3)
  labels = _messages.MessageField('LabelsValue', 4)
  name = _messages.StringField(5)
  operation = _messages.StringField(6)
  pysparkBatch = _messages.MessageField('PySparkBatch', 7)
  rayBatch = _messages.MessageField('RayBatch', 8)
  runtimeConfig = _messages.MessageField('RuntimeConfig', 9)
  runtimeInfo = _messages.MessageField('RuntimeInfo', 10)
  sparkBatch = _messages.MessageField('SparkBatch', 11)
  sparkRBatch = _messages.MessageField('SparkRBatch', 12)
  sparkSqlBatch = _messages.MessageField('SparkSqlBatch', 13)
  state = _messages.EnumField('StateValueValuesEnum', 14)
  stateHistory = _messages.MessageField('StateHistory', 15, repeated=True)
  stateMessage = _messages.StringField(16)
  stateTime = _messages.StringField(17)
  uuid = _messages.StringField(18)


class BatchOperationMetadata(_messages.Message):
  r"""Metadata describing the Batch operation.

  Enums:
    OperationTypeValueValuesEnum: The operation type.

  Messages:
    LabelsValue: Labels associated with the operation.

  Fields:
    batch: Name of the batch for the operation.
    batchUuid: Batch UUID for the operation.
    createTime: The time when the operation was created.
    description: Short description of the operation.
    doneTime: The time when the operation finished.
    labels: Labels associated with the operation.
    operationType: The operation type.
    warnings: Warnings encountered during operation execution.
  """

  class OperationTypeValueValuesEnum(_messages.Enum):
    r"""The operation type.

    Values:
      BATCH_OPERATION_TYPE_UNSPECIFIED: Batch operation type is unknown.
      BATCH: Batch operation type.
    """
    BATCH_OPERATION_TYPE_UNSPECIFIED = 0
    BATCH = 1

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Labels associated with the operation.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  batch = _messages.StringField(1)
  batchUuid = _messages.StringField(2)
  createTime = _messages.StringField(3)
  description = _messages.StringField(4)
  doneTime = _messages.StringField(5)
  labels = _messages.MessageField('LabelsValue', 6)
  operationType = _messages.EnumField('OperationTypeValueValuesEnum', 7)
  warnings = _messages.StringField(8, repeated=True)


class BigqueryMetastoreConfig(_messages.Message):
  r"""BigQuery Metastore configuration for the workload.

  Fields:
    location: Optional. Location where the BigQuery Metastore tables will be
      created.
    projectId: Optional. Project ID of the BigQuery Metastore tables.
  """

  location = _messages.StringField(1)
  projectId = _messages.StringField(2)


class Binding(_messages.Message):
  r"""Associates members, or principals, with a role.

  Fields:
    condition: The condition that is associated with this binding.If the
      condition evaluates to true, then this binding applies to the current
      request.If the condition evaluates to false, then this binding does not
      apply to the current request. However, a different role binding might
      grant the same role to one or more of the principals in this binding.To
      learn which resources support conditions in their IAM policies, see the
      IAM documentation
      (https://cloud.google.com/iam/help/conditions/resource-policies).
    members: Specifies the principals requesting access for a Google Cloud
      resource. members can have the following values: allUsers: A special
      identifier that represents anyone who is on the internet; with or
      without a Google account. allAuthenticatedUsers: A special identifier
      that represents anyone who is authenticated with a Google account or a
      service account. Does not include identities that come from external
      identity providers (IdPs) through identity federation. user:{emailid}:
      An email address that represents a specific Google account. For example,
      alice@example.com . serviceAccount:{emailid}: An email address that
      represents a Google service account. For example, my-other-
      app@appspot.gserviceaccount.com.
      serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An
      identifier for a Kubernetes service account
      (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-
      service-accounts). For example, my-project.svc.id.goog[my-namespace/my-
      kubernetes-sa]. group:{emailid}: An email address that represents a
      Google group. For example, admins@example.com. domain:{domain}: The G
      Suite domain (primary) that represents all the users of that domain. For
      example, google.com or example.com. principal://iam.googleapis.com/locat
      ions/global/workforcePools/{pool_id}/subject/{subject_attribute_value}:
      A single identity in a workforce identity pool. principalSet://iam.googl
      eapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}:
      All workforce identities in a group. principalSet://iam.googleapis.com/l
      ocations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{att
      ribute_value}: All workforce identities with a specific attribute value.
      principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_
      id}/*: All identities in a workforce identity pool. principal://iam.goog
      leapis.com/projects/{project_number}/locations/global/workloadIdentityPo
      ols/{pool_id}/subject/{subject_attribute_value}: A single identity in a
      workload identity pool. principalSet://iam.googleapis.com/projects/{proj
      ect_number}/locations/global/workloadIdentityPools/{pool_id}/group/{grou
      p_id}: A workload identity pool group. principalSet://iam.googleapis.com
      /projects/{project_number}/locations/global/workloadIdentityPools/{pool_
      id}/attribute.{attribute_name}/{attribute_value}: All identities in a
      workload identity pool with a certain attribute. principalSet://iam.goog
      leapis.com/projects/{project_number}/locations/global/workloadIdentityPo
      ols/{pool_id}/*: All identities in a workload identity pool.
      deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique
      identifier) representing a user that has been recently deleted. For
      example, alice@example.com?uid=123456789012345678901. If the user is
      recovered, this value reverts to user:{emailid} and the recovered user
      retains the role in the binding.
      deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus
      unique identifier) representing a service account that has been recently
      deleted. For example, my-other-
      app@appspot.gserviceaccount.com?uid=123456789012345678901. If the
      service account is undeleted, this value reverts to
      serviceAccount:{emailid} and the undeleted service account retains the
      role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email
      address (plus unique identifier) representing a Google group that has
      been recently deleted. For example,
      admins@example.com?uid=123456789012345678901. If the group is recovered,
      this value reverts to group:{emailid} and the recovered group retains
      the role in the binding. deleted:principal://iam.googleapis.com/location
      s/global/workforcePools/{pool_id}/subject/{subject_attribute_value}:
      Deleted single identity in a workforce identity pool. For example, delet
      ed:principal://iam.googleapis.com/locations/global/workforcePools/my-
      pool-id/subject/my-subject-attribute-value.
    role: Role that is assigned to the list of members, or principals. For
      example, roles/viewer, roles/editor, or roles/owner.For an overview of
      the IAM roles and permissions, see the IAM documentation
      (https://cloud.google.com/iam/docs/roles-overview). For a list of the
      available pre-defined roles, see here
      (https://cloud.google.com/iam/docs/understanding-roles).
  """

  condition = _messages.MessageField('Expr', 1)
  members = _messages.StringField(2, repeated=True)
  role = _messages.StringField(3)


class BuildInfo(_messages.Message):
  r"""Native Build Info

  Fields:
    buildKey: Optional. Build key.
    buildValue: Optional. Build value.
  """

  buildKey = _messages.StringField(1)
  buildValue = _messages.StringField(2)


class CancelJobRequest(_messages.Message):
  r"""A request to cancel a job."""


class Cluster(_messages.Message):
  r"""Describes the identifying information, config, and status of a Dataproc
  cluster

  Messages:
    LabelsValue: Optional. The labels to associate with this cluster. Label
      keys must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a cluster.

  Fields:
    clusterName: Required. The cluster name, which must be unique within a
      project. The name must start with a lowercase letter, and can contain up
      to 51 lowercase letters, numbers, and hyphens. It cannot end with a
      hyphen. The name of a deleted cluster can be reused.
    clusterUuid: Output only. A cluster UUID (Unique Universal Identifier).
      Dataproc generates this value when it creates the cluster.
    config: Optional. The cluster config for a cluster of Compute Engine
      Instances. Note that Dataproc may set default values, and values may
      change when clusters are updated.Exactly one of ClusterConfig or
      VirtualClusterConfig must be specified.
    creator: Output only. The email address of the user who created the
      cluster.
    labels: Optional. The labels to associate with this cluster. Label keys
      must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a cluster.
    metrics: Output only. Contains cluster daemon metrics such as HDFS and
      YARN stats.Beta Feature: This report is available for testing purposes
      only. It may be changed before final release.
    projectId: Required. The Google Cloud Platform project ID that the cluster
      belongs to.
    status: Output only. Cluster status.
    statusHistory: Output only. The previous cluster status.
    virtualClusterConfig: Optional. The virtual cluster config is used when
      creating a Dataproc cluster that does not directly control the
      underlying compute resources, for example, when creating a Dataproc-on-
      GKE cluster
      (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-
      overview). Dataproc may set default values, and values may change when
      clusters are updated. Exactly one of config or virtual_cluster_config
      must be specified.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this cluster. Label keys must
    contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a cluster.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterName = _messages.StringField(1)
  clusterUuid = _messages.StringField(2)
  config = _messages.MessageField('ClusterConfig', 3)
  creator = _messages.StringField(4)
  labels = _messages.MessageField('LabelsValue', 5)
  metrics = _messages.MessageField('ClusterMetrics', 6)
  projectId = _messages.StringField(7)
  status = _messages.MessageField('ClusterStatus', 8)
  statusHistory = _messages.MessageField('ClusterStatus', 9, repeated=True)
  virtualClusterConfig = _messages.MessageField('VirtualClusterConfig', 10)


class ClusterAuthenticationConfig(_messages.Message):
  r"""WIP: User workload credential configuration for Personal Auth v2 in
  DPGCE Clusters. This is distinct from
  environmentConfig.executionConfig.authenticationConfig in the s8s config
  (see shared.proto). Unimplemented and not ready for use.

  Enums:
    UserWorkloadAuthenticationTypeValueValuesEnum: Optional. Authentication
      type for the user workload running in containers.

  Fields:
    userWorkloadAuthenticationType: Optional. Authentication type for the user
      workload running in containers.
  """

  class UserWorkloadAuthenticationTypeValueValuesEnum(_messages.Enum):
    r"""Optional. Authentication type for the user workload running in
    containers.

    Values:
      AUTHENTICATION_TYPE_UNSPECIFIED: Authentication type is not specified.
      SYSTEM_SERVICE_ACCOUNT: Use the system service account credentials for
        authenticating to other services.
      END_USER_CREDENTIALS: use the end user credential for authentication.
    """
    AUTHENTICATION_TYPE_UNSPECIFIED = 0
    SYSTEM_SERVICE_ACCOUNT = 1
    END_USER_CREDENTIALS = 2

  userWorkloadAuthenticationType = _messages.EnumField('UserWorkloadAuthenticationTypeValueValuesEnum', 1)


class ClusterConfig(_messages.Message):
  r"""The cluster config.

  Enums:
    ClusterTierValueValuesEnum: Optional. The cluster tier.
    ClusterTypeValueValuesEnum: Optional. The type of the cluster.

  Fields:
    autoscalingConfig: Optional. Autoscaling config for the policy associated
      with the cluster. Cluster does not autoscale if this field is unset.
    auxiliaryNodeGroups: Optional. The node group settings.
    clusterTier: Optional. The cluster tier.
    clusterType: Optional. The type of the cluster.
    configBucket: Optional. A Cloud Storage bucket used to stage job
      dependencies, config files, and job driver console output. If you do not
      specify a staging bucket, Cloud Dataproc will determine a Cloud Storage
      location (US, ASIA, or EU) for your cluster's staging bucket according
      to the Compute Engine zone where your cluster is deployed, and then
      create and manage this project-level, per-location bucket (see Dataproc
      staging and temp buckets
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/staging-bucket)). This field requires a Cloud Storage bucket
      name, not a gs://... URI to a Cloud Storage bucket.
    dataprocMetricConfig: Optional. The config for Dataproc metrics.
    diagnosticBucket: Optional. A Cloud Storage bucket used to collect
      checkpoint diagnostic data
      (https://cloud.google.com/dataproc/docs/support/diagnose-
      clusters#checkpoint_diagnostic_data). If you do not specify a diagnostic
      bucket, Cloud Dataproc will use the Dataproc temp bucket to collect the
      checkpoint diagnostic data. This field requires a Cloud Storage bucket
      name, not a gs://... URI to a Cloud Storage bucket.
    encryptionConfig: Optional. Encryption settings for the cluster.
    endpointConfig: Optional. Port/endpoint configuration for this cluster
    gceClusterConfig: Optional. The shared Compute Engine config settings for
      all instances in a cluster.
    gkeClusterConfig: Optional. BETA. The Kubernetes Engine config for
      Dataproc clusters deployed to The Kubernetes Engine config for Dataproc
      clusters deployed to Kubernetes. These config settings are mutually
      exclusive with Compute Engine-based options, such as gce_cluster_config,
      master_config, worker_config, secondary_worker_config, and
      autoscaling_config.
    initializationActions: Optional. Commands to execute on each node after
      config is completed. By default, executables are run on master and all
      worker nodes. You can test a node's role metadata to run an executable
      on a master or worker node, as shown below using curl (you can also use
      wget): ROLE=$(curl -H Metadata-Flavor:Google
      http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if
      [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else
      ... worker specific actions ... fi
    lifecycleConfig: Optional. Lifecycle setting for the cluster.
    masterConfig: Optional. The Compute Engine config settings for the
      cluster's master instance.
    metastoreConfig: Optional. Metastore configuration.
    schedulingConfig: Optional. Config for scheduling the resources to be
      allocated when available.
    secondaryWorkerConfig: Optional. The Compute Engine config settings for a
      cluster's secondary worker instances
    securityConfig: Optional. Security settings for the cluster.
    softwareConfig: Optional. The config settings for cluster software.
    tempBucket: Optional. A Cloud Storage bucket used to store ephemeral
      cluster and jobs data, such as Spark and MapReduce history files. If you
      do not specify a temp bucket, Dataproc will determine a Cloud Storage
      location (US, ASIA, or EU) for your cluster's temp bucket according to
      the Compute Engine zone where your cluster is deployed, and then create
      and manage this project-level, per-location bucket. The default bucket
      has a TTL of 90 days, but you can use any TTL (or none) if you specify a
      bucket (see Dataproc staging and temp buckets
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/staging-bucket)). This field requires a Cloud Storage bucket
      name, not a gs://... URI to a Cloud Storage bucket.
    workerConfig: Optional. The Compute Engine config settings for the
      cluster's worker instances.
  """

  class ClusterTierValueValuesEnum(_messages.Enum):
    r"""Optional. The cluster tier.

    Values:
      CLUSTER_TIER_UNSPECIFIED: Not set. Works the same as
        CLUSTER_TIER_STANDARD.
      CLUSTER_TIER_STANDARD: Standard Dataproc cluster.
      CLUSTER_TIER_PREMIUM: Premium Dataproc cluster.
    """
    CLUSTER_TIER_UNSPECIFIED = 0
    CLUSTER_TIER_STANDARD = 1
    CLUSTER_TIER_PREMIUM = 2

  class ClusterTypeValueValuesEnum(_messages.Enum):
    r"""Optional. The type of the cluster.

    Values:
      CLUSTER_TYPE_UNSPECIFIED: Not set.
      STANDARD: Standard dataproc cluster with a minimum of two primary
        workers.
      SINGLE_NODE:
        https://cloud.google.com/dataproc/docs/concepts/configuring-
        clusters/single-node-clusters
      ZERO_SCALE: Clusters that can use only secondary workers and be scaled
        down to zero secondary worker nodes.
    """
    CLUSTER_TYPE_UNSPECIFIED = 0
    STANDARD = 1
    SINGLE_NODE = 2
    ZERO_SCALE = 3

  autoscalingConfig = _messages.MessageField('AutoscalingConfig', 1)
  auxiliaryNodeGroups = _messages.MessageField('AuxiliaryNodeGroup', 2, repeated=True)
  clusterTier = _messages.EnumField('ClusterTierValueValuesEnum', 3)
  clusterType = _messages.EnumField('ClusterTypeValueValuesEnum', 4)
  configBucket = _messages.StringField(5)
  dataprocMetricConfig = _messages.MessageField('DataprocMetricConfig', 6)
  diagnosticBucket = _messages.StringField(7)
  encryptionConfig = _messages.MessageField('EncryptionConfig', 8)
  endpointConfig = _messages.MessageField('EndpointConfig', 9)
  gceClusterConfig = _messages.MessageField('GceClusterConfig', 10)
  gkeClusterConfig = _messages.MessageField('GkeClusterConfig', 11)
  initializationActions = _messages.MessageField('NodeInitializationAction', 12, repeated=True)
  lifecycleConfig = _messages.MessageField('LifecycleConfig', 13)
  masterConfig = _messages.MessageField('InstanceGroupConfig', 14)
  metastoreConfig = _messages.MessageField('MetastoreConfig', 15)
  schedulingConfig = _messages.MessageField('SchedulingConfig', 16)
  secondaryWorkerConfig = _messages.MessageField('InstanceGroupConfig', 17)
  securityConfig = _messages.MessageField('SecurityConfig', 18)
  softwareConfig = _messages.MessageField('SoftwareConfig', 19)
  tempBucket = _messages.StringField(20)
  workerConfig = _messages.MessageField('InstanceGroupConfig', 21)


class ClusterMetrics(_messages.Message):
  r"""Contains cluster daemon metrics, such as HDFS and YARN stats.Beta
  Feature: This report is available for testing purposes only. It may be
  changed before final release.

  Messages:
    HdfsMetricsValue: The HDFS metrics.
    SparkMetricsValue: Spark metrics.
    YarnMetricsValue: YARN metrics.

  Fields:
    hdfsMetrics: The HDFS metrics.
    sparkMetrics: Spark metrics.
    yarnMetrics: YARN metrics.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HdfsMetricsValue(_messages.Message):
    r"""The HDFS metrics.

    Messages:
      AdditionalProperty: An additional property for a HdfsMetricsValue
        object.

    Fields:
      additionalProperties: Additional properties of type HdfsMetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HdfsMetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SparkMetricsValue(_messages.Message):
    r"""Spark metrics.

    Messages:
      AdditionalProperty: An additional property for a SparkMetricsValue
        object.

    Fields:
      additionalProperties: Additional properties of type SparkMetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SparkMetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class YarnMetricsValue(_messages.Message):
    r"""YARN metrics.

    Messages:
      AdditionalProperty: An additional property for a YarnMetricsValue
        object.

    Fields:
      additionalProperties: Additional properties of type YarnMetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a YarnMetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  hdfsMetrics = _messages.MessageField('HdfsMetricsValue', 1)
  sparkMetrics = _messages.MessageField('SparkMetricsValue', 2)
  yarnMetrics = _messages.MessageField('YarnMetricsValue', 3)


class ClusterOperation(_messages.Message):
  r"""The cluster operation triggered by a workflow.

  Fields:
    done: Output only. Indicates the operation is done.
    error: Output only. Error, if operation failed.
    operationId: Output only. The id of the cluster operation.
  """

  done = _messages.BooleanField(1)
  error = _messages.StringField(2)
  operationId = _messages.StringField(3)


class ClusterOperationMetadata(_messages.Message):
  r"""Metadata describing the operation.

  Messages:
    LabelsValue: Output only. Labels associated with the operation

  Fields:
    childOperationIds: Output only. Child operation ids
    clusterName: Output only. Name of the cluster for the operation.
    clusterUuid: Output only. Cluster UUID for the operation.
    description: Output only. Short description of operation.
    labels: Output only. Labels associated with the operation
    operationType: Output only. The operation type.
    status: Output only. Current operation status.
    statusHistory: Output only. The previous operation status.
    warnings: Output only. Errors encountered during operation execution.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Output only. Labels associated with the operation

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  childOperationIds = _messages.StringField(1, repeated=True)
  clusterName = _messages.StringField(2)
  clusterUuid = _messages.StringField(3)
  description = _messages.StringField(4)
  labels = _messages.MessageField('LabelsValue', 5)
  operationType = _messages.StringField(6)
  status = _messages.MessageField('ClusterOperationStatus', 7)
  statusHistory = _messages.MessageField('ClusterOperationStatus', 8, repeated=True)
  warnings = _messages.StringField(9, repeated=True)


class ClusterOperationStatus(_messages.Message):
  r"""The status of the operation.

  Enums:
    StateValueValuesEnum: Output only. A message containing the operation
      state.

  Fields:
    details: Output only. A message containing any operation metadata details.
    innerState: Output only. A message containing the detailed operation
      state.
    state: Output only. A message containing the operation state.
    stateStartTime: Output only. The time this state was entered.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. A message containing the operation state.

    Values:
      UNKNOWN: Unused.
      PENDING: The operation has been created.
      RUNNING: The operation is running.
      DONE: The operation is done; either cancelled or completed.
    """
    UNKNOWN = 0
    PENDING = 1
    RUNNING = 2
    DONE = 3

  details = _messages.StringField(1)
  innerState = _messages.StringField(2)
  state = _messages.EnumField('StateValueValuesEnum', 3)
  stateStartTime = _messages.StringField(4)


class ClusterSelector(_messages.Message):
  r"""A selector that chooses target cluster for jobs based on metadata.

  Messages:
    ClusterLabelsValue: Required. The cluster labels. Cluster must have all
      labels to match.

  Fields:
    clusterLabels: Required. The cluster labels. Cluster must have all labels
      to match.
    zone: Optional. The zone where workflow process executes. This parameter
      does not affect the selection of the cluster.If unspecified, the zone of
      the first cluster matching the selector is used.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ClusterLabelsValue(_messages.Message):
    r"""Required. The cluster labels. Cluster must have all labels to match.

    Messages:
      AdditionalProperty: An additional property for a ClusterLabelsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ClusterLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ClusterLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterLabels = _messages.MessageField('ClusterLabelsValue', 1)
  zone = _messages.StringField(2)


class ClusterStatus(_messages.Message):
  r"""The status of a cluster and its instances.

  Enums:
    StateValueValuesEnum: Output only. The cluster's state.
    SubstateValueValuesEnum: Output only. Additional state information that
      includes status reported by the agent.

  Fields:
    detail: Optional. Output only. Details of cluster's state.
    state: Output only. The cluster's state.
    stateStartTime: Output only. Time when this state was entered (see JSON
      representation of Timestamp (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).
    substate: Output only. Additional state information that includes status
      reported by the agent.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The cluster's state.

    Values:
      UNKNOWN: The cluster state is unknown.
      CREATING: The cluster is being created and set up. It is not ready for
        use.
      RUNNING: The cluster is currently running and healthy. It is ready for
        use.Note: The cluster state changes from "creating" to "running"
        status after the master node(s), first two primary worker nodes (and
        the last primary worker node if primary workers > 2) are running.
      ERROR: The cluster encountered an error. It is not ready for use.
      ERROR_DUE_TO_UPDATE: The cluster has encountered an error while being
        updated. Jobs can be submitted to the cluster, but the cluster cannot
        be updated.
      DELETING: The cluster is being deleted. It cannot be used.
      UPDATING: The cluster is being updated. It continues to accept and
        process jobs.
      STOPPING: The cluster is being stopped. It cannot be used.
      STOPPED: The cluster is currently stopped. It is not ready for use.
      STARTING: The cluster is being started. It is not ready for use.
      REPAIRING: The cluster is being repaired. It is not ready for use.
      SCHEDULED: Cluster creation is currently waiting for resources to be
        available. Once all resources are available, it will transition to
        CREATING and then RUNNING.
    """
    UNKNOWN = 0
    CREATING = 1
    RUNNING = 2
    ERROR = 3
    ERROR_DUE_TO_UPDATE = 4
    DELETING = 5
    UPDATING = 6
    STOPPING = 7
    STOPPED = 8
    STARTING = 9
    REPAIRING = 10
    SCHEDULED = 11

  class SubstateValueValuesEnum(_messages.Enum):
    r"""Output only. Additional state information that includes status
    reported by the agent.

    Values:
      UNSPECIFIED: The cluster substate is unknown.
      UNHEALTHY: The cluster is known to be in an unhealthy state (for
        example, critical daemons are not running or HDFS capacity is
        exhausted).Applies to RUNNING state.
      STALE_STATUS: The agent-reported status is out of date (may occur if
        Dataproc loses communication with Agent).Applies to RUNNING state.
    """
    UNSPECIFIED = 0
    UNHEALTHY = 1
    STALE_STATUS = 2

  detail = _messages.StringField(1)
  state = _messages.EnumField('StateValueValuesEnum', 2)
  stateStartTime = _messages.StringField(3)
  substate = _messages.EnumField('SubstateValueValuesEnum', 4)


class ClusterToRepair(_messages.Message):
  r"""Cluster to be repaired

  Enums:
    ClusterRepairActionValueValuesEnum: Required. Repair action to take on the
      cluster resource.

  Fields:
    clusterRepairAction: Required. Repair action to take on the cluster
      resource.
  """

  class ClusterRepairActionValueValuesEnum(_messages.Enum):
    r"""Required. Repair action to take on the cluster resource.

    Values:
      CLUSTER_REPAIR_ACTION_UNSPECIFIED: No action will be taken by default.
      REPAIR_ERROR_DUE_TO_UPDATE_CLUSTER: Repair cluster in
        ERROR_DUE_TO_UPDATE states.
    """
    CLUSTER_REPAIR_ACTION_UNSPECIFIED = 0
    REPAIR_ERROR_DUE_TO_UPDATE_CLUSTER = 1

  clusterRepairAction = _messages.EnumField('ClusterRepairActionValueValuesEnum', 1)


class ConfidentialInstanceConfig(_messages.Message):
  r"""Confidential Instance Config for clusters using Confidential VMs
  (https://cloud.google.com/compute/confidential-vm/docs)

  Fields:
    enableConfidentialCompute: Optional. Defines whether the instance should
      have confidential compute enabled.
  """

  enableConfidentialCompute = _messages.BooleanField(1)


class ConsolidatedExecutorSummary(_messages.Message):
  r"""Consolidated summary about executors used by the application.

  Fields:
    activeTasks: A integer attribute.
    completedTasks: A integer attribute.
    count: A integer attribute.
    diskUsed: A string attribute.
    failedTasks: A integer attribute.
    isExcluded: A integer attribute.
    maxMemory: A string attribute.
    memoryMetrics: A MemoryMetrics attribute.
    memoryUsed: A string attribute.
    rddBlocks: A integer attribute.
    totalCores: A integer attribute.
    totalDurationMillis: A string attribute.
    totalGcTimeMillis: A string attribute.
    totalInputBytes: A string attribute.
    totalShuffleRead: A string attribute.
    totalShuffleWrite: A string attribute.
    totalTasks: A integer attribute.
  """

  activeTasks = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  completedTasks = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  count = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  diskUsed = _messages.IntegerField(4)
  failedTasks = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  isExcluded = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  maxMemory = _messages.IntegerField(7)
  memoryMetrics = _messages.MessageField('MemoryMetrics', 8)
  memoryUsed = _messages.IntegerField(9)
  rddBlocks = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  totalCores = _messages.IntegerField(11, variant=_messages.Variant.INT32)
  totalDurationMillis = _messages.IntegerField(12)
  totalGcTimeMillis = _messages.IntegerField(13)
  totalInputBytes = _messages.IntegerField(14)
  totalShuffleRead = _messages.IntegerField(15)
  totalShuffleWrite = _messages.IntegerField(16)
  totalTasks = _messages.IntegerField(17, variant=_messages.Variant.INT32)


class DataprocMetastoreConfig(_messages.Message):
  r"""Dataproc Metastore configuration for the workload.

  Fields:
    service: Optional. Resource name of an existing Dataproc Metastore
      service.Example:
      projects/[project_id]/locations/[region]/services/[service_id]
  """

  service = _messages.StringField(1)


class DataprocMetricConfig(_messages.Message):
  r"""Dataproc metric config.

  Fields:
    metrics: Required. Metrics sources to enable.
  """

  metrics = _messages.MessageField('Metric', 1, repeated=True)


class DataprocProjectsLocationsAutoscalingPoliciesCreateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesCreateRequest object.

  Fields:
    autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
      request body.
    parent: Required. The "resource name" of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.create, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.autoscalingPolicies.create, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
  """

  autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
  parent = _messages.StringField(2, required=True)


class DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest object.

  Fields:
    name: Required. The "resource name" of the autoscaling policy, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.delete, the resource name of the
      policy has the following format:
      projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}
      For projects.locations.autoscalingPolicies.delete, the resource name of
      the policy has the following format: projects/{project_id}/locations/{lo
      cation}/autoscalingPolicies/{policy_id}
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsAutoscalingPoliciesGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesGetIamPolicyRequest
  object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsLocationsAutoscalingPoliciesGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesGetRequest object.

  Fields:
    name: Required. The "resource name" of the autoscaling policy, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.get, the resource name of the
      policy has the following format:
      projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}
      For projects.locations.autoscalingPolicies.get, the resource name of the
      policy has the following format: projects/{project_id}/locations/{locati
      on}/autoscalingPolicies/{policy_id}
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsAutoscalingPoliciesListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesListRequest object.

  Fields:
    pageSize: Optional. The maximum number of results to return in each
      response. Must be less than or equal to 1000. Defaults to 100.
    pageToken: Optional. The page token, returned by a previous call, to
      request the next page of results.
    parent: Required. The "resource name" of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.list, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.autoscalingPolicies.list, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class DataprocProjectsLocationsAutoscalingPoliciesSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesSetIamPolicyRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsLocationsAutoscalingPoliciesTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsAutoscalingPoliciesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsLocationsBatchesAnalyzeRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesAnalyzeRequest object.

  Fields:
    analyzeBatchRequest: A AnalyzeBatchRequest resource to be passed as the
      request body.
    name: Required. The fully qualified name of the batch to analyze in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
  """

  analyzeBatchRequest = _messages.MessageField('AnalyzeBatchRequest', 1)
  name = _messages.StringField(2, required=True)


class DataprocProjectsLocationsBatchesCreateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesCreateRequest object.

  Fields:
    batch: A Batch resource to be passed as the request body.
    batchId: Optional. The ID to use for the batch, which will become the
      final component of the batch's resource name.This value must be 4-63
      characters. Valid characters are /[a-z][0-9]-/.
    parent: Required. The parent resource where this batch will be created.
    requestId: Optional. A unique ID used to identify the request. If the
      service receives two CreateBatchRequest (https://cloud.google.com/datapr
      oc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.
      CreateBatchRequest)s with the same request_id, the second request is
      ignored and the Operation that corresponds to the first Batch created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      value must contain only letters (a-z, A-Z), numbers (0-9), underscores
      (_), and hyphens (-). The maximum length is 40 characters.
  """

  batch = _messages.MessageField('Batch', 1)
  batchId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  requestId = _messages.StringField(4)


class DataprocProjectsLocationsBatchesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesDeleteRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsBatchesGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesGetRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsBatchesListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesListRequest object.

  Fields:
    filter: Optional. A filter for the batches to return in the response.A
      filter is a logical expression constraining the values of various fields
      in each batch resource. Filters are case sensitive, and may contain
      multiple clauses combined with logical operators (AND/OR). Supported
      fields are batch_id, batch_uuid, state, create_time, and labels.e.g.
      state = RUNNING and create_time < "2023-01-01T00:00:00Z" filters for
      batches in state RUNNING that were created before 2023-01-01. state =
      RUNNING and labels.environment=production filters for batches in state
      in a RUNNING state that have a production environment label.See
      https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed
      description of the filter syntax and a list of supported comparisons.
    orderBy: Optional. Field(s) on which to sort the list of batches.Currently
      the only supported sort orders are unspecified (empty) and create_time
      desc to sort by most recently created batches first.See
      https://google.aip.dev/132#ordering for more details.
    pageSize: Optional. The maximum number of batches to return in each
      response. The service may return fewer than this value. The default page
      size is 20; the maximum page size is 1000.
    pageToken: Optional. A page token received from a previous ListBatches
      call. Provide this token to retrieve the subsequent page.
    parent: Required. The parent, which owns this collection of batches.
  """

  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfoRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessEnvironmentInfo
  Request object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessJobRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessJobRequest
  object.

  Fields:
    jobId: Required. Job ID to fetch data for.
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  jobId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessNativeBuildInfoRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessNativeBuildInfo
  Request object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessNativeSqlQueryRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsAccessNativeSqlQueryRequest
  object.

  Fields:
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  executionId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessSqlPlanRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessSqlPlanRequest
  object.

  Fields:
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  executionId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessSqlQueryRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsAccessSqlQueryRequest
  object.

  Fields:
    details: Optional. Lists/ hides details of Spark plan nodes. True is set
      to list and false to hide.
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
    planDescription: Optional. Enables/ disables physical plan description on
      demand
  """

  details = _messages.BooleanField(1)
  executionId = _messages.IntegerField(2)
  name = _messages.StringField(3, required=True)
  parent = _messages.StringField(4)
  planDescription = _messages.BooleanField(5)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessStageAttemptRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsAccessStageAttemptRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageAttemptId = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(4)
  summaryMetricsMask = _messages.StringField(5)


class DataprocProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsAccessStageRddGraphRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageId = _messages.IntegerField(3)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSummaryRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchExecutorStageSu
  mmaryRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of executors to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      AccessSparkApplicationExecutorsList call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Batch) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageAttemptId = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(6)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchExecutorsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsSearchExecutorsRequest
  object.

  Enums:
    ExecutorStatusValueValuesEnum: Optional. Filter to select whether active/
      dead or all executors should be selected.

  Fields:
    executorStatus: Optional. Filter to select whether active/ dead or all
      executors should be selected.
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of executors to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      AccessSparkApplicationExecutorsList call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Batch) resource reference.
  """

  class ExecutorStatusValueValuesEnum(_messages.Enum):
    r"""Optional. Filter to select whether active/ dead or all executors
    should be selected.

    Values:
      EXECUTOR_STATUS_UNSPECIFIED: <no description>
      EXECUTOR_STATUS_ACTIVE: <no description>
      EXECUTOR_STATUS_DEAD: <no description>
    """
    EXECUTOR_STATUS_UNSPECIFIED = 0
    EXECUTOR_STATUS_ACTIVE = 1
    EXECUTOR_STATUS_DEAD = 2

  executorStatus = _messages.EnumField('ExecutorStatusValueValuesEnum', 1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchJobsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchJobsRequest
  object.

  Enums:
    JobStatusValueValuesEnum: Optional. List only jobs in the specific state.

  Fields:
    jobStatus: Optional. List only jobs in the specific state.
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of jobs to return in each response. The
      service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSparkApplicationJobs call. Provide this token to retrieve the
      subsequent page.
    parent: Required. Parent (Batch) resource reference.
  """

  class JobStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only jobs in the specific state.

    Values:
      JOB_EXECUTION_STATUS_UNSPECIFIED: <no description>
      JOB_EXECUTION_STATUS_RUNNING: <no description>
      JOB_EXECUTION_STATUS_SUCCEEDED: <no description>
      JOB_EXECUTION_STATUS_FAILED: <no description>
      JOB_EXECUTION_STATUS_UNKNOWN: <no description>
    """
    JOB_EXECUTION_STATUS_UNSPECIFIED = 0
    JOB_EXECUTION_STATUS_RUNNING = 1
    JOB_EXECUTION_STATUS_SUCCEEDED = 2
    JOB_EXECUTION_STATUS_FAILED = 3
    JOB_EXECUTION_STATUS_UNKNOWN = 4

  jobStatus = _messages.EnumField('JobStatusValueValuesEnum', 1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchNativeSqlQueriesRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchNativeSqlQuerie
  sRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of queries to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSparkApplicationNativeSqlQueries call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchRequest object.

  Enums:
    ApplicationStatusValueValuesEnum: Optional. Search only applications in
      the chosen state.

  Fields:
    applicationStatus: Optional. Search only applications in the chosen state.
    maxEndTime: Optional. Latest end timestamp to list.
    maxTime: Optional. Latest start timestamp to list.
    minEndTime: Optional. Earliest end timestamp to list.
    minTime: Optional. Earliest start timestamp to list.
    pageSize: Optional. Maximum number of applications to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSparkApplications call. Provide this token to retrieve the
      subsequent page.
    parent: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
  """

  class ApplicationStatusValueValuesEnum(_messages.Enum):
    r"""Optional. Search only applications in the chosen state.

    Values:
      APPLICATION_STATUS_UNSPECIFIED: <no description>
      APPLICATION_STATUS_RUNNING: <no description>
      APPLICATION_STATUS_COMPLETED: <no description>
    """
    APPLICATION_STATUS_UNSPECIFIED = 0
    APPLICATION_STATUS_RUNNING = 1
    APPLICATION_STATUS_COMPLETED = 2

  applicationStatus = _messages.EnumField('ApplicationStatusValueValuesEnum', 1)
  maxEndTime = _messages.StringField(2)
  maxTime = _messages.StringField(3)
  minEndTime = _messages.StringField(4)
  minTime = _messages.StringField(5)
  pageSize = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(7)
  parent = _messages.StringField(8, required=True)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsSearchSqlQueriesRequest
  object.

  Fields:
    details: Optional. Lists/ hides details of Spark plan nodes. True is set
      to list and false to hide.
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of queries to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSparkApplicationSqlQueries call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Batch) resource reference.
    planDescription: Optional. Enables/ disables physical plan description on
      demand
  """

  details = _messages.BooleanField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5)
  planDescription = _messages.BooleanField(6)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTasksRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchStageAttemptTas
  ksRequest object.

  Enums:
    TaskStatusValueValuesEnum: Optional. List only tasks in the state.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of tasks to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      ListSparkApplicationStageAttemptTasks call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Batch) resource reference.
    sortRuntime: Optional. Sort the tasks by runtime.
    stageAttemptId: Optional. Stage Attempt ID
    stageId: Optional. Stage ID
    taskStatus: Optional. List only tasks in the state.
  """

  class TaskStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only tasks in the state.

    Values:
      TASK_STATUS_UNSPECIFIED: <no description>
      TASK_STATUS_RUNNING: <no description>
      TASK_STATUS_SUCCESS: <no description>
      TASK_STATUS_FAILED: <no description>
      TASK_STATUS_KILLED: <no description>
      TASK_STATUS_PENDING: <no description>
    """
    TASK_STATUS_UNSPECIFIED = 0
    TASK_STATUS_RUNNING = 1
    TASK_STATUS_SUCCESS = 2
    TASK_STATUS_FAILED = 3
    TASK_STATUS_KILLED = 4
    TASK_STATUS_PENDING = 5

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  sortRuntime = _messages.BooleanField(5)
  stageAttemptId = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(7)
  taskStatus = _messages.EnumField('TaskStatusValueValuesEnum', 8)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsSearchStageAttemptsRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of stage attempts (paging based on
      stage_attempt_id) to return in each response. The service may return
      fewer than this. The default page size is 10; the maximum page size is
      100.
    pageToken: Optional. A page token received from a previous
      SearchSparkApplicationStageAttempts call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Batch) resource reference.
    stageId: Required. Stage ID for which attempts are to be fetched
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageId = _messages.IntegerField(5)
  summaryMetricsMask = _messages.StringField(6)


class DataprocProjectsLocationsBatchesSparkApplicationsSearchStagesRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSearchStagesRequest
  object.

  Enums:
    StageStatusValueValuesEnum: Optional. List only stages in the given state.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of stages (paging based on stage_id) to
      return in each response. The service may return fewer than this. The
      default page size is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      FetchSparkApplicationStagesList call. Provide this token to retrieve the
      subsequent page.
    parent: Required. Parent (Batch) resource reference.
    stageStatus: Optional. List only stages in the given state.
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  class StageStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only stages in the given state.

    Values:
      STAGE_STATUS_UNSPECIFIED: <no description>
      STAGE_STATUS_ACTIVE: <no description>
      STAGE_STATUS_COMPLETE: <no description>
      STAGE_STATUS_FAILED: <no description>
      STAGE_STATUS_PENDING: <no description>
      STAGE_STATUS_SKIPPED: <no description>
    """
    STAGE_STATUS_UNSPECIFIED = 0
    STAGE_STATUS_ACTIVE = 1
    STAGE_STATUS_COMPLETE = 2
    STAGE_STATUS_FAILED = 3
    STAGE_STATUS_PENDING = 4
    STAGE_STATUS_SKIPPED = 5

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageStatus = _messages.EnumField('StageStatusValueValuesEnum', 5)
  summaryMetricsMask = _messages.StringField(6)


class DataprocProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsSummarizeExecutorsRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsSummarizeJobsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSummarizeJobsRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsSummarizeStageAttemptTasksRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsSummarizeStageAttempt
  TasksRequest object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageAttemptId = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(4)


class DataprocProjectsLocationsBatchesSparkApplicationsSummarizeStagesRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsBatchesSparkApplicationsSummarizeStagesRequest
  object.

  Fields:
    name: Required. The fully qualified name of the batch to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/s
      parkApplications/APPLICATION_ID"
    parent: Required. Parent (Batch) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsBatchesSparkApplicationsWriteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsBatchesSparkApplicationsWriteRequest object.

  Fields:
    name: Required. The fully qualified name of the spark application to write
      data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/
      batches/BATCH_ID/sparkApplications/APPLICATION_ID"
    writeSparkApplicationContextRequest: A WriteSparkApplicationContextRequest
      resource to be passed as the request body.
  """

  name = _messages.StringField(1, required=True)
  writeSparkApplicationContextRequest = _messages.MessageField('WriteSparkApplicationContextRequest', 2)


class DataprocProjectsLocationsOperationsCancelRequest(_messages.Message):
  r"""A DataprocProjectsLocationsOperationsCancelRequest object.

  Fields:
    name: The name of the operation resource to be cancelled.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsOperationsDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsOperationsDeleteRequest object.

  Fields:
    name: The name of the operation resource to be deleted.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsOperationsGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsOperationsListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsOperationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The name of the operation's parent resource.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
    returnPartialSuccess: When set to true, operations that are reachable are
      returned as normal, and those that are unreachable are returned in the
      ListOperationsResponse.unreachable field.This can only be true when
      reading across collections e.g. when parent is set to
      "projects/example/locations/-".This field is not by default supported
      and will result in an UNIMPLEMENTED error if set unless explicitly
      documented otherwise in service or product specific documentation.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  returnPartialSuccess = _messages.BooleanField(5)


class DataprocProjectsLocationsSessionTemplatesCreateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionTemplatesCreateRequest object.

  Fields:
    parent: Required. The parent resource where this session template will be
      created.
    sessionTemplate: A SessionTemplate resource to be passed as the request
      body.
  """

  parent = _messages.StringField(1, required=True)
  sessionTemplate = _messages.MessageField('SessionTemplate', 2)


class DataprocProjectsLocationsSessionTemplatesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionTemplatesDeleteRequest object.

  Fields:
    name: Required. The name of the session template resource to delete.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsSessionTemplatesGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionTemplatesGetRequest object.

  Fields:
    name: Required. The name of the session template to retrieve.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsSessionTemplatesListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionTemplatesListRequest object.

  Fields:
    filter: Optional. A filter for the session templates to return in the
      response. Filters are case sensitive and have the following syntax:field
      = value AND field = value ...
    pageSize: Optional. The maximum number of sessions to return in each
      response. The service may return fewer than this value.
    pageToken: Optional. A page token received from a previous ListSessions
      call. Provide this token to retrieve the subsequent page.
    parent: Required. The parent that owns this collection of session
      templates.
  """

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4, required=True)


class DataprocProjectsLocationsSessionsCreateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsCreateRequest object.

  Fields:
    parent: Required. The parent resource where this session will be created.
    requestId: Optional. A unique ID used to identify the request. If the
      service receives two CreateSessionRequests (https://cloud.google.com/dat
      aproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.
      v1.CreateSessionRequest)s with the same ID, the second request is
      ignored, and the first Session is created and stored in the
      backend.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    session: A Session resource to be passed as the request body.
    sessionId: Required. The ID to use for the session, which becomes the
      final component of the session's resource name.This value must be 4-63
      characters. Valid characters are /a-z-/.
  """

  parent = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)
  session = _messages.MessageField('Session', 3)
  sessionId = _messages.StringField(4)


class DataprocProjectsLocationsSessionsDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsDeleteRequest object.

  Fields:
    name: Required. The name of the session resource to delete.
    requestId: Optional. A unique ID used to identify the request. If the
      service receives two DeleteSessionRequest (https://cloud.google.com/data
      proc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v
      1.DeleteSessionRequest)s with the same ID, the second request is
      ignored.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  name = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)


class DataprocProjectsLocationsSessionsGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsGetRequest object.

  Fields:
    name: Required. The name of the session to retrieve.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsLocationsSessionsListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsListRequest object.

  Fields:
    filter: Optional. A filter for the sessions to return in the response.A
      filter is a logical expression constraining the values of various fields
      in each session resource. Filters are case sensitive, and may contain
      multiple clauses combined with logical operators (AND, OR). Supported
      fields are session_id, session_uuid, state, create_time, and
      labels.Example: state = ACTIVE and create_time < "2023-01-01T00:00:00Z"
      is a filter for sessions in an ACTIVE state that were created before
      2023-01-01. state = ACTIVE and labels.environment=production is a filter
      for sessions in an ACTIVE state that have a production environment
      label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a
      detailed description of the filter syntax and a list of supported
      comparators.
    orderBy: Optional. Field(s) on which to sort the list of sessions. See
      https://google.aip.dev/132#ordering for more information.
    pageSize: Optional. The maximum number of sessions to return in each
      response. The service may return fewer than this value.
    pageToken: Optional. A page token received from a previous ListSessions
      call. Provide this token to retrieve the subsequent page.
    parent: Required. The parent, which owns this collection of sessions.
  """

  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInfoRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessEnvironmentInf
  oRequest object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessJobRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessJobRequest
  object.

  Fields:
    jobId: Required. Job ID to fetch data for.
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  jobId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessNativeBuildInfoRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessNativeBuildInf
  oRequest object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessNativeSqlQueryRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessNativeSqlQuery
  Request object.

  Fields:
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  executionId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessSqlPlanRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsAccessSqlPlanRequest
  object.

  Fields:
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  executionId = _messages.IntegerField(1)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessSqlQueryRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsAccessSqlQueryRequest
  object.

  Fields:
    details: Optional. Lists/ hides details of Spark plan nodes. True is set
      to list and false to hide.
    executionId: Required. Execution ID
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
    planDescription: Optional. Enables/ disables physical plan description on
      demand
  """

  details = _messages.BooleanField(1)
  executionId = _messages.IntegerField(2)
  name = _messages.StringField(3, required=True)
  parent = _messages.StringField(4)
  planDescription = _messages.BooleanField(5)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessStageAttemptRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsAccessStageAttemptRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageAttemptId = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(4)
  summaryMetricsMask = _messages.StringField(5)


class DataprocProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsAccessStageRddGraphRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageId = _messages.IntegerField(3)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchExecutorStageSummaryRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchExecutorStageS
  ummaryRequest object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of executors to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationExecutorStageSummary call. Provide this
      token to retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageAttemptId = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(6)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchExecutorsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsSearchExecutorsRequest
  object.

  Enums:
    ExecutorStatusValueValuesEnum: Optional. Filter to select whether active/
      dead or all executors should be selected.

  Fields:
    executorStatus: Optional. Filter to select whether active/ dead or all
      executors should be selected.
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of executors to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationExecutors call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
  """

  class ExecutorStatusValueValuesEnum(_messages.Enum):
    r"""Optional. Filter to select whether active/ dead or all executors
    should be selected.

    Values:
      EXECUTOR_STATUS_UNSPECIFIED: <no description>
      EXECUTOR_STATUS_ACTIVE: <no description>
      EXECUTOR_STATUS_DEAD: <no description>
    """
    EXECUTOR_STATUS_UNSPECIFIED = 0
    EXECUTOR_STATUS_ACTIVE = 1
    EXECUTOR_STATUS_DEAD = 2

  executorStatus = _messages.EnumField('ExecutorStatusValueValuesEnum', 1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchJobsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchJobsRequest
  object.

  Enums:
    JobStatusValueValuesEnum: Optional. List only jobs in the specific state.

  Fields:
    jobIds: Optional. List of Job IDs to filter by if provided.
    jobStatus: Optional. List only jobs in the specific state.
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of jobs to return in each response. The
      service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationJobs call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Session) resource reference.
  """

  class JobStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only jobs in the specific state.

    Values:
      JOB_EXECUTION_STATUS_UNSPECIFIED: <no description>
      JOB_EXECUTION_STATUS_RUNNING: <no description>
      JOB_EXECUTION_STATUS_SUCCEEDED: <no description>
      JOB_EXECUTION_STATUS_FAILED: <no description>
      JOB_EXECUTION_STATUS_UNKNOWN: <no description>
    """
    JOB_EXECUTION_STATUS_UNSPECIFIED = 0
    JOB_EXECUTION_STATUS_RUNNING = 1
    JOB_EXECUTION_STATUS_SUCCEEDED = 2
    JOB_EXECUTION_STATUS_FAILED = 3
    JOB_EXECUTION_STATUS_UNKNOWN = 4

  jobIds = _messages.IntegerField(1, repeated=True)
  jobStatus = _messages.EnumField('JobStatusValueValuesEnum', 2)
  name = _messages.StringField(3, required=True)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
  parent = _messages.StringField(6)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchNativeSqlQueriesRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchNativeSqlQueri
  esRequest object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of queries to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationSqlQueries call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchRequest
  object.

  Enums:
    ApplicationStatusValueValuesEnum: Optional. Search only applications in
      the chosen state.

  Fields:
    applicationStatus: Optional. Search only applications in the chosen state.
    maxEndTime: Optional. Latest end timestamp to list.
    maxTime: Optional. Latest start timestamp to list.
    minEndTime: Optional. Earliest end timestamp to list.
    minTime: Optional. Earliest start timestamp to list.
    pageSize: Optional. Maximum number of applications to return in each
      response. The service may return fewer than this. The default page size
      is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplications call. Provide this token to retrieve the
      subsequent page.
    parent: Required. The fully qualified name of the session to retrieve in
      the format
      "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID"
  """

  class ApplicationStatusValueValuesEnum(_messages.Enum):
    r"""Optional. Search only applications in the chosen state.

    Values:
      APPLICATION_STATUS_UNSPECIFIED: <no description>
      APPLICATION_STATUS_RUNNING: <no description>
      APPLICATION_STATUS_COMPLETED: <no description>
    """
    APPLICATION_STATUS_UNSPECIFIED = 0
    APPLICATION_STATUS_RUNNING = 1
    APPLICATION_STATUS_COMPLETED = 2

  applicationStatus = _messages.EnumField('ApplicationStatusValueValuesEnum', 1)
  maxEndTime = _messages.StringField(2)
  maxTime = _messages.StringField(3)
  minEndTime = _messages.StringField(4)
  minTime = _messages.StringField(5)
  pageSize = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(7)
  parent = _messages.StringField(8, required=True)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsSearchSqlQueriesRequest
  object.

  Fields:
    details: Optional. Lists/ hides details of Spark plan nodes. True is set
      to list and false to hide.
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    operationIds: Optional. List of Spark Connect operation IDs to filter by
      if provided.
    pageSize: Optional. Maximum number of queries to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationSqlQueries call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
    planDescription: Optional. Enables/ disables physical plan description on
      demand
  """

  details = _messages.BooleanField(1)
  name = _messages.StringField(2, required=True)
  operationIds = _messages.StringField(3, repeated=True)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
  parent = _messages.StringField(6)
  planDescription = _messages.BooleanField(7)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTasksRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchStageAttemptTa
  sksRequest object.

  Enums:
    TaskStatusValueValuesEnum: Optional. List only tasks in the state.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of tasks to return in each response.
      The service may return fewer than this. The default page size is 10; the
      maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationStageAttemptTasks call. Provide this token
      to retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
    sortRuntime: Optional. Sort the tasks by runtime.
    stageAttemptId: Optional. Stage Attempt ID
    stageId: Optional. Stage ID
    taskStatus: Optional. List only tasks in the state.
  """

  class TaskStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only tasks in the state.

    Values:
      TASK_STATUS_UNSPECIFIED: <no description>
      TASK_STATUS_RUNNING: <no description>
      TASK_STATUS_SUCCESS: <no description>
      TASK_STATUS_FAILED: <no description>
      TASK_STATUS_KILLED: <no description>
      TASK_STATUS_PENDING: <no description>
    """
    TASK_STATUS_UNSPECIFIED = 0
    TASK_STATUS_RUNNING = 1
    TASK_STATUS_SUCCESS = 2
    TASK_STATUS_FAILED = 3
    TASK_STATUS_KILLED = 4
    TASK_STATUS_PENDING = 5

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  sortRuntime = _messages.BooleanField(5)
  stageAttemptId = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(7)
  taskStatus = _messages.EnumField('TaskStatusValueValuesEnum', 8)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsSearchStageAttemptsRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of stage attempts (paging based on
      stage_attempt_id) to return in each response. The service may return
      fewer than this. The default page size is 10; the maximum page size is
      100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationStageAttempts call. Provide this token to
      retrieve the subsequent page.
    parent: Required. Parent (Session) resource reference.
    stageId: Required. Stage ID for which attempts are to be fetched
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageId = _messages.IntegerField(5)
  summaryMetricsMask = _messages.StringField(6)


class DataprocProjectsLocationsSessionsSparkApplicationsSearchStagesRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSearchStagesRequest
  object.

  Enums:
    StageStatusValueValuesEnum: Optional. List only stages in the given state.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    pageSize: Optional. Maximum number of stages (paging based on stage_id) to
      return in each response. The service may return fewer than this. The
      default page size is 10; the maximum page size is 100.
    pageToken: Optional. A page token received from a previous
      SearchSessionSparkApplicationStages call. Provide this token to retrieve
      the subsequent page.
    parent: Required. Parent (Session) resource reference.
    stageIds: Optional. List of Stage IDs to filter by if provided.
    stageStatus: Optional. List only stages in the given state.
    summaryMetricsMask: Optional. The list of summary metrics fields to
      include. Empty list will default to skip all summary metrics fields.
      Example, if the response should include TaskQuantileMetrics, the request
      should have task_quantile_metrics in summary_metrics_mask field
  """

  class StageStatusValueValuesEnum(_messages.Enum):
    r"""Optional. List only stages in the given state.

    Values:
      STAGE_STATUS_UNSPECIFIED: <no description>
      STAGE_STATUS_ACTIVE: <no description>
      STAGE_STATUS_COMPLETE: <no description>
      STAGE_STATUS_FAILED: <no description>
      STAGE_STATUS_PENDING: <no description>
      STAGE_STATUS_SKIPPED: <no description>
    """
    STAGE_STATUS_UNSPECIFIED = 0
    STAGE_STATUS_ACTIVE = 1
    STAGE_STATUS_COMPLETE = 2
    STAGE_STATUS_FAILED = 3
    STAGE_STATUS_PENDING = 4
    STAGE_STATUS_SKIPPED = 5

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4)
  stageIds = _messages.IntegerField(5, repeated=True)
  stageStatus = _messages.EnumField('StageStatusValueValuesEnum', 6)
  summaryMetricsMask = _messages.StringField(7)


class DataprocProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsSummarizeExecutorsRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)


class DataprocProjectsLocationsSessionsSparkApplicationsSummarizeJobsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSummarizeJobsRequest
  object.

  Fields:
    jobIds: Optional. List of Job IDs to filter by if provided.
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
  """

  jobIds = _messages.IntegerField(1, repeated=True)
  name = _messages.StringField(2, required=True)
  parent = _messages.StringField(3)


class DataprocProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemptTasksRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsSummarizeStageAttemp
  tTasksRequest object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
    stageAttemptId: Required. Stage Attempt ID
    stageId: Required. Stage ID
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageAttemptId = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(4)


class DataprocProjectsLocationsSessionsSparkApplicationsSummarizeStagesRequest(_messages.Message):
  r"""A
  DataprocProjectsLocationsSessionsSparkApplicationsSummarizeStagesRequest
  object.

  Fields:
    name: Required. The fully qualified name of the session to retrieve in the
      format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_I
      D/sparkApplications/APPLICATION_ID"
    parent: Required. Parent (Session) resource reference.
    stageIds: Optional. List of Stage IDs to filter by if provided.
  """

  name = _messages.StringField(1, required=True)
  parent = _messages.StringField(2)
  stageIds = _messages.IntegerField(3, repeated=True)


class DataprocProjectsLocationsSessionsSparkApplicationsWriteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsSparkApplicationsWriteRequest object.

  Fields:
    name: Required. The fully qualified name of the spark application to write
      data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/
      sessions/SESSION_ID/sparkApplications/APPLICATION_ID"
    writeSessionSparkApplicationContextRequest: A
      WriteSessionSparkApplicationContextRequest resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  writeSessionSparkApplicationContextRequest = _messages.MessageField('WriteSessionSparkApplicationContextRequest', 2)


class DataprocProjectsLocationsSessionsTerminateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsSessionsTerminateRequest object.

  Fields:
    name: Required. The name of the session resource to terminate.
    terminateSessionRequest: A TerminateSessionRequest resource to be passed
      as the request body.
  """

  name = _messages.StringField(1, required=True)
  terminateSessionRequest = _messages.MessageField('TerminateSessionRequest', 2)


class DataprocProjectsLocationsWorkflowTemplatesCreateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesCreateRequest object.

  Fields:
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.create, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.workflowTemplates.create, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
    workflowTemplate: A WorkflowTemplate resource to be passed as the request
      body.
  """

  parent = _messages.StringField(1, required=True)
  workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)


class DataprocProjectsLocationsWorkflowTemplatesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesDeleteRequest object.

  Fields:
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.delete, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.instantiate, the resource name
      of the template has the following format: projects/{project_id}/location
      s/{location}/workflowTemplates/{template_id}
    version: Optional. The version of workflow template to delete. If
      specified, will only delete the template if the current server version
      matches specified version.
  """

  name = _messages.StringField(1, required=True)
  version = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsLocationsWorkflowTemplatesGetRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesGetRequest object.

  Fields:
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.get, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.get, the resource name of the
      template has the following format: projects/{project_id}/locations/{loca
      tion}/workflowTemplates/{template_id}
    version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved.If unspecified,
      retrieves the current version.
  """

  name = _messages.StringField(1, required=True)
  version = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest
  object.

  Fields:
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates,instantiateinline, the resource name
      of the region has the following format:
      projects/{project_id}/regions/{region} For
      projects.locations.workflowTemplates.instantiateinline, the resource
      name of the location has the following format:
      projects/{project_id}/locations/{location}
    requestId: Optional. A tag that prevents multiple concurrent workflow
      instances with the same tag from running. This mitigates risk of
      concurrent instances started due to retries.It is recommended to always
      set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    workflowTemplate: A WorkflowTemplate resource to be passed as the request
      body.
  """

  parent = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)
  workflowTemplate = _messages.MessageField('WorkflowTemplate', 3)


class DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest object.

  Fields:
    instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
      resource to be passed as the request body.
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.instantiate, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.instantiate, the resource name
      of the template has the following format: projects/{project_id}/location
      s/{location}/workflowTemplates/{template_id}
  """

  instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
  name = _messages.StringField(2, required=True)


class DataprocProjectsLocationsWorkflowTemplatesListRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesListRequest object.

  Fields:
    pageSize: Optional. The maximum number of results to return in each
      response.
    pageToken: Optional. The page token, returned by a previous call, to
      request the next page of results.
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates,list, the resource name of the region
      has the following format: projects/{project_id}/regions/{region} For
      projects.locations.workflowTemplates.list, the resource name of the
      location has the following format:
      projects/{project_id}/locations/{location}
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsRegionsAutoscalingPoliciesCreateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesCreateRequest object.

  Fields:
    autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
      request body.
    parent: Required. The "resource name" of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.create, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.autoscalingPolicies.create, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
  """

  autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
  parent = _messages.StringField(2, required=True)


class DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest object.

  Fields:
    name: Required. The "resource name" of the autoscaling policy, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.delete, the resource name of the
      policy has the following format:
      projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}
      For projects.locations.autoscalingPolicies.delete, the resource name of
      the policy has the following format: projects/{project_id}/locations/{lo
      cation}/autoscalingPolicies/{policy_id}
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsAutoscalingPoliciesGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsRegionsAutoscalingPoliciesGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesGetRequest object.

  Fields:
    name: Required. The "resource name" of the autoscaling policy, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.get, the resource name of the
      policy has the following format:
      projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}
      For projects.locations.autoscalingPolicies.get, the resource name of the
      policy has the following format: projects/{project_id}/locations/{locati
      on}/autoscalingPolicies/{policy_id}
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsAutoscalingPoliciesListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesListRequest object.

  Fields:
    pageSize: Optional. The maximum number of results to return in each
      response. Must be less than or equal to 1000. Defaults to 100.
    pageToken: Optional. The page token, returned by a previous call, to
      request the next page of results.
    parent: Required. The "resource name" of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.autoscalingPolicies.list, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.autoscalingPolicies.list, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class DataprocProjectsRegionsAutoscalingPoliciesSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsRegionsAutoscalingPoliciesTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsAutoscalingPoliciesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsRegionsClustersCreateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersCreateRequest object.

  Enums:
    ActionOnFailedPrimaryWorkersValueValuesEnum: Optional. Failure action when
      primary worker creation fails.

  Fields:
    actionOnFailedPrimaryWorkers: Optional. Failure action when primary worker
      creation fails.
    cluster: A Cluster resource to be passed as the request body.
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two CreateClusterRequest (https://cloud.google.com/datap
      roc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1
      .CreateClusterRequest)s with the same id, then the second request will
      be ignored and the first google.longrunning.Operation created and stored
      in the backend is returned.It is recommended to always set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  class ActionOnFailedPrimaryWorkersValueValuesEnum(_messages.Enum):
    r"""Optional. Failure action when primary worker creation fails.

    Values:
      FAILURE_ACTION_UNSPECIFIED: When FailureAction is unspecified, failure
        action defaults to NO_ACTION.
      NO_ACTION: Take no action on failure to create a cluster resource.
        NO_ACTION is the default.
      DELETE: Delete the failed cluster resource.
    """
    FAILURE_ACTION_UNSPECIFIED = 0
    NO_ACTION = 1
    DELETE = 2

  actionOnFailedPrimaryWorkers = _messages.EnumField('ActionOnFailedPrimaryWorkersValueValuesEnum', 1)
  cluster = _messages.MessageField('Cluster', 2)
  projectId = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)
  requestId = _messages.StringField(5)


class DataprocProjectsRegionsClustersDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersDeleteRequest object.

  Fields:
    clusterName: Required. The cluster name.
    clusterUuid: Optional. Specifying the cluster_uuid means the RPC should
      fail (with error NOT_FOUND) if cluster with specified UUID does not
      exist.
    gracefulTerminationTimeout: Optional. The graceful termination timeout for
      the deletion of the cluster. Indicate the time the request will wait to
      complete the running jobs on the cluster before its forceful deletion.
      Default value is 0 indicating that the user has not enabled the graceful
      termination. Value can be between 60 second and 6 Hours, in case the
      graceful termination is enabled. (There is no separate flag to check the
      enabling or disabling of graceful termination, it can be checked by the
      values in the field).
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two DeleteClusterRequest (https://cloud.google.com/datap
      roc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1
      .DeleteClusterRequest)s with the same id, then the second request will
      be ignored and the first google.longrunning.Operation created and stored
      in the backend is returned.It is recommended to always set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  clusterName = _messages.StringField(1, required=True)
  clusterUuid = _messages.StringField(2)
  gracefulTerminationTimeout = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)
  region = _messages.StringField(5, required=True)
  requestId = _messages.StringField(6)


class DataprocProjectsRegionsClustersDiagnoseRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersDiagnoseRequest object.

  Fields:
    clusterName: Required. The cluster name.
    diagnoseClusterRequest: A DiagnoseClusterRequest resource to be passed as
      the request body.
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  clusterName = _messages.StringField(1, required=True)
  diagnoseClusterRequest = _messages.MessageField('DiagnoseClusterRequest', 2)
  projectId = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)


class DataprocProjectsRegionsClustersGetClusterAsTemplateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersGetClusterAsTemplateRequest object.

  Fields:
    clusterName: Required. The cluster name.
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  clusterName = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)


class DataprocProjectsRegionsClustersGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsRegionsClustersGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersGetRequest object.

  Fields:
    clusterName: Required. The cluster name.
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  clusterName = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)


class DataprocProjectsRegionsClustersInjectCredentialsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersInjectCredentialsRequest object.

  Fields:
    cluster: Required. The cluster, in the form clusters/.
    injectCredentialsRequest: A InjectCredentialsRequest resource to be passed
      as the request body.
    project: Required. The ID of the Google Cloud Platform project the cluster
      belongs to, of the form projects/.
    region: Required. The region containing the cluster, of the form regions/.
  """

  cluster = _messages.StringField(1, required=True)
  injectCredentialsRequest = _messages.MessageField('InjectCredentialsRequest', 2)
  project = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)


class DataprocProjectsRegionsClustersListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersListRequest object.

  Fields:
    filter: Optional. A filter constraining the clusters to list. Filters are
      case-sensitive and have the following syntax:field = value AND field =
      value ...where field is one of status.state, clusterName, or
      labels.[KEY], and [KEY] is a label key. value can be * to match all
      values. status.state can be one of the following: ACTIVE, INACTIVE,
      CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, or STOPPED.
      ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE
      contains the DELETING, ERROR, STOPPING, and STOPPED states. clusterName
      is the name of the cluster provided at creation time. Only the logical
      AND operator is supported; space-separated items are treated as having
      an implicit AND operator.Example filter:status.state = ACTIVE AND
      clusterName = mycluster AND labels.env = staging AND labels.starred = *
    pageSize: Optional. The standard List page size.
    pageToken: Optional. The standard List page token.
    projectId: Required. The ID of the Google Cloud Platform project that the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)
  region = _messages.StringField(5, required=True)


class DataprocProjectsRegionsClustersNodeGroupsCreateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsCreateRequest object.

  Fields:
    nodeGroup: A NodeGroup resource to be passed as the request body.
    nodeGroupId: Optional. An optional node group ID. Generated if not
      specified.The ID must contain only letters (a-z, A-Z), numbers (0-9),
      underscores (_), and hyphens (-). Cannot begin or end with underscore or
      hyphen. Must consist of from 3 to 33 characters.
    parent: Required. The parent resource where this node group will be
      created. Format: projects/{project}/regions/{region}/clusters/{cluster}
    parentOperationId: Optional. operation id of the parent operation sending
      the create request
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two CreateNodeGroupRequest (https://cloud.google.com/dat
      aproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.
      v1.CreateNodeGroupRequest) with the same ID, the second request is
      ignored and the first google.longrunning.Operation created and stored in
      the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  nodeGroup = _messages.MessageField('NodeGroup', 1)
  nodeGroupId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  parentOperationId = _messages.StringField(4)
  requestId = _messages.StringField(5)


class DataprocProjectsRegionsClustersNodeGroupsDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsDeleteRequest object.

  Fields:
    name: Required. The name of the node group to delete. Format: projects/{pr
      oject}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two DeleteNodeGroupRequest (https://cloud.google.com/dat
      aproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.
      v1.DeleteNodeGroupRequests) with the same ID, the second request is
      ignored and the first google.longrunning.Operation created and stored in
      the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  name = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)


class DataprocProjectsRegionsClustersNodeGroupsGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsGetRequest object.

  Fields:
    name: Required. The name of the node group to retrieve. Format: projects/{
      project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsClustersNodeGroupsListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsListRequest object.

  Fields:
    pageSize: The maximum number of node groups to return. The service may
      return fewer than this value. If unspecified, at most 50 node groups are
      returned. The maximum value is 1000. Values greater than 1000 are forced
      to 1000.
    pageToken: A page token, received from a previous ListNodeGroups call.
      Provide this token to retrieve the subsequent page.When paginating, the
      other parameters provided to ListNodeGroups must match the call that
      provided the page token.
    parent: Required. The parent, which owns the collection of node groups.
      Format: projects/{project}/regions/{region}/clusters/{cluster}
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class DataprocProjectsRegionsClustersNodeGroupsRepairRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsRepairRequest object.

  Fields:
    name: Required. The name of the node group to resize. Format: projects/{pr
      oject}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
    repairNodeGroupRequest: A RepairNodeGroupRequest resource to be passed as
      the request body.
  """

  name = _messages.StringField(1, required=True)
  repairNodeGroupRequest = _messages.MessageField('RepairNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersNodeGroupsResizeRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsResizeRequest object.

  Fields:
    name: Required. The name of the node group to resize. Format: projects/{pr
      oject}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
    resizeNodeGroupRequest: A ResizeNodeGroupRequest resource to be passed as
      the request body.
  """

  name = _messages.StringField(1, required=True)
  resizeNodeGroupRequest = _messages.MessageField('ResizeNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersNodeGroupsStartRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsStartRequest object.

  Fields:
    name: Required. The name of the node group to start. Format: projects/{pro
      ject}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
    startNodeGroupRequest: A StartNodeGroupRequest resource to be passed as
      the request body.
  """

  name = _messages.StringField(1, required=True)
  startNodeGroupRequest = _messages.MessageField('StartNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersNodeGroupsStopRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsStopRequest object.

  Fields:
    name: Required. The name of the node group to stop. Format: projects/{proj
      ect}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}
    stopNodeGroupRequest: A StopNodeGroupRequest resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  stopNodeGroupRequest = _messages.MessageField('StopNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersNodeGroupsUpdateLabelsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsUpdateLabelsRequest object.

  Fields:
    name: Required. The name of the node group for updating the labels.
      Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroup
      s/{nodeGroup}
    updateLabelsNodeGroupRequest: A UpdateLabelsNodeGroupRequest resource to
      be passed as the request body.
  """

  name = _messages.StringField(1, required=True)
  updateLabelsNodeGroupRequest = _messages.MessageField('UpdateLabelsNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersNodeGroupsUpdateMetadataConfigRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersNodeGroupsUpdateMetadataConfigRequest
  object.

  Fields:
    name: Required. The name of the node group for updating the config.
      Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroup
      s/{nodeGroup}
    updateMetadataConfigNodeGroupRequest: A
      UpdateMetadataConfigNodeGroupRequest resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  updateMetadataConfigNodeGroupRequest = _messages.MessageField('UpdateMetadataConfigNodeGroupRequest', 2)


class DataprocProjectsRegionsClustersPatchRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersPatchRequest object.

  Fields:
    cluster: A Cluster resource to be passed as the request body.
    clusterName: Required. The cluster name.
    gracefulDecommissionTimeout: Optional. Timeout for graceful YARN
      decommissioning. Graceful decommissioning allows removing nodes from the
      cluster without interrupting jobs in progress. Timeout specifies how
      long to wait for jobs in progress to finish before forcefully removing
      nodes (and potentially interrupting jobs). Default timeout is 0 (for
      forceful decommission), and the maximum allowed timeout is 1 day. (see
      JSON representation of Duration (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2
      and higher.
    projectId: Required. The ID of the Google Cloud Platform project the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two UpdateClusterRequest (https://cloud.google.com/datap
      roc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1
      .UpdateClusterRequest)s with the same id, then the second request will
      be ignored and the first google.longrunning.Operation created and stored
      in the backend is returned.It is recommended to always set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    updateMask: Required. Specifies the path, relative to Cluster, of the
      field to update. For example, to change the number of workers in a
      cluster to 5, the update_mask parameter would be specified as
      config.worker_config.num_instances, and the PATCH request body would
      specify the new value, as follows: { "config":{ "workerConfig":{
      "numInstances":"5" } } } Similarly, to change the number of preemptible
      workers in a cluster to 5, the update_mask parameter would be
      config.secondary_worker_config.num_instances, and the PATCH request body
      would be set as follows: { "config":{ "secondaryWorkerConfig":{
      "numInstances":"5" } } } *Note:* Currently, only the following fields
      can be updated: *Mask* *Purpose* *labels* Update labels
      *config.worker_config.num_instances* Resize primary worker group
      *config.secondary_worker_config.num_instances* Resize secondary worker
      group config.autoscaling_config.policy_uri Use, stop using, or change
      autoscaling policies
  """

  cluster = _messages.MessageField('Cluster', 1)
  clusterName = _messages.StringField(2, required=True)
  gracefulDecommissionTimeout = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)
  region = _messages.StringField(5, required=True)
  requestId = _messages.StringField(6)
  updateMask = _messages.StringField(7)


class DataprocProjectsRegionsClustersRepairRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersRepairRequest object.

  Fields:
    clusterName: Required. The cluster name.
    projectId: Required. The ID of the Google Cloud Platform project the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    repairClusterRequest: A RepairClusterRequest resource to be passed as the
      request body.
  """

  clusterName = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)
  repairClusterRequest = _messages.MessageField('RepairClusterRequest', 4)


class DataprocProjectsRegionsClustersSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsRegionsClustersStartRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersStartRequest object.

  Fields:
    clusterName: Required. The cluster name.
    projectId: Required. The ID of the Google Cloud Platform project the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    startClusterRequest: A StartClusterRequest resource to be passed as the
      request body.
  """

  clusterName = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)
  startClusterRequest = _messages.MessageField('StartClusterRequest', 4)


class DataprocProjectsRegionsClustersStopRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersStopRequest object.

  Fields:
    clusterName: Required. The cluster name.
    projectId: Required. The ID of the Google Cloud Platform project the
      cluster belongs to.
    region: Required. The Dataproc region in which to handle the request.
    stopClusterRequest: A StopClusterRequest resource to be passed as the
      request body.
  """

  clusterName = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)
  stopClusterRequest = _messages.MessageField('StopClusterRequest', 4)


class DataprocProjectsRegionsClustersTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsClustersTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsRegionsJobsCancelRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsCancelRequest object.

  Fields:
    cancelJobRequest: A CancelJobRequest resource to be passed as the request
      body.
    jobId: Required. The job ID.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  cancelJobRequest = _messages.MessageField('CancelJobRequest', 1)
  jobId = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)


class DataprocProjectsRegionsJobsDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsDeleteRequest object.

  Fields:
    jobId: Required. The job ID.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)


class DataprocProjectsRegionsJobsGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsRegionsJobsGetJobAsTemplateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsGetJobAsTemplateRequest object.

  Fields:
    jobId: Required. The job ID.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)


class DataprocProjectsRegionsJobsGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsGetRequest object.

  Fields:
    jobId: Required. The job ID.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  region = _messages.StringField(3, required=True)


class DataprocProjectsRegionsJobsListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsListRequest object.

  Enums:
    JobStateMatcherValueValuesEnum: Optional. Specifies enumerated categories
      of jobs to list. (default = match ALL jobs).If filter is provided,
      jobStateMatcher will be ignored.

  Fields:
    clusterName: Optional. If set, the returned jobs list includes only jobs
      that were submitted to the named cluster.
    filter: Optional. A filter constraining the jobs to list. Filters are
      case-sensitive and have the following syntax:field = value AND field =
      value ...where field is status.state or labels.[KEY], and [KEY] is a
      label key. value can be * to match all values. status.state can be
      either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported;
      space-separated items are treated as having an implicit AND
      operator.Example filter:status.state = ACTIVE AND labels.env = staging
      AND labels.starred = *
    jobStateMatcher: Optional. Specifies enumerated categories of jobs to
      list. (default = match ALL jobs).If filter is provided, jobStateMatcher
      will be ignored.
    pageSize: Optional. The number of results to return in each response.
    pageToken: Optional. The page token, returned by a previous call, to
      request the next page of results.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
  """

  class JobStateMatcherValueValuesEnum(_messages.Enum):
    r"""Optional. Specifies enumerated categories of jobs to list. (default =
    match ALL jobs).If filter is provided, jobStateMatcher will be ignored.

    Values:
      ALL: Match all jobs, regardless of state.
      ACTIVE: Only match jobs in non-terminal states: PENDING, RUNNING, or
        CANCEL_PENDING.
      NON_ACTIVE: Only match jobs in terminal states: CANCELLED, DONE, or
        ERROR.
    """
    ALL = 0
    ACTIVE = 1
    NON_ACTIVE = 2

  clusterName = _messages.StringField(1)
  filter = _messages.StringField(2)
  jobStateMatcher = _messages.EnumField('JobStateMatcherValueValuesEnum', 3)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
  projectId = _messages.StringField(6, required=True)
  region = _messages.StringField(7, required=True)


class DataprocProjectsRegionsJobsPatchRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsPatchRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    jobId: Required. The job ID.
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
    updateMask: Required. Specifies the path, relative to Job, of the field to
      update. For example, to update the labels of a Job the update_mask
      parameter would be specified as labels, and the PATCH request body would
      specify the new value. *Note:* Currently, labels is the only field that
      can be updated.
  """

  job = _messages.MessageField('Job', 1)
  jobId = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)
  updateMask = _messages.StringField(5)


class DataprocProjectsRegionsJobsSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsRegionsJobsSubmitAsOperationRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsSubmitAsOperationRequest object.

  Fields:
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
    submitJobRequest: A SubmitJobRequest resource to be passed as the request
      body.
  """

  projectId = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)


class DataprocProjectsRegionsJobsSubmitRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsSubmitRequest object.

  Fields:
    projectId: Required. The ID of the Google Cloud Platform project that the
      job belongs to.
    region: Required. The Dataproc region in which to handle the request.
    submitJobRequest: A SubmitJobRequest resource to be passed as the request
      body.
  """

  projectId = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)


class DataprocProjectsRegionsJobsTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsJobsTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsRegionsOperationsCancelRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsCancelRequest object.

  Fields:
    name: The name of the operation resource to be cancelled.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsOperationsDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsDeleteRequest object.

  Fields:
    name: The name of the operation resource to be deleted.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsOperationsGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsRegionsOperationsGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  name = _messages.StringField(1, required=True)


class DataprocProjectsRegionsOperationsListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The name of the operation's parent resource.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
    returnPartialSuccess: When set to true, operations that are reachable are
      returned as normal, and those that are unreachable are returned in the
      ListOperationsResponse.unreachable field.This can only be true when
      reading across collections e.g. when parent is set to
      "projects/example/locations/-".This field is not by default supported
      and will result in an UNIMPLEMENTED error if set unless explicitly
      documented otherwise in service or product specific documentation.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  returnPartialSuccess = _messages.BooleanField(5)


class DataprocProjectsRegionsOperationsSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsRegionsOperationsTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsOperationsTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DataprocProjectsRegionsWorkflowTemplatesCreateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesCreateRequest object.

  Fields:
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.create, the resource name of the
      region has the following format: projects/{project_id}/regions/{region}
      For projects.locations.workflowTemplates.create, the resource name of
      the location has the following format:
      projects/{project_id}/locations/{location}
    workflowTemplate: A WorkflowTemplate resource to be passed as the request
      body.
  """

  parent = _messages.StringField(1, required=True)
  workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)


class DataprocProjectsRegionsWorkflowTemplatesDeleteRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesDeleteRequest object.

  Fields:
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.delete, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.instantiate, the resource name
      of the template has the following format: projects/{project_id}/location
      s/{location}/workflowTemplates/{template_id}
    version: Optional. The version of workflow template to delete. If
      specified, will only delete the template if the current server version
      matches specified version.
  """

  name = _messages.StringField(1, required=True)
  version = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)


class DataprocProjectsRegionsWorkflowTemplatesGetRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesGetRequest object.

  Fields:
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.get, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.get, the resource name of the
      template has the following format: projects/{project_id}/locations/{loca
      tion}/workflowTemplates/{template_id}
    version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved.If unspecified,
      retrieves the current version.
  """

  name = _messages.StringField(1, required=True)
  version = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest
  object.

  Fields:
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates,instantiateinline, the resource name
      of the region has the following format:
      projects/{project_id}/regions/{region} For
      projects.locations.workflowTemplates.instantiateinline, the resource
      name of the location has the following format:
      projects/{project_id}/locations/{location}
    requestId: Optional. A tag that prevents multiple concurrent workflow
      instances with the same tag from running. This mitigates risk of
      concurrent instances started due to retries.It is recommended to always
      set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    workflowTemplate: A WorkflowTemplate resource to be passed as the request
      body.
  """

  parent = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)
  workflowTemplate = _messages.MessageField('WorkflowTemplate', 3)


class DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest object.

  Fields:
    instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
      resource to be passed as the request body.
    name: Required. The resource name of the workflow template, as described
      in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates.instantiate, the resource name of the
      template has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates.instantiate, the resource name
      of the template has the following format: projects/{project_id}/location
      s/{location}/workflowTemplates/{template_id}
  """

  instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
  name = _messages.StringField(2, required=True)


class DataprocProjectsRegionsWorkflowTemplatesListRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesListRequest object.

  Fields:
    pageSize: Optional. The maximum number of results to return in each
      response.
    pageToken: Optional. The page token, returned by a previous call, to
      request the next page of results.
    parent: Required. The resource name of the region or location, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates,list, the resource name of the region
      has the following format: projects/{project_id}/regions/{region} For
      projects.locations.workflowTemplates.list, the resource name of the
      location has the following format:
      projects/{project_id}/locations/{location}
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See Resource names (https://cloud.google.com/apis/design/resource_names)
      for the appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)


class DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
  r"""A DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See Resource names
      (https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)


class DiagnoseClusterRequest(_messages.Message):
  r"""A request to collect cluster diagnostic information.

  Enums:
    TarballAccessValueValuesEnum: Optional. (Optional) The access type to the
      diagnostic tarball. If not specified, falls back to default access of
      the bucket

  Fields:
    diagnosisInterval: Optional. Time interval in which diagnosis should be
      carried out on the cluster.
    job: Optional. DEPRECATED Specifies the job on which diagnosis is to be
      performed. Format: projects/{project}/regions/{region}/jobs/{job}
    jobs: Optional. Specifies a list of jobs on which diagnosis is to be
      performed. Format: projects/{project}/regions/{region}/jobs/{job}
    tarballAccess: Optional. (Optional) The access type to the diagnostic
      tarball. If not specified, falls back to default access of the bucket
    tarballGcsDir: Optional. (Optional) The output Cloud Storage directory for
      the diagnostic tarball. If not specified, a task-specific directory in
      the cluster's staging bucket will be used.
    workers: Optional. A list of workers in the cluster to run the diagnostic
      script on.
    yarnApplicationId: Optional. DEPRECATED Specifies the yarn application on
      which diagnosis is to be performed.
    yarnApplicationIds: Optional. Specifies a list of yarn applications on
      which diagnosis is to be performed.
  """

  class TarballAccessValueValuesEnum(_messages.Enum):
    r"""Optional. (Optional) The access type to the diagnostic tarball. If not
    specified, falls back to default access of the bucket

    Values:
      TARBALL_ACCESS_UNSPECIFIED: Tarball Access unspecified. Falls back to
        default access of the bucket
      GOOGLE_CLOUD_SUPPORT: Google Cloud Support group has read access to the
        diagnostic tarball
      GOOGLE_DATAPROC_DIAGNOSE: Google Cloud Dataproc Diagnose service account
        has read access to the diagnostic tarball
    """
    TARBALL_ACCESS_UNSPECIFIED = 0
    GOOGLE_CLOUD_SUPPORT = 1
    GOOGLE_DATAPROC_DIAGNOSE = 2

  diagnosisInterval = _messages.MessageField('Interval', 1)
  job = _messages.StringField(2)
  jobs = _messages.StringField(3, repeated=True)
  tarballAccess = _messages.EnumField('TarballAccessValueValuesEnum', 4)
  tarballGcsDir = _messages.StringField(5)
  workers = _messages.StringField(6, repeated=True)
  yarnApplicationId = _messages.StringField(7)
  yarnApplicationIds = _messages.StringField(8, repeated=True)


class DiagnoseClusterResults(_messages.Message):
  r"""The location of diagnostic output.

  Fields:
    outputUri: Output only. The Cloud Storage URI of the diagnostic output.
      The output report is a plain text file with a summary of collected
      diagnostics.
  """

  outputUri = _messages.StringField(1)


class DiskConfig(_messages.Message):
  r"""Specifies the config of boot disk and attached disk options for a group
  of VM instances.

  Fields:
    attachedDiskConfigs: Optional. A list of attached disk configs for a group
      of VM instances.
    bootDiskProvisionedIops: Optional. Indicates how many IOPS to provision
      for the disk. This sets the number of I/O operations per second that the
      disk can handle. This field is supported only if boot_disk_type is
      hyperdisk-balanced.
    bootDiskProvisionedThroughput: Optional. Indicates how much throughput to
      provision for the disk. This sets the number of throughput mb per second
      that the disk can handle. Values must be greater than or equal to 1.
      This field is supported only if boot_disk_type is hyperdisk-balanced.
    bootDiskSizeGb: Optional. Size in GB of the boot disk (default is 500GB).
    bootDiskType: Optional. Type of the boot disk (default is "pd-standard").
      Valid values: "pd-balanced" (Persistent Disk Balanced Solid State
      Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard"
      (Persistent Disk Hard Disk Drive). See Disk types
      (https://cloud.google.com/compute/docs/disks#disk-types).
    localSsdInterface: Optional. Interface type of local SSDs (default is
      "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme"
      (Non-Volatile Memory Express). See local SSD performance
      (https://cloud.google.com/compute/docs/disks/local-ssd#performance).
    numLocalSsds: Optional. Number of attached SSDs, from 0 to 8 (default is
      0). If SSDs are not attached, the boot disk is used to store runtime
      logs and HDFS
      (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If
      one or more SSDs are attached, this runtime bulk data is spread across
      them, and the boot disk contains only basic config and installed
      binaries.Note: Local SSD options may vary by machine type and number of
      vCPUs selected.
  """

  attachedDiskConfigs = _messages.MessageField('AttachedDiskConfig', 1, repeated=True)
  bootDiskProvisionedIops = _messages.IntegerField(2)
  bootDiskProvisionedThroughput = _messages.IntegerField(3)
  bootDiskSizeGb = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  bootDiskType = _messages.StringField(5)
  localSsdInterface = _messages.StringField(6)
  numLocalSsds = _messages.IntegerField(7, variant=_messages.Variant.INT32)


class DriverRunner(_messages.Message):
  r"""Driver runner configuration.

  Fields:
    masterDriverRunner: Optional. (default) Run the driver on the master node.
    yarnDriverRunner: Optional. Run the driver on worker nodes using YARN.
  """

  masterDriverRunner = _messages.MessageField('MasterDriverRunner', 1)
  yarnDriverRunner = _messages.MessageField('YarnDriverRunner', 2)


class DriverSchedulingConfig(_messages.Message):
  r"""Driver scheduling configuration.

  Fields:
    memoryMb: Required. The amount of memory in MB the driver is requesting.
    vcores: Required. The number of vCPUs the driver is requesting.
  """

  memoryMb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  vcores = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance: service Foo { rpc
  Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
  """



class EncryptionConfig(_messages.Message):
  r"""Encryption settings for the cluster.

  Fields:
    gcePdKmsKeyName: Optional. The Cloud KMS key resource name to use for
      persistent disk encryption for all instances in the cluster. See Use
      CMEK with cluster data
      (https://cloud.google.com//dataproc/docs/concepts/configuring-
      clusters/customer-managed-encryption#use_cmek_with_cluster_data) for
      more information.
    kmsKey: Optional. The Cloud KMS key resource name to use for cluster
      persistent disk and job argument encryption. See Use CMEK with cluster
      data (https://cloud.google.com//dataproc/docs/concepts/configuring-
      clusters/customer-managed-encryption#use_cmek_with_cluster_data) for
      more information.When this key resource name is provided, the following
      job arguments of the following job types submitted to the cluster are
      encrypted using CMEK: FlinkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
      HadoopJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
      SparkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
      SparkRJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
      PySparkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
      SparkSqlJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
      scriptVariables and queryList.queries HiveJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
      scriptVariables and queryList.queries PigJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
      scriptVariables and queryList.queries PrestoJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
      scriptVariables and queryList.queries
  """

  gcePdKmsKeyName = _messages.StringField(1)
  kmsKey = _messages.StringField(2)


class EndpointConfig(_messages.Message):
  r"""Endpoint config for this cluster

  Messages:
    HttpPortsValue: Output only. The map of port descriptions to URLs. Will
      only be populated if enable_http_port_access is true.

  Fields:
    enableHttpPortAccess: Optional. If true, enable http access to specific
      ports on the cluster from external sources. Defaults to false.
    httpPorts: Output only. The map of port descriptions to URLs. Will only be
      populated if enable_http_port_access is true.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HttpPortsValue(_messages.Message):
    r"""Output only. The map of port descriptions to URLs. Will only be
    populated if enable_http_port_access is true.

    Messages:
      AdditionalProperty: An additional property for a HttpPortsValue object.

    Fields:
      additionalProperties: Additional properties of type HttpPortsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HttpPortsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  enableHttpPortAccess = _messages.BooleanField(1)
  httpPorts = _messages.MessageField('HttpPortsValue', 2)


class EnvironmentConfig(_messages.Message):
  r"""Environment configuration for a workload.

  Fields:
    executionConfig: Optional. Execution configuration for a workload.
    peripheralsConfig: Optional. Peripherals configuration that workload has
      access to.
  """

  executionConfig = _messages.MessageField('ExecutionConfig', 1)
  peripheralsConfig = _messages.MessageField('PeripheralsConfig', 2)


class EphemeralMetastoreConfig(_messages.Message):
  r"""Default Metastore configuration for the workload."""


class ExecutionConfig(_messages.Message):
  r"""Execution configuration for a workload.

  Fields:
    authenticationConfig: Optional. Authentication configuration used to set
      the default identity for the workload execution. The config specifies
      the type of identity (service account or user) that will be used by
      workloads to access resources on the project(s).
    idleTtl: Optional. Applies to sessions only. The duration to keep the
      session alive while it's idling. Exceeding this threshold causes the
      session to terminate. This field cannot be set on a batch workload.
      Minimum value is 10 minutes; maximum value is 14 days (see JSON
      representation of Duration (https://developers.google.com/protocol-
      buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl
      and idle_ttl are specified for an interactive session, the conditions
      are treated as OR conditions: the workload will be terminated when it
      has been idle for idle_ttl or when ttl has been exceeded, whichever
      occurs first.
    kmsKey: Optional. The Cloud KMS key to use for encryption.
    networkTags: Optional. Tags used for network traffic control.
    networkUri: Optional. Network URI to connect workload to.
    serviceAccount: Optional. Service account that used to execute workload.
    stagingBucket: Optional. A Cloud Storage bucket used to stage workload
      dependencies, config files, and store workload output and other
      ephemeral data, such as Spark history files. If you do not specify a
      staging bucket, Cloud Dataproc will determine a Cloud Storage location
      according to the region where your workload is running, and then create
      and manage project-level, per-location staging and temporary buckets.
      This field requires a Cloud Storage bucket name, not a gs://... URI to a
      Cloud Storage bucket.
    subnetworkUri: Optional. Subnetwork URI to connect workload to.
    ttl: Optional. The duration after which the workload will be terminated,
      specified as the JSON representation for Duration
      (https://protobuf.dev/programming-guides/proto3/#json). When the
      workload exceeds this duration, it will be unconditionally terminated
      without waiting for ongoing work to finish. If ttl is not specified for
      a batch workload, the workload will be allowed to run until it exits
      naturally (or run forever without exiting). If ttl is not specified for
      an interactive session, it defaults to 24 hours. If ttl is not specified
      for a batch that uses 2.1+ runtime version, it defaults to 4 hours.
      Minimum value is 10 minutes; maximum value is 14 days. If both ttl and
      idle_ttl are specified (for an interactive session), the conditions are
      treated as OR conditions: the workload will be terminated when it has
      been idle for idle_ttl or when ttl has been exceeded, whichever occurs
      first.
  """

  authenticationConfig = _messages.MessageField('AuthenticationConfig', 1)
  idleTtl = _messages.StringField(2)
  kmsKey = _messages.StringField(3)
  networkTags = _messages.StringField(4, repeated=True)
  networkUri = _messages.StringField(5)
  serviceAccount = _messages.StringField(6)
  stagingBucket = _messages.StringField(7)
  subnetworkUri = _messages.StringField(8)
  ttl = _messages.StringField(9)


class ExecutorMetrics(_messages.Message):
  r"""A ExecutorMetrics object.

  Messages:
    MetricsValue: A MetricsValue object.

  Fields:
    metrics: A MetricsValue attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetricsValue(_messages.Message):
    r"""A MetricsValue object.

    Messages:
      AdditionalProperty: An additional property for a MetricsValue object.

    Fields:
      additionalProperties: Additional properties of type MetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  metrics = _messages.MessageField('MetricsValue', 1)


class ExecutorMetricsDistributions(_messages.Message):
  r"""A ExecutorMetricsDistributions object.

  Fields:
    diskBytesSpilled: A number attribute.
    failedTasks: A number attribute.
    inputBytes: A number attribute.
    inputRecords: A number attribute.
    killedTasks: A number attribute.
    memoryBytesSpilled: A number attribute.
    outputBytes: A number attribute.
    outputRecords: A number attribute.
    peakMemoryMetrics: A ExecutorPeakMetricsDistributions attribute.
    quantiles: A number attribute.
    shuffleRead: A number attribute.
    shuffleReadRecords: A number attribute.
    shuffleWrite: A number attribute.
    shuffleWriteRecords: A number attribute.
    succeededTasks: A number attribute.
    taskTimeMillis: A number attribute.
  """

  diskBytesSpilled = _messages.FloatField(1, repeated=True)
  failedTasks = _messages.FloatField(2, repeated=True)
  inputBytes = _messages.FloatField(3, repeated=True)
  inputRecords = _messages.FloatField(4, repeated=True)
  killedTasks = _messages.FloatField(5, repeated=True)
  memoryBytesSpilled = _messages.FloatField(6, repeated=True)
  outputBytes = _messages.FloatField(7, repeated=True)
  outputRecords = _messages.FloatField(8, repeated=True)
  peakMemoryMetrics = _messages.MessageField('ExecutorPeakMetricsDistributions', 9)
  quantiles = _messages.FloatField(10, repeated=True)
  shuffleRead = _messages.FloatField(11, repeated=True)
  shuffleReadRecords = _messages.FloatField(12, repeated=True)
  shuffleWrite = _messages.FloatField(13, repeated=True)
  shuffleWriteRecords = _messages.FloatField(14, repeated=True)
  succeededTasks = _messages.FloatField(15, repeated=True)
  taskTimeMillis = _messages.FloatField(16, repeated=True)


class ExecutorPeakMetricsDistributions(_messages.Message):
  r"""A ExecutorPeakMetricsDistributions object.

  Fields:
    executorMetrics: A ExecutorMetrics attribute.
    quantiles: A number attribute.
  """

  executorMetrics = _messages.MessageField('ExecutorMetrics', 1, repeated=True)
  quantiles = _messages.FloatField(2, repeated=True)


class ExecutorResourceRequest(_messages.Message):
  r"""Resources used per executor used by the application.

  Fields:
    amount: A string attribute.
    discoveryScript: A string attribute.
    resourceName: A string attribute.
    vendor: A string attribute.
  """

  amount = _messages.IntegerField(1)
  discoveryScript = _messages.StringField(2)
  resourceName = _messages.StringField(3)
  vendor = _messages.StringField(4)


class ExecutorStageSummary(_messages.Message):
  r"""Executor resources consumed by a stage.

  Fields:
    diskBytesSpilled: A string attribute.
    executorId: A string attribute.
    failedTasks: A integer attribute.
    inputBytes: A string attribute.
    inputRecords: A string attribute.
    isExcludedForStage: A boolean attribute.
    killedTasks: A integer attribute.
    memoryBytesSpilled: A string attribute.
    outputBytes: A string attribute.
    outputRecords: A string attribute.
    peakMemoryMetrics: A ExecutorMetrics attribute.
    shuffleRead: A string attribute.
    shuffleReadRecords: A string attribute.
    shuffleWrite: A string attribute.
    shuffleWriteRecords: A string attribute.
    stageAttemptId: A integer attribute.
    stageId: A string attribute.
    succeededTasks: A integer attribute.
    taskTimeMillis: A string attribute.
  """

  diskBytesSpilled = _messages.IntegerField(1)
  executorId = _messages.StringField(2)
  failedTasks = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  inputBytes = _messages.IntegerField(4)
  inputRecords = _messages.IntegerField(5)
  isExcludedForStage = _messages.BooleanField(6)
  killedTasks = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  memoryBytesSpilled = _messages.IntegerField(8)
  outputBytes = _messages.IntegerField(9)
  outputRecords = _messages.IntegerField(10)
  peakMemoryMetrics = _messages.MessageField('ExecutorMetrics', 11)
  shuffleRead = _messages.IntegerField(12)
  shuffleReadRecords = _messages.IntegerField(13)
  shuffleWrite = _messages.IntegerField(14)
  shuffleWriteRecords = _messages.IntegerField(15)
  stageAttemptId = _messages.IntegerField(16, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(17)
  succeededTasks = _messages.IntegerField(18, variant=_messages.Variant.INT32)
  taskTimeMillis = _messages.IntegerField(19)


class ExecutorSummary(_messages.Message):
  r"""Details about executors used by the application.

  Messages:
    AttributesValue: A AttributesValue object.
    ExecutorLogsValue: A ExecutorLogsValue object.
    ResourcesValue: A ResourcesValue object.

  Fields:
    activeTasks: A integer attribute.
    addTime: A string attribute.
    attributes: A AttributesValue attribute.
    completedTasks: A integer attribute.
    diskUsed: A string attribute.
    excludedInStages: A string attribute.
    executorId: A string attribute.
    executorLogs: A ExecutorLogsValue attribute.
    failedTasks: A integer attribute.
    hostPort: A string attribute.
    isActive: A boolean attribute.
    isExcluded: A boolean attribute.
    maxMemory: A string attribute.
    maxTasks: A integer attribute.
    memoryMetrics: A MemoryMetrics attribute.
    memoryUsed: A string attribute.
    peakMemoryMetrics: A ExecutorMetrics attribute.
    rddBlocks: A integer attribute.
    removeReason: A string attribute.
    removeTime: A string attribute.
    resourceProfileId: A integer attribute.
    resources: A ResourcesValue attribute.
    totalCores: A integer attribute.
    totalDurationMillis: A string attribute.
    totalGcTimeMillis: A string attribute.
    totalInputBytes: A string attribute.
    totalShuffleRead: A string attribute.
    totalShuffleWrite: A string attribute.
    totalTasks: A integer attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class AttributesValue(_messages.Message):
    r"""A AttributesValue object.

    Messages:
      AdditionalProperty: An additional property for a AttributesValue object.

    Fields:
      additionalProperties: Additional properties of type AttributesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AttributesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ExecutorLogsValue(_messages.Message):
    r"""A ExecutorLogsValue object.

    Messages:
      AdditionalProperty: An additional property for a ExecutorLogsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ExecutorLogsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ExecutorLogsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResourcesValue(_messages.Message):
    r"""A ResourcesValue object.

    Messages:
      AdditionalProperty: An additional property for a ResourcesValue object.

    Fields:
      additionalProperties: Additional properties of type ResourcesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResourcesValue object.

      Fields:
        key: Name of the additional property.
        value: A ResourceInformation attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ResourceInformation', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  activeTasks = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  addTime = _messages.StringField(2)
  attributes = _messages.MessageField('AttributesValue', 3)
  completedTasks = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  diskUsed = _messages.IntegerField(5)
  excludedInStages = _messages.IntegerField(6, repeated=True)
  executorId = _messages.StringField(7)
  executorLogs = _messages.MessageField('ExecutorLogsValue', 8)
  failedTasks = _messages.IntegerField(9, variant=_messages.Variant.INT32)
  hostPort = _messages.StringField(10)
  isActive = _messages.BooleanField(11)
  isExcluded = _messages.BooleanField(12)
  maxMemory = _messages.IntegerField(13)
  maxTasks = _messages.IntegerField(14, variant=_messages.Variant.INT32)
  memoryMetrics = _messages.MessageField('MemoryMetrics', 15)
  memoryUsed = _messages.IntegerField(16)
  peakMemoryMetrics = _messages.MessageField('ExecutorMetrics', 17)
  rddBlocks = _messages.IntegerField(18, variant=_messages.Variant.INT32)
  removeReason = _messages.StringField(19)
  removeTime = _messages.StringField(20)
  resourceProfileId = _messages.IntegerField(21, variant=_messages.Variant.INT32)
  resources = _messages.MessageField('ResourcesValue', 22)
  totalCores = _messages.IntegerField(23, variant=_messages.Variant.INT32)
  totalDurationMillis = _messages.IntegerField(24)
  totalGcTimeMillis = _messages.IntegerField(25)
  totalInputBytes = _messages.IntegerField(26)
  totalShuffleRead = _messages.IntegerField(27)
  totalShuffleWrite = _messages.IntegerField(28)
  totalTasks = _messages.IntegerField(29, variant=_messages.Variant.INT32)


class Expr(_messages.Message):
  r"""Represents a textual expression in the Common Expression Language (CEL)
  syntax. CEL is a C-like expression language. The syntax and semantics of CEL
  are documented at https://github.com/google/cel-spec.Example (Comparison):
  title: "Summary size limit" description: "Determines if a summary is less
  than 100 chars" expression: "document.summary.size() < 100" Example
  (Equality): title: "Requestor is owner" description: "Determines if
  requestor is the document owner" expression: "document.owner ==
  request.auth.claims.email" Example (Logic): title: "Public documents"
  description: "Determine whether the document should be publicly visible"
  expression: "document.type != 'private' && document.type != 'internal'"
  Example (Data Manipulation): title: "Notification string" description:
  "Create a notification string with a timestamp." expression: "'New message
  received at ' + string(document.create_time)" The exact variables and
  functions that may be referenced within an expression are determined by the
  service that evaluates it. See the service documentation for additional
  information.

  Fields:
    description: Optional. Description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax.
    location: Optional. String indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: Optional. Title for the expression, i.e. a short string describing
      its purpose. This can be used e.g. in UIs which allow to enter the
      expression.
  """

  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)


class FallbackReason(_messages.Message):
  r"""Native SQL Execution Data

  Fields:
    fallbackNode: Optional. Fallback node information.
    fallbackReason: Optional. Fallback to Spark reason.
  """

  fallbackNode = _messages.StringField(1)
  fallbackReason = _messages.StringField(2)


class FlinkJob(_messages.Message):
  r"""A Dataproc job for running Apache Flink applications on YARN.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure Flink. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/flink/conf/flink-defaults.conf and classes in user code.

  Fields:
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as --conf, that can be set as job properties, since a
      collision might occur that causes an incorrect job submission.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
      the Flink driver and tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainClass: The name of the driver's main class. The jar file that contains
      the class must be in the default CLASSPATH or specified in jarFileUris.
    mainJarFileUri: The HCFS URI of the jar file that contains the main class.
    properties: Optional. A mapping of property names to values, used to
      configure Flink. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/flink/conf/flink-defaults.conf and classes in user code.
    savepointUri: Optional. HCFS URI of the savepoint, which contains the last
      saved progress for starting the current job.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    Flink. Properties that conflict with values set by the Dataproc API might
    be overwritten. Can include properties set in /etc/flink/conf/flink-
    defaults.conf and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  args = _messages.StringField(1, repeated=True)
  jarFileUris = _messages.StringField(2, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 3)
  mainClass = _messages.StringField(4)
  mainJarFileUri = _messages.StringField(5)
  properties = _messages.MessageField('PropertiesValue', 6)
  savepointUri = _messages.StringField(7)


class GceClusterConfig(_messages.Message):
  r"""Common config settings for resources of Compute Engine cluster
  instances, applicable to all instances in the cluster.

  Enums:
    PrivateIpv6GoogleAccessValueValuesEnum: Optional. The type of IPv6 access
      for a cluster.

  Messages:
    MetadataValue: Optional. The Compute Engine metadata entries to add to all
      instances (see Project and instance metadata
      (https://cloud.google.com/compute/docs/storing-retrieving-
      metadata#project_and_instance_metadata)).
    ResourceManagerTagsValue: Optional. Resource manager tags
      (https://cloud.google.com/resource-manager/docs/tags/tags-creating-and-
      managing) to add to all instances (see Use secure tags in Dataproc
      (https://cloud.google.com/dataproc/docs/guides/use-secure-tags)).

  Fields:
    autoZoneExcludeZoneUris: Optional. An optional list of Compute Engine
      zones where the Dataproc cluster will not be located when Auto Zone is
      enabled. Only one of zone_uri or auto_zone_exclude_zone_uris can be set.
      If both are omitted, the service will pick a zone in the cluster Compute
      Engine region. If auto_zone_exclude_zone_uris is set and there are at
      least two zones in the Compute Engine region that are not included in
      the auto_zone_exclude_zone_uris, the service will pick one of those
      zones.A full URL, partial URI, or short name are valid. Examples:
      https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
      projects/[project_id]/zones/[zone] [zone]
    confidentialInstanceConfig: Optional. Confidential Instance Config for
      clusters using Confidential VMs
      (https://cloud.google.com/compute/confidential-vm/docs).
    internalIpOnly: Optional. This setting applies to subnetwork-enabled
      networks. It is set to true by default in clusters created with image
      versions 2.2.x.When set to true: All cluster VMs have internal IP
      addresses. Google Private Access
      (https://cloud.google.com/vpc/docs/private-google-access) must be
      enabled to access Dataproc and other Google Cloud APIs. Off-cluster
      dependencies must be configured to be accessible without external IP
      addresses.When set to false: Cluster VMs are not restricted to internal
      IP addresses. Ephemeral external IP addresses are assigned to each
      cluster VM.
    metadata: Optional. The Compute Engine metadata entries to add to all
      instances (see Project and instance metadata
      (https://cloud.google.com/compute/docs/storing-retrieving-
      metadata#project_and_instance_metadata)).
    networkUri: Optional. The Compute Engine network to be used for machine
      communications. Cannot be specified with subnetwork_uri. If neither
      network_uri nor subnetwork_uri is specified, the "default" network of
      the project is used, if it exists. Cannot be a "Custom Subnet Network"
      (see Using Subnetworks
      (https://cloud.google.com/compute/docs/subnetworks) for more
      information).A full URL, partial URI, or short name are valid. Examples:
      https://www.googleapis.com/compute/v1/projects/[project_id]/global/netwo
      rks/default projects/[project_id]/global/networks/default default
    nodeGroupAffinity: Optional. Node Group Affinity for sole-tenant clusters.
    privateIpv6GoogleAccess: Optional. The type of IPv6 access for a cluster.
    reservationAffinity: Optional. Reservation Affinity for consuming Zonal
      reservation.
    resourceManagerTags: Optional. Resource manager tags
      (https://cloud.google.com/resource-manager/docs/tags/tags-creating-and-
      managing) to add to all instances (see Use secure tags in Dataproc
      (https://cloud.google.com/dataproc/docs/guides/use-secure-tags)).
    serviceAccount: Optional. The Dataproc service account
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/service-accounts#service_accounts_in_dataproc) (also see VM
      Data Plane identity
      (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-
      principals#vm_service_account_data_plane_identity)) used by Dataproc
      cluster VM instances to access Google Cloud Platform services.If not
      specified, the Compute Engine default service account
      (https://cloud.google.com/compute/docs/access/service-
      accounts#default_service_account) is used.
    serviceAccountScopes: Optional. The URIs of service account scopes to be
      included in Compute Engine instances. The following base set of scopes
      is always included:
      https://www.googleapis.com/auth/cloud.useraccounts.readonly
      https://www.googleapis.com/auth/devstorage.read_write
      https://www.googleapis.com/auth/logging.writeIf no scopes are specified,
      the following defaults are also provided:
      https://www.googleapis.com/auth/bigquery
      https://www.googleapis.com/auth/bigtable.admin.table
      https://www.googleapis.com/auth/bigtable.data
      https://www.googleapis.com/auth/devstorage.full_control
    shieldedInstanceConfig: Optional. Shielded Instance Config for clusters
      using Compute Engine Shielded VMs
      (https://cloud.google.com/security/shielded-cloud/shielded-vm).
    subnetworkUri: Optional. The Compute Engine subnetwork to be used for
      machine communications. Cannot be specified with network_uri.A full URL,
      partial URI, or short name are valid. Examples: https://www.googleapis.c
      om/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0
      projects/[project_id]/regions/[region]/subnetworks/sub0 sub0
    tags: The Compute Engine network tags to add to all instances (see Tagging
      instances (https://cloud.google.com/vpc/docs/add-remove-network-tags)).
    zoneUri: Optional. The Compute Engine zone where the Dataproc cluster will
      be located. If omitted, the service will pick a zone in the cluster's
      Compute Engine region. On a get request, zone will always be present.A
      full URL, partial URI, or short name are valid. Examples:
      https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
      projects/[project_id]/zones/[zone] [zone]
  """

  class PrivateIpv6GoogleAccessValueValuesEnum(_messages.Enum):
    r"""Optional. The type of IPv6 access for a cluster.

    Values:
      PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED: If unspecified, Compute Engine
        default behavior will apply, which is the same as
        INHERIT_FROM_SUBNETWORK.
      INHERIT_FROM_SUBNETWORK: Private access to and from Google Services
        configuration inherited from the subnetwork configuration. This is the
        default Compute Engine behavior.
      OUTBOUND: Enables outbound private IPv6 access to Google Services from
        the Dataproc cluster.
      BIDIRECTIONAL: Enables bidirectional private IPv6 access between Google
        Services and the Dataproc cluster.
    """
    PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0
    INHERIT_FROM_SUBNETWORK = 1
    OUTBOUND = 2
    BIDIRECTIONAL = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Optional. The Compute Engine metadata entries to add to all instances
    (see Project and instance metadata
    (https://cloud.google.com/compute/docs/storing-retrieving-
    metadata#project_and_instance_metadata)).

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResourceManagerTagsValue(_messages.Message):
    r"""Optional. Resource manager tags (https://cloud.google.com/resource-
    manager/docs/tags/tags-creating-and-managing) to add to all instances (see
    Use secure tags in Dataproc
    (https://cloud.google.com/dataproc/docs/guides/use-secure-tags)).

    Messages:
      AdditionalProperty: An additional property for a
        ResourceManagerTagsValue object.

    Fields:
      additionalProperties: Additional properties of type
        ResourceManagerTagsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResourceManagerTagsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  autoZoneExcludeZoneUris = _messages.StringField(1, repeated=True)
  confidentialInstanceConfig = _messages.MessageField('ConfidentialInstanceConfig', 2)
  internalIpOnly = _messages.BooleanField(3)
  metadata = _messages.MessageField('MetadataValue', 4)
  networkUri = _messages.StringField(5)
  nodeGroupAffinity = _messages.MessageField('NodeGroupAffinity', 6)
  privateIpv6GoogleAccess = _messages.EnumField('PrivateIpv6GoogleAccessValueValuesEnum', 7)
  reservationAffinity = _messages.MessageField('ReservationAffinity', 8)
  resourceManagerTags = _messages.MessageField('ResourceManagerTagsValue', 9)
  serviceAccount = _messages.StringField(10)
  serviceAccountScopes = _messages.StringField(11, repeated=True)
  shieldedInstanceConfig = _messages.MessageField('ShieldedInstanceConfig', 12)
  subnetworkUri = _messages.StringField(13)
  tags = _messages.StringField(14, repeated=True)
  zoneUri = _messages.StringField(15)


class GdceClusterConfig(_messages.Message):
  r"""The target GDCE cluster config.

  Fields:
    gdcEdgeIdentityProvider: Optional. The name of the identity provider
      associated with the GDCE cluster.
    gdcEdgeMembershipTarget: Optional. A target GDCE cluster to deploy to. It
      must be in the same project and region as the Dataproc cluster'. Format:
      'projects/{project}/locations/{location}/clusters/{cluster_id}'
    gdcEdgeWorkloadIdentityPool: Optional. The workload identity pool
      associated with the fleet.
  """

  gdcEdgeIdentityProvider = _messages.StringField(1)
  gdcEdgeMembershipTarget = _messages.StringField(2)
  gdcEdgeWorkloadIdentityPool = _messages.StringField(3)


class GetIamPolicyRequest(_messages.Message):
  r"""Request message for GetIamPolicy method.

  Fields:
    options: OPTIONAL: A GetPolicyOptions object for specifying options to
      GetIamPolicy.
  """

  options = _messages.MessageField('GetPolicyOptions', 1)


class GetPolicyOptions(_messages.Message):
  r"""Encapsulates settings provided to GetIamPolicy.

  Fields:
    requestedPolicyVersion: Optional. The maximum policy version that will be
      used to format the policy.Valid values are 0, 1, and 3. Requests
      specifying an invalid value will be rejected.Requests for policies with
      any conditional role bindings must specify version 3. Policies with no
      conditional role bindings may specify any valid value or leave the field
      unset.The policy in the response might use the policy version that you
      specified, or it might use a lower policy version. For example, if you
      specify version 3, but the policy has no conditional role bindings, the
      response uses version 1.To learn which resources support conditions in
      their IAM policies, see the IAM documentation
      (https://cloud.google.com/iam/help/conditions/resource-policies).
  """

  requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)


class GkeClusterConfig(_messages.Message):
  r"""The cluster's GKE config.

  Fields:
    gkeClusterTarget: Optional. A target GKE cluster to deploy to. It must be
      in the same project and region as the Dataproc cluster (the GKE cluster
      can be zonal or regional). Format:
      'projects/{project}/locations/{location}/clusters/{cluster_id}'
    namespacedGkeDeploymentTarget: Optional. Deprecated. Use gkeClusterTarget.
      Used only for the deprecated beta. A target for the deployment.
    nodePoolTarget: Optional. GKE node pools where workloads will be
      scheduled. At least one node pool must be assigned the DEFAULT
      GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified,
      Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given
      to only one GkeNodePoolTarget. All node pools must have the same
      location settings.
  """

  gkeClusterTarget = _messages.StringField(1)
  namespacedGkeDeploymentTarget = _messages.MessageField('NamespacedGkeDeploymentTarget', 2)
  nodePoolTarget = _messages.MessageField('GkeNodePoolTarget', 3, repeated=True)


class GkeEphemeralStorageConfig(_messages.Message):
  r"""GkeEphemeralStorageConfig contains configuration for the ephemeral
  storage filesystem.

  Fields:
    localSsdCount: Number of local SSDs to use to back ephemeral storage. Uses
      NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to
      disable using local SSDs as ephemeral storage.
  """

  localSsdCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)


class GkeNodeConfig(_messages.Message):
  r"""Parameters that describe cluster nodes.

  Fields:
    accelerators: Optional. A list of hardware accelerators
      (https://cloud.google.com/compute/docs/gpus) to attach to each node.
    bootDiskKmsKey: Optional. The Customer Managed Encryption Key (CMEK)
      (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used
      to encrypt the boot disk attached to each node in the node pool. Specify
      the key using the following format: projects/{project}/locations/{locati
      on}/keyRings/{key_ring}/cryptoKeys/{crypto_key}
    ephemeralStorageConfig: Optional. Parameters for the ephemeral storage
      filesystem. If unspecified, ephemeral storage is backed by the boot
      disk.
    localSsdCount: Optional. The number of local SSD disks to attach to the
      node, which is limited by the maximum number of disks allowable per zone
      (see Adding Local SSDs
      (https://cloud.google.com/compute/docs/disks/local-ssd)).
    machineType: Optional. The name of a Compute Engine machine type
      (https://cloud.google.com/compute/docs/machine-types).
    minCpuPlatform: Optional. Minimum CPU platform
      (https://cloud.google.com/compute/docs/instances/specify-min-cpu-
      platform) to be used by this instance. The instance may be scheduled on
      the specified or a newer CPU platform. Specify the friendly names of CPU
      platforms, such as "Intel Haswell"` or Intel Sandy Bridge".
    preemptible: Optional. Whether the nodes are created as legacy preemptible
      VM instances
      (https://cloud.google.com/compute/docs/instances/preemptible). Also see
      Spot VMs, preemptible VM instances without a maximum lifetime. Legacy
      and Spot preemptible nodes cannot be used in a node pool with the
      CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is
      not assigned (the DEFAULT node pool will assume the CONTROLLER role).
    spot: Optional. Whether the nodes are created as Spot VM instances
      (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the
      latest update to legacy preemptible VMs. Spot VMs do not have a maximum
      lifetime. Legacy and Spot preemptible nodes cannot be used in a node
      pool with the CONTROLLER role or in the DEFAULT node pool if the
      CONTROLLER role is not assigned (the DEFAULT node pool will assume the
      CONTROLLER role).
  """

  accelerators = _messages.MessageField('GkeNodePoolAcceleratorConfig', 1, repeated=True)
  bootDiskKmsKey = _messages.StringField(2)
  ephemeralStorageConfig = _messages.MessageField('GkeEphemeralStorageConfig', 3)
  localSsdCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  machineType = _messages.StringField(5)
  minCpuPlatform = _messages.StringField(6)
  preemptible = _messages.BooleanField(7)
  spot = _messages.BooleanField(8)


class GkeNodePoolAcceleratorConfig(_messages.Message):
  r"""A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator
  request for a node pool.

  Fields:
    acceleratorCount: The number of accelerator cards exposed to an instance.
    acceleratorType: The accelerator type resource namename (see GPUs on
      Compute Engine).
    gpuPartitionSize: Size of partitions to create on the GPU. Valid values
      are described in the NVIDIA mig user guide
      (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
  """

  acceleratorCount = _messages.IntegerField(1)
  acceleratorType = _messages.StringField(2)
  gpuPartitionSize = _messages.StringField(3)


class GkeNodePoolAutoscalingConfig(_messages.Message):
  r"""GkeNodePoolAutoscaling contains information the cluster autoscaler needs
  to adjust the size of the node pool to the current cluster usage.

  Fields:
    maxNodeCount: The maximum number of nodes in the node pool. Must be >=
      min_node_count, and must be > 0. Note: Quota must be sufficient to scale
      up the cluster.
    minNodeCount: The minimum number of nodes in the node pool. Must be >= 0
      and <= max_node_count.
  """

  maxNodeCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  minNodeCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class GkeNodePoolConfig(_messages.Message):
  r"""The configuration of a GKE node pool used by a Dataproc-on-GKE cluster
  (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-
  dataproc-on-gke-cluster).

  Fields:
    autoscaling: Optional. The autoscaler configuration for this node pool.
      The autoscaler is enabled only when a valid configuration is present.
    config: Optional. The node pool configuration.
    locations: Optional. The list of Compute Engine zones
      (https://cloud.google.com/compute/docs/zones#available) where node pool
      nodes associated with a Dataproc on GKE virtual cluster will be
      located.Note: All node pools associated with a virtual cluster must be
      located in the same region as the virtual cluster, and they must be
      located in the same zone within that region.If a location is not
      specified during node pool creation, Dataproc on GKE will choose the
      zone.
  """

  autoscaling = _messages.MessageField('GkeNodePoolAutoscalingConfig', 1)
  config = _messages.MessageField('GkeNodeConfig', 2)
  locations = _messages.StringField(3, repeated=True)


class GkeNodePoolTarget(_messages.Message):
  r"""GKE node pools that Dataproc workloads run on.

  Enums:
    RolesValueListEntryValuesEnum:

  Fields:
    nodePool: Required. The target GKE node pool. Format: 'projects/{project}/
      locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
    nodePoolConfig: Input only. The configuration for the GKE node pool.If
      specified, Dataproc attempts to create a node pool with the specified
      shape. If one with the same name already exists, it is verified against
      all specified fields. If a field differs, the virtual cluster creation
      will fail.If omitted, any node pool with the specified name is used. If
      a node pool with the specified name does not exist, Dataproc create a
      node pool with default values.This is an input only field. It will not
      be returned by the API.
    roles: Required. The roles associated with the GKE node pool.
  """

  class RolesValueListEntryValuesEnum(_messages.Enum):
    r"""RolesValueListEntryValuesEnum enum type.

    Values:
      ROLE_UNSPECIFIED: Role is unspecified.
      DEFAULT: At least one node pool must have the DEFAULT role. Work
        assigned to a role that is not associated with a node pool is assigned
        to the node pool with the DEFAULT role. For example, work assigned to
        the CONTROLLER role will be assigned to the node pool with the DEFAULT
        role if no node pool has the CONTROLLER role.
      CONTROLLER: Run work associated with the Dataproc control plane (for
        example, controllers and webhooks). Very low resource requirements.
      SPARK_DRIVER: Run work associated with a Spark driver of a job.
      SPARK_EXECUTOR: Run work associated with a Spark executor of a job.
      SHUFFLE_SERVICE: Run work associated with a shuffle service of a job.
        During private preview only, this role must be set explicitly, it does
        not default to DEFAULT. Once the feature reaches public preview, then
        it will default to DEFAULT as the other roles do.
    """
    ROLE_UNSPECIFIED = 0
    DEFAULT = 1
    CONTROLLER = 2
    SPARK_DRIVER = 3
    SPARK_EXECUTOR = 4
    SHUFFLE_SERVICE = 5

  nodePool = _messages.StringField(1)
  nodePoolConfig = _messages.MessageField('GkeNodePoolConfig', 2)
  roles = _messages.EnumField('RolesValueListEntryValuesEnum', 3, repeated=True)


class GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig(_messages.Message):
  r"""Encryption settings for encrypting workflow template job arguments.

  Fields:
    kmsKey: Optional. The Cloud KMS key name to use for encrypting workflow
      template job arguments.When this this key is provided, the following
      workflow template job arguments
      (https://cloud.google.com/dataproc/docs/concepts/workflows/use-
      workflows#adding_jobs_to_a_template), if present, are CMEK encrypted
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/customer-managed-
      encryption#use_cmek_with_workflow_template_data): FlinkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
      HadoopJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
      SparkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
      SparkRJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
      PySparkJob args
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
      SparkSqlJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
      scriptVariables and queryList.queries HiveJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
      scriptVariables and queryList.queries PigJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
      scriptVariables and queryList.queries PrestoJob
      (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
      scriptVariables and queryList.queries
  """

  kmsKey = _messages.StringField(1)


class HadoopJob(_messages.Message):
  r"""A Dataproc job for running Apache Hadoop MapReduce
  (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-
  mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN
  (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-
  site/YARN.html).

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure Hadoop. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site and classes in user code.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted in the
      working directory of Hadoop drivers and tasks. Supported file types:
      .jar, .tar, .tar.gz, .tgz, or .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as -libjars or -Dfoo=bar, that can be set as job
      properties, since a collision might occur that causes an incorrect job
      submission.
    fileUris: Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to
      be copied to the working directory of Hadoop drivers and distributed
      tasks. Useful for naively parallel tasks.
    jarFileUris: Optional. Jar file URIs to add to the CLASSPATHs of the
      Hadoop driver and tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainClass: The name of the driver's main class. The jar file containing
      the class must be in the default CLASSPATH or specified in
      jar_file_uris.
    mainJarFileUri: The HCFS URI of the jar file containing the main class.
      Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-
      mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar'
      'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    properties: Optional. A mapping of property names to values, used to
      configure Hadoop. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site and classes in user code.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    Hadoop. Properties that conflict with values set by the Dataproc API might
    be overwritten. Can include properties set in /etc/hadoop/conf/*-site and
    classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  jarFileUris = _messages.StringField(4, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 5)
  mainClass = _messages.StringField(6)
  mainJarFileUri = _messages.StringField(7)
  properties = _messages.MessageField('PropertiesValue', 8)


class HiveJob(_messages.Message):
  r"""A Dataproc job for running Apache Hive (https://hive.apache.org/)
  queries on YARN.

  Messages:
    PropertiesValue: Optional. A mapping of property names and values, used to
      configure Hive. Properties that conflict with values set by the Dataproc
      API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
      in user code.
    ScriptVariablesValue: Optional. Mapping of query variable names to values
      (equivalent to the Hive command: SET name="value";).

  Fields:
    continueOnFailure: Optional. Whether to continue executing queries if a
      query fails. The default value is false. Setting to true can be useful
      when executing independent parallel queries.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
      the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
      and UDFs.
    properties: Optional. A mapping of property names and values, used to
      configure Hive. Properties that conflict with values set by the Dataproc
      API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
      in user code.
    queryFileUri: The HCFS URI of the script that contains Hive queries.
    queryList: A list of queries.
    scriptVariables: Optional. Mapping of query variable names to values
      (equivalent to the Hive command: SET name="value";).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names and values, used to configure
    Hive. Properties that conflict with values set by the Dataproc API might
    be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
    /etc/hive/conf/hive-site.xml, and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ScriptVariablesValue(_messages.Message):
    r"""Optional. Mapping of query variable names to values (equivalent to the
    Hive command: SET name="value";).

    Messages:
      AdditionalProperty: An additional property for a ScriptVariablesValue
        object.

    Fields:
      additionalProperties: Additional properties of type ScriptVariablesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ScriptVariablesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  continueOnFailure = _messages.BooleanField(1)
  jarFileUris = _messages.StringField(2, repeated=True)
  properties = _messages.MessageField('PropertiesValue', 3)
  queryFileUri = _messages.StringField(4)
  queryList = _messages.MessageField('QueryList', 5)
  scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)


class IdentityConfig(_messages.Message):
  r"""Identity related configuration, including service account based secure
  multi-tenancy user mappings.

  Messages:
    UserServiceAccountMappingValue: Required. Map of user to service account.

  Fields:
    userServiceAccountMapping: Required. Map of user to service account.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserServiceAccountMappingValue(_messages.Message):
    r"""Required. Map of user to service account.

    Messages:
      AdditionalProperty: An additional property for a
        UserServiceAccountMappingValue object.

    Fields:
      additionalProperties: Additional properties of type
        UserServiceAccountMappingValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserServiceAccountMappingValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  userServiceAccountMapping = _messages.MessageField('UserServiceAccountMappingValue', 1)


class InjectCredentialsRequest(_messages.Message):
  r"""A request to inject credentials into a cluster.

  Fields:
    clusterUuid: Required. The cluster UUID.
    credentialsCiphertext: Required. The encrypted credentials being injected
      in to the cluster.The client is responsible for encrypting the
      credentials in a way that is supported by the cluster.A wrapped value is
      used here so that the actual contents of the encrypted credentials are
      not written to audit logs.
  """

  clusterUuid = _messages.StringField(1)
  credentialsCiphertext = _messages.StringField(2)


class InputMetrics(_messages.Message):
  r"""Metrics about the input data read by the task.

  Fields:
    bytesRead: A string attribute.
    recordsRead: A string attribute.
  """

  bytesRead = _messages.IntegerField(1)
  recordsRead = _messages.IntegerField(2)


class InputQuantileMetrics(_messages.Message):
  r"""A InputQuantileMetrics object.

  Fields:
    bytesRead: A Quantiles attribute.
    recordsRead: A Quantiles attribute.
  """

  bytesRead = _messages.MessageField('Quantiles', 1)
  recordsRead = _messages.MessageField('Quantiles', 2)


class InstanceFlexibilityPolicy(_messages.Message):
  r"""Instance flexibility Policy allowing a mixture of VM shapes and
  provisioning models.

  Fields:
    instanceSelectionList: Optional. List of instance selection options that
      the group will use when creating new VMs.
    instanceSelectionResults: Output only. A list of instance selection
      results in the group.
    provisioningModelMix: Optional. Defines how the Group selects the
      provisioning model to ensure required reliability.
  """

  instanceSelectionList = _messages.MessageField('InstanceSelection', 1, repeated=True)
  instanceSelectionResults = _messages.MessageField('InstanceSelectionResult', 2, repeated=True)
  provisioningModelMix = _messages.MessageField('ProvisioningModelMix', 3)


class InstanceGroupAutoscalingPolicyConfig(_messages.Message):
  r"""Configuration for the size bounds of an instance group, including its
  proportional size to other groups.

  Fields:
    maxInstances: Required. Maximum number of instances for this group.
      Required for primary workers. Note that by default, clusters will not
      use secondary workers. Required for secondary workers if the minimum
      secondary instances is set.Primary workers - Bounds: [min_instances, ).
      Secondary workers - Bounds: [min_instances, ). Default: 0.
    minInstances: Optional. Minimum number of instances for this group.Primary
      workers - Bounds: 2, max_instances. Default: 2. Secondary workers -
      Bounds: 0, max_instances. Default: 0.
    weight: Optional. Weight for the instance group, which is used to
      determine the fraction of total workers in the cluster from this
      instance group. For example, if primary workers have weight 2, and
      secondary workers have weight 1, the cluster will have approximately 2
      primary workers for each secondary worker.The cluster may not reach the
      specified balance if constrained by min/max bounds or other autoscaling
      settings. For example, if max_instances for secondary workers is 0, then
      only primary workers will be added. The cluster can also be out of
      balance when created.If weight is not set on any instance group, the
      cluster will default to equal weight for all groups: the cluster will
      attempt to maintain an equal number of workers in each group within the
      configured size bounds for each group. If weight is set for one group
      only, the cluster will default to zero weight on the unset group. For
      example if weight is set only on primary workers, the cluster will use
      primary workers only and no secondary workers.
  """

  maxInstances = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  minInstances = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  weight = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class InstanceGroupConfig(_messages.Message):
  r"""The config settings for Compute Engine resources in an instance group,
  such as a master or worker group.

  Enums:
    PreemptibilityValueValuesEnum: Optional. Specifies the preemptibility of
      the instance group.The default value for master and worker groups is
      NON_PREEMPTIBLE. This default cannot be changed.The default value for
      secondary instances is PREEMPTIBLE.

  Fields:
    accelerators: Optional. The Compute Engine accelerator configuration for
      these instances.
    diskConfig: Optional. Disk option config settings.
    imageUri: Optional. The Compute Engine image resource used for cluster
      instances.The URI can represent an image or image family.Image examples:
      https://www.googleapis.com/compute/v1/projects/[project_id]/global/image
      s/[image-id] projects/[project_id]/global/images/[image-id] image-
      idImage family examples. Dataproc will use the most recent image from
      the family: https://www.googleapis.com/compute/v1/projects/[project_id]/
      global/images/family/[custom-image-family-name]
      projects/[project_id]/global/images/family/[custom-image-family-name]If
      the URI is unspecified, it will be inferred from
      SoftwareConfig.image_version or the system default.
    instanceFlexibilityPolicy: Optional. Instance flexibility Policy allowing
      a mixture of VM shapes and provisioning models.
    instanceNames: Output only. The list of instance names. Dataproc derives
      the names from cluster_name, num_instances, and the instance group.
    instanceReferences: Output only. List of references to Compute Engine
      instances.
    isPreemptible: Output only. Specifies that this instance group contains
      preemptible instances.
    machineTypeUri: Optional. The Compute Engine machine type used for cluster
      instances.A full URL, partial URI, or short name are valid. Examples: ht
      tps://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/m
      achineTypes/n1-standard-2
      projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2
      n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto
      Zone Placement
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/auto-zone#using_auto_zone_placement) feature, you must use the
      short name of the machine type resource, for example, n1-standard-2.
    managedGroupConfig: Output only. The config for Compute Engine Instance
      Group Manager that manages this group. This is only used for preemptible
      instance groups.
    minCpuPlatform: Optional. Specifies the minimum cpu platform for the
      Instance Group. See Dataproc -> Minimum CPU Platform
      (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-
      cpu).
    minNumInstances: Optional. The minimum number of primary worker instances
      to create. If min_num_instances is set, cluster creation will succeed if
      the number of primary workers created is at least equal to the
      min_num_instances number.Example: Cluster creation request with
      num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1
      instance fails, the failed VM is deleted. The cluster is resized to 4
      instances and placed in a RUNNING state. If 2 instances are created and
      3 instances fail, the cluster in placed in an ERROR state. The failed
      VMs are not deleted.
    numInstances: Optional. The number of VM instances in the instance group.
      For HA cluster master_config groups, must be set to 3. For standard
      cluster master_config groups, must be set to 1.
    preemptibility: Optional. Specifies the preemptibility of the instance
      group.The default value for master and worker groups is NON_PREEMPTIBLE.
      This default cannot be changed.The default value for secondary instances
      is PREEMPTIBLE.
    startupConfig: Optional. Configuration to handle the startup of instances
      during cluster create and update process.
  """

  class PreemptibilityValueValuesEnum(_messages.Enum):
    r"""Optional. Specifies the preemptibility of the instance group.The
    default value for master and worker groups is NON_PREEMPTIBLE. This
    default cannot be changed.The default value for secondary instances is
    PREEMPTIBLE.

    Values:
      PREEMPTIBILITY_UNSPECIFIED: Preemptibility is unspecified, the system
        will choose the appropriate setting for each instance group.
      NON_PREEMPTIBLE: Instances are non-preemptible.This option is allowed
        for all instance groups and is the only valid value for Master and
        Worker instance groups.
      PREEMPTIBLE: Instances are preemptible
        (https://cloud.google.com/compute/docs/instances/preemptible).This
        option is allowed only for secondary worker
        (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-
        vms) groups.
      SPOT: Instances are Spot VMs
        (https://cloud.google.com/compute/docs/instances/spot).This option is
        allowed only for secondary worker
        (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-
        vms) groups. Spot VMs are the latest version of preemptible VMs
        (https://cloud.google.com/compute/docs/instances/preemptible), and
        provide additional features.
    """
    PREEMPTIBILITY_UNSPECIFIED = 0
    NON_PREEMPTIBLE = 1
    PREEMPTIBLE = 2
    SPOT = 3

  accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
  diskConfig = _messages.MessageField('DiskConfig', 2)
  imageUri = _messages.StringField(3)
  instanceFlexibilityPolicy = _messages.MessageField('InstanceFlexibilityPolicy', 4)
  instanceNames = _messages.StringField(5, repeated=True)
  instanceReferences = _messages.MessageField('InstanceReference', 6, repeated=True)
  isPreemptible = _messages.BooleanField(7)
  machineTypeUri = _messages.StringField(8)
  managedGroupConfig = _messages.MessageField('ManagedGroupConfig', 9)
  minCpuPlatform = _messages.StringField(10)
  minNumInstances = _messages.IntegerField(11, variant=_messages.Variant.INT32)
  numInstances = _messages.IntegerField(12, variant=_messages.Variant.INT32)
  preemptibility = _messages.EnumField('PreemptibilityValueValuesEnum', 13)
  startupConfig = _messages.MessageField('StartupConfig', 14)


class InstanceReference(_messages.Message):
  r"""A reference to a Compute Engine instance.

  Fields:
    instanceId: The unique identifier of the Compute Engine instance.
    instanceName: The user-friendly name of the Compute Engine instance.
    publicEciesKey: The public ECIES key used for sharing data with this
      instance.
    publicKey: The public RSA key used for sharing data with this instance.
  """

  instanceId = _messages.StringField(1)
  instanceName = _messages.StringField(2)
  publicEciesKey = _messages.StringField(3)
  publicKey = _messages.StringField(4)


class InstanceSelection(_messages.Message):
  r"""Defines machines types and a rank to which the machines types belong.

  Fields:
    machineTypes: Optional. Full machine-type names, e.g. "n1-standard-16".
    rank: Optional. Preference of this instance selection. Lower number means
      higher preference. Dataproc will first try to create a VM based on the
      machine-type with priority rank and fallback to next rank based on
      availability. Machine types and instance selections with the same
      priority have the same preference.
  """

  machineTypes = _messages.StringField(1, repeated=True)
  rank = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class InstanceSelectionResult(_messages.Message):
  r"""Defines a mapping from machine types to the number of VMs that are
  created with each machine type.

  Fields:
    machineType: Output only. Full machine-type names, e.g. "n1-standard-16".
    vmCount: Output only. Number of VM provisioned with the machine_type.
  """

  machineType = _messages.StringField(1)
  vmCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class InstantiateWorkflowTemplateRequest(_messages.Message):
  r"""A request to instantiate a workflow template.

  Messages:
    ParametersValue: Optional. Map from parameter names to values that should
      be used for those parameters. Values may not exceed 1000 characters.

  Fields:
    parameters: Optional. Map from parameter names to values that should be
      used for those parameters. Values may not exceed 1000 characters.
    requestId: Optional. A tag that prevents multiple concurrent workflow
      instances with the same tag from running. This mitigates risk of
      concurrent instances started due to retries.It is recommended to always
      set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    version: Optional. The version of workflow template to instantiate. If
      specified, the workflow will be instantiated only if the current version
      of the workflow template has the supplied version.This option cannot be
      used to instantiate a previous version of workflow template.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    r"""Optional. Map from parameter names to values that should be used for
    those parameters. Values may not exceed 1000 characters.

    Messages:
      AdditionalProperty: An additional property for a ParametersValue object.

    Fields:
      additionalProperties: Additional properties of type ParametersValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  parameters = _messages.MessageField('ParametersValue', 1)
  requestId = _messages.StringField(2)
  version = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class Interval(_messages.Message):
  r"""Represents a time interval, encoded as a Timestamp start (inclusive) and
  a Timestamp end (exclusive).The start must be less than or equal to the end.
  When the start equals the end, the interval is empty (matches no time). When
  both start and end are unspecified, the interval matches any time.

  Fields:
    endTime: Optional. Exclusive end of the interval.If specified, a Timestamp
      matching this interval will have to be before the end.
    startTime: Optional. Inclusive start of the interval.If specified, a
      Timestamp matching this interval will have to be the same or after the
      start.
  """

  endTime = _messages.StringField(1)
  startTime = _messages.StringField(2)


class Job(_messages.Message):
  r"""A Dataproc job resource.

  Messages:
    LabelsValue: Optional. The labels to associate with this job. Label keys
      must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a job.

  Fields:
    done: Output only. Indicates whether the job is completed. If the value is
      false, the job is still in progress. If true, the job is completed, and
      status.state field will indicate if it was successful, failed, or
      cancelled.
    driverControlFilesUri: Output only. If present, the location of
      miscellaneous control files which can be used as part of job setup and
      handling. If not present, control files might be placed in the same
      location as driver_output_uri.
    driverOutputResourceUri: Output only. A URI pointing to the location of
      the stdout of the job's driver program.
    driverRunner: Optional. Configurations for the driver runner
    driverSchedulingConfig: Optional. Driver scheduling configuration.
    flinkJob: Optional. Job is a Flink job.
    hadoopJob: Optional. Job is a Hadoop job.
    hiveJob: Optional. Job is a Hive job.
    jobUuid: Output only. A UUID that uniquely identifies a job within the
      project over time. This is in contrast to a user-settable
      reference.job_id that might be reused over time.
    labels: Optional. The labels to associate with this job. Label keys must
      contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a job.
    pigJob: Optional. Job is a Pig job.
    placement: Required. Job information, including how, when, and where to
      run the job.
    prestoJob: Optional. Job is a Presto job.
    pyflinkJob: Optional. Job is a PyFlink job.
    pysparkJob: Optional. Job is a PySpark job.
    reference: Optional. The fully qualified reference to the job, which can
      be used to obtain the equivalent REST path of the job resource. If this
      property is not specified when a job is created, the server generates a
      job_id.
    scheduling: Optional. Job scheduling configuration.
    sparkJob: Optional. Job is a Spark job.
    sparkRJob: Optional. Job is a SparkR job.
    sparkSqlJob: Optional. Job is a SparkSql job.
    status: Output only. The job status. Additional application-specific
      status information might be contained in the type_job and
      yarn_applications fields.
    statusHistory: Output only. The previous job status.
    trinoJob: Optional. Job is a Trino job.
    yarnApplications: Output only. The collection of YARN applications spun up
      by this job.Beta Feature: This report is available for testing purposes
      only. It might be changed before final release.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this job. Label keys must
    contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a job.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  done = _messages.BooleanField(1)
  driverControlFilesUri = _messages.StringField(2)
  driverOutputResourceUri = _messages.StringField(3)
  driverRunner = _messages.MessageField('DriverRunner', 4)
  driverSchedulingConfig = _messages.MessageField('DriverSchedulingConfig', 5)
  flinkJob = _messages.MessageField('FlinkJob', 6)
  hadoopJob = _messages.MessageField('HadoopJob', 7)
  hiveJob = _messages.MessageField('HiveJob', 8)
  jobUuid = _messages.StringField(9)
  labels = _messages.MessageField('LabelsValue', 10)
  pigJob = _messages.MessageField('PigJob', 11)
  placement = _messages.MessageField('JobPlacement', 12)
  prestoJob = _messages.MessageField('PrestoJob', 13)
  pyflinkJob = _messages.MessageField('PyFlinkJob', 14)
  pysparkJob = _messages.MessageField('PySparkJob', 15)
  reference = _messages.MessageField('JobReference', 16)
  scheduling = _messages.MessageField('JobScheduling', 17)
  sparkJob = _messages.MessageField('SparkJob', 18)
  sparkRJob = _messages.MessageField('SparkRJob', 19)
  sparkSqlJob = _messages.MessageField('SparkSqlJob', 20)
  status = _messages.MessageField('JobStatus', 21)
  statusHistory = _messages.MessageField('JobStatus', 22, repeated=True)
  trinoJob = _messages.MessageField('TrinoJob', 23)
  yarnApplications = _messages.MessageField('YarnApplication', 24, repeated=True)


class JobData(_messages.Message):
  r"""Data corresponding to a spark job.

  Enums:
    StatusValueValuesEnum:

  Messages:
    KillTasksSummaryValue: A KillTasksSummaryValue object.

  Fields:
    completionTime: A string attribute.
    description: A string attribute.
    jobGroup: A string attribute.
    jobId: A string attribute.
    killTasksSummary: A KillTasksSummaryValue attribute.
    name: A string attribute.
    numActiveStages: A integer attribute.
    numActiveTasks: A integer attribute.
    numCompletedIndices: A integer attribute.
    numCompletedStages: A integer attribute.
    numCompletedTasks: A integer attribute.
    numFailedStages: A integer attribute.
    numFailedTasks: A integer attribute.
    numKilledTasks: A integer attribute.
    numSkippedStages: A integer attribute.
    numSkippedTasks: A integer attribute.
    numTasks: A integer attribute.
    skippedStages: A integer attribute.
    sqlExecutionId: A string attribute.
    stageIds: A string attribute.
    status: A StatusValueValuesEnum attribute.
    submissionTime: A string attribute.
  """

  class StatusValueValuesEnum(_messages.Enum):
    r"""StatusValueValuesEnum enum type.

    Values:
      JOB_EXECUTION_STATUS_UNSPECIFIED: <no description>
      JOB_EXECUTION_STATUS_RUNNING: <no description>
      JOB_EXECUTION_STATUS_SUCCEEDED: <no description>
      JOB_EXECUTION_STATUS_FAILED: <no description>
      JOB_EXECUTION_STATUS_UNKNOWN: <no description>
    """
    JOB_EXECUTION_STATUS_UNSPECIFIED = 0
    JOB_EXECUTION_STATUS_RUNNING = 1
    JOB_EXECUTION_STATUS_SUCCEEDED = 2
    JOB_EXECUTION_STATUS_FAILED = 3
    JOB_EXECUTION_STATUS_UNKNOWN = 4

  @encoding.MapUnrecognizedFields('additionalProperties')
  class KillTasksSummaryValue(_messages.Message):
    r"""A KillTasksSummaryValue object.

    Messages:
      AdditionalProperty: An additional property for a KillTasksSummaryValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        KillTasksSummaryValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a KillTasksSummaryValue object.

      Fields:
        key: Name of the additional property.
        value: A integer attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2, variant=_messages.Variant.INT32)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  completionTime = _messages.StringField(1)
  description = _messages.StringField(2)
  jobGroup = _messages.StringField(3)
  jobId = _messages.IntegerField(4)
  killTasksSummary = _messages.MessageField('KillTasksSummaryValue', 5)
  name = _messages.StringField(6)
  numActiveStages = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  numActiveTasks = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  numCompletedIndices = _messages.IntegerField(9, variant=_messages.Variant.INT32)
  numCompletedStages = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  numCompletedTasks = _messages.IntegerField(11, variant=_messages.Variant.INT32)
  numFailedStages = _messages.IntegerField(12, variant=_messages.Variant.INT32)
  numFailedTasks = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  numKilledTasks = _messages.IntegerField(14, variant=_messages.Variant.INT32)
  numSkippedStages = _messages.IntegerField(15, variant=_messages.Variant.INT32)
  numSkippedTasks = _messages.IntegerField(16, variant=_messages.Variant.INT32)
  numTasks = _messages.IntegerField(17, variant=_messages.Variant.INT32)
  skippedStages = _messages.IntegerField(18, repeated=True, variant=_messages.Variant.INT32)
  sqlExecutionId = _messages.IntegerField(19)
  stageIds = _messages.IntegerField(20, repeated=True)
  status = _messages.EnumField('StatusValueValuesEnum', 21)
  submissionTime = _messages.StringField(22)


class JobMetadata(_messages.Message):
  r"""Job Operation metadata.

  Fields:
    jobId: Output only. The job id.
    operationType: Output only. Operation type.
    startTime: Output only. Job submission time.
    status: Output only. Most recent job status.
  """

  jobId = _messages.StringField(1)
  operationType = _messages.StringField(2)
  startTime = _messages.StringField(3)
  status = _messages.MessageField('JobStatus', 4)


class JobPlacement(_messages.Message):
  r"""Dataproc job config.

  Messages:
    ClusterLabelsValue: Optional. Cluster labels to identify a cluster where
      the job will be submitted.

  Fields:
    clusterLabels: Optional. Cluster labels to identify a cluster where the
      job will be submitted.
    clusterName: Required. The name of the cluster where the job will be
      submitted.
    clusterUuid: Output only. A cluster UUID generated by the Dataproc service
      when the job is submitted.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ClusterLabelsValue(_messages.Message):
    r"""Optional. Cluster labels to identify a cluster where the job will be
    submitted.

    Messages:
      AdditionalProperty: An additional property for a ClusterLabelsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ClusterLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ClusterLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterLabels = _messages.MessageField('ClusterLabelsValue', 1)
  clusterName = _messages.StringField(2)
  clusterUuid = _messages.StringField(3)


class JobReference(_messages.Message):
  r"""Encapsulates the full scoping used to reference a job.

  Fields:
    jobId: Optional. The job ID, which must be unique within the project.The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      or hyphens (-). The maximum length is 100 characters.If not specified by
      the caller, the job ID will be provided by the server.
    projectId: Optional. The ID of the Google Cloud Platform project that the
      job belongs to. If specified, must match the request project ID.
  """

  jobId = _messages.StringField(1)
  projectId = _messages.StringField(2)


class JobScheduling(_messages.Message):
  r"""Job scheduling options.

  Fields:
    maxFailuresPerHour: Optional. Maximum number of times per hour a driver
      can be restarted as a result of driver exiting with non-zero code before
      job is reported failed.A job might be reported as thrashing if the
      driver exits with a non-zero code four times within a 10-minute
      window.Maximum value is 10.Note: This restartable job option is not
      supported in Dataproc workflow templates
      (https://cloud.google.com/dataproc/docs/concepts/workflows/using-
      workflows#adding_jobs_to_a_template).
    maxFailuresTotal: Optional. Maximum total number of times a driver can be
      restarted as a result of the driver exiting with a non-zero code. After
      the maximum number is reached, the job will be reported as
      failed.Maximum value is 240.Note: Currently, this restartable job option
      is not supported in Dataproc workflow templates
      (https://cloud.google.com/dataproc/docs/concepts/workflows/using-
      workflows#adding_jobs_to_a_template).
    ttl: Optional. The duration after which the workload will be terminated.
      When the workload passes this ttl, it will be unconditionally killed
      without waiting for ongoing work to finish. Minimum value is 10 minutes;
      maximum value is 14 days (see JSON representation of Duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)
  """

  maxFailuresPerHour = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  maxFailuresTotal = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  ttl = _messages.StringField(3)


class JobStatus(_messages.Message):
  r"""Dataproc job status.

  Enums:
    StateValueValuesEnum: Output only. A state message specifying the overall
      job state.
    SubstateValueValuesEnum: Output only. Additional state information, which
      includes status reported by the agent.

  Fields:
    details: Optional. Output only. Job state details, such as an error
      description if the state is ERROR.
    state: Output only. A state message specifying the overall job state.
    stateStartTime: Output only. The time when this state was entered.
    substate: Output only. Additional state information, which includes status
      reported by the agent.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. A state message specifying the overall job state.

    Values:
      STATE_UNSPECIFIED: The job state is unknown.
      PENDING: The job is pending; it has been submitted, but is not yet
        running.
      SETUP_DONE: Job has been received by the service and completed initial
        setup; it will soon be submitted to the cluster.
      RUNNING: The job is running on the cluster.
      CANCEL_PENDING: A CancelJob request has been received, but is pending.
      CANCEL_STARTED: Transient in-flight resources have been canceled, and
        the request to cancel the running job has been issued to the cluster.
      CANCELLED: The job cancellation was successful.
      DONE: The job has completed successfully.
      ERROR: The job has completed, but encountered an error.
      ATTEMPT_FAILURE: Job attempt has failed. The detail field contains
        failure details for this attempt.Applies to restartable jobs only.
    """
    STATE_UNSPECIFIED = 0
    PENDING = 1
    SETUP_DONE = 2
    RUNNING = 3
    CANCEL_PENDING = 4
    CANCEL_STARTED = 5
    CANCELLED = 6
    DONE = 7
    ERROR = 8
    ATTEMPT_FAILURE = 9

  class SubstateValueValuesEnum(_messages.Enum):
    r"""Output only. Additional state information, which includes status
    reported by the agent.

    Values:
      UNSPECIFIED: The job substate is unknown.
      SUBMITTED: The Job is submitted to the agent.Applies to RUNNING state.
      QUEUED: The Job has been received and is awaiting execution (it might be
        waiting for a condition to be met). See the "details" field for the
        reason for the delay.Applies to RUNNING state.
      STALE_STATUS: The agent-reported status is out of date, which can be
        caused by a loss of communication between the agent and Dataproc. If
        the agent does not send a timely update, the job will fail.Applies to
        RUNNING state.
    """
    UNSPECIFIED = 0
    SUBMITTED = 1
    QUEUED = 2
    STALE_STATUS = 3

  details = _messages.StringField(1)
  state = _messages.EnumField('StateValueValuesEnum', 2)
  stateStartTime = _messages.StringField(3)
  substate = _messages.EnumField('SubstateValueValuesEnum', 4)


class JobsSummary(_messages.Message):
  r"""Data related to Jobs page summary

  Fields:
    activeJobs: Number of active jobs
    applicationId: Spark Application Id
    attempts: Attempts info
    completedJobs: Number of completed jobs
    failedJobs: Number of failed jobs
    schedulingMode: Spark Scheduling mode
  """

  activeJobs = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  applicationId = _messages.StringField(2)
  attempts = _messages.MessageField('ApplicationAttemptInfo', 3, repeated=True)
  completedJobs = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  failedJobs = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  schedulingMode = _messages.StringField(6)


class JupyterConfig(_messages.Message):
  r"""Jupyter configuration for an interactive session.

  Enums:
    KernelValueValuesEnum: Optional. Kernel

  Fields:
    displayName: Optional. Display name, shown in the Jupyter kernelspec card.
    kernel: Optional. Kernel
  """

  class KernelValueValuesEnum(_messages.Enum):
    r"""Optional. Kernel

    Values:
      KERNEL_UNSPECIFIED: The kernel is unknown.
      PYTHON: Python kernel.
      SCALA: Scala kernel.
    """
    KERNEL_UNSPECIFIED = 0
    PYTHON = 1
    SCALA = 2

  displayName = _messages.StringField(1)
  kernel = _messages.EnumField('KernelValueValuesEnum', 2)


class KerberosConfig(_messages.Message):
  r"""Specifies Kerberos related configuration.

  Fields:
    crossRealmTrustAdminServer: Optional. The admin server (IP or hostname)
      for the remote trusted realm in a cross realm trust relationship.
    crossRealmTrustKdc: Optional. The KDC (IP or hostname) for the remote
      trusted realm in a cross realm trust relationship.
    crossRealmTrustRealm: Optional. The remote realm the Dataproc on-cluster
      KDC will trust, should the user enable cross realm trust.
    crossRealmTrustSharedPasswordUri: Optional. The Cloud Storage URI of a KMS
      encrypted file containing the shared password between the on-cluster
      Kerberos realm and the remote trusted realm, in a cross realm trust
      relationship.
    enableKerberos: Optional. Flag to indicate whether to Kerberize the
      cluster (default: false). Set this field to true to enable Kerberos on a
      cluster.
    kdcDbKeyUri: Optional. The Cloud Storage URI of a KMS encrypted file
      containing the master key of the KDC database.
    keyPasswordUri: Optional. The Cloud Storage URI of a KMS encrypted file
      containing the password to the user provided key. For the self-signed
      certificate, this password is generated by Dataproc.
    keystorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted
      file containing the password to the user provided keystore. For the
      self-signed certificate, this password is generated by Dataproc.
    keystoreUri: Optional. The Cloud Storage URI of the keystore file used for
      SSL encryption. If not provided, Dataproc will provide a self-signed
      certificate.
    kmsKeyUri: Optional. The URI of the KMS key used to encrypt sensitive
      files.
    realm: Optional. The name of the on-cluster Kerberos realm. If not
      specified, the uppercased domain of hostnames will be the realm.
    rootPrincipalPasswordUri: Optional. The Cloud Storage URI of a KMS
      encrypted file containing the root principal password.
    tgtLifetimeHours: Optional. The lifetime of the ticket granting ticket, in
      hours. If not specified, or user specifies 0, then default value 10 will
      be used.
    truststorePasswordUri: Optional. The Cloud Storage URI of a KMS encrypted
      file containing the password to the user provided truststore. For the
      self-signed certificate, this password is generated by Dataproc.
    truststoreUri: Optional. The Cloud Storage URI of the truststore file used
      for SSL encryption. If not provided, Dataproc will provide a self-signed
      certificate.
  """

  crossRealmTrustAdminServer = _messages.StringField(1)
  crossRealmTrustKdc = _messages.StringField(2)
  crossRealmTrustRealm = _messages.StringField(3)
  crossRealmTrustSharedPasswordUri = _messages.StringField(4)
  enableKerberos = _messages.BooleanField(5)
  kdcDbKeyUri = _messages.StringField(6)
  keyPasswordUri = _messages.StringField(7)
  keystorePasswordUri = _messages.StringField(8)
  keystoreUri = _messages.StringField(9)
  kmsKeyUri = _messages.StringField(10)
  realm = _messages.StringField(11)
  rootPrincipalPasswordUri = _messages.StringField(12)
  tgtLifetimeHours = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  truststorePasswordUri = _messages.StringField(14)
  truststoreUri = _messages.StringField(15)


class KubernetesClusterConfig(_messages.Message):
  r"""The configuration for running the Dataproc cluster on Kubernetes.

  Fields:
    gdceClusterConfig: Required. The configuration for running the Dataproc
      cluster on GDCE.
    gkeClusterConfig: Required. The configuration for running the Dataproc
      cluster on GKE.
    kubernetesNamespace: Optional. A namespace within the Kubernetes cluster
      to deploy into. If this namespace does not exist, it is created. If it
      exists, Dataproc verifies that another Dataproc VirtualCluster is not
      installed into it. If not specified, the name of the Dataproc Cluster is
      used.
    kubernetesSoftwareConfig: Optional. The software configuration for this
      Dataproc cluster running on Kubernetes.
  """

  gdceClusterConfig = _messages.MessageField('GdceClusterConfig', 1)
  gkeClusterConfig = _messages.MessageField('GkeClusterConfig', 2)
  kubernetesNamespace = _messages.StringField(3)
  kubernetesSoftwareConfig = _messages.MessageField('KubernetesSoftwareConfig', 4)


class KubernetesSoftwareConfig(_messages.Message):
  r"""The software configuration for this Dataproc cluster running on
  Kubernetes.

  Messages:
    ComponentVersionValue: The components that should be installed in this
      Dataproc cluster. The key must be a string from the KubernetesComponent
      enumeration. The value is the version of the software to be installed.
      At least one entry must be specified.
    PropertiesValue: The properties to set on daemon config files.Property
      keys are specified in prefix:property format, for example
      spark:spark.kubernetes.container.image. The following are supported
      prefixes and their mappings: spark: spark-defaults.confFor more
      information, see Cluster properties
      (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

  Fields:
    componentVersion: The components that should be installed in this Dataproc
      cluster. The key must be a string from the KubernetesComponent
      enumeration. The value is the version of the software to be installed.
      At least one entry must be specified.
    properties: The properties to set on daemon config files.Property keys are
      specified in prefix:property format, for example
      spark:spark.kubernetes.container.image. The following are supported
      prefixes and their mappings: spark: spark-defaults.confFor more
      information, see Cluster properties
      (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ComponentVersionValue(_messages.Message):
    r"""The components that should be installed in this Dataproc cluster. The
    key must be a string from the KubernetesComponent enumeration. The value
    is the version of the software to be installed. At least one entry must be
    specified.

    Messages:
      AdditionalProperty: An additional property for a ComponentVersionValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        ComponentVersionValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ComponentVersionValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""The properties to set on daemon config files.Property keys are
    specified in prefix:property format, for example
    spark:spark.kubernetes.container.image. The following are supported
    prefixes and their mappings: spark: spark-defaults.confFor more
    information, see Cluster properties
    (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  componentVersion = _messages.MessageField('ComponentVersionValue', 1)
  properties = _messages.MessageField('PropertiesValue', 2)


class LifecycleConfig(_messages.Message):
  r"""Specifies the cluster auto-delete schedule configuration.

  Fields:
    autoDeleteTime: Optional. The time when cluster will be auto-deleted (see
      JSON representation of Timestamp
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoDeleteTtl: Optional. The lifetime duration of cluster. The cluster
      will be auto-deleted at the end of this period. Minimum value is 10
      minutes; maximum value is 14 days (see JSON representation of Duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoStopTime: Optional. The time when cluster will be auto-stopped (see
      JSON representation of Timestamp
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    autoStopTtl: Optional. The lifetime duration of the cluster. The cluster
      will be auto-stopped at the end of this period, calculated from the time
      of submission of the create or update cluster request. Minimum value is
      10 minutes; maximum value is 14 days (see JSON representation of
      Duration (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).
    idleDeleteTtl: Optional. The duration to keep the cluster alive while
      idling (when no jobs are running). Passing this threshold will cause the
      cluster to be deleted. Minimum value is 5 minutes; maximum value is 14
      days (see JSON representation of Duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    idleStartTime: Output only. The time when cluster became idle (most recent
      job finished) and became eligible for deletion due to idleness (see JSON
      representation of Timestamp (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).
    idleStopTtl: Optional. The duration to keep the cluster started while
      idling (when no jobs are running). Passing this threshold will cause the
      cluster to be stopped. Minimum value is 5 minutes; maximum value is 14
      days (see JSON representation of Duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
  """

  autoDeleteTime = _messages.StringField(1)
  autoDeleteTtl = _messages.StringField(2)
  autoStopTime = _messages.StringField(3)
  autoStopTtl = _messages.StringField(4)
  idleDeleteTtl = _messages.StringField(5)
  idleStartTime = _messages.StringField(6)
  idleStopTtl = _messages.StringField(7)


class ListAutoscalingPoliciesResponse(_messages.Message):
  r"""A response to a request to list autoscaling policies in a project.

  Fields:
    nextPageToken: Output only. This token is included in the response if
      there are more results to fetch.
    policies: Output only. Autoscaling policies list.
  """

  nextPageToken = _messages.StringField(1)
  policies = _messages.MessageField('AutoscalingPolicy', 2, repeated=True)


class ListBatchesResponse(_messages.Message):
  r"""A list of batch workloads.

  Fields:
    batches: Output only. The batches from the specified collection.
    nextPageToken: A token, which can be sent as page_token to retrieve the
      next page. If this field is omitted, there are no subsequent pages.
    unreachable: Output only. List of Batches that could not be included in
      the response. Attempting to get one of these resources may indicate why
      it was not included in the list response.
  """

  batches = _messages.MessageField('Batch', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  unreachable = _messages.StringField(3, repeated=True)


class ListClustersResponse(_messages.Message):
  r"""The list of all clusters in a project.

  Fields:
    clusters: Output only. The clusters in the project.
    nextPageToken: Output only. This token is included in the response if
      there are more results to fetch. To fetch additional results, provide
      this value as the page_token in a subsequent ListClustersRequest.
  """

  clusters = _messages.MessageField('Cluster', 1, repeated=True)
  nextPageToken = _messages.StringField(2)


class ListJobsResponse(_messages.Message):
  r"""A list of jobs in a project.

  Fields:
    jobs: Output only. Jobs list.
    nextPageToken: Optional. This token is included in the response if there
      are more results to fetch. To fetch additional results, provide this
      value as the page_token in a subsequent ListJobsRequest.
    unreachable: Output only. List of jobs with kms_key-encrypted parameters
      that could not be decrypted. A response to a jobs.get request may
      indicate the reason for the decryption failure for a specific job.
  """

  jobs = _messages.MessageField('Job', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  unreachable = _messages.StringField(3, repeated=True)


class ListNodeGroupsResponse(_messages.Message):
  r"""A response to a request to list the node groups in a cluster.

  Fields:
    nextPageToken: A token, which can be sent as page_token to retrieve the
      next page. If this field is omitted, there are no subsequent pages.
    nodeGroups: The node groups in the cluster.
  """

  nextPageToken = _messages.StringField(1)
  nodeGroups = _messages.MessageField('NodeGroup', 2, repeated=True)


class ListOperationsResponse(_messages.Message):
  r"""The response message for Operations.ListOperations.

  Fields:
    nextPageToken: The standard List next-page token.
    operations: A list of operations that matches the specified filter in the
      request.
    unreachable: Unordered list. Unreachable resources. Populated when the
      request sets ListOperationsRequest.return_partial_success and reads
      across collections e.g. when attempting to list all resources across all
      supported locations.
  """

  nextPageToken = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
  unreachable = _messages.StringField(3, repeated=True)


class ListSessionTemplatesResponse(_messages.Message):
  r"""A list of session templates.

  Fields:
    nextPageToken: A token, which can be sent as page_token to retrieve the
      next page. If this field is omitted, there are no subsequent pages.
    sessionTemplates: Output only. Session template list
  """

  nextPageToken = _messages.StringField(1)
  sessionTemplates = _messages.MessageField('SessionTemplate', 2, repeated=True)


class ListSessionsResponse(_messages.Message):
  r"""A list of interactive sessions.

  Fields:
    nextPageToken: A token, which can be sent as page_token, to retrieve the
      next page. If this field is omitted, there are no subsequent pages.
    sessions: Output only. The sessions from the specified collection.
  """

  nextPageToken = _messages.StringField(1)
  sessions = _messages.MessageField('Session', 2, repeated=True)


class ListWorkflowTemplatesResponse(_messages.Message):
  r"""A response to a request to list workflow templates in a project.

  Fields:
    nextPageToken: Output only. This token is included in the response if
      there are more results to fetch. To fetch additional results, provide
      this value as the page_token in a subsequent
      ListWorkflowTemplatesRequest.
    templates: Output only. WorkflowTemplates list.
    unreachable: Output only. List of workflow templates that could not be
      included in the response. Attempting to get one of these resources may
      indicate why it was not included in the list response.
  """

  nextPageToken = _messages.StringField(1)
  templates = _messages.MessageField('WorkflowTemplate', 2, repeated=True)
  unreachable = _messages.StringField(3, repeated=True)


class LoggingConfig(_messages.Message):
  r"""The runtime logging config of the job.

  Messages:
    DriverLogLevelsValue: The per-package log levels for the driver. This can
      include "root" package name to configure rootLogger. Examples: -
      'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'

  Fields:
    driverLogLevels: The per-package log levels for the driver. This can
      include "root" package name to configure rootLogger. Examples: -
      'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DriverLogLevelsValue(_messages.Message):
    r"""The per-package log levels for the driver. This can include "root"
    package name to configure rootLogger. Examples: - 'com.google = FATAL' -
    'root = INFO' - 'org.apache = DEBUG'

    Messages:
      AdditionalProperty: An additional property for a DriverLogLevelsValue
        object.

    Fields:
      additionalProperties: Additional properties of type DriverLogLevelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DriverLogLevelsValue object.

      Enums:
        ValueValueValuesEnum:

      Fields:
        key: Name of the additional property.
        value: A ValueValueValuesEnum attribute.
      """

      class ValueValueValuesEnum(_messages.Enum):
        r"""ValueValueValuesEnum enum type.

        Values:
          LEVEL_UNSPECIFIED: Level is unspecified. Use default level for
            log4j.
          ALL: Use ALL level for log4j.
          TRACE: Use TRACE level for log4j.
          DEBUG: Use DEBUG level for log4j.
          INFO: Use INFO level for log4j.
          WARN: Use WARN level for log4j.
          ERROR: Use ERROR level for log4j.
          FATAL: Use FATAL level for log4j.
          OFF: Turn off log4j.
        """
        LEVEL_UNSPECIFIED = 0
        ALL = 1
        TRACE = 2
        DEBUG = 3
        INFO = 4
        WARN = 5
        ERROR = 6
        FATAL = 7
        OFF = 8

      key = _messages.StringField(1)
      value = _messages.EnumField('ValueValueValuesEnum', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  driverLogLevels = _messages.MessageField('DriverLogLevelsValue', 1)


class ManagedCluster(_messages.Message):
  r"""Cluster that is managed by the workflow.

  Messages:
    LabelsValue: Optional. The labels to associate with this cluster.Label
      keys must be between 1 and 63 characters long, and must conform to the
      following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must
      be between 1 and 63 characters long, and must conform to the following
      PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels
      can be associated with a given cluster.

  Fields:
    clusterName: Required. The cluster name prefix. A unique cluster name will
      be formed by appending a random suffix.The name must contain only lower-
      case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a
      letter. Cannot begin or end with hyphen. Must consist of between 2 and
      35 characters.
    config: Required. The cluster configuration.
    labels: Optional. The labels to associate with this cluster.Label keys
      must be between 1 and 63 characters long, and must conform to the
      following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must
      be between 1 and 63 characters long, and must conform to the following
      PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels
      can be associated with a given cluster.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this cluster.Label keys must be
    between 1 and 63 characters long, and must conform to the following PCRE
    regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
    63 characters long, and must conform to the following PCRE regular
    expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be
    associated with a given cluster.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterName = _messages.StringField(1)
  config = _messages.MessageField('ClusterConfig', 2)
  labels = _messages.MessageField('LabelsValue', 3)


class ManagedGroupConfig(_messages.Message):
  r"""Specifies the resources used to actively manage an instance group.

  Fields:
    instanceGroupManagerName: Output only. The name of the Instance Group
      Manager for this group.
    instanceGroupManagerUri: Output only. The partial URI to the instance
      group manager for this group. E.g. projects/my-project/regions/us-
      central1/instanceGroupManagers/my-igm.
    instanceTemplateName: Output only. The name of the Instance Template used
      for the Managed Instance Group.
    instanceTemplateUri: Optional. Output only. Partial URI of the Instance
      Template. Example:
      projects/project_id/regions/region/instanceTemplates/template-id
  """

  instanceGroupManagerName = _messages.StringField(1)
  instanceGroupManagerUri = _messages.StringField(2)
  instanceTemplateName = _messages.StringField(3)
  instanceTemplateUri = _messages.StringField(4)


class MasterDriverRunner(_messages.Message):
  r"""The default mode of executing drivers: on master nodes"""


class MemoryMetrics(_messages.Message):
  r"""A MemoryMetrics object.

  Fields:
    totalOffHeapStorageMemory: A string attribute.
    totalOnHeapStorageMemory: A string attribute.
    usedOffHeapStorageMemory: A string attribute.
    usedOnHeapStorageMemory: A string attribute.
  """

  totalOffHeapStorageMemory = _messages.IntegerField(1)
  totalOnHeapStorageMemory = _messages.IntegerField(2)
  usedOffHeapStorageMemory = _messages.IntegerField(3)
  usedOnHeapStorageMemory = _messages.IntegerField(4)


class MetastoreConfig(_messages.Message):
  r"""Specifies a Metastore configuration.

  Fields:
    bigqueryMetastoreConfig: Optional. The BigQuery Metastore configuration
      for the workload.
    dataprocMetastoreService: Required. Resource name of an existing Dataproc
      Metastore service.Example:
      projects/[project_id]/locations/[dataproc_region]/services/[service-
      name]
  """

  bigqueryMetastoreConfig = _messages.MessageField('BigqueryMetastoreConfig', 1)
  dataprocMetastoreService = _messages.StringField(2)


class Metric(_messages.Message):
  r"""A Dataproc custom metric.

  Enums:
    MetricSourceValueValuesEnum: Required. A standard set of metrics is
      collected unless metricOverrides are specified for the metric source
      (see Custom metrics
      (https://cloud.google.com/dataproc/docs/guides/dataproc-
      metrics#custom_metrics) for more information).

  Fields:
    metricOverrides: Optional. Specify one or more Custom metrics
      (https://cloud.google.com/dataproc/docs/guides/dataproc-
      metrics#custom_metrics) to collect for the metric course (for the SPARK
      metric source (any Spark metric
      (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be
      specified).Provide metrics in the following format: METRIC_SOURCE:
      INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples:
      yarn:ResourceManager:QueueMetrics:AppsCompleted
      spark:driver:DAGScheduler:job.allJobs
      sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed
      hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified
      overridden metrics are collected for the metric source. For example, if
      one or more spark:executive metrics are listed as metric overrides,
      other SPARK metrics are not collected. The collection of the metrics for
      other enabled custom metric sources is unaffected. For example, if both
      SPARK and YARN metric sources are enabled, and overrides are provided
      for Spark metrics only, all YARN metrics are collected.
    metricSource: Required. A standard set of metrics is collected unless
      metricOverrides are specified for the metric source (see Custom metrics
      (https://cloud.google.com/dataproc/docs/guides/dataproc-
      metrics#custom_metrics) for more information).
  """

  class MetricSourceValueValuesEnum(_messages.Enum):
    r"""Required. A standard set of metrics is collected unless
    metricOverrides are specified for the metric source (see Custom metrics
    (https://cloud.google.com/dataproc/docs/guides/dataproc-
    metrics#custom_metrics) for more information).

    Values:
      METRIC_SOURCE_UNSPECIFIED: Required unspecified metric source.
      MONITORING_AGENT_DEFAULTS: Monitoring agent metrics. If this source is
        enabled, Dataproc enables the monitoring agent in Compute Engine, and
        collects monitoring agent metrics, which are published with an
        agent.googleapis.com prefix.
      HDFS: HDFS metric source.
      SPARK: Spark metric source.
      YARN: YARN metric source.
      SPARK_HISTORY_SERVER: Spark History Server metric source.
      HIVESERVER2: Hiveserver2 metric source.
      HIVEMETASTORE: hivemetastore metric source
      FLINK: flink metric source
    """
    METRIC_SOURCE_UNSPECIFIED = 0
    MONITORING_AGENT_DEFAULTS = 1
    HDFS = 2
    SPARK = 3
    YARN = 4
    SPARK_HISTORY_SERVER = 5
    HIVESERVER2 = 6
    HIVEMETASTORE = 7
    FLINK = 8

  metricOverrides = _messages.StringField(1, repeated=True)
  metricSource = _messages.EnumField('MetricSourceValueValuesEnum', 2)


class NamespacedGkeDeploymentTarget(_messages.Message):
  r"""Deprecated. Used only for the deprecated beta. A full, namespace-
  isolated deployment target for an existing GKE cluster.

  Fields:
    clusterNamespace: Optional. A namespace within the GKE cluster to deploy
      into.
    targetGkeCluster: Optional. The target GKE cluster to deploy to. Format:
      'projects/{project}/locations/{location}/clusters/{cluster_id}'
  """

  clusterNamespace = _messages.StringField(1)
  targetGkeCluster = _messages.StringField(2)


class NativeBuildInfoUiData(_messages.Message):
  r"""A NativeBuildInfoUiData object.

  Fields:
    buildClass: Optional. Build class of Native.
    buildInfo: Optional. Build related details.
  """

  buildClass = _messages.StringField(1)
  buildInfo = _messages.MessageField('BuildInfo', 2, repeated=True)


class NativeSqlExecutionUiData(_messages.Message):
  r"""Native SQL Execution Data

  Fields:
    description: Optional. Description of the execution.
    executionId: Required. Execution ID of the Native SQL Execution.
    fallbackDescription: Optional. Description of the fallback.
    fallbackNodeToReason: Optional. Fallback node to reason.
    numFallbackNodes: Optional. Number of nodes fallen back to Spark.
    numNativeNodes: Optional. Number of nodes in Native.
  """

  description = _messages.StringField(1)
  executionId = _messages.IntegerField(2)
  fallbackDescription = _messages.StringField(3)
  fallbackNodeToReason = _messages.MessageField('FallbackReason', 4, repeated=True)
  numFallbackNodes = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  numNativeNodes = _messages.IntegerField(6, variant=_messages.Variant.INT32)


class NodeGroup(_messages.Message):
  r"""Dataproc Node Group. The Dataproc NodeGroup resource is not related to
  the Dataproc NodeGroupAffinity resource.

  Enums:
    RolesValueListEntryValuesEnum:

  Messages:
    LabelsValue: Optional. Node group labels. Label keys must consist of from
      1 to 63 characters and conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If
      specified, they must consist of from 1 to 63 characters and conform to
      RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must
      have no more than 32 labels.

  Fields:
    labels: Optional. Node group labels. Label keys must consist of from 1 to
      63 characters and conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If
      specified, they must consist of from 1 to 63 characters and conform to
      RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must
      have no more than 32 labels.
    name: The Node group resource name (https://aip.dev/122).
    nodeGroupConfig: Optional. The node group instance group configuration.
    roles: Required. Node group roles.
  """

  class RolesValueListEntryValuesEnum(_messages.Enum):
    r"""RolesValueListEntryValuesEnum enum type.

    Values:
      ROLE_UNSPECIFIED: Required unspecified role.
      DRIVER: Job drivers run on the node pool.
      MASTER: Master nodes.
      PRIMARY_WORKER: Primary workers.
      SECONDARY_WORKER: Secondary workers.
    """
    ROLE_UNSPECIFIED = 0
    DRIVER = 1
    MASTER = 2
    PRIMARY_WORKER = 3
    SECONDARY_WORKER = 4

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. Node group labels. Label keys must consist of from 1 to 63
    characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).
    Label values can be empty. If specified, they must consist of from 1 to 63
    characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).
    The node group must have no more than 32 labels.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  labels = _messages.MessageField('LabelsValue', 1)
  name = _messages.StringField(2)
  nodeGroupConfig = _messages.MessageField('InstanceGroupConfig', 3)
  roles = _messages.EnumField('RolesValueListEntryValuesEnum', 4, repeated=True)


class NodeGroupAffinity(_messages.Message):
  r"""Node Group Affinity for clusters using sole-tenant node groups. The
  Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup
  resource.

  Fields:
    nodeGroupUri: Required. The URI of a sole-tenant node group resource
      (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
      that the cluster will be created on.A full URL, partial URI, or node
      group name are valid. Examples: https://www.googleapis.com/compute/v1/pr
      ojects/[project_id]/zones/[zone]/nodeGroups/node-group-1
      projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1
  """

  nodeGroupUri = _messages.StringField(1)


class NodeGroupOperationMetadata(_messages.Message):
  r"""Metadata describing the node group operation.

  Enums:
    OperationTypeValueValuesEnum: The operation type.

  Messages:
    LabelsValue: Output only. Labels associated with the operation.

  Fields:
    clusterUuid: Output only. Cluster UUID associated with the node group
      operation.
    description: Output only. Short description of operation.
    labels: Output only. Labels associated with the operation.
    nodeGroupId: Output only. Node group ID for the operation.
    operationType: The operation type.
    status: Output only. Current operation status.
    statusHistory: Output only. The previous operation status.
    warnings: Output only. Errors encountered during operation execution.
  """

  class OperationTypeValueValuesEnum(_messages.Enum):
    r"""The operation type.

    Values:
      NODE_GROUP_OPERATION_TYPE_UNSPECIFIED: Node group operation type is
        unknown.
      CREATE: Create node group operation type.
      UPDATE: Update node group operation type.
      DELETE: Delete node group operation type.
      RESIZE: Resize node group operation type.
      REPAIR: Repair node group operation type.
      UPDATE_LABELS: Update node group label operation type.
      START: Start node group operation type.
      STOP: Stop node group operation type.
      UPDATE_METADATA_CONFIG: This operation type is used to update the
        metadata config of a node group. We update the metadata of the VMs in
        the node group and await for intended config change to be completed at
        the node group level. Currently, only the identity config update is
        supported.
    """
    NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0
    CREATE = 1
    UPDATE = 2
    DELETE = 3
    RESIZE = 4
    REPAIR = 5
    UPDATE_LABELS = 6
    START = 7
    STOP = 8
    UPDATE_METADATA_CONFIG = 9

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Output only. Labels associated with the operation.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterUuid = _messages.StringField(1)
  description = _messages.StringField(2)
  labels = _messages.MessageField('LabelsValue', 3)
  nodeGroupId = _messages.StringField(4)
  operationType = _messages.EnumField('OperationTypeValueValuesEnum', 5)
  status = _messages.MessageField('ClusterOperationStatus', 6)
  statusHistory = _messages.MessageField('ClusterOperationStatus', 7, repeated=True)
  warnings = _messages.StringField(8, repeated=True)


class NodeInitializationAction(_messages.Message):
  r"""Specifies an executable to run on a fully configured node and a timeout
  period for executable completion.

  Fields:
    executableFile: Required. Cloud Storage URI of executable file.
    executionTimeout: Optional. Amount of time executable has to complete.
      Default is 10 minutes (see JSON representation of Duration
      (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).Cluster creation fails with an explanatory
      error message (the name of the executable that caused the error and the
      exceeded timeout period) if the executable is not completed at end of
      the timeout period.
  """

  executableFile = _messages.StringField(1)
  executionTimeout = _messages.StringField(2)


class NodePool(_messages.Message):
  r"""indicating a list of workers of same type

  Enums:
    RepairActionValueValuesEnum: Required. Repair action to take on specified
      resources of the node pool.

  Fields:
    id: Required. A unique id of the node pool. Primary and Secondary workers
      can be specified using special reserved ids PRIMARY_WORKER_POOL and
      SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced
      using corresponding pool id.
    instanceNames: Name of instances to be repaired. These instances must
      belong to specified node pool.
    repairAction: Required. Repair action to take on specified resources of
      the node pool.
  """

  class RepairActionValueValuesEnum(_messages.Enum):
    r"""Required. Repair action to take on specified resources of the node
    pool.

    Values:
      REPAIR_ACTION_UNSPECIFIED: No action will be taken by default.
      DELETE: delete the specified list of nodes.
    """
    REPAIR_ACTION_UNSPECIFIED = 0
    DELETE = 1

  id = _messages.StringField(1)
  instanceNames = _messages.StringField(2, repeated=True)
  repairAction = _messages.EnumField('RepairActionValueValuesEnum', 3)


class Operation(_messages.Message):
  r"""This resource represents a long-running operation that is the result of
  a network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal, successful response of the operation. If the
      original method returns no data on success, such as Delete, the response
      is google.protobuf.Empty. If the original method is standard
      Get/Create/Update, the response should be the resource. For other
      methods, the response should have the type XxxResponse, where Xxx is the
      original method name. For example, if the original method name is
      TakeSnapshot(), the inferred response type is TakeSnapshotResponse.

  Fields:
    done: If the value is false, it means the operation is still in progress.
      If true, the operation is completed, and either error or response is
      available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the name should be a resource name ending with operations/{unique_id}.
    response: The normal, successful response of the operation. If the
      original method returns no data on success, such as Delete, the response
      is google.protobuf.Empty. If the original method is standard
      Get/Create/Update, the response should be the resource. For other
      methods, the response should have the type XxxResponse, where Xxx is the
      original method name. For example, if the original method name is
      TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal, successful response of the operation. If the original
    method returns no data on success, such as Delete, the response is
    google.protobuf.Empty. If the original method is standard
    Get/Create/Update, the response should be the resource. For other methods,
    the response should have the type XxxResponse, where Xxx is the original
    method name. For example, if the original method name is TakeSnapshot(),
    the inferred response type is TakeSnapshotResponse.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)


class OrderedJob(_messages.Message):
  r"""A job executed by the workflow.

  Messages:
    LabelsValue: Optional. The labels to associate with this job.Label keys
      must be between 1 and 63 characters long, and must conform to the
      following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be
      between 1 and 63 characters long, and must conform to the following
      regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can
      be associated with a given job.

  Fields:
    flinkJob: Optional. Job is a Flink job.
    hadoopJob: Optional. Job is a Hadoop job.
    hiveJob: Optional. Job is a Hive job.
    labels: Optional. The labels to associate with this job.Label keys must be
      between 1 and 63 characters long, and must conform to the following
      regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
      63 characters long, and must conform to the following regular
      expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be
      associated with a given job.
    pigJob: Optional. Job is a Pig job.
    prerequisiteStepIds: Optional. The optional list of prerequisite job
      step_ids. If not specified, the job will start at the beginning of
      workflow.
    prestoJob: Optional. Job is a Presto job.
    pysparkJob: Optional. Job is a PySpark job.
    scheduling: Optional. Job scheduling configuration.
    sparkJob: Optional. Job is a Spark job.
    sparkRJob: Optional. Job is a SparkR job.
    sparkSqlJob: Optional. Job is a SparkSql job.
    stepId: Required. The step id. The id must be unique among all jobs within
      the template.The step id is used as prefix for job id, as job goog-
      dataproc-workflow-step-id label, and in prerequisiteStepIds field from
      other steps.The id must contain only letters (a-z, A-Z), numbers (0-9),
      underscores (_), and hyphens (-). Cannot begin or end with underscore or
      hyphen. Must consist of between 3 and 50 characters.
    trinoJob: Optional. Job is a Trino job.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this job.Label keys must be
    between 1 and 63 characters long, and must conform to the following
    regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
    63 characters long, and must conform to the following regular expression:
    \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a
    given job.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  flinkJob = _messages.MessageField('FlinkJob', 1)
  hadoopJob = _messages.MessageField('HadoopJob', 2)
  hiveJob = _messages.MessageField('HiveJob', 3)
  labels = _messages.MessageField('LabelsValue', 4)
  pigJob = _messages.MessageField('PigJob', 5)
  prerequisiteStepIds = _messages.StringField(6, repeated=True)
  prestoJob = _messages.MessageField('PrestoJob', 7)
  pysparkJob = _messages.MessageField('PySparkJob', 8)
  scheduling = _messages.MessageField('JobScheduling', 9)
  sparkJob = _messages.MessageField('SparkJob', 10)
  sparkRJob = _messages.MessageField('SparkRJob', 11)
  sparkSqlJob = _messages.MessageField('SparkSqlJob', 12)
  stepId = _messages.StringField(13)
  trinoJob = _messages.MessageField('TrinoJob', 14)


class OutputMetrics(_messages.Message):
  r"""Metrics about the data written by the task.

  Fields:
    bytesWritten: A string attribute.
    recordsWritten: A string attribute.
  """

  bytesWritten = _messages.IntegerField(1)
  recordsWritten = _messages.IntegerField(2)


class OutputQuantileMetrics(_messages.Message):
  r"""A OutputQuantileMetrics object.

  Fields:
    bytesWritten: A Quantiles attribute.
    recordsWritten: A Quantiles attribute.
  """

  bytesWritten = _messages.MessageField('Quantiles', 1)
  recordsWritten = _messages.MessageField('Quantiles', 2)


class ParameterValidation(_messages.Message):
  r"""Configuration for parameter validation.

  Fields:
    regex: Validation based on regular expressions.
    values: Validation based on a list of allowed values.
  """

  regex = _messages.MessageField('RegexValidation', 1)
  values = _messages.MessageField('ValueValidation', 2)


class PeripheralsConfig(_messages.Message):
  r"""Auxiliary services configuration for a workload.

  Fields:
    bigqueryMetastoreConfig: Optional. The BigQuery Metastore configuration
      for the workload.
    dataprocMetastoreConfig: Optional. The Dataproc Metastore configuration
      for the workload.
    ephemeralMetastoreConfig: Optional. An Ephemeral Metastore configuration
      for the workload. For Spark it will be in-memory Hive Metastore.
    metastoreService: Optional. Resource name of an existing Dataproc
      Metastore service.Example:
      projects/[project_id]/locations/[region]/services/[service_id]
    sparkHistoryServerConfig: Optional. The Spark History Server configuration
      for the workload.
  """

  bigqueryMetastoreConfig = _messages.MessageField('BigqueryMetastoreConfig', 1)
  dataprocMetastoreConfig = _messages.MessageField('DataprocMetastoreConfig', 2)
  ephemeralMetastoreConfig = _messages.MessageField('EphemeralMetastoreConfig', 3)
  metastoreService = _messages.StringField(4)
  sparkHistoryServerConfig = _messages.MessageField('SparkHistoryServerConfig', 5)


class PigJob(_messages.Message):
  r"""A Dataproc job for running Apache Pig (https://pig.apache.org/) queries
  on YARN.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure Pig. Properties that conflict with values set by the Dataproc
      API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
      in user code.
    ScriptVariablesValue: Optional. Mapping of query variable names to values
      (equivalent to the Pig command: name=[value]).

  Fields:
    continueOnFailure: Optional. Whether to continue executing queries if a
      query fails. The default value is false. Setting to true can be useful
      when executing independent parallel queries.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
      the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
    loggingConfig: Optional. The runtime log config for job execution.
    properties: Optional. A mapping of property names to values, used to
      configure Pig. Properties that conflict with values set by the Dataproc
      API might be overwritten. Can include properties set in
      /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
      in user code.
    queryFileUri: The HCFS URI of the script that contains the Pig queries.
    queryList: A list of queries.
    scriptVariables: Optional. Mapping of query variable names to values
      (equivalent to the Pig command: name=[value]).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    Pig. Properties that conflict with values set by the Dataproc API might be
    overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
    /etc/pig/conf/pig.properties, and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ScriptVariablesValue(_messages.Message):
    r"""Optional. Mapping of query variable names to values (equivalent to the
    Pig command: name=[value]).

    Messages:
      AdditionalProperty: An additional property for a ScriptVariablesValue
        object.

    Fields:
      additionalProperties: Additional properties of type ScriptVariablesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ScriptVariablesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  continueOnFailure = _messages.BooleanField(1)
  jarFileUris = _messages.StringField(2, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 3)
  properties = _messages.MessageField('PropertiesValue', 4)
  queryFileUri = _messages.StringField(5)
  queryList = _messages.MessageField('QueryList', 6)
  scriptVariables = _messages.MessageField('ScriptVariablesValue', 7)


class Policy(_messages.Message):
  r"""An Identity and Access Management (IAM) policy, which specifies access
  controls for Google Cloud resources.A Policy is a collection of bindings. A
  binding binds one or more members, or principals, to a single role.
  Principals can be user accounts, service accounts, Google groups, and
  domains (such as G Suite). A role is a named list of permissions; each role
  can be an IAM predefined role or a user-created custom role.For some types
  of Google Cloud resources, a binding can also specify a condition, which is
  a logical expression that allows access to a resource only if the expression
  evaluates to true. A condition can add constraints based on attributes of
  the request, the resource, or both. To learn which resources support
  conditions in their IAM policies, see the IAM documentation
  (https://cloud.google.com/iam/help/conditions/resource-policies).JSON
  example: { "bindings": [ { "role":
  "roles/resourcemanager.organizationAdmin", "members": [
  "user:mike@example.com", "group:admins@example.com", "domain:google.com",
  "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
  "roles/resourcemanager.organizationViewer", "members": [
  "user:eve@example.com" ], "condition": { "title": "expirable access",
  "description": "Does not grant access after Sep 2020", "expression":
  "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
  "BwWWja0YfJA=", "version": 3 } YAML example: bindings: - members: -
  user:mike@example.com - group:admins@example.com - domain:google.com -
  serviceAccount:my-project-id@appspot.gserviceaccount.com role:
  roles/resourcemanager.organizationAdmin - members: - user:eve@example.com
  role: roles/resourcemanager.organizationViewer condition: title: expirable
  access description: Does not grant access after Sep 2020 expression:
  request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
  version: 3 For a description of IAM and its features, see the IAM
  documentation (https://cloud.google.com/iam/docs/).

  Fields:
    bindings: Associates a list of members, or principals, with a role.
      Optionally, may specify a condition that determines how and when the
      bindings are applied. Each of the bindings must contain at least one
      principal.The bindings in a Policy can refer to up to 1,500 principals;
      up to 250 of these principals can be Google groups. Each occurrence of a
      principal counts towards these limits. For example, if the bindings
      grant 50 different roles to user:alice@example.com, and not to any other
      principal, then you can add another 1,450 principals to the bindings in
      the Policy.
    etag: etag is used for optimistic concurrency control as a way to help
      prevent simultaneous updates of a policy from overwriting each other. It
      is strongly suggested that systems make use of the etag in the read-
      modify-write cycle to perform policy updates in order to avoid race
      conditions: An etag is returned in the response to getIamPolicy, and
      systems are expected to put that etag in the request to setIamPolicy to
      ensure that their change will be applied to the same version of the
      policy.Important: If you use IAM Conditions, you must include the etag
      field whenever you call setIamPolicy. If you omit this field, then IAM
      allows you to overwrite a version 3 policy with a version 1 policy, and
      all of the conditions in the version 3 policy are lost.
    version: Specifies the format of the policy.Valid values are 0, 1, and 3.
      Requests that specify an invalid value are rejected.Any operation that
      affects conditional role bindings must specify version 3. This
      requirement applies to the following operations: Getting a policy that
      includes a conditional role binding Adding a conditional role binding to
      a policy Changing a conditional role binding in a policy Removing any
      role binding, with or without a condition, from a policy that includes
      conditionsImportant: If you use IAM Conditions, you must include the
      etag field whenever you call setIamPolicy. If you omit this field, then
      IAM allows you to overwrite a version 3 policy with a version 1 policy,
      and all of the conditions in the version 3 policy are lost.If a policy
      does not include any conditions, operations on that policy may specify
      any valid version or leave the field unset.To learn which resources
      support conditions in their IAM policies, see the IAM documentation
      (https://cloud.google.com/iam/help/conditions/resource-policies).
  """

  bindings = _messages.MessageField('Binding', 1, repeated=True)
  etag = _messages.BytesField(2)
  version = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class PoolData(_messages.Message):
  r"""Pool Data

  Fields:
    name: A string attribute.
    stageIds: A string attribute.
  """

  name = _messages.StringField(1)
  stageIds = _messages.IntegerField(2, repeated=True)


class PrestoJob(_messages.Message):
  r"""A Dataproc job for running Presto (https://prestosql.io/) queries.
  IMPORTANT: The Dataproc Presto Optional Component
  (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be
  enabled when the cluster is created to submit a Presto job to the cluster.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values. Used to
      set Presto session properties (https://prestodb.io/docs/current/sql/set-
      session.html) Equivalent to using the --session flag in the Presto CLI

  Fields:
    clientTags: Optional. Presto client tags to attach to this query
    continueOnFailure: Optional. Whether to continue executing queries if a
      query fails. The default value is false. Setting to true can be useful
      when executing independent parallel queries.
    loggingConfig: Optional. The runtime log config for job execution.
    outputFormat: Optional. The format in which query output will be
      displayed. See the Presto documentation for supported output formats
    properties: Optional. A mapping of property names to values. Used to set
      Presto session properties (https://prestodb.io/docs/current/sql/set-
      session.html) Equivalent to using the --session flag in the Presto CLI
    queryFileUri: The HCFS URI of the script that contains SQL queries.
    queryList: A list of queries.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values. Used to set Presto
    session properties (https://prestodb.io/docs/current/sql/set-session.html)
    Equivalent to using the --session flag in the Presto CLI

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clientTags = _messages.StringField(1, repeated=True)
  continueOnFailure = _messages.BooleanField(2)
  loggingConfig = _messages.MessageField('LoggingConfig', 3)
  outputFormat = _messages.StringField(4)
  properties = _messages.MessageField('PropertiesValue', 5)
  queryFileUri = _messages.StringField(6)
  queryList = _messages.MessageField('QueryList', 7)


class ProcessSummary(_messages.Message):
  r"""Process Summary

  Messages:
    ProcessLogsValue: A ProcessLogsValue object.

  Fields:
    addTime: A string attribute.
    hostPort: A string attribute.
    isActive: A boolean attribute.
    processId: A string attribute.
    processLogs: A ProcessLogsValue attribute.
    removeTime: A string attribute.
    totalCores: A integer attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ProcessLogsValue(_messages.Message):
    r"""A ProcessLogsValue object.

    Messages:
      AdditionalProperty: An additional property for a ProcessLogsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ProcessLogsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ProcessLogsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  addTime = _messages.StringField(1)
  hostPort = _messages.StringField(2)
  isActive = _messages.BooleanField(3)
  processId = _messages.StringField(4)
  processLogs = _messages.MessageField('ProcessLogsValue', 5)
  removeTime = _messages.StringField(6)
  totalCores = _messages.IntegerField(7, variant=_messages.Variant.INT32)


class PropertiesInfo(_messages.Message):
  r"""Properties of the workload organized by origin.

  Messages:
    AutotuningPropertiesValue: Output only. Properties set by autotuning
      engine.

  Fields:
    autotuningProperties: Output only. Properties set by autotuning engine.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class AutotuningPropertiesValue(_messages.Message):
    r"""Output only. Properties set by autotuning engine.

    Messages:
      AdditionalProperty: An additional property for a
        AutotuningPropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type
        AutotuningPropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AutotuningPropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A ValueInfo attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ValueInfo', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  autotuningProperties = _messages.MessageField('AutotuningPropertiesValue', 1)


class ProvisioningModelMix(_messages.Message):
  r"""Defines how Dataproc should create VMs with a mixture of provisioning
  models.

  Fields:
    standardCapacityBase: Optional. The base capacity that will always use
      Standard VMs to avoid risk of more preemption than the minimum capacity
      you need. Dataproc will create only standard VMs until it reaches
      standard_capacity_base, then it will start using
      standard_capacity_percent_above_base to mix Spot with Standard VMs. eg.
      If 15 instances are requested and standard_capacity_base is 5, Dataproc
      will create 5 standard VMs and then start mixing spot and standard VMs
      for remaining 10 instances.
    standardCapacityPercentAboveBase: Optional. The percentage of target
      capacity that should use Standard VM. The remaining percentage will use
      Spot VMs. The percentage applies only to the capacity above
      standard_capacity_base. eg. If 15 instances are requested and
      standard_capacity_base is 5 and standard_capacity_percent_above_base is
      30, Dataproc will create 5 standard VMs and then start mixing spot and
      standard VMs for remaining 10 instances. The mix will be 30% standard
      and 70% spot.
  """

  standardCapacityBase = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  standardCapacityPercentAboveBase = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class PyFlinkJob(_messages.Message):
  r"""A Dataproc job for running Apache PyFlink (https://flink.apache.org/)
  applications on YARN.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure PyFlink. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/flink/conf/flink-defaults.conf and classes in user code.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as --conf, that can be set as job properties, since a
      collision might occur that causes an incorrect job submission.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
      the Python driver and tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainPythonFileUri: Optional. The HCFS URI of the main Python file to use
      as the driver. Must be a .py file.
    properties: Optional. A mapping of property names to values, used to
      configure PyFlink. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/flink/conf/flink-defaults.conf and classes in user code.
    pythonFileUris: Optional. HCFS file URIs of Python files to pass to the
      PyFlink framework. Supported file types: .py, .egg, and .zip.
    pythonModule: Optional. The Python module that contains the PyFlink
      application entry point. This option must be used with python_file_uris
    pythonRequirements: Optional. The requirements.txt file which defines the
      third party dependencies of the PyFlink application
    savepointUri: Optional. HCFS URI of the savepoint which contains the last
      saved progress for this job.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    PyFlink. Properties that conflict with values set by the Dataproc API
    might be overwritten. Can include properties set in /etc/flink/conf/flink-
    defaults.conf and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  jarFileUris = _messages.StringField(3, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 4)
  mainPythonFileUri = _messages.StringField(5)
  properties = _messages.MessageField('PropertiesValue', 6)
  pythonFileUris = _messages.StringField(7, repeated=True)
  pythonModule = _messages.StringField(8)
  pythonRequirements = _messages.StringField(9)
  savepointUri = _messages.StringField(10)


class PyPiRepositoryConfig(_messages.Message):
  r"""Configuration for PyPi repository

  Fields:
    pypiRepository: Optional. PyPi repository address
  """

  pypiRepository = _messages.StringField(1)


class PySparkBatch(_messages.Message):
  r"""A configuration for running an Apache PySpark (https://spark.apache.org/
  docs/latest/api/python/getting_started/quickstart.html) batch workload.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments that can be set as batch properties, such as --conf, since a
      collision can occur that causes an incorrect batch submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor.
    jarFileUris: Optional. HCFS URIs of jar files to add to the classpath of
      the Spark driver and tasks.
    mainPythonFileUri: Required. The HCFS URI of the main Python file to use
      as the Spark driver. Must be a .py file.
    pythonFileUris: Optional. HCFS file URIs of Python files to pass to the
      PySpark framework. Supported file types: .py, .egg, and .zip.
  """

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  jarFileUris = _messages.StringField(4, repeated=True)
  mainPythonFileUri = _messages.StringField(5)
  pythonFileUris = _messages.StringField(6, repeated=True)


class PySparkJob(_messages.Message):
  r"""A Dataproc job for running Apache PySpark
  (https://spark.apache.org/docs/latest/api/python/index.html#pyspark-
  overview) applications on YARN.

  Enums:
    SparkEngineValueValuesEnum: Optional. The engine on which the spark job
      runs.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure PySpark. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.Note: Spark applications must be deployed in
      cluster mode (https://spark.apache.org/docs/latest/cluster-
      overview.html) for correct environment propagation.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as --conf, that can be set as job properties, since a
      collision may occur that causes an incorrect job submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor. Useful for naively parallel tasks.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
      the Python driver and tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainPythonFileUri: Required. The HCFS URI of the main Python file to use
      as the driver. Must be a .py file.
    properties: Optional. A mapping of property names to values, used to
      configure PySpark. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.
    pythonFileUris: Optional. HCFS file URIs of Python files to pass to the
      PySpark framework. Supported file types: .py, .egg, and .zip.
    sparkEngine: Optional. The engine on which the spark job runs.
  """

  class SparkEngineValueValuesEnum(_messages.Enum):
    r"""Optional. The engine on which the spark job runs.

    Values:
      SPARK_ENGINE_UNSPECIFIED: Not set.
      SPARK_ENGINE_DEFAULT: Default engine for Spark Job
      SPARK_ENGINE_NATIVE: Native Query Engine for Spark Job
    """
    SPARK_ENGINE_UNSPECIFIED = 0
    SPARK_ENGINE_DEFAULT = 1
    SPARK_ENGINE_NATIVE = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    PySpark. Properties that conflict with values set by the Dataproc API
    might be overwritten. Can include properties set in /etc/spark/conf/spark-
    defaults.conf and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  jarFileUris = _messages.StringField(4, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 5)
  mainPythonFileUri = _messages.StringField(6)
  properties = _messages.MessageField('PropertiesValue', 7)
  pythonFileUris = _messages.StringField(8, repeated=True)
  sparkEngine = _messages.EnumField('SparkEngineValueValuesEnum', 9)


class Quantiles(_messages.Message):
  r"""Quantile metrics data related to Tasks. Units can be seconds, bytes,
  milliseconds, etc depending on the message type.

  Fields:
    count: A string attribute.
    maximum: A string attribute.
    minimum: A string attribute.
    percentile25: A string attribute.
    percentile50: A string attribute.
    percentile75: A string attribute.
    sum: A string attribute.
  """

  count = _messages.IntegerField(1)
  maximum = _messages.IntegerField(2)
  minimum = _messages.IntegerField(3)
  percentile25 = _messages.IntegerField(4)
  percentile50 = _messages.IntegerField(5)
  percentile75 = _messages.IntegerField(6)
  sum = _messages.IntegerField(7)


class QueryList(_messages.Message):
  r"""A list of queries to run on a cluster.

  Fields:
    queries: Required. The queries to execute. You do not need to end a query
      expression with a semicolon. Multiple queries can be specified in one
      string by separating each with a semicolon. Here is an example of a
      Dataproc API snippet that uses a QueryList to specify a HiveJob:
      "hiveJob": { "queryList": { "queries": [ "query1", "query2",
      "query3;query4", ] } }
  """

  queries = _messages.StringField(1, repeated=True)


class RayBatch(_messages.Message):
  r"""A configuration for running an Ray Job
  (https://docs.ray.io/en/latest/cluster/running-applications/job-
  submission/index.html) workload.

  Fields:
    archiveUris: Optional. HCFS URI of archives to be extracted into the
      working directory of each Ray node. Supported file types: .tar, .tar.gz,
      .tgz, and .zip.
    args: Optional. The arguments to pass to the Ray job script.
    mainPythonFileUri: Required. The HCFS URI of the main Python file to use
      as the Ray job. Must be a .py file.
  """

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  mainPythonFileUri = _messages.StringField(3)


class RddDataDistribution(_messages.Message):
  r"""Details about RDD usage.

  Fields:
    address: A string attribute.
    diskUsed: A string attribute.
    memoryRemaining: A string attribute.
    memoryUsed: A string attribute.
    offHeapMemoryRemaining: A string attribute.
    offHeapMemoryUsed: A string attribute.
    onHeapMemoryRemaining: A string attribute.
    onHeapMemoryUsed: A string attribute.
  """

  address = _messages.StringField(1)
  diskUsed = _messages.IntegerField(2)
  memoryRemaining = _messages.IntegerField(3)
  memoryUsed = _messages.IntegerField(4)
  offHeapMemoryRemaining = _messages.IntegerField(5)
  offHeapMemoryUsed = _messages.IntegerField(6)
  onHeapMemoryRemaining = _messages.IntegerField(7)
  onHeapMemoryUsed = _messages.IntegerField(8)


class RddOperationCluster(_messages.Message):
  r"""A grouping of nodes representing higher level constructs (stage, job
  etc.).

  Fields:
    childClusters: A RddOperationCluster attribute.
    childNodes: A RddOperationNode attribute.
    name: A string attribute.
    rddClusterId: A string attribute.
  """

  childClusters = _messages.MessageField('RddOperationCluster', 1, repeated=True)
  childNodes = _messages.MessageField('RddOperationNode', 2, repeated=True)
  name = _messages.StringField(3)
  rddClusterId = _messages.StringField(4)


class RddOperationEdge(_messages.Message):
  r"""A directed edge representing dependency between two RDDs.

  Fields:
    fromId: A integer attribute.
    toId: A integer attribute.
  """

  fromId = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  toId = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class RddOperationGraph(_messages.Message):
  r"""Graph representing RDD dependencies. Consists of edges and a root
  cluster.

  Fields:
    edges: A RddOperationEdge attribute.
    incomingEdges: A RddOperationEdge attribute.
    outgoingEdges: A RddOperationEdge attribute.
    rootCluster: A RddOperationCluster attribute.
    stageId: A string attribute.
  """

  edges = _messages.MessageField('RddOperationEdge', 1, repeated=True)
  incomingEdges = _messages.MessageField('RddOperationEdge', 2, repeated=True)
  outgoingEdges = _messages.MessageField('RddOperationEdge', 3, repeated=True)
  rootCluster = _messages.MessageField('RddOperationCluster', 4)
  stageId = _messages.IntegerField(5)


class RddOperationNode(_messages.Message):
  r"""A node in the RDD operation graph. Corresponds to a single RDD.

  Enums:
    OutputDeterministicLevelValueValuesEnum:

  Fields:
    barrier: A boolean attribute.
    cached: A boolean attribute.
    callsite: A string attribute.
    name: A string attribute.
    nodeId: A integer attribute.
    outputDeterministicLevel: A OutputDeterministicLevelValueValuesEnum
      attribute.
  """

  class OutputDeterministicLevelValueValuesEnum(_messages.Enum):
    r"""OutputDeterministicLevelValueValuesEnum enum type.

    Values:
      DETERMINISTIC_LEVEL_UNSPECIFIED: <no description>
      DETERMINISTIC_LEVEL_DETERMINATE: <no description>
      DETERMINISTIC_LEVEL_UNORDERED: <no description>
      DETERMINISTIC_LEVEL_INDETERMINATE: <no description>
    """
    DETERMINISTIC_LEVEL_UNSPECIFIED = 0
    DETERMINISTIC_LEVEL_DETERMINATE = 1
    DETERMINISTIC_LEVEL_UNORDERED = 2
    DETERMINISTIC_LEVEL_INDETERMINATE = 3

  barrier = _messages.BooleanField(1)
  cached = _messages.BooleanField(2)
  callsite = _messages.StringField(3)
  name = _messages.StringField(4)
  nodeId = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  outputDeterministicLevel = _messages.EnumField('OutputDeterministicLevelValueValuesEnum', 6)


class RddPartitionInfo(_messages.Message):
  r"""Information about RDD partitions.

  Fields:
    blockName: A string attribute.
    diskUsed: A string attribute.
    executors: A string attribute.
    memoryUsed: A string attribute.
    storageLevel: A string attribute.
  """

  blockName = _messages.StringField(1)
  diskUsed = _messages.IntegerField(2)
  executors = _messages.StringField(3, repeated=True)
  memoryUsed = _messages.IntegerField(4)
  storageLevel = _messages.StringField(5)


class RddStorageInfo(_messages.Message):
  r"""Overall data about RDD storage.

  Fields:
    dataDistribution: A RddDataDistribution attribute.
    diskUsed: A string attribute.
    memoryUsed: A string attribute.
    name: A string attribute.
    numCachedPartitions: A integer attribute.
    numPartitions: A integer attribute.
    partitions: A RddPartitionInfo attribute.
    rddStorageId: A integer attribute.
    storageLevel: A string attribute.
  """

  dataDistribution = _messages.MessageField('RddDataDistribution', 1, repeated=True)
  diskUsed = _messages.IntegerField(2)
  memoryUsed = _messages.IntegerField(3)
  name = _messages.StringField(4)
  numCachedPartitions = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  numPartitions = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  partitions = _messages.MessageField('RddPartitionInfo', 7, repeated=True)
  rddStorageId = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  storageLevel = _messages.StringField(9)


class RegexValidation(_messages.Message):
  r"""Validation based on regular expressions.

  Fields:
    regexes: Required. RE2 regular expressions used to validate the
      parameter's value. The value must match the regex in its entirety
      (substring matches are not sufficient).
  """

  regexes = _messages.StringField(1, repeated=True)


class RepairClusterRequest(_messages.Message):
  r"""A request to repair a cluster.

  Fields:
    cluster: Optional. Cluster to be repaired
    clusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail
      (with error NOT_FOUND) if a cluster with the specified UUID does not
      exist.
    dataprocSuperUser: Optional. Whether the request is submitted by Dataproc
      super user. If true, IAM will check 'dataproc.clusters.repair'
      permission instead of 'dataproc.clusters.update' permission. This is to
      give Dataproc superuser the ability to repair clusters without granting
      the overly broad update permission.
    gracefulDecommissionTimeout: Optional. Timeout for graceful YARN
      decommissioning. Graceful decommissioning facilitates the removal of
      cluster nodes without interrupting jobs in progress. The timeout
      specifies the amount of time to wait for jobs finish before forcefully
      removing nodes. The default timeout is 0 for forceful decommissioning,
      and the maximum timeout period is 1 day. (see JSON Mapping-Duration
      (https://developers.google.com/protocol-
      buffers/docs/proto3#json)).graceful_decommission_timeout is supported in
      Dataproc image versions 1.2+.
    nodePools: Optional. Node pools and corresponding repair action to be
      taken. All node pools should be unique in this request. i.e. Multiple
      entries for the same node pool id are not allowed.
    parentOperationId: Optional. operation id of the parent operation sending
      the repair request
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two RepairClusterRequests with the same ID, the second
      request is ignored, and the first google.longrunning.Operation created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  cluster = _messages.MessageField('ClusterToRepair', 1)
  clusterUuid = _messages.StringField(2)
  dataprocSuperUser = _messages.BooleanField(3)
  gracefulDecommissionTimeout = _messages.StringField(4)
  nodePools = _messages.MessageField('NodePool', 5, repeated=True)
  parentOperationId = _messages.StringField(6)
  requestId = _messages.StringField(7)


class RepairNodeGroupRequest(_messages.Message):
  r"""A RepairNodeGroupRequest object.

  Enums:
    RepairActionValueValuesEnum: Required. Repair action to take on specified
      resources of the node pool.

  Fields:
    instanceNames: Required. Name of instances to be repaired. These instances
      must belong to specified node pool.
    repairAction: Required. Repair action to take on specified resources of
      the node pool.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two RepairNodeGroupRequest with the same ID, the second
      request is ignored and the first google.longrunning.Operation created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  class RepairActionValueValuesEnum(_messages.Enum):
    r"""Required. Repair action to take on specified resources of the node
    pool.

    Values:
      REPAIR_ACTION_UNSPECIFIED: No action will be taken by default.
      REPLACE: replace the specified list of nodes.
    """
    REPAIR_ACTION_UNSPECIFIED = 0
    REPLACE = 1

  instanceNames = _messages.StringField(1, repeated=True)
  repairAction = _messages.EnumField('RepairActionValueValuesEnum', 2)
  requestId = _messages.StringField(3)


class RepositoryConfig(_messages.Message):
  r"""Configuration for dependency repositories

  Fields:
    pypiRepositoryConfig: Optional. Configuration for PyPi repository.
  """

  pypiRepositoryConfig = _messages.MessageField('PyPiRepositoryConfig', 1)


class ReservationAffinity(_messages.Message):
  r"""Reservation Affinity for consuming Zonal reservation.

  Enums:
    ConsumeReservationTypeValueValuesEnum: Optional. Type of reservation to
      consume

  Fields:
    consumeReservationType: Optional. Type of reservation to consume
    key: Optional. Corresponds to the label key of reservation resource.
    values: Optional. Corresponds to the label values of reservation resource.
  """

  class ConsumeReservationTypeValueValuesEnum(_messages.Enum):
    r"""Optional. Type of reservation to consume

    Values:
      TYPE_UNSPECIFIED: <no description>
      NO_RESERVATION: Do not consume from any allocated capacity.
      ANY_RESERVATION: Consume any reservation available.
      SPECIFIC_RESERVATION: Must consume from a specific reservation. Must
        specify key value fields for specifying the reservations.
    """
    TYPE_UNSPECIFIED = 0
    NO_RESERVATION = 1
    ANY_RESERVATION = 2
    SPECIFIC_RESERVATION = 3

  consumeReservationType = _messages.EnumField('ConsumeReservationTypeValueValuesEnum', 1)
  key = _messages.StringField(2)
  values = _messages.StringField(3, repeated=True)


class ResizeNodeGroupRequest(_messages.Message):
  r"""A request to resize a node group.

  Fields:
    gracefulDecommissionTimeout: Optional. Timeout for graceful YARN
      decommissioning. Graceful decommissioning
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/scaling-clusters#graceful_decommissioning) allows the removal
      of nodes from the Compute Engine node group without interrupting jobs in
      progress. This timeout specifies how long to wait for jobs in progress
      to finish before forcefully removing nodes (and potentially interrupting
      jobs). Default timeout is 0 (for forceful decommission), and the maximum
      allowed timeout is 1 day. (see JSON representation of Duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only
      supported on Dataproc image versions 1.2 and higher.
    parentOperationId: Optional. operation id of the parent operation sending
      the resize request
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two ResizeNodeGroupRequest (https://cloud.google.com/dat
      aproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.
      v1.ResizeNodeGroupRequests) with the same ID, the second request is
      ignored and the first google.longrunning.Operation created and stored in
      the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    size: Required. The number of running instances for the node group to
      maintain. The group adds or removes instances to maintain the number of
      instances specified by this parameter.
  """

  gracefulDecommissionTimeout = _messages.StringField(1)
  parentOperationId = _messages.StringField(2)
  requestId = _messages.StringField(3)
  size = _messages.IntegerField(4, variant=_messages.Variant.INT32)


class ResolvedCohortInfo(_messages.Message):
  r"""Information about the cohort that the workload belongs to.

  Enums:
    CohortSourceValueValuesEnum: Output only. Source of the cohort.

  Fields:
    cohortSource: Output only. Source of the cohort.
    resolvedCohort: Output only. Final cohort that was used to tune the
      workload.
  """

  class CohortSourceValueValuesEnum(_messages.Enum):
    r"""Output only. Source of the cohort.

    Values:
      COHORT_SOURCE_UNSPECIFIED: Cohort source is unspecified.
      USER_PROVIDED: Cohort was resolved from the cohort config, explicitly
        provided by the user.
      AIRFLOW: Composed from the labels coming from Airflow/Composer.
    """
    COHORT_SOURCE_UNSPECIFIED = 0
    USER_PROVIDED = 1
    AIRFLOW = 2

  cohortSource = _messages.EnumField('CohortSourceValueValuesEnum', 1)
  resolvedCohort = _messages.StringField(2)


class ResourceInformation(_messages.Message):
  r"""A ResourceInformation object.

  Fields:
    addresses: A string attribute.
    name: A string attribute.
  """

  addresses = _messages.StringField(1, repeated=True)
  name = _messages.StringField(2)


class ResourceProfileInfo(_messages.Message):
  r"""Resource profile that contains information about all the resources
  required by executors and tasks.

  Messages:
    ExecutorResourcesValue: A ExecutorResourcesValue object.
    TaskResourcesValue: A TaskResourcesValue object.

  Fields:
    executorResources: A ExecutorResourcesValue attribute.
    resourceProfileId: A integer attribute.
    taskResources: A TaskResourcesValue attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ExecutorResourcesValue(_messages.Message):
    r"""A ExecutorResourcesValue object.

    Messages:
      AdditionalProperty: An additional property for a ExecutorResourcesValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        ExecutorResourcesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ExecutorResourcesValue object.

      Fields:
        key: Name of the additional property.
        value: A ExecutorResourceRequest attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ExecutorResourceRequest', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class TaskResourcesValue(_messages.Message):
    r"""A TaskResourcesValue object.

    Messages:
      AdditionalProperty: An additional property for a TaskResourcesValue
        object.

    Fields:
      additionalProperties: Additional properties of type TaskResourcesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TaskResourcesValue object.

      Fields:
        key: Name of the additional property.
        value: A TaskResourceRequest attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('TaskResourceRequest', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  executorResources = _messages.MessageField('ExecutorResourcesValue', 1)
  resourceProfileId = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  taskResources = _messages.MessageField('TaskResourcesValue', 3)


class RuntimeConfig(_messages.Message):
  r"""Runtime configuration for a workload.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, which
      are used to configure workload execution.

  Fields:
    autotuningConfig: Optional. Autotuning configuration of the workload.
    cohort: Optional. Cohort identifier. Identifies families of the workloads
      that have the same shape, for example, daily ETL jobs.
    containerImage: Optional. Optional custom container image for the job
      runtime environment. If not specified, a default container image will be
      used.
    properties: Optional. A mapping of property names to values, which are
      used to configure workload execution.
    repositoryConfig: Optional. Dependency repository configuration.
    version: Optional. Version of the batch runtime.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, which are used to
    configure workload execution.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  autotuningConfig = _messages.MessageField('AutotuningConfig', 1)
  cohort = _messages.StringField(2)
  containerImage = _messages.StringField(3)
  properties = _messages.MessageField('PropertiesValue', 4)
  repositoryConfig = _messages.MessageField('RepositoryConfig', 5)
  version = _messages.StringField(6)


class RuntimeInfo(_messages.Message):
  r"""Runtime information about workload execution.

  Messages:
    EndpointsValue: Output only. Map of remote access endpoints (such as web
      interfaces and APIs) to their URIs.

  Fields:
    approximateUsage: Output only. Approximate workload resource usage,
      calculated when the workload completes (see Dataproc Serverless pricing
      (https://cloud.google.com/dataproc-serverless/pricing)).Note: This
      metric calculation may change in the future, for example, to capture
      cumulative workload resource consumption during workload execution (see
      the Dataproc Serverless release notes
      (https://cloud.google.com/dataproc-serverless/docs/release-notes) for
      announcements, changes, fixes and other Dataproc developments).
    currentUsage: Output only. Snapshot of current workload resource usage.
    diagnosticOutputUri: Output only. A URI pointing to the location of the
      diagnostics tarball.
    endpoints: Output only. Map of remote access endpoints (such as web
      interfaces and APIs) to their URIs.
    outputUri: Output only. A URI pointing to the location of the stdout and
      stderr of the workload.
    propertiesInfo: Optional. Properties of the workload organized by origin.
    resolvedCohortInfo: Output only. Information about the cohort that the
      workload belongs to.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class EndpointsValue(_messages.Message):
    r"""Output only. Map of remote access endpoints (such as web interfaces
    and APIs) to their URIs.

    Messages:
      AdditionalProperty: An additional property for a EndpointsValue object.

    Fields:
      additionalProperties: Additional properties of type EndpointsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a EndpointsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  approximateUsage = _messages.MessageField('UsageMetrics', 1)
  currentUsage = _messages.MessageField('UsageSnapshot', 2)
  diagnosticOutputUri = _messages.StringField(3)
  endpoints = _messages.MessageField('EndpointsValue', 4)
  outputUri = _messages.StringField(5)
  propertiesInfo = _messages.MessageField('PropertiesInfo', 6)
  resolvedCohortInfo = _messages.MessageField('ResolvedCohortInfo', 7)


class SchedulingConfig(_messages.Message):
  r"""Config for scheduling the request to create Compute Engine resources for
  the cluster, when available.

  Fields:
    requestedRunDuration: Optional. Required lifetime of the resources, once
      provisioned. Min 10 mins, Max/Default 7 days . Note that the cluster can
      still be deleted before reaching this time. This time is a maximum
      amount of time before the cluster is forcibly deleted. Lower times are
      more likely to start running sooner.
    schedulingTimeout: Optional. How long to wait for worker resources to be
      allocated before failing the cluster creation request. Max/Default
      value: 13 days
  """

  requestedRunDuration = _messages.StringField(1)
  schedulingTimeout = _messages.StringField(2)


class SearchSessionSparkApplicationExecutorStageSummaryResponse(_messages.Message):
  r"""List of Executors associated with a Spark Application Stage.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationExecutorStageSummaryRequest.
    sparkApplicationStageExecutors: Details about executors used by the
      application stage.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageExecutors = _messages.MessageField('ExecutorStageSummary', 2, repeated=True)


class SearchSessionSparkApplicationExecutorsResponse(_messages.Message):
  r"""List of Executors associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationExecutorsRequest.
    sparkApplicationExecutors: Details about executors used by the
      application.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationExecutors = _messages.MessageField('ExecutorSummary', 2, repeated=True)


class SearchSessionSparkApplicationJobsResponse(_messages.Message):
  r"""A list of Jobs associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSessionSparkApplicationJobsRequest.
    sparkApplicationJobs: Output only. Data corresponding to a spark job.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationJobs = _messages.MessageField('JobData', 2, repeated=True)


class SearchSessionSparkApplicationNativeSqlQueriesResponse(_messages.Message):
  r"""List of all Native queries for a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationSqlQueriesRequest.
    sparkApplicationNativeSqlQueries: Output only. Native SQL Execution Data
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationNativeSqlQueries = _messages.MessageField('NativeSqlExecutionUiData', 2, repeated=True)


class SearchSessionSparkApplicationSqlQueriesResponse(_messages.Message):
  r"""List of all queries for a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationSqlQueriesRequest.
    sparkApplicationSqlQueries: Output only. SQL Execution Data
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationSqlQueries = _messages.MessageField('SqlExecutionUiData', 2, repeated=True)


class SearchSessionSparkApplicationStageAttemptTasksResponse(_messages.Message):
  r"""List of tasks for a stage of a Spark Application

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationStageAttemptTasksRequest.
    sparkApplicationStageAttemptTasks: Output only. Data corresponding to
      tasks created by spark.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageAttemptTasks = _messages.MessageField('TaskData', 2, repeated=True)


class SearchSessionSparkApplicationStageAttemptsResponse(_messages.Message):
  r"""A list of Stage Attempts for a Stage of a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSessionSparkApplicationStageAttemptsRequest.
    sparkApplicationStageAttempts: Output only. Data corresponding to a stage
      attempts
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageAttempts = _messages.MessageField('StageData', 2, repeated=True)


class SearchSessionSparkApplicationStagesResponse(_messages.Message):
  r"""A list of stages associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSessionSparkApplicationStages.
    sparkApplicationStages: Output only. Data corresponding to a stage.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStages = _messages.MessageField('StageData', 2, repeated=True)


class SearchSessionSparkApplicationsResponse(_messages.Message):
  r"""A list of summary of Spark Applications

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSessionSparkApplicationsRequest.
    sparkApplications: Output only. High level information corresponding to an
      application.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplications = _messages.MessageField('SparkApplication', 2, repeated=True)


class SearchSparkApplicationExecutorStageSummaryResponse(_messages.Message):
  r"""List of Executors associated with a Spark Application Stage.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationExecutorsListRequest.
    sparkApplicationStageExecutors: Details about executors used by the
      application stage.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageExecutors = _messages.MessageField('ExecutorStageSummary', 2, repeated=True)


class SearchSparkApplicationExecutorsResponse(_messages.Message):
  r"""List of Executors associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationExecutorsListRequest.
    sparkApplicationExecutors: Details about executors used by the
      application.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationExecutors = _messages.MessageField('ExecutorSummary', 2, repeated=True)


class SearchSparkApplicationJobsResponse(_messages.Message):
  r"""A list of Jobs associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationJobsRequest.
    sparkApplicationJobs: Output only. Data corresponding to a spark job.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationJobs = _messages.MessageField('JobData', 2, repeated=True)


class SearchSparkApplicationNativeSqlQueriesResponse(_messages.Message):
  r"""List of all Native SQL queries details for a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent
      SearchSparkApplicationNativeSqlQueriesRequest.
    sparkApplicationNativeSqlQueries: Output only. Native SQL Execution Data
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationNativeSqlQueries = _messages.MessageField('NativeSqlExecutionUiData', 2, repeated=True)


class SearchSparkApplicationSqlQueriesResponse(_messages.Message):
  r"""List of all queries for a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationSqlQueriesRequest.
    sparkApplicationSqlQueries: Output only. SQL Execution Data
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationSqlQueries = _messages.MessageField('SqlExecutionUiData', 2, repeated=True)


class SearchSparkApplicationStageAttemptTasksResponse(_messages.Message):
  r"""List of tasks for a stage of a Spark Application

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest.
    sparkApplicationStageAttemptTasks: Output only. Data corresponding to
      tasks created by spark.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageAttemptTasks = _messages.MessageField('TaskData', 2, repeated=True)


class SearchSparkApplicationStageAttemptsResponse(_messages.Message):
  r"""A list of Stage Attempts for a Stage of a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent ListSparkApplicationStageAttemptsRequest.
    sparkApplicationStageAttempts: Output only. Data corresponding to a stage
      attempts
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStageAttempts = _messages.MessageField('StageData', 2, repeated=True)


class SearchSparkApplicationStagesResponse(_messages.Message):
  r"""A list of stages associated with a Spark Application.

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationStages.
    sparkApplicationStages: Output only. Data corresponding to a stage.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplicationStages = _messages.MessageField('StageData', 2, repeated=True)


class SearchSparkApplicationsResponse(_messages.Message):
  r"""A list of summary of Spark Applications

  Fields:
    nextPageToken: This token is included in the response if there are more
      results to fetch. To fetch additional results, provide this value as the
      page_token in a subsequent SearchSparkApplicationsRequest.
    sparkApplications: Output only. High level information corresponding to an
      application.
  """

  nextPageToken = _messages.StringField(1)
  sparkApplications = _messages.MessageField('SparkApplication', 2, repeated=True)


class SecurityConfig(_messages.Message):
  r"""Security related configuration, including encryption, Kerberos, etc.

  Fields:
    authenticationConfig: Optional. User workload credential configuration
      (WIP). This is mutually exclusive with the identity_config field.
    identityConfig: Optional. Identity related configuration, including
      service account based secure multi-tenancy user mappings.
    kerberosConfig: Optional. Kerberos related configuration.
  """

  authenticationConfig = _messages.MessageField('ClusterAuthenticationConfig', 1)
  identityConfig = _messages.MessageField('IdentityConfig', 2)
  kerberosConfig = _messages.MessageField('KerberosConfig', 3)


class Session(_messages.Message):
  r"""A representation of a session.

  Enums:
    StateValueValuesEnum: Output only. A state of the session.

  Messages:
    LabelsValue: Optional. The labels to associate with the session. Label
      keys must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a session.

  Fields:
    createTime: Output only. The time when the session was created.
    creator: Output only. The email address of the user who created the
      session.
    environmentConfig: Optional. Environment configuration for the session
      execution.
    jupyterSession: Optional. Jupyter session config.
    labels: Optional. The labels to associate with the session. Label keys
      must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a session.
    name: Identifier. The resource name of the session.
    runtimeConfig: Optional. Runtime configuration for the session execution.
    runtimeInfo: Output only. Runtime information about session execution.
    sessionTemplate: Optional. The session template used by the session.Only
      resource names, including project ID and location, are valid.Example: *
      https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[d
      ataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/l
      ocations/[dataproc_region]/sessionTemplates/[template_id]The template
      must be in the same project and Dataproc region as the session.
    sparkConnectSession: Optional. Spark connect session config.
    state: Output only. A state of the session.
    stateHistory: Output only. Historical state information for the session.
    stateMessage: Output only. Session state details, such as the failure
      description if the state is FAILED.
    stateTime: Output only. The time when the session entered the current
      state.
    user: Optional. The email address of the user who owns the session.
    uuid: Output only. A session UUID (Unique Universal Identifier). The
      service generates this value when it creates the session.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. A state of the session.

    Values:
      STATE_UNSPECIFIED: The session state is unknown.
      CREATING: The session is created prior to running.
      ACTIVE: The session is running.
      TERMINATING: The session is terminating.
      TERMINATED: The session is terminated successfully.
      FAILED: The session is no longer running due to an error.
    """
    STATE_UNSPECIFIED = 0
    CREATING = 1
    ACTIVE = 2
    TERMINATING = 3
    TERMINATED = 4
    FAILED = 5

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with the session. Label keys must
    contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a session.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  creator = _messages.StringField(2)
  environmentConfig = _messages.MessageField('EnvironmentConfig', 3)
  jupyterSession = _messages.MessageField('JupyterConfig', 4)
  labels = _messages.MessageField('LabelsValue', 5)
  name = _messages.StringField(6)
  runtimeConfig = _messages.MessageField('RuntimeConfig', 7)
  runtimeInfo = _messages.MessageField('RuntimeInfo', 8)
  sessionTemplate = _messages.StringField(9)
  sparkConnectSession = _messages.MessageField('SparkConnectConfig', 10)
  state = _messages.EnumField('StateValueValuesEnum', 11)
  stateHistory = _messages.MessageField('SessionStateHistory', 12, repeated=True)
  stateMessage = _messages.StringField(13)
  stateTime = _messages.StringField(14)
  user = _messages.StringField(15)
  uuid = _messages.StringField(16)


class SessionOperationMetadata(_messages.Message):
  r"""Metadata describing the Session operation.

  Enums:
    OperationTypeValueValuesEnum: The operation type.

  Messages:
    LabelsValue: Labels associated with the operation.

  Fields:
    createTime: The time when the operation was created.
    description: Short description of the operation.
    doneTime: The time when the operation was finished.
    labels: Labels associated with the operation.
    operationType: The operation type.
    session: Name of the session for the operation.
    sessionUuid: Session UUID for the operation.
    warnings: Warnings encountered during operation execution.
  """

  class OperationTypeValueValuesEnum(_messages.Enum):
    r"""The operation type.

    Values:
      SESSION_OPERATION_TYPE_UNSPECIFIED: Session operation type is unknown.
      CREATE: Create Session operation type.
      TERMINATE: Terminate Session operation type.
      DELETE: Delete Session operation type.
    """
    SESSION_OPERATION_TYPE_UNSPECIFIED = 0
    CREATE = 1
    TERMINATE = 2
    DELETE = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Labels associated with the operation.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  description = _messages.StringField(2)
  doneTime = _messages.StringField(3)
  labels = _messages.MessageField('LabelsValue', 4)
  operationType = _messages.EnumField('OperationTypeValueValuesEnum', 5)
  session = _messages.StringField(6)
  sessionUuid = _messages.StringField(7)
  warnings = _messages.StringField(8, repeated=True)


class SessionStateHistory(_messages.Message):
  r"""Historical state information.

  Enums:
    StateValueValuesEnum: Output only. The state of the session at this point
      in the session history.

  Fields:
    state: Output only. The state of the session at this point in the session
      history.
    stateMessage: Output only. Details about the state at this point in the
      session history.
    stateStartTime: Output only. The time when the session entered the
      historical state.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The state of the session at this point in the session
    history.

    Values:
      STATE_UNSPECIFIED: The session state is unknown.
      CREATING: The session is created prior to running.
      ACTIVE: The session is running.
      TERMINATING: The session is terminating.
      TERMINATED: The session is terminated successfully.
      FAILED: The session is no longer running due to an error.
    """
    STATE_UNSPECIFIED = 0
    CREATING = 1
    ACTIVE = 2
    TERMINATING = 3
    TERMINATED = 4
    FAILED = 5

  state = _messages.EnumField('StateValueValuesEnum', 1)
  stateMessage = _messages.StringField(2)
  stateStartTime = _messages.StringField(3)


class SessionTemplate(_messages.Message):
  r"""A representation of a session template.

  Messages:
    LabelsValue: Optional. Labels to associate with sessions created using
      this template. Label keys must contain 1 to 63 characters, and must
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values
      can be empty, but, if present, must contain 1 to 63 characters and
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than
      32 labels can be associated with a session.

  Fields:
    createTime: Output only. The time when the template was created.
    creator: Output only. The email address of the user who created the
      template.
    description: Optional. Brief description of the template.
    environmentConfig: Optional. Environment configuration for session
      execution.
    jupyterSession: Optional. Jupyter session config.
    labels: Optional. Labels to associate with sessions created using this
      template. Label keys must contain 1 to 63 characters, and must conform
      to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be
      empty, but, if present, must contain 1 to 63 characters and conform to
      RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels
      can be associated with a session.
    name: Required. Identifier. The resource name of the session template.
    preWarmedDriversCount: Optional. The count of drivers to have pre-warmed
      available for Sessions created from this SessionTemplate. These are
      created and billed as soon as the SessionTemplate is created. When a
      Session is created from this SessionTemplate, it will use one of these
      pre-warmed drivers, if any are available. If not available, then the
      Session will create its own driver. As soon as a Session adopts one of
      these pre-warmed drivers, a new pre-warmed driver will be created to
      replace it.
    runtimeConfig: Optional. Runtime configuration for session execution.
    sparkConnectSession: Optional. Spark connect session config.
    updateTime: Output only. The time the template was last updated.
    uuid: Output only. A session template UUID (Unique Universal Identifier).
      The service generates this value when it creates the session template.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. Labels to associate with sessions created using this
    template. Label keys must contain 1 to 63 characters, and must conform to
    RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be
    empty, but, if present, must contain 1 to 63 characters and conform to RFC
    1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a session.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  creator = _messages.StringField(2)
  description = _messages.StringField(3)
  environmentConfig = _messages.MessageField('EnvironmentConfig', 4)
  jupyterSession = _messages.MessageField('JupyterConfig', 5)
  labels = _messages.MessageField('LabelsValue', 6)
  name = _messages.StringField(7)
  preWarmedDriversCount = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  runtimeConfig = _messages.MessageField('RuntimeConfig', 9)
  sparkConnectSession = _messages.MessageField('SparkConnectConfig', 10)
  updateTime = _messages.StringField(11)
  uuid = _messages.StringField(12)


class SetIamPolicyRequest(_messages.Message):
  r"""Request message for SetIamPolicy method.

  Fields:
    policy: REQUIRED: The complete policy to be applied to the resource. The
      size of the policy is limited to a few 10s of KB. An empty policy is a
      valid policy but certain Google Cloud services (such as Projects) might
      reject them.
  """

  policy = _messages.MessageField('Policy', 1)


class ShieldedInstanceConfig(_messages.Message):
  r"""Shielded Instance Config for clusters using Compute Engine Shielded VMs
  (https://cloud.google.com/security/shielded-cloud/shielded-vm).

  Fields:
    enableIntegrityMonitoring: Optional. Defines whether instances have
      integrity monitoring enabled.
    enableSecureBoot: Optional. Defines whether instances have Secure Boot
      enabled.
    enableVtpm: Optional. Defines whether instances have the vTPM enabled.
  """

  enableIntegrityMonitoring = _messages.BooleanField(1)
  enableSecureBoot = _messages.BooleanField(2)
  enableVtpm = _messages.BooleanField(3)


class ShufflePushReadMetrics(_messages.Message):
  r"""A ShufflePushReadMetrics object.

  Fields:
    corruptMergedBlockChunks: A string attribute.
    localMergedBlocksFetched: A string attribute.
    localMergedBytesRead: A string attribute.
    localMergedChunksFetched: A string attribute.
    mergedFetchFallbackCount: A string attribute.
    remoteMergedBlocksFetched: A string attribute.
    remoteMergedBytesRead: A string attribute.
    remoteMergedChunksFetched: A string attribute.
    remoteMergedReqsDuration: A string attribute.
  """

  corruptMergedBlockChunks = _messages.IntegerField(1)
  localMergedBlocksFetched = _messages.IntegerField(2)
  localMergedBytesRead = _messages.IntegerField(3)
  localMergedChunksFetched = _messages.IntegerField(4)
  mergedFetchFallbackCount = _messages.IntegerField(5)
  remoteMergedBlocksFetched = _messages.IntegerField(6)
  remoteMergedBytesRead = _messages.IntegerField(7)
  remoteMergedChunksFetched = _messages.IntegerField(8)
  remoteMergedReqsDuration = _messages.IntegerField(9)


class ShufflePushReadQuantileMetrics(_messages.Message):
  r"""A ShufflePushReadQuantileMetrics object.

  Fields:
    corruptMergedBlockChunks: A Quantiles attribute.
    localMergedBlocksFetched: A Quantiles attribute.
    localMergedBytesRead: A Quantiles attribute.
    localMergedChunksFetched: A Quantiles attribute.
    mergedFetchFallbackCount: A Quantiles attribute.
    remoteMergedBlocksFetched: A Quantiles attribute.
    remoteMergedBytesRead: A Quantiles attribute.
    remoteMergedChunksFetched: A Quantiles attribute.
    remoteMergedReqsDuration: A Quantiles attribute.
  """

  corruptMergedBlockChunks = _messages.MessageField('Quantiles', 1)
  localMergedBlocksFetched = _messages.MessageField('Quantiles', 2)
  localMergedBytesRead = _messages.MessageField('Quantiles', 3)
  localMergedChunksFetched = _messages.MessageField('Quantiles', 4)
  mergedFetchFallbackCount = _messages.MessageField('Quantiles', 5)
  remoteMergedBlocksFetched = _messages.MessageField('Quantiles', 6)
  remoteMergedBytesRead = _messages.MessageField('Quantiles', 7)
  remoteMergedChunksFetched = _messages.MessageField('Quantiles', 8)
  remoteMergedReqsDuration = _messages.MessageField('Quantiles', 9)


class ShuffleReadMetrics(_messages.Message):
  r"""Shuffle data read by the task.

  Fields:
    fetchWaitTimeMillis: A string attribute.
    localBlocksFetched: A string attribute.
    localBytesRead: A string attribute.
    recordsRead: A string attribute.
    remoteBlocksFetched: A string attribute.
    remoteBytesRead: A string attribute.
    remoteBytesReadToDisk: A string attribute.
    remoteReqsDuration: A string attribute.
    shufflePushReadMetrics: A ShufflePushReadMetrics attribute.
  """

  fetchWaitTimeMillis = _messages.IntegerField(1)
  localBlocksFetched = _messages.IntegerField(2)
  localBytesRead = _messages.IntegerField(3)
  recordsRead = _messages.IntegerField(4)
  remoteBlocksFetched = _messages.IntegerField(5)
  remoteBytesRead = _messages.IntegerField(6)
  remoteBytesReadToDisk = _messages.IntegerField(7)
  remoteReqsDuration = _messages.IntegerField(8)
  shufflePushReadMetrics = _messages.MessageField('ShufflePushReadMetrics', 9)


class ShuffleReadQuantileMetrics(_messages.Message):
  r"""A ShuffleReadQuantileMetrics object.

  Fields:
    fetchWaitTimeMillis: A Quantiles attribute.
    localBlocksFetched: A Quantiles attribute.
    readBytes: A Quantiles attribute.
    readRecords: A Quantiles attribute.
    remoteBlocksFetched: A Quantiles attribute.
    remoteBytesRead: A Quantiles attribute.
    remoteBytesReadToDisk: A Quantiles attribute.
    remoteReqsDuration: A Quantiles attribute.
    shufflePushReadMetrics: A ShufflePushReadQuantileMetrics attribute.
    totalBlocksFetched: A Quantiles attribute.
  """

  fetchWaitTimeMillis = _messages.MessageField('Quantiles', 1)
  localBlocksFetched = _messages.MessageField('Quantiles', 2)
  readBytes = _messages.MessageField('Quantiles', 3)
  readRecords = _messages.MessageField('Quantiles', 4)
  remoteBlocksFetched = _messages.MessageField('Quantiles', 5)
  remoteBytesRead = _messages.MessageField('Quantiles', 6)
  remoteBytesReadToDisk = _messages.MessageField('Quantiles', 7)
  remoteReqsDuration = _messages.MessageField('Quantiles', 8)
  shufflePushReadMetrics = _messages.MessageField('ShufflePushReadQuantileMetrics', 9)
  totalBlocksFetched = _messages.MessageField('Quantiles', 10)


class ShuffleWriteMetrics(_messages.Message):
  r"""Shuffle data written by task.

  Fields:
    bytesWritten: A string attribute.
    recordsWritten: A string attribute.
    writeTimeNanos: A string attribute.
  """

  bytesWritten = _messages.IntegerField(1)
  recordsWritten = _messages.IntegerField(2)
  writeTimeNanos = _messages.IntegerField(3)


class ShuffleWriteQuantileMetrics(_messages.Message):
  r"""A ShuffleWriteQuantileMetrics object.

  Fields:
    writeBytes: A Quantiles attribute.
    writeRecords: A Quantiles attribute.
    writeTimeNanos: A Quantiles attribute.
  """

  writeBytes = _messages.MessageField('Quantiles', 1)
  writeRecords = _messages.MessageField('Quantiles', 2)
  writeTimeNanos = _messages.MessageField('Quantiles', 3)


class SinkProgress(_messages.Message):
  r"""A SinkProgress object.

  Messages:
    MetricsValue: A MetricsValue object.

  Fields:
    description: A string attribute.
    metrics: A MetricsValue attribute.
    numOutputRows: A string attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetricsValue(_messages.Message):
    r"""A MetricsValue object.

    Messages:
      AdditionalProperty: An additional property for a MetricsValue object.

    Fields:
      additionalProperties: Additional properties of type MetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  description = _messages.StringField(1)
  metrics = _messages.MessageField('MetricsValue', 2)
  numOutputRows = _messages.IntegerField(3)


class SoftwareConfig(_messages.Message):
  r"""Specifies the selection and config of software inside the cluster.

  Enums:
    OptionalComponentsValueListEntryValuesEnum:

  Messages:
    PropertiesValue: Optional. The properties to set on daemon config
      files.Property keys are specified in prefix:property format, for example
      core:hadoop.tmp.dir. The following are supported prefixes and their
      mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
      distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
      mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
      yarn: yarn-site.xmlFor more information, see Cluster properties
      (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

  Fields:
    imageVersion: Optional. The version of software inside the cluster. It
      must be one of the supported Dataproc Versions
      (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-
      versions#supported-dataproc-image-versions), such as "1.2" (including a
      subminor version, such as "1.2.29"), or the "preview" version
      (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-
      versions#other_versions). If unspecified, it defaults to the latest
      Debian version.
    optionalComponents: Optional. The set of components to activate on the
      cluster.
    properties: Optional. The properties to set on daemon config
      files.Property keys are specified in prefix:property format, for example
      core:hadoop.tmp.dir. The following are supported prefixes and their
      mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
      distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
      mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
      yarn: yarn-site.xmlFor more information, see Cluster properties
      (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
  """

  class OptionalComponentsValueListEntryValuesEnum(_messages.Enum):
    r"""OptionalComponentsValueListEntryValuesEnum enum type.

    Values:
      COMPONENT_UNSPECIFIED: Unspecified component. Specifying this will cause
        Cluster creation to fail.
      ANACONDA: The Anaconda component is no longer supported or applicable to
        supported Dataproc on Compute Engine image versions
        (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-
        version-clusters#supported-dataproc-image-versions). It cannot be
        activated on clusters created with supported Dataproc on Compute
        Engine image versions.
      DELTA: Delta Lake.
      DOCKER: Docker
      DRUID: The Druid query engine. (alpha)
      FLINK: Flink
      HBASE: HBase. (beta)
      HIVE_WEBHCAT: The Hive Web HCatalog (the REST service for accessing
        HCatalog).
      HUDI: Hudi.
      ICEBERG: Iceberg.
      JUPYTER: The Jupyter Notebook.
      KERBEROS: The Kerberos security feature.
      PIG: The Pig component.
      PRESTO: The Presto query engine.
      TRINO: The Trino query engine.
      RANGER: The Ranger service.
      SOLR: The Solr service.
      ZEPPELIN: The Zeppelin notebook.
      ZOOKEEPER: The Zookeeper service.
      DASK: Dask
      GPU_DRIVER: Nvidia GPU driver.
      JUPYTER_KERNEL_GATEWAY: The Jupyter Kernel Gateway.
    """
    COMPONENT_UNSPECIFIED = 0
    ANACONDA = 1
    DELTA = 2
    DOCKER = 3
    DRUID = 4
    FLINK = 5
    HBASE = 6
    HIVE_WEBHCAT = 7
    HUDI = 8
    ICEBERG = 9
    JUPYTER = 10
    KERBEROS = 11
    PIG = 12
    PRESTO = 13
    TRINO = 14
    RANGER = 15
    SOLR = 16
    ZEPPELIN = 17
    ZOOKEEPER = 18
    DASK = 19
    GPU_DRIVER = 20
    JUPYTER_KERNEL_GATEWAY = 21

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. The properties to set on daemon config files.Property keys
    are specified in prefix:property format, for example core:hadoop.tmp.dir.
    The following are supported prefixes and their mappings: capacity-
    scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-
    default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-
    site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-
    site.xmlFor more information, see Cluster properties
    (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  imageVersion = _messages.StringField(1)
  optionalComponents = _messages.EnumField('OptionalComponentsValueListEntryValuesEnum', 2, repeated=True)
  properties = _messages.MessageField('PropertiesValue', 3)


class SourceProgress(_messages.Message):
  r"""A SourceProgress object.

  Messages:
    MetricsValue: A MetricsValue object.

  Fields:
    description: A string attribute.
    endOffset: A string attribute.
    inputRowsPerSecond: A number attribute.
    latestOffset: A string attribute.
    metrics: A MetricsValue attribute.
    numInputRows: A string attribute.
    processedRowsPerSecond: A number attribute.
    startOffset: A string attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetricsValue(_messages.Message):
    r"""A MetricsValue object.

    Messages:
      AdditionalProperty: An additional property for a MetricsValue object.

    Fields:
      additionalProperties: Additional properties of type MetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  description = _messages.StringField(1)
  endOffset = _messages.StringField(2)
  inputRowsPerSecond = _messages.FloatField(3)
  latestOffset = _messages.StringField(4)
  metrics = _messages.MessageField('MetricsValue', 5)
  numInputRows = _messages.IntegerField(6)
  processedRowsPerSecond = _messages.FloatField(7)
  startOffset = _messages.StringField(8)


class SparkApplication(_messages.Message):
  r"""A summary of Spark Application

  Fields:
    application: Output only. High level information corresponding to an
      application.
    name: Identifier. Name of the spark application
  """

  application = _messages.MessageField('ApplicationInfo', 1)
  name = _messages.StringField(2)


class SparkBatch(_messages.Message):
  r"""A configuration for running an Apache Spark (https://spark.apache.org/)
  batch workload.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments that can be set as batch properties, such as --conf, since a
      collision can occur that causes an incorrect batch submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor.
    jarFileUris: Optional. HCFS URIs of jar files to add to the classpath of
      the Spark driver and tasks.
    mainClass: Optional. The name of the driver main class. The jar file that
      contains the class must be in the classpath or specified in
      jar_file_uris.
    mainJarFileUri: Optional. The HCFS URI of the jar file that contains the
      main class.
  """

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  jarFileUris = _messages.StringField(4, repeated=True)
  mainClass = _messages.StringField(5)
  mainJarFileUri = _messages.StringField(6)


class SparkConnectConfig(_messages.Message):
  r"""Spark connect configuration for an interactive session."""


class SparkHistoryServerConfig(_messages.Message):
  r"""Spark History Server configuration for the workload.

  Fields:
    dataprocCluster: Optional. Resource name of an existing Dataproc Cluster
      to act as a Spark History Server for the workload.Example:
      projects/[project_id]/regions/[region]/clusters/[cluster_name]
  """

  dataprocCluster = _messages.StringField(1)


class SparkJob(_messages.Message):
  r"""A Dataproc job for running Apache Spark (https://spark.apache.org/)
  applications on YARN.

  Enums:
    SparkEngineValueValuesEnum: Optional. The engine on which the spark job
      runs.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure Spark. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as --conf, that can be set as job properties, since a
      collision may occur that causes an incorrect job submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor. Useful for naively parallel tasks.
    jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
      the Spark driver and tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainClass: The name of the driver's main class. The jar file that contains
      the class must be in the default CLASSPATH or specified in
      SparkJob.jar_file_uris.
    mainJarFileUri: The HCFS URI of the jar file that contains the main class.
    properties: Optional. A mapping of property names to values, used to
      configure Spark. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.
    sparkEngine: Optional. The engine on which the spark job runs.
  """

  class SparkEngineValueValuesEnum(_messages.Enum):
    r"""Optional. The engine on which the spark job runs.

    Values:
      SPARK_ENGINE_UNSPECIFIED: Not set.
      SPARK_ENGINE_DEFAULT: Default engine for Spark Job
      SPARK_ENGINE_NATIVE: Native Query Engine for Spark Job
    """
    SPARK_ENGINE_UNSPECIFIED = 0
    SPARK_ENGINE_DEFAULT = 1
    SPARK_ENGINE_NATIVE = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    Spark. Properties that conflict with values set by the Dataproc API might
    be overwritten. Can include properties set in /etc/spark/conf/spark-
    defaults.conf and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  jarFileUris = _messages.StringField(4, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 5)
  mainClass = _messages.StringField(6)
  mainJarFileUri = _messages.StringField(7)
  properties = _messages.MessageField('PropertiesValue', 8)
  sparkEngine = _messages.EnumField('SparkEngineValueValuesEnum', 9)


class SparkPlanGraph(_messages.Message):
  r"""A graph used for storing information of an executionPlan of DataFrame.

  Fields:
    edges: A SparkPlanGraphEdge attribute.
    executionId: A string attribute.
    nodes: A SparkPlanGraphNodeWrapper attribute.
  """

  edges = _messages.MessageField('SparkPlanGraphEdge', 1, repeated=True)
  executionId = _messages.IntegerField(2)
  nodes = _messages.MessageField('SparkPlanGraphNodeWrapper', 3, repeated=True)


class SparkPlanGraphCluster(_messages.Message):
  r"""Represents a tree of spark plan.

  Fields:
    desc: A string attribute.
    metrics: A SqlPlanMetric attribute.
    name: A string attribute.
    nodes: A SparkPlanGraphNodeWrapper attribute.
    sparkPlanGraphClusterId: A string attribute.
  """

  desc = _messages.StringField(1)
  metrics = _messages.MessageField('SqlPlanMetric', 2, repeated=True)
  name = _messages.StringField(3)
  nodes = _messages.MessageField('SparkPlanGraphNodeWrapper', 4, repeated=True)
  sparkPlanGraphClusterId = _messages.IntegerField(5)


class SparkPlanGraphEdge(_messages.Message):
  r"""Represents a directed edge in the spark plan tree from child to parent.

  Fields:
    fromId: A string attribute.
    toId: A string attribute.
  """

  fromId = _messages.IntegerField(1)
  toId = _messages.IntegerField(2)


class SparkPlanGraphNode(_messages.Message):
  r"""Represents a node in the spark plan tree.

  Fields:
    desc: A string attribute.
    metrics: A SqlPlanMetric attribute.
    name: A string attribute.
    sparkPlanGraphNodeId: A string attribute.
  """

  desc = _messages.StringField(1)
  metrics = _messages.MessageField('SqlPlanMetric', 2, repeated=True)
  name = _messages.StringField(3)
  sparkPlanGraphNodeId = _messages.IntegerField(4)


class SparkPlanGraphNodeWrapper(_messages.Message):
  r"""Wrapper user to represent either a node or a cluster.

  Fields:
    cluster: A SparkPlanGraphCluster attribute.
    node: A SparkPlanGraphNode attribute.
  """

  cluster = _messages.MessageField('SparkPlanGraphCluster', 1)
  node = _messages.MessageField('SparkPlanGraphNode', 2)


class SparkRBatch(_messages.Message):
  r"""A configuration for running an Apache SparkR
  (https://spark.apache.org/docs/latest/sparkr.html) batch workload.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the Spark driver. Do not include
      arguments that can be set as batch properties, such as --conf, since a
      collision can occur that causes an incorrect batch submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor.
    mainRFileUri: Required. The HCFS URI of the main R file to use as the
      driver. Must be a .R or .r file.
  """

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  mainRFileUri = _messages.StringField(4)


class SparkRJob(_messages.Message):
  r"""A Dataproc job for running Apache SparkR
  (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

  Enums:
    SparkEngineValueValuesEnum: Optional. The engine on which the spark job
      runs.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure SparkR. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.

  Fields:
    archiveUris: Optional. HCFS URIs of archives to be extracted into the
      working directory of each executor. Supported file types: .jar, .tar,
      .tar.gz, .tgz, and .zip.
    args: Optional. The arguments to pass to the driver. Do not include
      arguments, such as --conf, that can be set as job properties, since a
      collision may occur that causes an incorrect job submission.
    fileUris: Optional. HCFS URIs of files to be placed in the working
      directory of each executor. Useful for naively parallel tasks.
    loggingConfig: Optional. The runtime log config for job execution.
    mainRFileUri: Required. The HCFS URI of the main R file to use as the
      driver. Must be a .R file.
    properties: Optional. A mapping of property names to values, used to
      configure SparkR. Properties that conflict with values set by the
      Dataproc API might be overwritten. Can include properties set in
      /etc/spark/conf/spark-defaults.conf and classes in user code.
    sparkEngine: Optional. The engine on which the spark job runs.
  """

  class SparkEngineValueValuesEnum(_messages.Enum):
    r"""Optional. The engine on which the spark job runs.

    Values:
      SPARK_ENGINE_UNSPECIFIED: Not set.
      SPARK_ENGINE_DEFAULT: Default engine for Spark Job
      SPARK_ENGINE_NATIVE: Native Query Engine for Spark Job
    """
    SPARK_ENGINE_UNSPECIFIED = 0
    SPARK_ENGINE_DEFAULT = 1
    SPARK_ENGINE_NATIVE = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    SparkR. Properties that conflict with values set by the Dataproc API might
    be overwritten. Can include properties set in /etc/spark/conf/spark-
    defaults.conf and classes in user code.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  archiveUris = _messages.StringField(1, repeated=True)
  args = _messages.StringField(2, repeated=True)
  fileUris = _messages.StringField(3, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 4)
  mainRFileUri = _messages.StringField(5)
  properties = _messages.MessageField('PropertiesValue', 6)
  sparkEngine = _messages.EnumField('SparkEngineValueValuesEnum', 7)


class SparkRuntimeInfo(_messages.Message):
  r"""A SparkRuntimeInfo object.

  Fields:
    javaHome: A string attribute.
    javaVersion: A string attribute.
    scalaVersion: A string attribute.
  """

  javaHome = _messages.StringField(1)
  javaVersion = _messages.StringField(2)
  scalaVersion = _messages.StringField(3)


class SparkSqlBatch(_messages.Message):
  r"""A configuration for running Apache Spark SQL
  (https://spark.apache.org/sql/) queries as a batch workload.

  Messages:
    QueryVariablesValue: Optional. Mapping of query variable names to values
      (equivalent to the Spark SQL command: SET name="value";).

  Fields:
    jarFileUris: Optional. HCFS URIs of jar files to be added to the Spark
      CLASSPATH.
    queryFileUri: Required. The HCFS URI of the script that contains Spark SQL
      queries to execute.
    queryVariables: Optional. Mapping of query variable names to values
      (equivalent to the Spark SQL command: SET name="value";).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class QueryVariablesValue(_messages.Message):
    r"""Optional. Mapping of query variable names to values (equivalent to the
    Spark SQL command: SET name="value";).

    Messages:
      AdditionalProperty: An additional property for a QueryVariablesValue
        object.

    Fields:
      additionalProperties: Additional properties of type QueryVariablesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a QueryVariablesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  jarFileUris = _messages.StringField(1, repeated=True)
  queryFileUri = _messages.StringField(2)
  queryVariables = _messages.MessageField('QueryVariablesValue', 3)


class SparkSqlJob(_messages.Message):
  r"""A Dataproc job for running Apache Spark SQL
  (https://spark.apache.org/sql/) queries.

  Enums:
    SparkEngineValueValuesEnum: Optional. The engine on which the spark job
      runs.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values, used to
      configure Spark SQL's SparkConf. Properties that conflict with values
      set by the Dataproc API might be overwritten.
    ScriptVariablesValue: Optional. Mapping of query variable names to values
      (equivalent to the Spark SQL command: SET name="value";).

  Fields:
    jarFileUris: Optional. HCFS URIs of jar files to be added to the Spark
      CLASSPATH.
    loggingConfig: Optional. The runtime log config for job execution.
    properties: Optional. A mapping of property names to values, used to
      configure Spark SQL's SparkConf. Properties that conflict with values
      set by the Dataproc API might be overwritten.
    queryFileUri: The HCFS URI of the script that contains SQL queries.
    queryList: A list of queries.
    scriptVariables: Optional. Mapping of query variable names to values
      (equivalent to the Spark SQL command: SET name="value";).
    sparkEngine: Optional. The engine on which the spark job runs.
  """

  class SparkEngineValueValuesEnum(_messages.Enum):
    r"""Optional. The engine on which the spark job runs.

    Values:
      SPARK_ENGINE_UNSPECIFIED: Not set.
      SPARK_ENGINE_DEFAULT: Default engine for Spark Job
      SPARK_ENGINE_NATIVE: Native Query Engine for Spark Job
    """
    SPARK_ENGINE_UNSPECIFIED = 0
    SPARK_ENGINE_DEFAULT = 1
    SPARK_ENGINE_NATIVE = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values, used to configure
    Spark SQL's SparkConf. Properties that conflict with values set by the
    Dataproc API might be overwritten.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ScriptVariablesValue(_messages.Message):
    r"""Optional. Mapping of query variable names to values (equivalent to the
    Spark SQL command: SET name="value";).

    Messages:
      AdditionalProperty: An additional property for a ScriptVariablesValue
        object.

    Fields:
      additionalProperties: Additional properties of type ScriptVariablesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ScriptVariablesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  jarFileUris = _messages.StringField(1, repeated=True)
  loggingConfig = _messages.MessageField('LoggingConfig', 2)
  properties = _messages.MessageField('PropertiesValue', 3)
  queryFileUri = _messages.StringField(4)
  queryList = _messages.MessageField('QueryList', 5)
  scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
  sparkEngine = _messages.EnumField('SparkEngineValueValuesEnum', 7)


class SparkStandaloneAutoscalingConfig(_messages.Message):
  r"""Basic autoscaling configurations for Spark Standalone.

  Fields:
    gracefulDecommissionTimeout: Required. Timeout for Spark graceful
      decommissioning of spark workers. Specifies the duration to wait for
      spark worker to complete spark decommissioning tasks before forcefully
      removing workers. Only applicable to downscaling operations.Bounds: 0s,
      1d.
    removeOnlyIdleWorkers: Optional. Remove only idle workers when scaling
      down cluster
    scaleDownFactor: Required. Fraction of required executors to remove from
      Spark Serverless clusters. A scale-down factor of 1.0 will result in
      scaling down so that there are no more executors for the Spark Job.(more
      aggressive scaling). A scale-down factor closer to 0 will result in a
      smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0,
      1.0.
    scaleDownMinWorkerFraction: Optional. Minimum scale-down threshold as a
      fraction of total cluster size before scaling occurs. For example, in a
      20-worker cluster, a threshold of 0.1 means the autoscaler must
      recommend at least a 2 worker scale-down for the cluster to scale. A
      threshold of 0 means the autoscaler will scale down on any recommended
      change.Bounds: 0.0, 1.0. Default: 0.0.
    scaleUpFactor: Required. Fraction of required workers to add to Spark
      Standalone clusters. A scale-up factor of 1.0 will result in scaling up
      so that there are no more required workers for the Spark Job (more
      aggressive scaling). A scale-up factor closer to 0 will result in a
      smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0,
      1.0.
    scaleUpMinWorkerFraction: Optional. Minimum scale-up threshold as a
      fraction of total cluster size before scaling occurs. For example, in a
      20-worker cluster, a threshold of 0.1 means the autoscaler must
      recommend at least a 2-worker scale-up for the cluster to scale. A
      threshold of 0 means the autoscaler will scale up on any recommended
      change.Bounds: 0.0, 1.0. Default: 0.0.
  """

  gracefulDecommissionTimeout = _messages.StringField(1)
  removeOnlyIdleWorkers = _messages.BooleanField(2)
  scaleDownFactor = _messages.FloatField(3)
  scaleDownMinWorkerFraction = _messages.FloatField(4)
  scaleUpFactor = _messages.FloatField(5)
  scaleUpMinWorkerFraction = _messages.FloatField(6)


class SparkWrapperObject(_messages.Message):
  r"""Outer message that contains the data obtained from spark listener,
  packaged with information that is required to process it.

  Fields:
    appSummary: A AppSummary attribute.
    applicationEnvironmentInfo: A ApplicationEnvironmentInfo attribute.
    applicationId: Application Id created by Spark.
    applicationInfo: A ApplicationInfo attribute.
    eventTimestamp: VM Timestamp associated with the data object.
    executorStageSummary: A ExecutorStageSummary attribute.
    executorSummary: A ExecutorSummary attribute.
    jobData: A JobData attribute.
    nativeBuildInfoUiData: Native Build Info
    nativeSqlExecutionUiData: Native SQL Execution Info
    poolData: A PoolData attribute.
    processSummary: A ProcessSummary attribute.
    rddOperationGraph: A RddOperationGraph attribute.
    rddStorageInfo: A RddStorageInfo attribute.
    resourceProfileInfo: A ResourceProfileInfo attribute.
    sparkPlanGraph: A SparkPlanGraph attribute.
    speculationStageSummary: A SpeculationStageSummary attribute.
    sqlExecutionUiData: A SqlExecutionUiData attribute.
    stageData: A StageData attribute.
    streamBlockData: A StreamBlockData attribute.
    streamingQueryData: A StreamingQueryData attribute.
    streamingQueryProgress: A StreamingQueryProgress attribute.
    taskData: A TaskData attribute.
  """

  appSummary = _messages.MessageField('AppSummary', 1)
  applicationEnvironmentInfo = _messages.MessageField('ApplicationEnvironmentInfo', 2)
  applicationId = _messages.StringField(3)
  applicationInfo = _messages.MessageField('ApplicationInfo', 4)
  eventTimestamp = _messages.StringField(5)
  executorStageSummary = _messages.MessageField('ExecutorStageSummary', 6)
  executorSummary = _messages.MessageField('ExecutorSummary', 7)
  jobData = _messages.MessageField('JobData', 8)
  nativeBuildInfoUiData = _messages.MessageField('NativeBuildInfoUiData', 9)
  nativeSqlExecutionUiData = _messages.MessageField('NativeSqlExecutionUiData', 10)
  poolData = _messages.MessageField('PoolData', 11)
  processSummary = _messages.MessageField('ProcessSummary', 12)
  rddOperationGraph = _messages.MessageField('RddOperationGraph', 13)
  rddStorageInfo = _messages.MessageField('RddStorageInfo', 14)
  resourceProfileInfo = _messages.MessageField('ResourceProfileInfo', 15)
  sparkPlanGraph = _messages.MessageField('SparkPlanGraph', 16)
  speculationStageSummary = _messages.MessageField('SpeculationStageSummary', 17)
  sqlExecutionUiData = _messages.MessageField('SqlExecutionUiData', 18)
  stageData = _messages.MessageField('StageData', 19)
  streamBlockData = _messages.MessageField('StreamBlockData', 20)
  streamingQueryData = _messages.MessageField('StreamingQueryData', 21)
  streamingQueryProgress = _messages.MessageField('StreamingQueryProgress', 22)
  taskData = _messages.MessageField('TaskData', 23)


class SpeculationStageSummary(_messages.Message):
  r"""Details of the speculation task when speculative execution is enabled.

  Fields:
    numActiveTasks: A integer attribute.
    numCompletedTasks: A integer attribute.
    numFailedTasks: A integer attribute.
    numKilledTasks: A integer attribute.
    numTasks: A integer attribute.
    stageAttemptId: A integer attribute.
    stageId: A string attribute.
  """

  numActiveTasks = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  numCompletedTasks = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  numFailedTasks = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  numKilledTasks = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  numTasks = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  stageAttemptId = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(7)


class SqlExecutionUiData(_messages.Message):
  r"""SQL Execution Data

  Messages:
    JobsValue: A JobsValue object.
    MetricValuesValue: A MetricValuesValue object.
    ModifiedConfigsValue: A ModifiedConfigsValue object.

  Fields:
    completionTime: A string attribute.
    description: A string attribute.
    details: A string attribute.
    errorMessage: A string attribute.
    executionId: A string attribute.
    jobs: A JobsValue attribute.
    metricValues: A MetricValuesValue attribute.
    metricValuesIsNull: A boolean attribute.
    metrics: A SqlPlanMetric attribute.
    modifiedConfigs: A ModifiedConfigsValue attribute.
    physicalPlanDescription: A string attribute.
    rootExecutionId: A string attribute.
    stages: A string attribute.
    submissionTime: A string attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class JobsValue(_messages.Message):
    r"""A JobsValue object.

    Messages:
      AdditionalProperty: An additional property for a JobsValue object.

    Fields:
      additionalProperties: Additional properties of type JobsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a JobsValue object.

      Enums:
        ValueValueValuesEnum:

      Fields:
        key: Name of the additional property.
        value: A ValueValueValuesEnum attribute.
      """

      class ValueValueValuesEnum(_messages.Enum):
        r"""ValueValueValuesEnum enum type.

        Values:
          JOB_EXECUTION_STATUS_UNSPECIFIED: <no description>
          JOB_EXECUTION_STATUS_RUNNING: <no description>
          JOB_EXECUTION_STATUS_SUCCEEDED: <no description>
          JOB_EXECUTION_STATUS_FAILED: <no description>
          JOB_EXECUTION_STATUS_UNKNOWN: <no description>
        """
        JOB_EXECUTION_STATUS_UNSPECIFIED = 0
        JOB_EXECUTION_STATUS_RUNNING = 1
        JOB_EXECUTION_STATUS_SUCCEEDED = 2
        JOB_EXECUTION_STATUS_FAILED = 3
        JOB_EXECUTION_STATUS_UNKNOWN = 4

      key = _messages.StringField(1)
      value = _messages.EnumField('ValueValueValuesEnum', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetricValuesValue(_messages.Message):
    r"""A MetricValuesValue object.

    Messages:
      AdditionalProperty: An additional property for a MetricValuesValue
        object.

    Fields:
      additionalProperties: Additional properties of type MetricValuesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetricValuesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ModifiedConfigsValue(_messages.Message):
    r"""A ModifiedConfigsValue object.

    Messages:
      AdditionalProperty: An additional property for a ModifiedConfigsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ModifiedConfigsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ModifiedConfigsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  completionTime = _messages.StringField(1)
  description = _messages.StringField(2)
  details = _messages.StringField(3)
  errorMessage = _messages.StringField(4)
  executionId = _messages.IntegerField(5)
  jobs = _messages.MessageField('JobsValue', 6)
  metricValues = _messages.MessageField('MetricValuesValue', 7)
  metricValuesIsNull = _messages.BooleanField(8)
  metrics = _messages.MessageField('SqlPlanMetric', 9, repeated=True)
  modifiedConfigs = _messages.MessageField('ModifiedConfigsValue', 10)
  physicalPlanDescription = _messages.StringField(11)
  rootExecutionId = _messages.IntegerField(12)
  stages = _messages.IntegerField(13, repeated=True)
  submissionTime = _messages.StringField(14)


class SqlPlanMetric(_messages.Message):
  r"""Metrics related to SQL execution.

  Fields:
    accumulatorId: A string attribute.
    metricType: A string attribute.
    name: A string attribute.
  """

  accumulatorId = _messages.IntegerField(1)
  metricType = _messages.StringField(2)
  name = _messages.StringField(3)


class StageAttemptTasksSummary(_messages.Message):
  r"""Data related to tasks summary for a Spark Stage Attempt

  Fields:
    applicationId: A string attribute.
    numFailedTasks: A integer attribute.
    numKilledTasks: A integer attribute.
    numPendingTasks: A integer attribute.
    numRunningTasks: A integer attribute.
    numSuccessTasks: A integer attribute.
    numTasks: A integer attribute.
    stageAttemptId: A integer attribute.
    stageId: A string attribute.
  """

  applicationId = _messages.StringField(1)
  numFailedTasks = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  numKilledTasks = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  numPendingTasks = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  numRunningTasks = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  numSuccessTasks = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  numTasks = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  stageAttemptId = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(9)


class StageData(_messages.Message):
  r"""Data corresponding to a stage.

  Enums:
    StatusValueValuesEnum:

  Messages:
    ExecutorSummaryValue: A ExecutorSummaryValue object.
    KilledTasksSummaryValue: A KilledTasksSummaryValue object.
    LocalityValue: A LocalityValue object.
    TasksValue: A TasksValue object.

  Fields:
    accumulatorUpdates: A AccumulableInfo attribute.
    completionTime: A string attribute.
    description: A string attribute.
    details: A string attribute.
    executorMetricsDistributions: A ExecutorMetricsDistributions attribute.
    executorSummary: A ExecutorSummaryValue attribute.
    failureReason: A string attribute.
    firstTaskLaunchedTime: A string attribute.
    isShufflePushEnabled: A boolean attribute.
    jobIds: A string attribute.
    killedTasksSummary: A KilledTasksSummaryValue attribute.
    locality: A LocalityValue attribute.
    name: A string attribute.
    numActiveTasks: A integer attribute.
    numCompleteTasks: A integer attribute.
    numCompletedIndices: A integer attribute.
    numFailedTasks: A integer attribute.
    numKilledTasks: A integer attribute.
    numTasks: A integer attribute.
    parentStageIds: A string attribute.
    peakExecutorMetrics: A ExecutorMetrics attribute.
    rddIds: A string attribute.
    resourceProfileId: A integer attribute.
    schedulingPool: A string attribute.
    shuffleMergersCount: A integer attribute.
    speculationSummary: A SpeculationStageSummary attribute.
    stageAttemptId: A integer attribute.
    stageId: A string attribute.
    stageMetrics: A StageMetrics attribute.
    status: A StatusValueValuesEnum attribute.
    submissionTime: A string attribute.
    taskQuantileMetrics: Summary metrics fields. These are included in
      response only if present in summary_metrics_mask field in request
    tasks: A TasksValue attribute.
  """

  class StatusValueValuesEnum(_messages.Enum):
    r"""StatusValueValuesEnum enum type.

    Values:
      STAGE_STATUS_UNSPECIFIED: <no description>
      STAGE_STATUS_ACTIVE: <no description>
      STAGE_STATUS_COMPLETE: <no description>
      STAGE_STATUS_FAILED: <no description>
      STAGE_STATUS_PENDING: <no description>
      STAGE_STATUS_SKIPPED: <no description>
    """
    STAGE_STATUS_UNSPECIFIED = 0
    STAGE_STATUS_ACTIVE = 1
    STAGE_STATUS_COMPLETE = 2
    STAGE_STATUS_FAILED = 3
    STAGE_STATUS_PENDING = 4
    STAGE_STATUS_SKIPPED = 5

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ExecutorSummaryValue(_messages.Message):
    r"""A ExecutorSummaryValue object.

    Messages:
      AdditionalProperty: An additional property for a ExecutorSummaryValue
        object.

    Fields:
      additionalProperties: Additional properties of type ExecutorSummaryValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ExecutorSummaryValue object.

      Fields:
        key: Name of the additional property.
        value: A ExecutorStageSummary attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ExecutorStageSummary', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class KilledTasksSummaryValue(_messages.Message):
    r"""A KilledTasksSummaryValue object.

    Messages:
      AdditionalProperty: An additional property for a KilledTasksSummaryValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        KilledTasksSummaryValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a KilledTasksSummaryValue object.

      Fields:
        key: Name of the additional property.
        value: A integer attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2, variant=_messages.Variant.INT32)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LocalityValue(_messages.Message):
    r"""A LocalityValue object.

    Messages:
      AdditionalProperty: An additional property for a LocalityValue object.

    Fields:
      additionalProperties: Additional properties of type LocalityValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LocalityValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class TasksValue(_messages.Message):
    r"""A TasksValue object.

    Messages:
      AdditionalProperty: An additional property for a TasksValue object.

    Fields:
      additionalProperties: Additional properties of type TasksValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TasksValue object.

      Fields:
        key: Name of the additional property.
        value: A TaskData attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('TaskData', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  accumulatorUpdates = _messages.MessageField('AccumulableInfo', 1, repeated=True)
  completionTime = _messages.StringField(2)
  description = _messages.StringField(3)
  details = _messages.StringField(4)
  executorMetricsDistributions = _messages.MessageField('ExecutorMetricsDistributions', 5)
  executorSummary = _messages.MessageField('ExecutorSummaryValue', 6)
  failureReason = _messages.StringField(7)
  firstTaskLaunchedTime = _messages.StringField(8)
  isShufflePushEnabled = _messages.BooleanField(9)
  jobIds = _messages.IntegerField(10, repeated=True)
  killedTasksSummary = _messages.MessageField('KilledTasksSummaryValue', 11)
  locality = _messages.MessageField('LocalityValue', 12)
  name = _messages.StringField(13)
  numActiveTasks = _messages.IntegerField(14, variant=_messages.Variant.INT32)
  numCompleteTasks = _messages.IntegerField(15, variant=_messages.Variant.INT32)
  numCompletedIndices = _messages.IntegerField(16, variant=_messages.Variant.INT32)
  numFailedTasks = _messages.IntegerField(17, variant=_messages.Variant.INT32)
  numKilledTasks = _messages.IntegerField(18, variant=_messages.Variant.INT32)
  numTasks = _messages.IntegerField(19, variant=_messages.Variant.INT32)
  parentStageIds = _messages.IntegerField(20, repeated=True)
  peakExecutorMetrics = _messages.MessageField('ExecutorMetrics', 21)
  rddIds = _messages.IntegerField(22, repeated=True)
  resourceProfileId = _messages.IntegerField(23, variant=_messages.Variant.INT32)
  schedulingPool = _messages.StringField(24)
  shuffleMergersCount = _messages.IntegerField(25, variant=_messages.Variant.INT32)
  speculationSummary = _messages.MessageField('SpeculationStageSummary', 26)
  stageAttemptId = _messages.IntegerField(27, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(28)
  stageMetrics = _messages.MessageField('StageMetrics', 29)
  status = _messages.EnumField('StatusValueValuesEnum', 30)
  submissionTime = _messages.StringField(31)
  taskQuantileMetrics = _messages.MessageField('TaskQuantileMetrics', 32)
  tasks = _messages.MessageField('TasksValue', 33)


class StageInputMetrics(_messages.Message):
  r"""Metrics about the input read by the stage.

  Fields:
    bytesRead: A string attribute.
    recordsRead: A string attribute.
  """

  bytesRead = _messages.IntegerField(1)
  recordsRead = _messages.IntegerField(2)


class StageMetrics(_messages.Message):
  r"""Stage Level Aggregated Metrics

  Fields:
    diskBytesSpilled: A string attribute.
    executorCpuTimeNanos: A string attribute.
    executorDeserializeCpuTimeNanos: A string attribute.
    executorDeserializeTimeMillis: A string attribute.
    executorRunTimeMillis: A string attribute.
    jvmGcTimeMillis: A string attribute.
    memoryBytesSpilled: A string attribute.
    peakExecutionMemoryBytes: A string attribute.
    resultSerializationTimeMillis: A string attribute.
    resultSize: A string attribute.
    stageInputMetrics: A StageInputMetrics attribute.
    stageOutputMetrics: A StageOutputMetrics attribute.
    stageShuffleReadMetrics: A StageShuffleReadMetrics attribute.
    stageShuffleWriteMetrics: A StageShuffleWriteMetrics attribute.
  """

  diskBytesSpilled = _messages.IntegerField(1)
  executorCpuTimeNanos = _messages.IntegerField(2)
  executorDeserializeCpuTimeNanos = _messages.IntegerField(3)
  executorDeserializeTimeMillis = _messages.IntegerField(4)
  executorRunTimeMillis = _messages.IntegerField(5)
  jvmGcTimeMillis = _messages.IntegerField(6)
  memoryBytesSpilled = _messages.IntegerField(7)
  peakExecutionMemoryBytes = _messages.IntegerField(8)
  resultSerializationTimeMillis = _messages.IntegerField(9)
  resultSize = _messages.IntegerField(10)
  stageInputMetrics = _messages.MessageField('StageInputMetrics', 11)
  stageOutputMetrics = _messages.MessageField('StageOutputMetrics', 12)
  stageShuffleReadMetrics = _messages.MessageField('StageShuffleReadMetrics', 13)
  stageShuffleWriteMetrics = _messages.MessageField('StageShuffleWriteMetrics', 14)


class StageOutputMetrics(_messages.Message):
  r"""Metrics about the output written by the stage.

  Fields:
    bytesWritten: A string attribute.
    recordsWritten: A string attribute.
  """

  bytesWritten = _messages.IntegerField(1)
  recordsWritten = _messages.IntegerField(2)


class StageShufflePushReadMetrics(_messages.Message):
  r"""A StageShufflePushReadMetrics object.

  Fields:
    corruptMergedBlockChunks: A string attribute.
    localMergedBlocksFetched: A string attribute.
    localMergedBytesRead: A string attribute.
    localMergedChunksFetched: A string attribute.
    mergedFetchFallbackCount: A string attribute.
    remoteMergedBlocksFetched: A string attribute.
    remoteMergedBytesRead: A string attribute.
    remoteMergedChunksFetched: A string attribute.
    remoteMergedReqsDuration: A string attribute.
  """

  corruptMergedBlockChunks = _messages.IntegerField(1)
  localMergedBlocksFetched = _messages.IntegerField(2)
  localMergedBytesRead = _messages.IntegerField(3)
  localMergedChunksFetched = _messages.IntegerField(4)
  mergedFetchFallbackCount = _messages.IntegerField(5)
  remoteMergedBlocksFetched = _messages.IntegerField(6)
  remoteMergedBytesRead = _messages.IntegerField(7)
  remoteMergedChunksFetched = _messages.IntegerField(8)
  remoteMergedReqsDuration = _messages.IntegerField(9)


class StageShuffleReadMetrics(_messages.Message):
  r"""Shuffle data read for the stage.

  Fields:
    bytesRead: A string attribute.
    fetchWaitTimeMillis: A string attribute.
    localBlocksFetched: A string attribute.
    localBytesRead: A string attribute.
    recordsRead: A string attribute.
    remoteBlocksFetched: A string attribute.
    remoteBytesRead: A string attribute.
    remoteBytesReadToDisk: A string attribute.
    remoteReqsDuration: A string attribute.
    stageShufflePushReadMetrics: A StageShufflePushReadMetrics attribute.
  """

  bytesRead = _messages.IntegerField(1)
  fetchWaitTimeMillis = _messages.IntegerField(2)
  localBlocksFetched = _messages.IntegerField(3)
  localBytesRead = _messages.IntegerField(4)
  recordsRead = _messages.IntegerField(5)
  remoteBlocksFetched = _messages.IntegerField(6)
  remoteBytesRead = _messages.IntegerField(7)
  remoteBytesReadToDisk = _messages.IntegerField(8)
  remoteReqsDuration = _messages.IntegerField(9)
  stageShufflePushReadMetrics = _messages.MessageField('StageShufflePushReadMetrics', 10)


class StageShuffleWriteMetrics(_messages.Message):
  r"""Shuffle data written for the stage.

  Fields:
    bytesWritten: A string attribute.
    recordsWritten: A string attribute.
    writeTimeNanos: A string attribute.
  """

  bytesWritten = _messages.IntegerField(1)
  recordsWritten = _messages.IntegerField(2)
  writeTimeNanos = _messages.IntegerField(3)


class StagesSummary(_messages.Message):
  r"""Data related to Stages page summary

  Fields:
    applicationId: A string attribute.
    numActiveStages: A integer attribute.
    numCompletedStages: A integer attribute.
    numFailedStages: A integer attribute.
    numPendingStages: A integer attribute.
    numSkippedStages: A integer attribute.
  """

  applicationId = _messages.StringField(1)
  numActiveStages = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  numCompletedStages = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  numFailedStages = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  numPendingStages = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  numSkippedStages = _messages.IntegerField(6, variant=_messages.Variant.INT32)


class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)


class StartClusterRequest(_messages.Message):
  r"""A request to start a cluster.

  Fields:
    clusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail
      (with error NOT_FOUND) if a cluster with the specified UUID does not
      exist.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two StartClusterRequest (https://cloud.google.com/datapr
      oc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.
      StartClusterRequest)s with the same id, then the second request will be
      ignored and the first google.longrunning.Operation created and stored in
      the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  clusterUuid = _messages.StringField(1)
  requestId = _messages.StringField(2)


class StartNodeGroupRequest(_messages.Message):
  r"""A request to start a node group.

  Fields:
    parentOperationId: Optional. Operation id of the parent operation sending
      the start node group request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two StartNodeGroupRequest with the same ID, the second
      request is ignored and the first google.longrunning.Operation created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  parentOperationId = _messages.StringField(1)
  requestId = _messages.StringField(2)


class StartupConfig(_messages.Message):
  r"""Configuration to handle the startup of instances during cluster create
  and update process.

  Fields:
    requiredRegistrationFraction: Optional. The config setting to enable
      cluster creation/ updation to be successful only after
      required_registration_fraction of instances are up and running. This
      configuration is applicable to only secondary workers for now. The
      cluster will fail if required_registration_fraction of instances are not
      available. This will include instance creation, agent registration, and
      service registration (if enabled).
  """

  requiredRegistrationFraction = _messages.FloatField(1)


class StateHistory(_messages.Message):
  r"""Historical state information.

  Enums:
    StateValueValuesEnum: Output only. The state of the batch at this point in
      history.

  Fields:
    state: Output only. The state of the batch at this point in history.
    stateMessage: Output only. Details about the state at this point in
      history.
    stateStartTime: Output only. The time when the batch entered the
      historical state.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The state of the batch at this point in history.

    Values:
      STATE_UNSPECIFIED: The batch state is unknown.
      PENDING: The batch is created before running.
      RUNNING: The batch is running.
      CANCELLING: The batch is cancelling.
      CANCELLED: The batch cancellation was successful.
      SUCCEEDED: The batch completed successfully.
      FAILED: The batch is no longer running due to an error.
    """
    STATE_UNSPECIFIED = 0
    PENDING = 1
    RUNNING = 2
    CANCELLING = 3
    CANCELLED = 4
    SUCCEEDED = 5
    FAILED = 6

  state = _messages.EnumField('StateValueValuesEnum', 1)
  stateMessage = _messages.StringField(2)
  stateStartTime = _messages.StringField(3)


class StateOperatorProgress(_messages.Message):
  r"""A StateOperatorProgress object.

  Messages:
    CustomMetricsValue: A CustomMetricsValue object.

  Fields:
    allRemovalsTimeMs: A string attribute.
    allUpdatesTimeMs: A string attribute.
    commitTimeMs: A string attribute.
    customMetrics: A CustomMetricsValue attribute.
    memoryUsedBytes: A string attribute.
    numRowsDroppedByWatermark: A string attribute.
    numRowsRemoved: A string attribute.
    numRowsTotal: A string attribute.
    numRowsUpdated: A string attribute.
    numShufflePartitions: A string attribute.
    numStateStoreInstances: A string attribute.
    operatorName: A string attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class CustomMetricsValue(_messages.Message):
    r"""A CustomMetricsValue object.

    Messages:
      AdditionalProperty: An additional property for a CustomMetricsValue
        object.

    Fields:
      additionalProperties: Additional properties of type CustomMetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a CustomMetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  allRemovalsTimeMs = _messages.IntegerField(1)
  allUpdatesTimeMs = _messages.IntegerField(2)
  commitTimeMs = _messages.IntegerField(3)
  customMetrics = _messages.MessageField('CustomMetricsValue', 4)
  memoryUsedBytes = _messages.IntegerField(5)
  numRowsDroppedByWatermark = _messages.IntegerField(6)
  numRowsRemoved = _messages.IntegerField(7)
  numRowsTotal = _messages.IntegerField(8)
  numRowsUpdated = _messages.IntegerField(9)
  numShufflePartitions = _messages.IntegerField(10)
  numStateStoreInstances = _messages.IntegerField(11)
  operatorName = _messages.StringField(12)


class Status(_messages.Message):
  r"""The Status type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by gRPC (https://github.com/grpc). Each Status message contains three
  pieces of data: error code, error message, and error details.You can find
  out more about this error model and how to work with it in the API Design
  Guide (https://cloud.google.com/apis/design/errors).

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)


class StopClusterRequest(_messages.Message):
  r"""A request to stop a cluster.

  Fields:
    clusterUuid: Optional. Specifying the cluster_uuid means the RPC will fail
      (with error NOT_FOUND) if a cluster with the specified UUID does not
      exist.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two StopClusterRequest (https://cloud.google.com/datapro
      c/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.S
      topClusterRequest)s with the same id, then the second request will be
      ignored and the first google.longrunning.Operation created and stored in
      the backend is returned.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  clusterUuid = _messages.StringField(1)
  requestId = _messages.StringField(2)


class StopNodeGroupRequest(_messages.Message):
  r"""A request to stop a node group.

  Fields:
    parentOperationId: Optional. Operation id of the parent operation sending
      the stop request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two StopNodeGroupRequest with the same ID, the second
      request is ignored and the first google.longrunning.Operation created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  parentOperationId = _messages.StringField(1)
  requestId = _messages.StringField(2)


class StreamBlockData(_messages.Message):
  r"""Stream Block Data.

  Fields:
    deserialized: A boolean attribute.
    diskSize: A string attribute.
    executorId: A string attribute.
    hostPort: A string attribute.
    memSize: A string attribute.
    name: A string attribute.
    storageLevel: A string attribute.
    useDisk: A boolean attribute.
    useMemory: A boolean attribute.
  """

  deserialized = _messages.BooleanField(1)
  diskSize = _messages.IntegerField(2)
  executorId = _messages.StringField(3)
  hostPort = _messages.StringField(4)
  memSize = _messages.IntegerField(5)
  name = _messages.StringField(6)
  storageLevel = _messages.StringField(7)
  useDisk = _messages.BooleanField(8)
  useMemory = _messages.BooleanField(9)


class StreamingQueryData(_messages.Message):
  r"""Streaming

  Fields:
    endTimestamp: A string attribute.
    exception: A string attribute.
    isActive: A boolean attribute.
    name: A string attribute.
    runId: A string attribute.
    startTimestamp: A string attribute.
    streamingQueryId: A string attribute.
  """

  endTimestamp = _messages.IntegerField(1)
  exception = _messages.StringField(2)
  isActive = _messages.BooleanField(3)
  name = _messages.StringField(4)
  runId = _messages.StringField(5)
  startTimestamp = _messages.IntegerField(6)
  streamingQueryId = _messages.StringField(7)


class StreamingQueryProgress(_messages.Message):
  r"""A StreamingQueryProgress object.

  Messages:
    DurationMillisValue: A DurationMillisValue object.
    EventTimeValue: A EventTimeValue object.
    ObservedMetricsValue: A ObservedMetricsValue object.

  Fields:
    batchDuration: A string attribute.
    batchId: A string attribute.
    durationMillis: A DurationMillisValue attribute.
    eventTime: A EventTimeValue attribute.
    name: A string attribute.
    observedMetrics: A ObservedMetricsValue attribute.
    runId: A string attribute.
    sink: A SinkProgress attribute.
    sources: A SourceProgress attribute.
    stateOperators: A StateOperatorProgress attribute.
    streamingQueryProgressId: A string attribute.
    timestamp: A string attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DurationMillisValue(_messages.Message):
    r"""A DurationMillisValue object.

    Messages:
      AdditionalProperty: An additional property for a DurationMillisValue
        object.

    Fields:
      additionalProperties: Additional properties of type DurationMillisValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DurationMillisValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.IntegerField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class EventTimeValue(_messages.Message):
    r"""A EventTimeValue object.

    Messages:
      AdditionalProperty: An additional property for a EventTimeValue object.

    Fields:
      additionalProperties: Additional properties of type EventTimeValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a EventTimeValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ObservedMetricsValue(_messages.Message):
    r"""A ObservedMetricsValue object.

    Messages:
      AdditionalProperty: An additional property for a ObservedMetricsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ObservedMetricsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ObservedMetricsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  batchDuration = _messages.IntegerField(1)
  batchId = _messages.IntegerField(2)
  durationMillis = _messages.MessageField('DurationMillisValue', 3)
  eventTime = _messages.MessageField('EventTimeValue', 4)
  name = _messages.StringField(5)
  observedMetrics = _messages.MessageField('ObservedMetricsValue', 6)
  runId = _messages.StringField(7)
  sink = _messages.MessageField('SinkProgress', 8)
  sources = _messages.MessageField('SourceProgress', 9, repeated=True)
  stateOperators = _messages.MessageField('StateOperatorProgress', 10, repeated=True)
  streamingQueryProgressId = _messages.StringField(11)
  timestamp = _messages.StringField(12)


class SubmitJobRequest(_messages.Message):
  r"""A request to submit a job.

  Fields:
    job: Required. The job resource.
    requestId: Optional. A unique id used to identify the request. If the
      server receives two SubmitJobRequest (https://cloud.google.com/dataproc/
      docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.Sub
      mitJobRequest)s with the same id, then the second request will be
      ignored and the first Job created and stored in the backend is
      returned.It is recommended to always set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  job = _messages.MessageField('Job', 1)
  requestId = _messages.StringField(2)


class SummarizeSessionSparkApplicationExecutorsResponse(_messages.Message):
  r"""Consolidated summary of executors for a Spark Application.

  Fields:
    activeExecutorSummary: Consolidated summary for active executors.
    applicationId: Spark Application Id
    deadExecutorSummary: Consolidated summary for dead executors.
    totalExecutorSummary: Overall consolidated summary for all executors.
  """

  activeExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 1)
  applicationId = _messages.StringField(2)
  deadExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 3)
  totalExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 4)


class SummarizeSessionSparkApplicationJobsResponse(_messages.Message):
  r"""Summary of a Spark Application jobs.

  Fields:
    jobsSummary: Summary of a Spark Application Jobs
  """

  jobsSummary = _messages.MessageField('JobsSummary', 1)


class SummarizeSessionSparkApplicationStageAttemptTasksResponse(_messages.Message):
  r"""Summary of tasks for a Spark Application stage attempt.

  Fields:
    stageAttemptTasksSummary: Summary of tasks for a Spark Application Stage
      Attempt
  """

  stageAttemptTasksSummary = _messages.MessageField('StageAttemptTasksSummary', 1)


class SummarizeSessionSparkApplicationStagesResponse(_messages.Message):
  r"""Summary of a Spark Application stages.

  Fields:
    stagesSummary: Summary of a Spark Application Stages
  """

  stagesSummary = _messages.MessageField('StagesSummary', 1)


class SummarizeSparkApplicationExecutorsResponse(_messages.Message):
  r"""Consolidated summary of executors for a Spark Application.

  Fields:
    activeExecutorSummary: Consolidated summary for active executors.
    applicationId: Spark Application Id
    deadExecutorSummary: Consolidated summary for dead executors.
    totalExecutorSummary: Overall consolidated summary for all executors.
  """

  activeExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 1)
  applicationId = _messages.StringField(2)
  deadExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 3)
  totalExecutorSummary = _messages.MessageField('ConsolidatedExecutorSummary', 4)


class SummarizeSparkApplicationJobsResponse(_messages.Message):
  r"""Summary of a Spark Application jobs.

  Fields:
    jobsSummary: Summary of a Spark Application Jobs
  """

  jobsSummary = _messages.MessageField('JobsSummary', 1)


class SummarizeSparkApplicationStageAttemptTasksResponse(_messages.Message):
  r"""Summary of tasks for a Spark Application stage attempt.

  Fields:
    stageAttemptTasksSummary: Summary of tasks for a Spark Application Stage
      Attempt
  """

  stageAttemptTasksSummary = _messages.MessageField('StageAttemptTasksSummary', 1)


class SummarizeSparkApplicationStagesResponse(_messages.Message):
  r"""Summary of a Spark Application stages.

  Fields:
    stagesSummary: Summary of a Spark Application Stages
  """

  stagesSummary = _messages.MessageField('StagesSummary', 1)


class TaskData(_messages.Message):
  r"""Data corresponding to tasks created by spark.

  Messages:
    ExecutorLogsValue: A ExecutorLogsValue object.

  Fields:
    accumulatorUpdates: A AccumulableInfo attribute.
    attempt: A integer attribute.
    durationMillis: A string attribute.
    errorMessage: A string attribute.
    executorId: A string attribute.
    executorLogs: A ExecutorLogsValue attribute.
    gettingResultTimeMillis: A string attribute.
    hasMetrics: A boolean attribute.
    host: A string attribute.
    index: A integer attribute.
    launchTime: A string attribute.
    partitionId: A integer attribute.
    resultFetchStart: A string attribute.
    schedulerDelayMillis: A string attribute.
    speculative: A boolean attribute.
    stageAttemptId: A integer attribute.
    stageId: A string attribute.
    status: A string attribute.
    taskId: A string attribute.
    taskLocality: A string attribute.
    taskMetrics: A TaskMetrics attribute.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ExecutorLogsValue(_messages.Message):
    r"""A ExecutorLogsValue object.

    Messages:
      AdditionalProperty: An additional property for a ExecutorLogsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ExecutorLogsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ExecutorLogsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  accumulatorUpdates = _messages.MessageField('AccumulableInfo', 1, repeated=True)
  attempt = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  durationMillis = _messages.IntegerField(3)
  errorMessage = _messages.StringField(4)
  executorId = _messages.StringField(5)
  executorLogs = _messages.MessageField('ExecutorLogsValue', 6)
  gettingResultTimeMillis = _messages.IntegerField(7)
  hasMetrics = _messages.BooleanField(8)
  host = _messages.StringField(9)
  index = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  launchTime = _messages.StringField(11)
  partitionId = _messages.IntegerField(12, variant=_messages.Variant.INT32)
  resultFetchStart = _messages.StringField(13)
  schedulerDelayMillis = _messages.IntegerField(14)
  speculative = _messages.BooleanField(15)
  stageAttemptId = _messages.IntegerField(16, variant=_messages.Variant.INT32)
  stageId = _messages.IntegerField(17)
  status = _messages.StringField(18)
  taskId = _messages.IntegerField(19)
  taskLocality = _messages.StringField(20)
  taskMetrics = _messages.MessageField('TaskMetrics', 21)


class TaskMetrics(_messages.Message):
  r"""Executor Task Metrics

  Fields:
    diskBytesSpilled: A string attribute.
    executorCpuTimeNanos: A string attribute.
    executorDeserializeCpuTimeNanos: A string attribute.
    executorDeserializeTimeMillis: A string attribute.
    executorRunTimeMillis: A string attribute.
    inputMetrics: A InputMetrics attribute.
    jvmGcTimeMillis: A string attribute.
    memoryBytesSpilled: A string attribute.
    outputMetrics: A OutputMetrics attribute.
    peakExecutionMemoryBytes: A string attribute.
    resultSerializationTimeMillis: A string attribute.
    resultSize: A string attribute.
    shuffleReadMetrics: A ShuffleReadMetrics attribute.
    shuffleWriteMetrics: A ShuffleWriteMetrics attribute.
  """

  diskBytesSpilled = _messages.IntegerField(1)
  executorCpuTimeNanos = _messages.IntegerField(2)
  executorDeserializeCpuTimeNanos = _messages.IntegerField(3)
  executorDeserializeTimeMillis = _messages.IntegerField(4)
  executorRunTimeMillis = _messages.IntegerField(5)
  inputMetrics = _messages.MessageField('InputMetrics', 6)
  jvmGcTimeMillis = _messages.IntegerField(7)
  memoryBytesSpilled = _messages.IntegerField(8)
  outputMetrics = _messages.MessageField('OutputMetrics', 9)
  peakExecutionMemoryBytes = _messages.IntegerField(10)
  resultSerializationTimeMillis = _messages.IntegerField(11)
  resultSize = _messages.IntegerField(12)
  shuffleReadMetrics = _messages.MessageField('ShuffleReadMetrics', 13)
  shuffleWriteMetrics = _messages.MessageField('ShuffleWriteMetrics', 14)


class TaskQuantileMetrics(_messages.Message):
  r"""A TaskQuantileMetrics object.

  Fields:
    diskBytesSpilled: A Quantiles attribute.
    durationMillis: A Quantiles attribute.
    executorCpuTimeNanos: A Quantiles attribute.
    executorDeserializeCpuTimeNanos: A Quantiles attribute.
    executorDeserializeTimeMillis: A Quantiles attribute.
    executorRunTimeMillis: A Quantiles attribute.
    gettingResultTimeMillis: A Quantiles attribute.
    inputMetrics: A InputQuantileMetrics attribute.
    jvmGcTimeMillis: A Quantiles attribute.
    memoryBytesSpilled: A Quantiles attribute.
    outputMetrics: A OutputQuantileMetrics attribute.
    peakExecutionMemoryBytes: A Quantiles attribute.
    resultSerializationTimeMillis: A Quantiles attribute.
    resultSize: A Quantiles attribute.
    schedulerDelayMillis: A Quantiles attribute.
    shuffleReadMetrics: A ShuffleReadQuantileMetrics attribute.
    shuffleWriteMetrics: A ShuffleWriteQuantileMetrics attribute.
  """

  diskBytesSpilled = _messages.MessageField('Quantiles', 1)
  durationMillis = _messages.MessageField('Quantiles', 2)
  executorCpuTimeNanos = _messages.MessageField('Quantiles', 3)
  executorDeserializeCpuTimeNanos = _messages.MessageField('Quantiles', 4)
  executorDeserializeTimeMillis = _messages.MessageField('Quantiles', 5)
  executorRunTimeMillis = _messages.MessageField('Quantiles', 6)
  gettingResultTimeMillis = _messages.MessageField('Quantiles', 7)
  inputMetrics = _messages.MessageField('InputQuantileMetrics', 8)
  jvmGcTimeMillis = _messages.MessageField('Quantiles', 9)
  memoryBytesSpilled = _messages.MessageField('Quantiles', 10)
  outputMetrics = _messages.MessageField('OutputQuantileMetrics', 11)
  peakExecutionMemoryBytes = _messages.MessageField('Quantiles', 12)
  resultSerializationTimeMillis = _messages.MessageField('Quantiles', 13)
  resultSize = _messages.MessageField('Quantiles', 14)
  schedulerDelayMillis = _messages.MessageField('Quantiles', 15)
  shuffleReadMetrics = _messages.MessageField('ShuffleReadQuantileMetrics', 16)
  shuffleWriteMetrics = _messages.MessageField('ShuffleWriteQuantileMetrics', 17)


class TaskResourceRequest(_messages.Message):
  r"""Resources used per task created by the application.

  Fields:
    amount: A number attribute.
    resourceName: A string attribute.
  """

  amount = _messages.FloatField(1)
  resourceName = _messages.StringField(2)


class TemplateParameter(_messages.Message):
  r"""A configurable parameter that replaces one or more fields in the
  template. Parameterizable fields: - Labels - File uris - Job properties -
  Job arguments - Script variables - Main class (in HadoopJob and SparkJob) -
  Zone (in ClusterSelector)

  Fields:
    description: Optional. Brief description of the parameter. Must not exceed
      1024 characters.
    fields: Required. Paths to all fields that the parameter replaces. A field
      is allowed to appear in at most one parameter's list of field paths.A
      field path is similar in syntax to a google.protobuf.FieldMask. For
      example, a field path that references the zone field of a workflow
      template's cluster selector would be specified as
      placement.clusterSelector.zone.Also, field paths can reference fields
      using the following syntax: Values in maps can be referenced by key:
      labels'key' placement.clusterSelector.clusterLabels'key'
      placement.managedCluster.labels'key'
      placement.clusterSelector.clusterLabels'key' jobs'step-id'.labels'key'
      Jobs in the jobs list can be referenced by step-id: jobs'step-
      id'.hadoopJob.mainJarFileUri jobs'step-id'.hiveJob.queryFileUri
      jobs'step-id'.pySparkJob.mainPythonFileUri jobs'step-
      id'.hadoopJob.jarFileUris0 jobs'step-id'.hadoopJob.archiveUris0
      jobs'step-id'.hadoopJob.fileUris0 jobs'step-
      id'.pySparkJob.pythonFileUris0 Items in repeated fields can be
      referenced by a zero-based index: jobs'step-id'.sparkJob.args0 Other
      examples: jobs'step-id'.hadoopJob.properties'key' jobs'step-
      id'.hadoopJob.args0 jobs'step-id'.hiveJob.scriptVariables'key'
      jobs'step-id'.hadoopJob.mainJarFileUri placement.clusterSelector.zoneIt
      may not be possible to parameterize maps and repeated fields in their
      entirety since only individual map values and individual items in
      repeated fields can be referenced. For example, the following field
      paths are invalid: placement.clusterSelector.clusterLabels jobs'step-
      id'.sparkJob.args
    name: Required. Parameter name. The parameter name is used as the key, and
      paired with the parameter value, which are passed to the template when
      the template is instantiated. The name must contain only capital letters
      (A-Z), numbers (0-9), and underscores (_), and must not start with a
      number. The maximum length is 40 characters.
    validation: Optional. Validation rules to be applied to this parameter's
      value.
  """

  description = _messages.StringField(1)
  fields = _messages.StringField(2, repeated=True)
  name = _messages.StringField(3)
  validation = _messages.MessageField('ParameterValidation', 4)


class TerminateSessionRequest(_messages.Message):
  r"""A request to terminate an interactive session.

  Fields:
    requestId: Optional. A unique ID used to identify the request. If the
      service receives two TerminateSessionRequest (https://cloud.google.com/d
      ataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.datapro
      c.v1.TerminateSessionRequest)s with the same ID, the second request is
      ignored.Recommendation: Set this value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  requestId = _messages.StringField(1)


class TestIamPermissionsRequest(_messages.Message):
  r"""Request message for TestIamPermissions method.

  Fields:
    permissions: The set of permissions to check for the resource. Permissions
      with wildcards (such as * or storage.*) are not allowed. For more
      information see IAM Overview
      (https://cloud.google.com/iam/docs/overview#permissions).
  """

  permissions = _messages.StringField(1, repeated=True)


class TestIamPermissionsResponse(_messages.Message):
  r"""Response message for TestIamPermissions method.

  Fields:
    permissions: A subset of TestPermissionsRequest.permissions that the
      caller is allowed.
  """

  permissions = _messages.StringField(1, repeated=True)


class TrinoJob(_messages.Message):
  r"""A Dataproc job for running Trino (https://trino.io/) queries. IMPORTANT:
  The Dataproc Trino Optional Component
  (https://cloud.google.com/dataproc/docs/concepts/components/trino) must be
  enabled when the cluster is created to submit a Trino job to the cluster.

  Messages:
    PropertiesValue: Optional. A mapping of property names to values. Used to
      set Trino session properties (https://trino.io/docs/current/sql/set-
      session.html) Equivalent to using the --session flag in the Trino CLI

  Fields:
    clientTags: Optional. Trino client tags to attach to this query
    continueOnFailure: Optional. Whether to continue executing queries if a
      query fails. The default value is false. Setting to true can be useful
      when executing independent parallel queries.
    loggingConfig: Optional. The runtime log config for job execution.
    outputFormat: Optional. The format in which query output will be
      displayed. See the Trino documentation for supported output formats
    properties: Optional. A mapping of property names to values. Used to set
      Trino session properties (https://trino.io/docs/current/sql/set-
      session.html) Equivalent to using the --session flag in the Trino CLI
    queryFileUri: The HCFS URI of the script that contains SQL queries.
    queryList: A list of queries.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Optional. A mapping of property names to values. Used to set Trino
    session properties (https://trino.io/docs/current/sql/set-session.html)
    Equivalent to using the --session flag in the Trino CLI

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clientTags = _messages.StringField(1, repeated=True)
  continueOnFailure = _messages.BooleanField(2)
  loggingConfig = _messages.MessageField('LoggingConfig', 3)
  outputFormat = _messages.StringField(4)
  properties = _messages.MessageField('PropertiesValue', 5)
  queryFileUri = _messages.StringField(6)
  queryList = _messages.MessageField('QueryList', 7)


class UpdateLabelsNodeGroupRequest(_messages.Message):
  r"""A request to update the labels of a node group.

  Messages:
    LabelsValue: Required. The labels to associate with this Node Group. Label
      keys must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a cluster.

  Fields:
    labels: Required. The labels to associate with this Node Group. Label keys
      must contain 1 to 63 characters, and must conform to RFC 1035
      (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
      if present, must contain 1 to 63 characters, and must conform to RFC
      1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
      be associated with a cluster.
    parentOperationId: Optional. Operation id of the parent operation sending
      the update labels request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two UpdateLabelsNodeGroupRequest (https://cloud.google.c
      om/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dat
      aproc.v1.UpdateLabelsNodeGroupRequests) with the same ID, the second
      request is ignored and the first google.longrunning.Operation created
      and stored in the backend is returned.Recommendation: Set this value to
      a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The
      ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Required. The labels to associate with this Node Group. Label keys
    must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
    present, must contain 1 to 63 characters, and must conform to RFC 1035
    (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
    associated with a cluster.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  labels = _messages.MessageField('LabelsValue', 1)
  parentOperationId = _messages.StringField(2)
  requestId = _messages.StringField(3)


class UpdateMetadataConfigNodeGroupRequest(_messages.Message):
  r"""A request to update the config of a node group.

  Enums:
    UpdateMetadataConfigTypeValueValuesEnum: Required. The type of metadata
      config update to perform. Currently only
      CLUSTER_MULTITENANCY_USER_MAPPING is supported.

  Messages:
    MetadataConfigMapValue: Required. The metadata config to associate with
      this Node Group. This is a patch on top of the metadata that is defined
      in the NodeGroup's InstanceTemplate, which itself is derived from the
      Cluster's initial configuration. This will include the metadata key
      value pairs to be added on the VMs in the node group.

  Fields:
    metadataConfigMap: Required. The metadata config to associate with this
      Node Group. This is a patch on top of the metadata that is defined in
      the NodeGroup's InstanceTemplate, which itself is derived from the
      Cluster's initial configuration. This will include the metadata key
      value pairs to be added on the VMs in the node group.
    parentOperationId: Optional. Operation id of the parent operation sending
      the update config request.
    requestId: Optional. A unique ID used to identify the request. If the
      server receives two UpdateMetadataConfigNodeGroupRequest (https://cloud.
      google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.c
      loud.dataproc.v1.UpdateLabelsNodeGroupRequests) with the same ID, the
      second request is ignored and the first google.longrunning.Operation
      created and stored in the backend is returned.Recommendation: Set this
      value to a UUID
      (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID
      must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
      and hyphens (-). The maximum length is 40 characters.
    updateMetadataConfigType: Required. The type of metadata config update to
      perform. Currently only CLUSTER_MULTITENANCY_USER_MAPPING is supported.
  """

  class UpdateMetadataConfigTypeValueValuesEnum(_messages.Enum):
    r"""Required. The type of metadata config update to perform. Currently
    only CLUSTER_MULTITENANCY_USER_MAPPING is supported.

    Values:
      UNSPECIFIED: Unused.
      CLUSTER_MULTITENANCY_USER_MAPPING: Update the metadata property(s)
        related to multitenancy user mapping.
    """
    UNSPECIFIED = 0
    CLUSTER_MULTITENANCY_USER_MAPPING = 1

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataConfigMapValue(_messages.Message):
    r"""Required. The metadata config to associate with this Node Group. This
    is a patch on top of the metadata that is defined in the NodeGroup's
    InstanceTemplate, which itself is derived from the Cluster's initial
    configuration. This will include the metadata key value pairs to be added
    on the VMs in the node group.

    Messages:
      AdditionalProperty: An additional property for a MetadataConfigMapValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        MetadataConfigMapValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataConfigMapValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  metadataConfigMap = _messages.MessageField('MetadataConfigMapValue', 1)
  parentOperationId = _messages.StringField(2)
  requestId = _messages.StringField(3)
  updateMetadataConfigType = _messages.EnumField('UpdateMetadataConfigTypeValueValuesEnum', 4)


class UsageMetrics(_messages.Message):
  r"""Usage metrics represent approximate total resources consumed by a
  workload.

  Fields:
    acceleratorType: Optional. DEPRECATED Accelerator type being used, if any
    milliAcceleratorSeconds: Optional. DEPRECATED Accelerator usage in
      (milliAccelerator x seconds) (see Dataproc Serverless pricing
      (https://cloud.google.com/dataproc-serverless/pricing)).
    milliDcuSeconds: Optional. DCU (Dataproc Compute Units) usage in (milliDCU
      x seconds) (see Dataproc Serverless pricing
      (https://cloud.google.com/dataproc-serverless/pricing)).
    shuffleStorageGbSeconds: Optional. Shuffle storage usage in (GB x seconds)
      (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-
      serverless/pricing)).
    updateTime: Optional. The timestamp of the usage metrics.
  """

  acceleratorType = _messages.StringField(1)
  milliAcceleratorSeconds = _messages.IntegerField(2)
  milliDcuSeconds = _messages.IntegerField(3)
  shuffleStorageGbSeconds = _messages.IntegerField(4)
  updateTime = _messages.StringField(5)


class UsageSnapshot(_messages.Message):
  r"""The usage snapshot represents the resources consumed by a workload at a
  specified time.

  Fields:
    acceleratorType: Optional. Accelerator type being used, if any
    milliAccelerator: Optional. Milli (one-thousandth) accelerator. (see
      Dataproc Serverless pricing (https://cloud.google.com/dataproc-
      serverless/pricing))
    milliDcu: Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs)
      (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-
      serverless/pricing)).
    milliDcuPremium: Optional. Milli (one-thousandth) Dataproc Compute Units
      (DCUs) charged at premium tier (see Dataproc Serverless pricing
      (https://cloud.google.com/dataproc-serverless/pricing)).
    shuffleStorageGb: Optional. Shuffle Storage in gigabytes (GB). (see
      Dataproc Serverless pricing (https://cloud.google.com/dataproc-
      serverless/pricing))
    shuffleStorageGbPremium: Optional. Shuffle Storage in gigabytes (GB)
      charged at premium tier. (see Dataproc Serverless pricing
      (https://cloud.google.com/dataproc-serverless/pricing))
    snapshotTime: Optional. The timestamp of the usage snapshot.
  """

  acceleratorType = _messages.StringField(1)
  milliAccelerator = _messages.IntegerField(2)
  milliDcu = _messages.IntegerField(3)
  milliDcuPremium = _messages.IntegerField(4)
  shuffleStorageGb = _messages.IntegerField(5)
  shuffleStorageGbPremium = _messages.IntegerField(6)
  snapshotTime = _messages.StringField(7)


class ValueInfo(_messages.Message):
  r"""Annotatated property value.

  Fields:
    annotation: Annotation, comment or explanation why the property was set.
    overriddenValue: Optional. Value which was replaced by the corresponding
      component.
    value: Property value.
  """

  annotation = _messages.StringField(1)
  overriddenValue = _messages.StringField(2)
  value = _messages.StringField(3)


class ValueValidation(_messages.Message):
  r"""Validation based on a list of allowed values.

  Fields:
    values: Required. List of allowed values for the parameter.
  """

  values = _messages.StringField(1, repeated=True)


class VirtualClusterConfig(_messages.Message):
  r"""The Dataproc cluster config for a cluster that does not directly control
  the underlying compute resources, such as a Dataproc-on-GKE cluster
  (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).

  Fields:
    auxiliaryServicesConfig: Optional. Configuration of auxiliary services
      used by this cluster.
    kubernetesClusterConfig: Required. The configuration for running the
      Dataproc cluster on Kubernetes.
    stagingBucket: Optional. A Cloud Storage bucket used to stage job
      dependencies, config files, and job driver console output. If you do not
      specify a staging bucket, Cloud Dataproc will determine a Cloud Storage
      location (US, ASIA, or EU) for your cluster's staging bucket according
      to the Compute Engine zone where your cluster is deployed, and then
      create and manage this project-level, per-location bucket (see Dataproc
      staging and temp buckets
      (https://cloud.google.com/dataproc/docs/concepts/configuring-
      clusters/staging-bucket)). This field requires a Cloud Storage bucket
      name, not a gs://... URI to a Cloud Storage bucket.
  """

  auxiliaryServicesConfig = _messages.MessageField('AuxiliaryServicesConfig', 1)
  kubernetesClusterConfig = _messages.MessageField('KubernetesClusterConfig', 2)
  stagingBucket = _messages.StringField(3)


class WorkflowGraph(_messages.Message):
  r"""The workflow graph.

  Fields:
    nodes: Output only. The workflow nodes.
  """

  nodes = _messages.MessageField('WorkflowNode', 1, repeated=True)


class WorkflowMetadata(_messages.Message):
  r"""A Dataproc workflow template resource.

  Enums:
    StateValueValuesEnum: Output only. The workflow state.

  Messages:
    ParametersValue: Map from parameter names to values that were used for
      those parameters.

  Fields:
    clusterName: Output only. The name of the target cluster.
    clusterUuid: Output only. The UUID of target cluster.
    createCluster: Output only. The create cluster operation metadata.
    dagEndTime: Output only. DAG end time, only set for workflows with
      dag_timeout when DAG ends.
    dagStartTime: Output only. DAG start time, only set for workflows with
      dag_timeout when DAG begins.
    dagTimeout: Output only. The timeout duration for the DAG of jobs,
      expressed in seconds (see JSON representation of duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)).
    deleteCluster: Output only. The delete cluster operation metadata.
    endTime: Output only. Workflow end time.
    graph: Output only. The workflow graph.
    parameters: Map from parameter names to values that were used for those
      parameters.
    startTime: Output only. Workflow start time.
    state: Output only. The workflow state.
    template: Output only. The resource name of the workflow template as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates, the resource name of the template
      has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates, the resource name of the
      template has the following format: projects/{project_id}/locations/{loca
      tion}/workflowTemplates/{template_id}
    version: Output only. The version of template at the time of workflow
      instantiation.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The workflow state.

    Values:
      UNKNOWN: Unused.
      PENDING: The operation has been created.
      RUNNING: The operation is running.
      DONE: The operation is done; either cancelled or completed.
    """
    UNKNOWN = 0
    PENDING = 1
    RUNNING = 2
    DONE = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    r"""Map from parameter names to values that were used for those
    parameters.

    Messages:
      AdditionalProperty: An additional property for a ParametersValue object.

    Fields:
      additionalProperties: Additional properties of type ParametersValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clusterName = _messages.StringField(1)
  clusterUuid = _messages.StringField(2)
  createCluster = _messages.MessageField('ClusterOperation', 3)
  dagEndTime = _messages.StringField(4)
  dagStartTime = _messages.StringField(5)
  dagTimeout = _messages.StringField(6)
  deleteCluster = _messages.MessageField('ClusterOperation', 7)
  endTime = _messages.StringField(8)
  graph = _messages.MessageField('WorkflowGraph', 9)
  parameters = _messages.MessageField('ParametersValue', 10)
  startTime = _messages.StringField(11)
  state = _messages.EnumField('StateValueValuesEnum', 12)
  template = _messages.StringField(13)
  version = _messages.IntegerField(14, variant=_messages.Variant.INT32)


class WorkflowNode(_messages.Message):
  r"""The workflow node.

  Enums:
    StateValueValuesEnum: Output only. The node state.

  Fields:
    error: Output only. The error detail.
    jobId: Output only. The job id; populated after the node enters RUNNING
      state.
    prerequisiteStepIds: Output only. Node's prerequisite nodes.
    state: Output only. The node state.
    stepId: Output only. The name of the node.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The node state.

    Values:
      NODE_STATE_UNSPECIFIED: State is unspecified.
      BLOCKED: The node is awaiting prerequisite node to finish.
      RUNNABLE: The node is runnable but not running.
      RUNNING: The node is running.
      COMPLETED: The node completed successfully.
      FAILED: The node failed. A node can be marked FAILED because its
        ancestor or peer failed.
    """
    NODE_STATE_UNSPECIFIED = 0
    BLOCKED = 1
    RUNNABLE = 2
    RUNNING = 3
    COMPLETED = 4
    FAILED = 5

  error = _messages.StringField(1)
  jobId = _messages.StringField(2)
  prerequisiteStepIds = _messages.StringField(3, repeated=True)
  state = _messages.EnumField('StateValueValuesEnum', 4)
  stepId = _messages.StringField(5)


class WorkflowTemplate(_messages.Message):
  r"""A Dataproc workflow template resource.

  Messages:
    LabelsValue: Optional. The labels to associate with this template. These
      labels will be propagated to all jobs and clusters created by the
      workflow instance.Label keys must contain 1 to 63 characters, and must
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values
      may be empty, but, if present, must contain 1 to 63 characters, and must
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than
      32 labels can be associated with a template.

  Fields:
    createTime: Output only. The time template was created.
    dagTimeout: Optional. Timeout duration for the DAG of jobs, expressed in
      seconds (see JSON representation of duration
      (https://developers.google.com/protocol-buffers/docs/proto3#json)). The
      timeout duration must be from 10 minutes ("600s") to 24 hours
      ("86400s"). The timer begins when the first job is submitted. If the
      workflow is running at the end of the timeout period, any remaining jobs
      are cancelled, the workflow is ended, and if the workflow was running on
      a managed cluster, the cluster is deleted.
    encryptionConfig: Optional. Encryption settings for encrypting workflow
      template job arguments.
    id: A string attribute.
    jobs: Required. The Directed Acyclic Graph of Jobs to submit.
    labels: Optional. The labels to associate with this template. These labels
      will be propagated to all jobs and clusters created by the workflow
      instance.Label keys must contain 1 to 63 characters, and must conform to
      RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be
      empty, but, if present, must contain 1 to 63 characters, and must
      conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than
      32 labels can be associated with a template.
    name: Output only. The resource name of the workflow template, as
      described in https://cloud.google.com/apis/design/resource_names. For
      projects.regions.workflowTemplates, the resource name of the template
      has the following format:
      projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
      For projects.locations.workflowTemplates, the resource name of the
      template has the following format: projects/{project_id}/locations/{loca
      tion}/workflowTemplates/{template_id}
    parameters: Optional. Template parameters whose values are substituted
      into the template. Values for parameters must be provided when the
      template is instantiated.
    placement: Required. WorkflowTemplate scheduling information.
    updateTime: Output only. The time template was last updated.
    version: Optional. Used to perform a consistent read-modify-write.This
      field should be left blank for a CreateWorkflowTemplate request. It is
      required for an UpdateWorkflowTemplate request, and must match the
      current server version. A typical update template flow would fetch the
      current template with a GetWorkflowTemplate request, which will return
      the current template with the version field filled in with the current
      server version. The user updates other fields in the template, then
      returns it as part of the UpdateWorkflowTemplate request.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. The labels to associate with this template. These labels
    will be propagated to all jobs and clusters created by the workflow
    instance.Label keys must contain 1 to 63 characters, and must conform to
    RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty,
    but, if present, must contain 1 to 63 characters, and must conform to RFC
    1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be
    associated with a template.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  createTime = _messages.StringField(1)
  dagTimeout = _messages.StringField(2)
  encryptionConfig = _messages.MessageField('GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig', 3)
  id = _messages.StringField(4)
  jobs = _messages.MessageField('OrderedJob', 5, repeated=True)
  labels = _messages.MessageField('LabelsValue', 6)
  name = _messages.StringField(7)
  parameters = _messages.MessageField('TemplateParameter', 8, repeated=True)
  placement = _messages.MessageField('WorkflowTemplatePlacement', 9)
  updateTime = _messages.StringField(10)
  version = _messages.IntegerField(11, variant=_messages.Variant.INT32)


class WorkflowTemplatePlacement(_messages.Message):
  r"""Specifies workflow execution target.Either managed_cluster or
  cluster_selector is required.

  Fields:
    clusterSelector: Optional. A selector that chooses target cluster for jobs
      based on metadata.The selector is evaluated at the time each job is
      submitted.
    managedCluster: A cluster that is managed by the workflow.
  """

  clusterSelector = _messages.MessageField('ClusterSelector', 1)
  managedCluster = _messages.MessageField('ManagedCluster', 2)


class WriteSessionSparkApplicationContextRequest(_messages.Message):
  r"""Write Spark Application data to internal storage systems

  Fields:
    parent: Required. Parent (Batch) resource reference.
    sparkWrapperObjects: Required. The batch of spark application context
      objects sent for ingestion.
  """

  parent = _messages.StringField(1)
  sparkWrapperObjects = _messages.MessageField('SparkWrapperObject', 2, repeated=True)


class WriteSessionSparkApplicationContextResponse(_messages.Message):
  r"""Response returned as an acknowledgement of receipt of data."""


class WriteSparkApplicationContextRequest(_messages.Message):
  r"""Write Spark Application data to internal storage systems

  Fields:
    parent: Required. Parent (Batch) resource reference.
    sparkWrapperObjects: A SparkWrapperObject attribute.
  """

  parent = _messages.StringField(1)
  sparkWrapperObjects = _messages.MessageField('SparkWrapperObject', 2, repeated=True)


class WriteSparkApplicationContextResponse(_messages.Message):
  r"""Response returned as an acknowledgement of receipt of data."""


class YarnApplication(_messages.Message):
  r"""A YARN application created by a job. Application information is a subset
  of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta
  Feature: This report is available for testing purposes only. It may be
  changed before final release.

  Enums:
    StateValueValuesEnum: Required. The application state.

  Fields:
    memoryMbSeconds: Optional. The cumulative memory usage of the application
      for a job, measured in mb-seconds.
    name: Required. The application name.
    progress: Required. The numerical progress of the application, from 1 to
      100.
    state: Required. The application state.
    trackingUrl: Optional. The HTTP URL of the ApplicationMaster,
      HistoryServer, or TimelineServer that provides application-specific
      information. The URL uses the internal hostname, and requires a proxy
      server for resolution and, possibly, access.
    vcoreSeconds: Optional. The cumulative CPU time consumed by the
      application for a job, measured in vcore-seconds.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Required. The application state.

    Values:
      STATE_UNSPECIFIED: Status is unspecified.
      NEW: Status is NEW.
      NEW_SAVING: Status is NEW_SAVING.
      SUBMITTED: Status is SUBMITTED.
      ACCEPTED: Status is ACCEPTED.
      RUNNING: Status is RUNNING.
      FINISHED: Status is FINISHED.
      FAILED: Status is FAILED.
      KILLED: Status is KILLED.
    """
    STATE_UNSPECIFIED = 0
    NEW = 1
    NEW_SAVING = 2
    SUBMITTED = 3
    ACCEPTED = 4
    RUNNING = 5
    FINISHED = 6
    FAILED = 7
    KILLED = 8

  memoryMbSeconds = _messages.IntegerField(1)
  name = _messages.StringField(2)
  progress = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
  state = _messages.EnumField('StateValueValuesEnum', 4)
  trackingUrl = _messages.StringField(5)
  vcoreSeconds = _messages.IntegerField(6)


class YarnDriverRunner(_messages.Message):
  r"""Schedule the driver on worker nodes using YARN.

  Fields:
    memoryMb: Optional. The amount of memory in MB the driver is requesting
      from YARN.
    vcores: Optional. The number of vCPUs this driver is requesting from YARN.
  """

  memoryMb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  vcores = _messages.IntegerField(2, variant=_messages.Variant.INT32)


encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
