"""Generated message classes for monitoring version v3.

Manages your Cloud Monitoring data and configurations.
"""
# NOTE: This file is autogenerated and should not be edited by hand.

from __future__ import absolute_import

from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types


package = 'monitoring'


class Aggregation(_messages.Message):
  r"""Describes how to combine multiple time series to provide a different
  view of the data. Aggregation of time series is done in two steps. First,
  each time series in the set is aligned to the same time interval boundaries,
  then the set of time series is optionally reduced in number.Alignment
  consists of applying the per_series_aligner operation to each time series
  after its data has been divided into regular alignment_period time
  intervals. This process takes all of the data points in an alignment period,
  applies a mathematical transformation such as averaging, minimum, maximum,
  delta, etc., and converts them into a single data point per period.Reduction
  is when the aligned and transformed time series can optionally be combined,
  reducing the number of time series through similar mathematical
  transformations. Reduction involves applying a cross_series_reducer to all
  the time series, optionally sorting the time series into subsets with
  group_by_fields, and applying the reducer to each subset.The raw time series
  data can contain a huge amount of information from multiple sources.
  Alignment and reduction transforms this mass of data into a more manageable
  and representative collection of data, for example "the 95% latency across
  the average of all tasks in a cluster". This representative data can be more
  easily graphed and comprehended, and the individual time series data is
  still available for later drilldown. For more details, see Filtering and
  aggregation (https://cloud.google.com/monitoring/api/v3/aggregation).

  Enums:
    CrossSeriesReducerValueValuesEnum: The reduction operation to be used to
      combine time series into a single time series, where the value of each
      data point in the resulting series is a function of all the already
      aligned values in the input time series.Not all reducer operations can
      be applied to all time series. The valid choices depend on the
      metric_kind and the value_type of the original time series. Reduction
      can yield a time series with a different metric_kind or value_type than
      the input time series.Time series data must first be aligned (see
      per_series_aligner) in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified, and must not be ALIGN_NONE. An alignment_period must also be
      specified; otherwise, an error is returned.
    PerSeriesAlignerValueValuesEnum: An Aligner describes how to bring the
      data points in a single time series into temporal alignment. Except for
      ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.

  Fields:
    alignmentPeriod: The alignment_period specifies a time interval, in
      seconds, that is used to divide the data in all the time series into
      consistent blocks of time. This will be done before the per-series
      aligner can be applied to the data.The value must be at least 60
      seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    crossSeriesReducer: The reduction operation to be used to combine time
      series into a single time series, where the value of each data point in
      the resulting series is a function of all the already aligned values in
      the input time series.Not all reducer operations can be applied to all
      time series. The valid choices depend on the metric_kind and the
      value_type of the original time series. Reduction can yield a time
      series with a different metric_kind or value_type than the input time
      series.Time series data must first be aligned (see per_series_aligner)
      in order to perform cross-time series reduction. If cross_series_reducer
      is specified, then per_series_aligner must be specified, and must not be
      ALIGN_NONE. An alignment_period must also be specified; otherwise, an
      error is returned.
    groupByFields: The set of fields to preserve when cross_series_reducer is
      specified. The group_by_fields determine how the time series are
      partitioned into subsets prior to applying the aggregation operation.
      Each subset contains time series that have the same value for each of
      the grouping fields. Each individual time series is a member of exactly
      one subset. The cross_series_reducer is applied to each subset of time
      series. It is not possible to reduce across different resource types, so
      this field implicitly contains resource.type. Fields not specified in
      group_by_fields are aggregated away. If group_by_fields is not specified
      and all the time series have the same resource type, then the time
      series are aggregated into a single output time series. If
      cross_series_reducer is not defined, this field is ignored.
    perSeriesAligner: An Aligner describes how to bring the data points in a
      single time series into temporal alignment. Except for ALIGN_NONE, all
      alignments cause all the data points in an alignment_period to be
      mathematically grouped together, resulting in a single data point for
      each alignment_period with end timestamp at the end of the period.Not
      all alignment operations may be applied to all time series. The valid
      choices depend on the metric_kind and value_type of the original time
      series. Alignment can change the metric_kind or the value_type of the
      time series.Time series data must be aligned in order to perform cross-
      time series reduction. If cross_series_reducer is specified, then
      per_series_aligner must be specified and not equal to ALIGN_NONE and
      alignment_period must be specified; otherwise, an error is returned.
  """

  class CrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class PerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  alignmentPeriod = _messages.StringField(1)
  crossSeriesReducer = _messages.EnumField('CrossSeriesReducerValueValuesEnum', 2)
  groupByFields = _messages.StringField(3, repeated=True)
  perSeriesAligner = _messages.EnumField('PerSeriesAlignerValueValuesEnum', 4)
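
# A minimal usage sketch (illustrative only, not generated code): align each
# time series to 5-minute means, then sum across series while preserving a
# grouping label. The alignment period and group-by field below are
# placeholder choices.
#
#   aggregation = Aggregation(
#       alignmentPeriod='300s',
#       perSeriesAligner=(
#           Aggregation.PerSeriesAlignerValueValuesEnum.ALIGN_MEAN),
#       crossSeriesReducer=(
#           Aggregation.CrossSeriesReducerValueValuesEnum.REDUCE_SUM),
#       groupByFields=['resource.labels.zone'],
#   )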


class Alert(_messages.Message):
  r"""An alert is the representation of a violation of an alert policy. It is
  a read-only resource that cannot be modified by the accompanied API.

  Enums:
    StateValueValuesEnum: Output only. The current state of the alert.

  Fields:
    closeTime: The time when the alert was closed.
    log: The log information associated with the alert. This field is only
      populated for log-based alerts.
    metadata: The metadata of the monitored resource.
    metric: The metric type and any metric labels preserved from the
      incident's generating condition.
    name: Identifier. The name of the alert.The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alerts/[ALERT_ID] The [ALERT_ID] is a
      system-assigned unique identifier for the alert.
    openTime: The time when the alert was opened.
    policy: The snapshot of the alert policy that generated this alert.
    resource: The monitored resource type and any monitored resource labels
      preserved from the incident's generating condition.
    state: Output only. The current state of the alert.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The current state of the alert.

    Values:
      STATE_UNSPECIFIED: The alert state is unspecified.
      OPEN: The alert is open.
      CLOSED: The alert is closed.
    """
    STATE_UNSPECIFIED = 0
    OPEN = 1
    CLOSED = 2

  closeTime = _messages.StringField(1)
  log = _messages.MessageField('LogMetadata', 2)
  metadata = _messages.MessageField('MonitoredResourceMetadata', 3)
  metric = _messages.MessageField('Metric', 4)
  name = _messages.StringField(5)
  openTime = _messages.StringField(6)
  policy = _messages.MessageField('PolicySnapshot', 7)
  resource = _messages.MessageField('MonitoredResource', 8)
  state = _messages.EnumField('StateValueValuesEnum', 9)


class AlertPolicy(_messages.Message):
  r"""A description of the conditions under which some aspect of your system
  is considered to be "unhealthy" and the ways to notify people or services
  about this state. For an overview of alerting policies, see Introduction to
  Alerting (https://cloud.google.com/monitoring/alerts/).

  Enums:
    CombinerValueValuesEnum: How to combine the results of multiple conditions
      to determine if an incident should be opened. If
      condition_time_series_query_language is present, this must be
      COMBINE_UNSPECIFIED.
    SeverityValueValuesEnum: Optional. The severity of an alerting policy
      indicates how important incidents generated by that policy are. The
      severity level will be displayed on the Incident detail page and in
      notifications.

  Messages:
    UserLabelsValue: User-supplied key/value data to be used for organizing
      and identifying the AlertPolicy objects.The field can contain up to 64
      entries. Each key and value is limited to 63 Unicode characters or 128
      bytes, whichever is smaller. Labels and values can contain only
      lowercase letters, numerals, underscores, and dashes. Keys must begin
      with a letter.Note that Prometheus {alert name} is a valid Prometheus
      label name (https://prometheus.io/docs/concepts/data_model/#metric-
      names-and-labels), whereas Prometheus {rule group} is an unrestricted
      UTF-8 string. This means that they cannot be stored as-is in user
      labels, because they may contain characters that are not allowed in
      user-label values.

  Fields:
    alertStrategy: Control over how this alerting policy's notification
      channels are notified.
    combiner: How to combine the results of multiple conditions to determine
      if an incident should be opened. If condition_time_series_query_language
      is present, this must be COMBINE_UNSPECIFIED.
    conditions: A list of conditions for the policy. The conditions are
      combined by AND or OR according to the combiner field. If the combined
      conditions evaluate to true, then an incident is created. A policy can
      have from one to six conditions. If condition_time_series_query_language
      is present, it must be the only condition. If
      condition_monitoring_query_language is present, it must be the only
      condition.
    creationRecord: A read-only record of the creation of the alerting policy.
      If provided in a call to create or update, this field will be ignored.
    displayName: A short name or phrase used to identify the policy in
      dashboards, notifications, and incidents. To avoid confusion, don't use
      the same display name for multiple policies in the same project. The
      name is limited to 512 Unicode characters.The convention for the
      display_name of a PrometheusQueryLanguageCondition is "{rule group
      name}/{alert name}", where the {rule group name} and {alert name} should
      be taken from the corresponding Prometheus configuration file. This
      convention is not enforced. In any case the display_name is not a unique
      key of the AlertPolicy.
    documentation: Documentation that is included with notifications and
      incidents related to this policy. Best practice is for the documentation
      to include information to help responders understand, mitigate,
      escalate, and correct the underlying problems detected by the alerting
      policy. Notification channels that have limited capacity might not show
      this documentation.
    enabled: Whether or not the policy is enabled. On write, the default
      interpretation if unset is that the policy is enabled. On read, clients
      should not make any assumption about the state if it has not been
      populated. The field should always be populated on List and Get
      operations, unless a field projection has been specified that strips it
      out.
    mutationRecord: A read-only record of the most recent change to the
      alerting policy. If provided in a call to create or update, this field
      will be ignored.
    name: Identifier. Required if the policy exists. The resource name for
      this policy. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
      [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is
      created. When calling the alertPolicies.create method, do not include
      the name field in the alerting policy passed as part of the request.
    notificationChannels: Identifies the notification channels to which
      notifications should be sent when incidents are opened or closed or when
      new violations occur on an already opened incident. Each element of this
      array corresponds to the name field in each of the NotificationChannel
      objects that are returned from the ListNotificationChannels method. The
      format of the entries in this field is:
      projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
    severity: Optional. The severity of an alerting policy indicates how
      important incidents generated by that policy are. The severity level
      will be displayed on the Incident detail page and in notifications.
    userLabels: User-supplied key/value data to be used for organizing and
      identifying the AlertPolicy objects.The field can contain up to 64
      entries. Each key and value is limited to 63 Unicode characters or 128
      bytes, whichever is smaller. Labels and values can contain only
      lowercase letters, numerals, underscores, and dashes. Keys must begin
      with a letter.Note that Prometheus {alert name} is a valid Prometheus
      label name (https://prometheus.io/docs/concepts/data_model/#metric-
      names-and-labels), whereas Prometheus {rule group} is an unrestricted
      UTF-8 string. This means that they cannot be stored as-is in user
      labels, because they may contain characters that are not allowed in
      user-label values.
    validity: Read-only description of how the alerting policy is invalid.
      This field is only set when the alerting policy is invalid. An invalid
      alerting policy will not generate incidents.
  """

  class CombinerValueValuesEnum(_messages.Enum):
    r"""How to combine the results of multiple conditions to determine if an
    incident should be opened. If condition_time_series_query_language is
    present, this must be COMBINE_UNSPECIFIED.

    Values:
      COMBINE_UNSPECIFIED: An unspecified combiner.
      AND: Combine conditions using the logical AND operator. An incident is
        created only if all the conditions are met simultaneously. This
        combiner is satisfied if all conditions are met, even if they are met
        on completely different resources.
      OR: Combine conditions using the logical OR operator. An incident is
        created if any of the listed conditions is met.
      AND_WITH_MATCHING_RESOURCE: Combine conditions using logical AND
        operator, but unlike the regular AND option, an incident is created
        only if all conditions are met simultaneously on at least one
        resource.
    """
    COMBINE_UNSPECIFIED = 0
    AND = 1
    OR = 2
    AND_WITH_MATCHING_RESOURCE = 3

  class SeverityValueValuesEnum(_messages.Enum):
    r"""Optional. The severity of an alerting policy indicates how important
    incidents generated by that policy are. The severity level will be
    displayed on the Incident detail page and in notifications.

    Values:
      SEVERITY_UNSPECIFIED: No severity is specified. This is the default
        value.
      CRITICAL: This is the highest severity level. Use this if the problem
        could cause significant damage or downtime.
      ERROR: This is the medium severity level. Use this if the problem could
        cause minor damage or downtime.
      WARNING: This is the lowest severity level. Use this if the problem is
        not causing any damage or downtime, but could potentially lead to a
        problem in the future.
    """
    SEVERITY_UNSPECIFIED = 0
    CRITICAL = 1
    ERROR = 2
    WARNING = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""User-supplied key/value data to be used for organizing and identifying
    the AlertPolicy objects.The field can contain up to 64 entries. Each key
    and value is limited to 63 Unicode characters or 128 bytes, whichever is
    smaller. Labels and values can contain only lowercase letters, numerals,
    underscores, and dashes. Keys must begin with a letter.Note that
    Prometheus {alert name} is a valid Prometheus label name
    (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels),
    whereas Prometheus {rule group} is an unrestricted UTF-8 string. This
    means that they cannot be stored as-is in user labels, because they may
    contain characters that are not allowed in user-label values.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  alertStrategy = _messages.MessageField('AlertStrategy', 1)
  combiner = _messages.EnumField('CombinerValueValuesEnum', 2)
  conditions = _messages.MessageField('Condition', 3, repeated=True)
  creationRecord = _messages.MessageField('MutationRecord', 4)
  displayName = _messages.StringField(5)
  documentation = _messages.MessageField('Documentation', 6)
  enabled = _messages.BooleanField(7)
  mutationRecord = _messages.MessageField('MutationRecord', 8)
  name = _messages.StringField(9)
  notificationChannels = _messages.StringField(10, repeated=True)
  severity = _messages.EnumField('SeverityValueValuesEnum', 11)
  userLabels = _messages.MessageField('UserLabelsValue', 12)
  validity = _messages.MessageField('Status', 13)
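
# A minimal construction sketch (illustrative only; 'my-project', the channel
# ID, and the condition contents are placeholders, and MetricThreshold is
# defined elsewhere in this module): a policy that opens an incident when any
# one of its conditions is met.
#
#   policy = AlertPolicy(
#       displayName='High error rate',
#       combiner=AlertPolicy.CombinerValueValuesEnum.OR,
#       conditions=[
#           Condition(
#               displayName='Error rate above threshold',
#               conditionThreshold=MetricThreshold(...)),  # fields elided
#       ],
#       notificationChannels=[
#           'projects/my-project/notificationChannels/1234567890'],
#       severity=AlertPolicy.SeverityValueValuesEnum.WARNING,
#   )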


class AlertStrategy(_messages.Message):
  r"""Control over how the notification channels in notification_channels are
  notified when this alert fires.

  Enums:
    NotificationPromptsValueListEntryValuesEnum:

  Fields:
    autoClose: If an alerting policy that was active has no data for this
      long, any open incidents will close.
    notificationChannelStrategy: Control how notifications will be sent out,
      on a per-channel basis.
    notificationPrompts: For log-based alert policies, the notification
      prompt is always OPENED. For non-log-based alert policies, the
      notification prompts can be OPENED or OPENED, CLOSED.
    notificationRateLimit: Required for log-based alerting policies, i.e.
      policies with a LogMatch condition.This limit is not implemented for
      alerting policies that do not have a LogMatch condition.
  """

  class NotificationPromptsValueListEntryValuesEnum(_messages.Enum):
    r"""NotificationPromptsValueListEntryValuesEnum enum type.

    Values:
      NOTIFICATION_PROMPT_UNSPECIFIED: No strategy specified. Treated as
        error.
      OPENED: Notify when an incident is opened.
      CLOSED: Notify when an incident is closed.
    """
    NOTIFICATION_PROMPT_UNSPECIFIED = 0
    OPENED = 1
    CLOSED = 2

  autoClose = _messages.StringField(1)
  notificationChannelStrategy = _messages.MessageField('NotificationChannelStrategy', 2, repeated=True)
  notificationPrompts = _messages.EnumField('NotificationPromptsValueListEntryValuesEnum', 3, repeated=True)
  notificationRateLimit = _messages.MessageField('NotificationRateLimit', 4)
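
# An illustrative sketch (the duration value is a placeholder): auto-close
# open incidents after 30 minutes without data and notify on both open and
# close.
#
#   strategy = AlertStrategy(
#       autoClose='1800s',
#       notificationPrompts=[
#           AlertStrategy.NotificationPromptsValueListEntryValuesEnum.OPENED,
#           AlertStrategy.NotificationPromptsValueListEntryValuesEnum.CLOSED,
#       ],
#   )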


class AppEngine(_messages.Message):
  r"""App Engine service. Learn more at https://cloud.google.com/appengine.

  Fields:
    moduleId: The ID of the App Engine module underlying this service.
      Corresponds to the module_id resource label in the gae_app monitored
      resource
      (https://cloud.google.com/monitoring/api/resources#tag_gae_app).
  """

  moduleId = _messages.StringField(1)


class AvailabilityCriteria(_messages.Message):
  r"""Future parameters for the availability SLI."""


class BasicAuthentication(_messages.Message):
  r"""The authentication parameters to provide to the specified resource or
  URL that requires a username and password. Currently, only Basic HTTP
  authentication (https://tools.ietf.org/html/rfc7617) is supported in Uptime
  checks.

  Fields:
    password: The password to use when authenticating with the HTTP server.
    username: The username to use when authenticating with the HTTP server.
  """

  password = _messages.StringField(1)
  username = _messages.StringField(2)


class BasicService(_messages.Message):
  r"""A well-known service type, defined by its service type and service
  labels. Documentation and examples here
  (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-
  structures#basic-svc-w-basic-sli).

  Messages:
    ServiceLabelsValue: Labels that specify the resource that emits the
      monitoring data which is used for SLO reporting of this Service.
      Documentation and valid values for given service types here
      (https://cloud.google.com/stackdriver/docs/solutions/slo-
      monitoring/api/api-structures#basic-svc-w-basic-sli).

  Fields:
    serviceLabels: Labels that specify the resource that emits the monitoring
      data which is used for SLO reporting of this Service. Documentation and
      valid values for given service types here
      (https://cloud.google.com/stackdriver/docs/solutions/slo-
      monitoring/api/api-structures#basic-svc-w-basic-sli).
    serviceType: The type of service that this basic service defines, e.g.
      APP_ENGINE service type. Documentation and valid values here
      (https://cloud.google.com/stackdriver/docs/solutions/slo-
      monitoring/api/api-structures#basic-svc-w-basic-sli).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ServiceLabelsValue(_messages.Message):
    r"""Labels that specify the resource that emits the monitoring data which
    is used for SLO reporting of this Service. Documentation and valid values
    for given service types here
    (https://cloud.google.com/stackdriver/docs/solutions/slo-
    monitoring/api/api-structures#basic-svc-w-basic-sli).

    Messages:
      AdditionalProperty: An additional property for a ServiceLabelsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ServiceLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ServiceLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  serviceLabels = _messages.MessageField('ServiceLabelsValue', 1)
  serviceType = _messages.StringField(2)
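
# An illustrative sketch (the label key and value are placeholders; valid
# service types and labels are listed in the documentation linked above):
#
#   service = BasicService(
#       serviceType='APP_ENGINE',
#       serviceLabels=BasicService.ServiceLabelsValue(
#           additionalProperties=[
#               BasicService.ServiceLabelsValue.AdditionalProperty(
#                   key='module_id', value='default'),
#           ]),
#   )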


class BasicSli(_messages.Message):
  r"""An SLI measuring performance on a well-known service type. Performance
  will be computed on the basis of pre-defined metrics. The type of the
  service_resource determines the metrics to use and the
  service_resource.labels and metric_labels are used to construct a monitoring
  filter to filter that metric down to just the data relevant to this service.

  Fields:
    availability: Good service is defined to be the count of requests made to
      this service that return successfully.
    latency: Good service is defined to be the count of requests made to this
      service that are fast enough with respect to latency.threshold.
    location: OPTIONAL: The set of locations to which this SLI is relevant.
      Telemetry from other locations will not be used to calculate performance
      for this SLI. If omitted, this SLI applies to all locations in which the
      Service has activity. For service types that don't support breaking down
      by location, setting this field will result in an error.
    method: OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry
      from other methods will not be used to calculate performance for this
      SLI. If omitted, this SLI applies to all the Service's methods. For
      service types that don't support breaking down by method, setting this
      field will result in an error.
    version: OPTIONAL: The set of API versions to which this SLI is relevant.
      Telemetry from other API versions will not be used to calculate
      performance for this SLI. If omitted, this SLI applies to all API
      versions. For service types that don't support breaking down by version,
      setting this field will result in an error.
  """

  availability = _messages.MessageField('AvailabilityCriteria', 1)
  latency = _messages.MessageField('LatencyCriteria', 2)
  location = _messages.StringField(3, repeated=True)
  method = _messages.StringField(4, repeated=True)
  version = _messages.StringField(5, repeated=True)
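
# An illustrative sketch (the location value is a placeholder): an
# availability SLI restricted to telemetry from a single location.
#
#   sli = BasicSli(
#       availability=AvailabilityCriteria(),
#       location=['us-central1-a'],
#   )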


class BooleanTest(_messages.Message):
  r"""A test that uses an alerting result in a boolean column produced by the
  SQL query.

  Fields:
    column: Required. The name of the column containing the boolean value. If
      the value in a row is NULL, that row is ignored.
  """

  column = _messages.StringField(1)


class BucketOptions(_messages.Message):
  r"""BucketOptions describes the bucket boundaries used to create a histogram
  for the distribution. The buckets can be in a linear sequence, an
  exponential sequence, or each bucket can be specified explicitly.
  BucketOptions does not include the number of values in each bucket.A bucket
  has an inclusive lower bound and exclusive upper bound for the values that
  are counted for that bucket. The upper bound of a bucket must be strictly
  greater than the lower bound. The sequence of N buckets for a distribution
  consists of an underflow bucket (number 0), zero or more finite buckets
  (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets
  are contiguous: the lower bound of bucket i (i > 0) is the same as the upper
  bound of bucket i - 1. The buckets span the whole range of finite values:
  lower bound of the underflow bucket is -infinity and the upper bound of the
  overflow bucket is +infinity. The finite buckets are so-called because both
  bounds are finite.

  Fields:
    explicitBuckets: The explicit buckets.
    exponentialBuckets: The exponential buckets.
    linearBuckets: The linear buckets.
  """

  explicitBuckets = _messages.MessageField('Explicit', 1)
  exponentialBuckets = _messages.MessageField('Exponential', 2)
  linearBuckets = _messages.MessageField('Linear', 3)
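
# A worked numbering example for the description above (no code constructed
# here; Explicit, Exponential, and Linear are defined elsewhere in this
# module). Explicit boundaries 0, 10, 20 yield N = 4 buckets:
#
#   bucket 0 (underflow): (-infinity, 0)
#   bucket 1 (finite):    [0, 10)
#   bucket 2 (finite):    [10, 20)
#   bucket 3 (overflow):  [20, +infinity)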


class CloudEndpoints(_messages.Message):
  r"""Cloud Endpoints service. Learn more at
  https://cloud.google.com/endpoints.

  Fields:
    service: The name of the Cloud Endpoints service underlying this service.
      Corresponds to the service resource label in the api monitored resource
      (https://cloud.google.com/monitoring/api/resources#tag_api).
  """

  service = _messages.StringField(1)


class CloudFunctionV2Target(_messages.Message):
  r"""A Synthetic Monitor deployed to a Cloud Functions V2 instance.

  Fields:
    cloudRunRevision: Output only. The cloud_run_revision Monitored Resource
      associated with the GCFv2 function. The Synthetic Monitor execution
      results (metrics, logs, and spans) are reported against this Monitored
      Resource.
    name: Required. Fully qualified GCFv2 resource name, i.e.
      projects/{project}/locations/{location}/functions/{function}
  """

  cloudRunRevision = _messages.MessageField('MonitoredResource', 1)
  name = _messages.StringField(2)


class CloudRun(_messages.Message):
  r"""Cloud Run service. Learn more at https://cloud.google.com/run.

  Fields:
    location: The location in which the service is run. Corresponds to the
      location resource label in the cloud_run_revision monitored resource
      (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).
    serviceName: The name of the Cloud Run service. Corresponds to the
      service_name resource label in the cloud_run_revision monitored resource
      (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revisio
      n).
  """

  location = _messages.StringField(1)
  serviceName = _messages.StringField(2)


class ClusterIstio(_messages.Message):
  r"""Istio service scoped to a single Kubernetes cluster. Learn more at
  https://istio.io. Clusters running OSS Istio will have their services
  ingested as this type.

  Fields:
    clusterName: The name of the Kubernetes cluster in which this Istio
      service is defined. Corresponds to the cluster_name resource label in
      k8s_cluster resources.
    location: The location of the Kubernetes cluster in which this Istio
      service is defined. Corresponds to the location resource label in
      k8s_cluster resources.
    serviceName: The name of the Istio service underlying this service.
      Corresponds to the destination_service_name metric label in Istio
      metrics.
    serviceNamespace: The namespace of the Istio service underlying this
      service. Corresponds to the destination_service_namespace metric label
      in Istio metrics.
  """

  clusterName = _messages.StringField(1)
  location = _messages.StringField(2)
  serviceName = _messages.StringField(3)
  serviceNamespace = _messages.StringField(4)


class CollectdPayload(_messages.Message):
  r"""A collection of data points sent from a collectd-based plugin. See the
  collectd documentation for more information.

  Messages:
    MetadataValue: The measurement metadata. Example: "process_id" -> 12345

  Fields:
    endTime: The end time of the interval.
    metadata: The measurement metadata. Example: "process_id" -> 12345
    plugin: The name of the plugin. Example: "disk".
    pluginInstance: The instance name of the plugin. Example: "hdcl".
    startTime: The start time of the interval.
    type: The measurement type. Example: "memory".
    typeInstance: The measurement type instance. Example: "used".
    values: The measured values during this time interval. Each value must
      have a different data_source_name.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""The measurement metadata. Example: "process_id" -> 12345

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A TypedValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('TypedValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  endTime = _messages.StringField(1)
  metadata = _messages.MessageField('MetadataValue', 2)
  plugin = _messages.StringField(3)
  pluginInstance = _messages.StringField(4)
  startTime = _messages.StringField(5)
  type = _messages.StringField(6)
  typeInstance = _messages.StringField(7)
  values = _messages.MessageField('CollectdValue', 8, repeated=True)
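
# An illustrative sketch using the example values from the field descriptions
# above (timestamps and the measured value are placeholders; TypedValue is
# defined elsewhere in this module):
#
#   payload = CollectdPayload(
#       plugin='disk',
#       pluginInstance='hdcl',
#       type='memory',
#       typeInstance='used',
#       startTime='2024-01-01T00:00:00Z',
#       endTime='2024-01-01T00:01:00Z',
#       values=[CollectdValue(
#           dataSourceName='value',
#           dataSourceType=CollectdValue.DataSourceTypeValueValuesEnum.GAUGE,
#           value=TypedValue(doubleValue=0.75))],
#   )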


class CollectdPayloadError(_messages.Message):
  r"""Describes the error status for payloads that were not written.

  Fields:
    error: Records the error status for the payload. If this field is present,
      the partial errors for nested values won't be populated.
    index: The zero-based index in
      CreateCollectdTimeSeriesRequest.collectd_payloads.
    valueErrors: Records the error status for values that were not written due
      to an error.Failed payloads for which nothing is written will not
      include partial value errors.
  """

  error = _messages.MessageField('Status', 1)
  index = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  valueErrors = _messages.MessageField('CollectdValueError', 3, repeated=True)


class CollectdValue(_messages.Message):
  r"""A single data point from a collectd-based plugin.

  Enums:
    DataSourceTypeValueValuesEnum: The type of measurement.

  Fields:
    dataSourceName: The data source for the collectd value. For example, there
      are two data sources for network measurements: "rx" and "tx".
    dataSourceType: The type of measurement.
    value: The measurement value.
  """

  class DataSourceTypeValueValuesEnum(_messages.Enum):
    r"""The type of measurement.

    Values:
      UNSPECIFIED_DATA_SOURCE_TYPE: An unspecified data source type. This
        corresponds to
        google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.
      GAUGE: An instantaneous measurement of a varying quantity. This
        corresponds to google.api.MetricDescriptor.MetricKind.GAUGE.
      COUNTER: A cumulative value over time. This corresponds to
        google.api.MetricDescriptor.MetricKind.CUMULATIVE.
      DERIVE: A rate of change of the measurement.
      ABSOLUTE: An amount of change since the last measurement interval. This
        corresponds to google.api.MetricDescriptor.MetricKind.DELTA.
    """
    UNSPECIFIED_DATA_SOURCE_TYPE = 0
    GAUGE = 1
    COUNTER = 2
    DERIVE = 3
    ABSOLUTE = 4

  dataSourceName = _messages.StringField(1)
  dataSourceType = _messages.EnumField('DataSourceTypeValueValuesEnum', 2)
  value = _messages.MessageField('TypedValue', 3)


class CollectdValueError(_messages.Message):
  r"""Describes the error status for values that were not written.

  Fields:
    error: Records the error status for the value.
    index: The zero-based index in CollectdPayload.values within the parent
      CreateCollectdTimeSeriesRequest.collectd_payloads.
  """

  error = _messages.MessageField('Status', 1)
  index = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class Condition(_messages.Message):
  r"""A condition is a true/false test that determines when an alerting policy
  should open an incident. If a condition evaluates to true, it signifies that
  something is wrong.

  Fields:
    conditionAbsent: A condition that checks that a time series continues to
      receive new data points.
    conditionMatchedLog: A condition that checks for log messages matching
      given constraints. If set, no other conditions can be present.
    conditionMonitoringQueryLanguage: A condition that uses the Monitoring
      Query Language to define alerts.
    conditionPrometheusQueryLanguage: A condition that uses the Prometheus
      query language to define alerts.
    conditionSql: A condition that periodically evaluates a SQL query result.
    conditionThreshold: A condition that compares a time series against a
      threshold.
    displayName: A short name or phrase used to identify the condition in
      dashboards, notifications, and incidents. To avoid confusion, don't use
      the same display name for multiple conditions in the same policy.
    name: Required if the condition exists. The unique resource name for this
      condition. Its format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/
      [POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by
      Cloud Monitoring when the condition is created as part of a new or
      updated alerting policy.When calling the alertPolicies.create method, do
      not include the name field in the conditions of the requested alerting
      policy. Cloud Monitoring creates the condition identifiers and includes
      them in the new policy.When calling the alertPolicies.update method to
      update a policy, including a condition name causes the existing
      condition to be updated. Conditions without names are added to the
      updated policy. Existing conditions are deleted if they are not
      updated.Best practice is to preserve [CONDITION_ID] if you make only
      small changes, such as those to condition thresholds, durations, or
      trigger values. Otherwise, treat the change as a new condition and let
      the existing condition be deleted.
  """

  conditionAbsent = _messages.MessageField('MetricAbsence', 1)
  conditionMatchedLog = _messages.MessageField('LogMatch', 2)
  conditionMonitoringQueryLanguage = _messages.MessageField('MonitoringQueryLanguageCondition', 3)
  conditionPrometheusQueryLanguage = _messages.MessageField('PrometheusQueryLanguageCondition', 4)
  conditionSql = _messages.MessageField('SqlCondition', 5)
  conditionThreshold = _messages.MessageField('MetricThreshold', 6)
  displayName = _messages.StringField(7)
  name = _messages.StringField(8)


class ContentMatcher(_messages.Message):
  r"""Optional. Used to perform content matching. This allows matching based
  on substrings and regular expressions, together with their negations. Only
  the first 4 MB of an HTTP or HTTPS check's response (and the first 1 MB of a
  TCP check's response) are examined for purposes of content matching.

  Enums:
    MatcherValueValuesEnum: The type of content matcher that will be applied
      to the server output, compared to the content string when the check is
      run.

  Fields:
    content: String, regex or JSON content to match. Maximum 1024 bytes. An
      empty content string indicates no content matching is to be performed.
    jsonPathMatcher: Matcher information for MATCHES_JSON_PATH and
      NOT_MATCHES_JSON_PATH.
    matcher: The type of content matcher that will be applied to the server
      output, compared to the content string when the check is run.
  """

  class MatcherValueValuesEnum(_messages.Enum):
    r"""The type of content matcher that will be applied to the server output,
    compared to the content string when the check is run.

    Values:
      CONTENT_MATCHER_OPTION_UNSPECIFIED: No content matcher type specified
        (maintained for backward compatibility, but deprecated for future
        use). Treated as CONTAINS_STRING.
      CONTAINS_STRING: Selects substring matching. The match succeeds if the
        output contains the content string. This is the default value for
        checks without a matcher option, or where the value of matcher is
        CONTENT_MATCHER_OPTION_UNSPECIFIED.
      NOT_CONTAINS_STRING: Selects negation of substring matching. The match
        succeeds if the output does NOT contain the content string.
      MATCHES_REGEX: Selects regular-expression matching. The match succeeds
        if the output matches the regular expression specified in the content
        string. Regex matching is only supported for HTTP/HTTPS checks.
      NOT_MATCHES_REGEX: Selects negation of regular-expression matching. The
        match succeeds if the output does NOT match the regular expression
        specified in the content string. Regex matching is only supported for
        HTTP/HTTPS checks.
      MATCHES_JSON_PATH: Selects JSONPath matching. See JsonPathMatcher for
        details on when the match succeeds. JSONPath matching is only
        supported for HTTP/HTTPS checks.
      NOT_MATCHES_JSON_PATH: Selects JSONPath matching. See JsonPathMatcher
        for details on when the match succeeds. Succeeds when output does NOT
        match as specified. JSONPath is only supported for HTTP/HTTPS checks.
    """
    CONTENT_MATCHER_OPTION_UNSPECIFIED = 0
    CONTAINS_STRING = 1
    NOT_CONTAINS_STRING = 2
    MATCHES_REGEX = 3
    NOT_MATCHES_REGEX = 4
    MATCHES_JSON_PATH = 5
    NOT_MATCHES_JSON_PATH = 6

  content = _messages.StringField(1)
  jsonPathMatcher = _messages.MessageField('JsonPathMatcher', 2)
  matcher = _messages.EnumField('MatcherValueValuesEnum', 3)
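
# An illustrative sketch (the content string is a placeholder): the check
# succeeds only if the response body contains the given substring.
#
#   content_matcher = ContentMatcher(
#       content='"status": "ok"',
#       matcher=ContentMatcher.MatcherValueValuesEnum.CONTAINS_STRING,
#   )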


class CreateCollectdTimeSeriesRequest(_messages.Message):
  r"""The CreateCollectdTimeSeries request.

  Fields:
    collectdPayloads: The collectd payloads representing the time series data.
      You must not include more than a single point for each time series, so
      no two payloads can have the same values for all of the fields plugin,
      plugin_instance, type, and type_instance.
    collectdVersion: The version of collectd that collected the data. Example:
      "5.3.0-192.el6".
    resource: The monitored resource associated with the time series.
  """

  collectdPayloads = _messages.MessageField('CollectdPayload', 1, repeated=True)
  collectdVersion = _messages.StringField(2)
  resource = _messages.MessageField('MonitoredResource', 3)


class CreateCollectdTimeSeriesResponse(_messages.Message):
  r"""The CreateCollectdTimeSeries response.

  Fields:
    payloadErrors: Records the error status for points that were not written
      due to an error in the request.Failed requests for which nothing is
      written will return an error response instead. Requests where data
      points were rejected by the backend will set summary instead.
    summary: Aggregate statistics from writing the payloads. This field is
      omitted if all points were successfully written, so that the response is
      empty. This is for backwards compatibility with clients that log errors
      on any non-empty response.
  """

  payloadErrors = _messages.MessageField('CollectdPayloadError', 1, repeated=True)
  summary = _messages.MessageField('CreateTimeSeriesSummary', 2)


class CreateTimeSeriesRequest(_messages.Message):
  r"""The CreateTimeSeries request.

  Fields:
    timeSeries: Required. The new data to be added to a list of time series.
      Adds at most one data point to each of several time series. The new data
      point must be more recent than any other point in its time series. Each
      TimeSeries value must fully specify a unique time series by supplying
      all label values for the metric and the monitored resource.The maximum
      number of TimeSeries objects per Create request is 200.
  """

  timeSeries = _messages.MessageField('TimeSeries', 1, repeated=True)
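
# An illustrative sketch (time_series stands for a fully specified TimeSeries
# message built elsewhere; TimeSeries is defined later in this module). At
# most 200 TimeSeries objects may be sent per request, each carrying at most
# one new data point.
#
#   request = CreateTimeSeriesRequest(timeSeries=[time_series])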


class CreateTimeSeriesSummary(_messages.Message):
  r"""Summary of the result of a failed request to write data to a time
  series.

  Fields:
    errors: The errors encountered while writing the data, grouped by error
      category; each entry records the number of points affected. Order is not
      guaranteed.
    successPointCount: The number of points that were successfully written.
    totalPointCount: The number of points in the request.
  """

  errors = _messages.MessageField('Error', 1, repeated=True)
  successPointCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  totalPointCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
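

# Illustrative sketch: per the field docs above, the points in a request are
# either written successfully or counted under one of the error entries, so
# the per-error point counts plus successPointCount should add up to
# totalPointCount. A caller might run a check like this when handling a
# partially failed write.
def _example_summary_is_consistent(summary):
  failed = sum(error.pointCount or 0 for error in summary.errors)
  return ((summary.successPointCount or 0) + failed ==
          (summary.totalPointCount or 0))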


class Criteria(_messages.Message):
  r"""Criteria specific to the AlertPolicys that this Snooze applies to. The
  Snooze will suppress alerts that come from one of the AlertPolicys whose
  names are supplied.

  Fields:
    filter: Optional. When you define a snooze, you can also define a filter
      for that snooze. The filter is a string containing one or more key-value
      pairs. The string uses the standard https://google.aip.dev/160 filter
      syntax. If you define a filter for a snooze, then the snooze can only
      apply to one alert policy. When the snooze is active, incidents won't be
      created when the incident would have key-value pairs (labels) that match
      those specified by the filter in the snooze.Snooze filters support
      resource, metric, and metadata labels. If multiple labels are used, then
      they must be connected with an AND operator. For example, the following
      filter applies the snooze to incidents that have a resource label with
      an instance ID of 1234567890, a metric label with an instance name of
      test_group, a metadata user label with a key of foo and a value of bar,
      and a metadata system label with a key of region and a value of us-
      central1: "filter": "resource.labels.instance_id=\"1234567890\" AND
      metric.labels.instance_name=\"test_group\" AND
      metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-
      central1\""
    policies: The specific AlertPolicy names for the alert that should be
      snoozed. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a
      limit of 16 policies per snooze. This limit is checked during snooze
      creation. Exactly 1 alert policy is required if filter is specified at
      the same time.
  """

  filter = _messages.StringField(1)
  policies = _messages.StringField(2, repeated=True)
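

# Illustrative sketch: a Criteria for a Snooze, combining a single alert
# policy with a label filter as described above. Note that when a filter is
# set, the snooze may name exactly one policy. The filter values come from
# the example in the docstring; the policy name keeps the documented
# placeholder format.
def _example_snooze_criteria():
  return Criteria(
      filter='resource.labels.instance_id="1234567890" AND '
             'metric.labels.instance_name="test_group"',
      policies=['projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]'])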


class Custom(_messages.Message):
  r"""Use a custom service to designate a service that you want to monitor
  when none of the other service types (like App Engine, Cloud Run, or a GKE
  type) matches your intended service.
  """



class Daily(_messages.Message):
  r"""Used to schedule the query to run every so many days.

  Fields:
    executionTime: Optional. The time of day (in UTC) at which the query
      should run. If left unspecified, the server picks an arbitrary time of
      day and runs the query at the same time each day.
    periodicity: Required. The number of days between runs. Must be greater
      than or equal to 1 day and less than or equal to 31 days.
  """

  executionTime = _messages.MessageField('TimeOfDay', 1)
  periodicity = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class Distribution(_messages.Message):
  r"""Distribution contains summary statistics for a population of values. It
  optionally contains a histogram representing the distribution of those
  values across a set of buckets.The summary statistics are the count, mean,
  sum of the squared deviation from the mean, the minimum, and the maximum of
  the set of population of values. The histogram is based on a sequence of
  buckets and gives a count of values that fall into each bucket. The
  boundaries of the buckets are given either explicitly or by formulas for
  buckets of fixed or exponentially increasing widths.Although it is not
  forbidden, it is generally a bad idea to include non-finite values
  (infinities or NaNs) in the population of values, as this will render the
  mean and sum_of_squared_deviation fields meaningless.

  Fields:
    bucketCounts: Required in the Cloud Monitoring API v3. The values for each
      bucket specified in bucket_options. The sum of the values in
      bucketCounts must equal the value in the count field of the Distribution
      object. The order of the bucket counts follows the numbering schemes
      described for the three bucket types. The underflow bucket has number 0;
      the finite buckets, if any, have numbers 1 through N-2; and the overflow
      bucket has number N-1. The size of bucket_counts must not be greater
      than N. If the size is less than N, then the remaining buckets are
      assigned values of zero.
    bucketOptions: Required in the Cloud Monitoring API v3. Defines the
      histogram bucket boundaries.
    count: The number of values in the population. Must be non-negative. This
      value must equal the sum of the values in bucket_counts if a histogram
      is provided.
    exemplars: Must be in increasing order of value field.
    mean: The arithmetic mean of the values in the population. If count is
      zero then this field must be zero.
    range: If specified, contains the range of the population values. The
      field must not be present if the count is zero. This field is presently
      ignored by the Cloud Monitoring API v3.
    sumOfSquaredDeviation: The sum of squared deviations from the mean of the
      values in the population. For values x_i this is: Sum[i=1..n]((x_i -
      mean)^2) Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd
      edition describes Welford's method for accumulating this sum in one
      pass.If count is zero then this field must be zero.
  """

  bucketCounts = _messages.IntegerField(1, repeated=True)
  bucketOptions = _messages.MessageField('BucketOptions', 2)
  count = _messages.IntegerField(3)
  exemplars = _messages.MessageField('Exemplar', 4, repeated=True)
  mean = _messages.FloatField(5)
  range = _messages.MessageField('Range', 6)
  sumOfSquaredDeviation = _messages.FloatField(7)
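

# Illustrative sketch: the bucketCounts documentation above requires that the
# bucket counts sum to the count field. A caller assembling a Distribution by
# hand might verify that invariant before writing it.
def _example_distribution_counts_are_consistent(distribution):
  return sum(distribution.bucketCounts) == (distribution.count or 0)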


class DistributionCut(_messages.Message):
  r"""A DistributionCut defines a TimeSeries and thresholds used for measuring
  good service and total service. The TimeSeries must have ValueType =
  DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The computed
  good_service will be the estimated count of values in the Distribution that
  fall within the specified min and max.

  Fields:
    distributionFilter: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying a
      TimeSeries aggregating values. Must have ValueType = DISTRIBUTION and
      MetricKind = DELTA or MetricKind = CUMULATIVE.
    range: Range of values considered "good." For a one-sided range, set one
      bound to an infinite value.
  """

  distributionFilter = _messages.StringField(1)
  range = _messages.MessageField('GoogleMonitoringV3Range', 2)


class Documentation(_messages.Message):
  r"""Documentation that is included in the notifications and incidents
  pertaining to this policy.

  Fields:
    content: The body of the documentation, interpreted according to
      mime_type. The content may not exceed 8,192 Unicode characters or
      10,240 bytes when encoded in UTF-8 format, whichever limit is smaller.
      This text can be templatized by using variables
      (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
    links: Optional. Links to content such as playbooks, repositories, and
      other resources. This field can contain up to 3 entries.
    mimeType: The format of the content field. Presently, only the value
      "text/markdown" is supported. See Markdown
      (https://en.wikipedia.org/wiki/Markdown) for more information.
    subject: Optional. The subject line of the notification. The subject line
      may not exceed 10,240 bytes. In notifications generated by this policy,
      the contents of the subject line after variable expansion will be
      truncated to 255 bytes or shorter at the latest UTF-8 character
      boundary. The 255-byte limit is recommended by this thread
      (https://stackoverflow.com/questions/1592291/what-is-the-email-subject-
      length-limit). It is both the limit imposed by some third-party
      ticketing products and a common length for textual fields in databases
      (VARCHAR(255)).The contents of the subject line can be
      templatized by using variables
      (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). If
      this field is missing or empty, a default subject line will be
      generated.
  """

  content = _messages.StringField(1)
  links = _messages.MessageField('Link', 2, repeated=True)
  mimeType = _messages.StringField(3)
  subject = _messages.StringField(4)


class DroppedLabels(_messages.Message):
  r"""A set of (label, value) pairs that were removed from a Distribution time
  series during aggregation and then added as an attachment to a
  Distribution.Exemplar.The full label set for the exemplars is constructed by
  using the dropped pairs in combination with the label values that remain on
  the aggregated Distribution time series. The constructed full label set can
  be used to identify the specific entity, such as the instance or job, which
  might be contributing to a long-tail. However, with dropped labels, the
  storage requirements are reduced because only the aggregated distribution
  values for a large group of time series are stored.Note that there are no
  guarantees on ordering of the labels from exemplar-to-exemplar and from
  distribution-to-distribution in the same stream, and there may be
  duplicates. It is up to clients to resolve any ambiguities.

  Messages:
    LabelValue: Map from label to its value, for all labels dropped in any
      aggregation.

  Fields:
    label: Map from label to its value, for all labels dropped in any
      aggregation.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelValue(_messages.Message):
    r"""Map from label to its value, for all labels dropped in any
    aggregation.

    Messages:
      AdditionalProperty: An additional property for a LabelValue object.

    Fields:
      additionalProperties: Additional properties of type LabelValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  label = _messages.MessageField('LabelValue', 1)
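

# Illustrative sketch: the label field above is a map, which this generated
# module encodes as a LabelValue holding repeated AdditionalProperty
# (key, value) pairs. The label names and values are placeholders.
def _example_dropped_labels():
  return DroppedLabels(label=DroppedLabels.LabelValue(additionalProperties=[
      DroppedLabels.LabelValue.AdditionalProperty(key='instance_name',
                                                  value='my-instance'),
      DroppedLabels.LabelValue.AdditionalProperty(key='zone',
                                                  value='us-central1-a'),
  ]))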


class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance: service Foo { rpc
  Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
  """



class Error(_messages.Message):
  r"""Detailed information about an error category.

  Fields:
    pointCount: The number of points that couldn't be written because of
      status.
    status: The status of the requested write operation.
  """

  pointCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  status = _messages.MessageField('Status', 2)


class Exemplar(_messages.Message):
  r"""Exemplars are example points that may be used to annotate aggregated
  distribution values. They are metadata that gives information about a
  particular value added to a Distribution bucket, such as a trace ID that was
  active when a value was added. They may contain further information, such
  as example values and timestamps, origin, etc.

  Messages:
    AttachmentsValueListEntry: An AttachmentsValueListEntry object.

  Fields:
    attachments: Contextual information about the example value. Examples
      are:Trace: type.googleapis.com/google.monitoring.v3.SpanContextLiteral
      string: type.googleapis.com/google.protobuf.StringValueLabels dropped
      during aggregation:
      type.googleapis.com/google.monitoring.v3.DroppedLabelsThere may be only
      a single attachment of any given message type in a single exemplar, and
      this is enforced by the system.
    timestamp: The observation (sampling) time of the above value.
    value: Value of the exemplar point. This value determines to which bucket
      the exemplar belongs.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class AttachmentsValueListEntry(_messages.Message):
    r"""A AttachmentsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for an
        AttachmentsValueListEntry object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AttachmentsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  attachments = _messages.MessageField('AttachmentsValueListEntry', 1, repeated=True)
  timestamp = _messages.StringField(2)
  value = _messages.FloatField(3)


class Explicit(_messages.Message):
  r"""Specifies a set of buckets with arbitrary widths.There are size(bounds)
  + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i
  < N-1): boundsi Lower bound (1 <= i < N); boundsi - 1The bounds field must
  contain at least one element. If bounds has only one element, then there are
  no finite buckets, and that single element is the common boundary of the
  overflow and underflow buckets.

  Fields:
    bounds: The values must be monotonically increasing.
  """

  bounds = _messages.FloatField(1, repeated=True)
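

# Illustrative sketch of the numbering scheme described above: with
# N = size(bounds) + 1 buckets, bucket 0 is the underflow bucket (values
# below bounds[0]) and bucket N-1 is the overflow bucket (values at or above
# bounds[-1]). Because bounds must be monotonically increasing, bisect gives
# the bucket index for a sample value directly.
def _example_explicit_bucket_index(explicit, value):
  import bisect  # Local import to keep this sketch self-contained.
  return bisect.bisect_right(explicit.bounds, value)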


class Exponential(_messages.Message):
  r"""Specifies an exponential sequence of buckets that have a width that is
  proportional to the value of the lower bound. Each bucket represents a
  constant relative uncertainty on a specific value in the bucket.There are
  num_finite_buckets + 2 (= N) buckets. Bucket i has the following
  boundaries:Upper bound (0 <= i < N-1): scale * (growth_factor ^ i).Lower
  bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).

  Fields:
    growthFactor: Must be greater than 1.
    numFiniteBuckets: Must be greater than 0.
    scale: Must be greater than 0.
  """

  growthFactor = _messages.FloatField(1)
  numFiniteBuckets = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  scale = _messages.FloatField(3)
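

# Illustrative sketch: materializing the finite bucket boundaries defined by
# the formulas above (the upper bound of bucket i is scale * growth_factor**i
# for 0 <= i < N-1). Assumes all three fields are set; useful when debugging
# a histogram definition.
def _example_exponential_bucket_boundaries(exponential):
  return [exponential.scale * exponential.growthFactor ** i
          for i in range(exponential.numFiniteBuckets + 1)]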


class Field(_messages.Message):
  r"""A single field of a message type.New usages of this message as an
  alternative to FieldDescriptorProto are strongly discouraged. This message
  does not reliably preserve all information necessary to model the schema
  and preserve semantics. Instead make use of FileDescriptorSet which
  preserves the necessary information.

  Enums:
    CardinalityValueValuesEnum: The field cardinality.
    KindValueValuesEnum: The field type.

  Fields:
    cardinality: The field cardinality.
    defaultValue: The string value of the default value of this field. Proto2
      syntax only.
    jsonName: The field JSON name.
    kind: The field type.
    name: The field name.
    number: The field number.
    oneofIndex: The index of the field type in Type.oneofs, for message or
      enumeration types. The first type has index 1; zero means the type is
      not in the list.
    options: The protocol buffer options.
    packed: Whether to use alternative packed wire representation.
    typeUrl: The field type URL, without the scheme, for message or
      enumeration types. Example:
      "type.googleapis.com/google.protobuf.Timestamp".
  """

  class CardinalityValueValuesEnum(_messages.Enum):
    r"""The field cardinality.

    Values:
      CARDINALITY_UNKNOWN: For fields with unknown cardinality.
      CARDINALITY_OPTIONAL: For optional fields.
      CARDINALITY_REQUIRED: For required fields. Proto2 syntax only.
      CARDINALITY_REPEATED: For repeated fields.
    """
    CARDINALITY_UNKNOWN = 0
    CARDINALITY_OPTIONAL = 1
    CARDINALITY_REQUIRED = 2
    CARDINALITY_REPEATED = 3

  class KindValueValuesEnum(_messages.Enum):
    r"""The field type.

    Values:
      TYPE_UNKNOWN: Field type unknown.
      TYPE_DOUBLE: Field type double.
      TYPE_FLOAT: Field type float.
      TYPE_INT64: Field type int64.
      TYPE_UINT64: Field type uint64.
      TYPE_INT32: Field type int32.
      TYPE_FIXED64: Field type fixed64.
      TYPE_FIXED32: Field type fixed32.
      TYPE_BOOL: Field type bool.
      TYPE_STRING: Field type string.
      TYPE_GROUP: Field type group. Proto2 syntax only, and deprecated.
      TYPE_MESSAGE: Field type message.
      TYPE_BYTES: Field type bytes.
      TYPE_UINT32: Field type uint32.
      TYPE_ENUM: Field type enum.
      TYPE_SFIXED32: Field type sfixed32.
      TYPE_SFIXED64: Field type sfixed64.
      TYPE_SINT32: Field type sint32.
      TYPE_SINT64: Field type sint64.
    """
    TYPE_UNKNOWN = 0
    TYPE_DOUBLE = 1
    TYPE_FLOAT = 2
    TYPE_INT64 = 3
    TYPE_UINT64 = 4
    TYPE_INT32 = 5
    TYPE_FIXED64 = 6
    TYPE_FIXED32 = 7
    TYPE_BOOL = 8
    TYPE_STRING = 9
    TYPE_GROUP = 10
    TYPE_MESSAGE = 11
    TYPE_BYTES = 12
    TYPE_UINT32 = 13
    TYPE_ENUM = 14
    TYPE_SFIXED32 = 15
    TYPE_SFIXED64 = 16
    TYPE_SINT32 = 17
    TYPE_SINT64 = 18

  cardinality = _messages.EnumField('CardinalityValueValuesEnum', 1)
  defaultValue = _messages.StringField(2)
  jsonName = _messages.StringField(3)
  kind = _messages.EnumField('KindValueValuesEnum', 4)
  name = _messages.StringField(5)
  number = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  oneofIndex = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  options = _messages.MessageField('Option', 8, repeated=True)
  packed = _messages.BooleanField(9)
  typeUrl = _messages.StringField(10)


class ForecastOptions(_messages.Message):
  r"""Options used when forecasting the time series and testing the predicted
  value against the threshold.

  Fields:
    forecastHorizon: Required. The length of time into the future to forecast
      whether a time series will violate the threshold. If the predicted value
      is found to violate the threshold, and the violation is observed in all
      forecasts made for the configured duration, then the time series is
      considered to be failing. The forecast horizon can range from 1 hour to
      60 hours.
  """

  forecastHorizon = _messages.StringField(1)


class GetNotificationChannelVerificationCodeRequest(_messages.Message):
  r"""The GetNotificationChannelVerificationCode request.

  Fields:
    expireTime: The desired expiration time. If specified, the API will
      guarantee that the returned code will not be valid after the specified
      timestamp; however, the API cannot guarantee that the returned code will
      be valid for at least as long as the requested time (the API puts an
      upper bound on the amount of time for which a code may be valid). If
      omitted, a default expiration will be used, which may be less than the
      max permissible expiration (so specifying an expiration may extend the
      code's lifetime over omitting an expiration, even though the API does
      impose an upper limit on the maximum expiration that is permitted).
  """

  expireTime = _messages.StringField(1)


class GetNotificationChannelVerificationCodeResponse(_messages.Message):
  r"""The GetNotificationChannelVerificationCode request.

  Fields:
    code: The verification code, which may be used to verify other channels
      that have an equivalent identity (i.e. other channels of the same type
      with the same fingerprint such as other email channels with the same
      email address or other sms channels with the same number).
    expireTime: The expiration time associated with the code that was
      returned. If an expiration was provided in the request, this is the
      minimum of the requested expiration in the request and the max permitted
      expiration.
  """

  code = _messages.StringField(1)
  expireTime = _messages.StringField(2)


class GkeNamespace(_messages.Message):
  r"""GKE Namespace. The field names correspond to the resource metadata
  labels on monitored resources that fall under a namespace (for example,
  k8s_container or k8s_pod).

  Fields:
    clusterName: The name of the parent cluster.
    location: The location of the parent cluster. This may be a zone or
      region.
    namespaceName: The name of this namespace.
    projectId: Output only. The project this resource lives in. For legacy
      services migrated from the Custom type, this may be a distinct project
      from the one parenting the service itself.
  """

  clusterName = _messages.StringField(1)
  location = _messages.StringField(2)
  namespaceName = _messages.StringField(3)
  projectId = _messages.StringField(4)


class GkeService(_messages.Message):
  r"""GKE Service. The "service" here represents a Kubernetes service object
  (https://kubernetes.io/docs/concepts/services-networking/service). The field
  names correspond to the resource labels on k8s_service monitored resources
  (https://cloud.google.com/monitoring/api/resources#tag_k8s_service).

  Fields:
    clusterName: The name of the parent cluster.
    location: The location of the parent cluster. This may be a zone or
      region.
    namespaceName: The name of the parent namespace.
    projectId: Output only. The project this resource lives in. For legacy
      services migrated from the Custom type, this may be a distinct project
      from the one parenting the service itself.
    serviceName: The name of this service.
  """

  clusterName = _messages.StringField(1)
  location = _messages.StringField(2)
  namespaceName = _messages.StringField(3)
  projectId = _messages.StringField(4)
  serviceName = _messages.StringField(5)


class GkeWorkload(_messages.Message):
  r"""A GKE Workload (Deployment, StatefulSet, etc). The field names
  correspond to the metadata labels on monitored resources that fall under a
  workload (for example, k8s_container or k8s_pod).

  Fields:
    clusterName: The name of the parent cluster.
    location: The location of the parent cluster. This may be a zone or
      region.
    namespaceName: The name of the parent namespace.
    projectId: Output only. The project this resource lives in. For legacy
      services migrated from the Custom type, this may be a distinct project
      from the one parenting the service itself.
    topLevelControllerName: The name of this workload.
    topLevelControllerType: The type of this workload (for example,
      "Deployment" or "DaemonSet")
  """

  clusterName = _messages.StringField(1)
  location = _messages.StringField(2)
  namespaceName = _messages.StringField(3)
  projectId = _messages.StringField(4)
  topLevelControllerName = _messages.StringField(5)
  topLevelControllerType = _messages.StringField(6)


class GoogleMonitoringV3Range(_messages.Message):
  r"""Range of numerical values within min and max.

  Fields:
    max: Range maximum.
    min: Range minimum.
  """

  max = _messages.FloatField(1)
  min = _messages.FloatField(2)


class Group(_messages.Message):
  r"""The description of a dynamic collection of monitored resources. Each
  group has a filter that is matched against monitored resources and their
  associated metadata. If a group's filter matches an available monitored
  resource, then that resource is a member of that group. Groups can contain
  any number of monitored resources, and each monitored resource can be a
  member of any number of groups.Groups can be nested in parent-child
  hierarchies. The parentName field identifies an optional parent for each
  group. If a group has a parent, then the only monitored resources available
  to be matched by the group's filter are the resources contained in the
  parent group. In other words, a group contains the monitored resources that
  match its filter and the filters of all the group's ancestors. A group
  without a parent can contain any monitored resource.For example, consider an
  infrastructure running a set of instances with two user-defined tags:
  "environment" and "role". A parent group has a filter,
  environment="production". A child of that parent group has a filter,
  role="transcoder". The parent group contains all instances in the production
  environment, regardless of their roles. The child group contains instances
  that have the transcoder role and are in the production environment.The
  monitored resources contained in a group can change at any moment, depending
  on what resources exist and what filters are associated with the group and
  its ancestors.

  Fields:
    displayName: A user-assigned name for this group, used only for display
      purposes.
    filter: The filter used to determine which monitored resources belong to
      this group.
    isCluster: If true, the members of this group are considered to be a
      cluster. The system can perform additional analysis on groups that are
      clusters.
    name: Output only. The name of this group. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group,
      this field is ignored and a new name is created consisting of the
      project specified in the call to CreateGroup and a unique [GROUP_ID]
      that is generated automatically.
    parentName: The name of the group's parent, if it has one. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For groups with no
      parent, parent_name is the empty string, "".
  """

  displayName = _messages.StringField(1)
  filter = _messages.StringField(2)
  isCluster = _messages.BooleanField(3)
  name = _messages.StringField(4)
  parentName = _messages.StringField(5)


class Hourly(_messages.Message):
  r"""Used to schedule the query to run every so many hours.

  Fields:
    minuteOffset: Optional. The number of minutes after the hour (in UTC) to
      run the query. Must be greater than or equal to 0 minutes and less than
      or equal to 59 minutes. If left unspecified, then an arbitrary offset is
      used.
    periodicity: Required. The number of hours between runs. Must be greater
      than or equal to 1 hour and less than or equal to 48 hours.
  """

  minuteOffset = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  periodicity = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class HttpCheck(_messages.Message):
  r"""Information involved in an HTTP/HTTPS Uptime check request.

  Enums:
    ContentTypeValueValuesEnum: The content type header to use for the check.
      The following configurations result in errors: 1. Content type is
      specified in both the headers field and the content_type field. 2.
      Request method is GET and content_type is not TYPE_UNSPECIFIED 3.
      Request method is POST and content_type is TYPE_UNSPECIFIED. 4. Request
      method is POST and a "Content-Type" header is provided via headers
      field. The content_type field should be used instead.
    RequestMethodValueValuesEnum: The HTTP request method to use for the
      check. If set to METHOD_UNSPECIFIED then request_method defaults to GET.

  Messages:
    HeadersValue: The list of headers to send as part of the Uptime check
      request. If two headers have the same key and different values, they
      should be entered as a single header, with the value being a comma-
      separated list of all the desired values as described at
      https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two
      separate headers with the same key in a Create call will cause the first
      to be overwritten by the second. The maximum number of headers allowed
      is 100.

  Fields:
    acceptedResponseStatusCodes: If present, the check will only pass if the
      HTTP response status code is in this set of status codes. If empty, the
      check will only pass if the HTTP status code is 200-299.
    authInfo: The authentication information. Optional when creating an HTTP
      check; defaults to empty. Do not set both auth_method and auth_info.
    body: The request body associated with the HTTP POST request. If
      content_type is URL_ENCODED, the body passed in must be URL-encoded.
      Users can provide a Content-Length header via the headers field or the
      API will do so. If the request_method is GET and body is not empty, the
      API will return an error. The maximum byte size is 1 megabyte.Note: If
      client libraries aren't used (they perform the conversion
      automatically), base64-encode your body data, since the field is of type
      bytes.
    contentType: The content type header to use for the check. The following
      configurations result in errors: 1. Content type is specified in both
      the headers field and the content_type field. 2. Request method is GET
      and content_type is not TYPE_UNSPECIFIED 3. Request method is POST and
      content_type is TYPE_UNSPECIFIED. 4. Request method is POST and a
      "Content-Type" header is provided via headers field. The content_type
      field should be used instead.
    customContentType: A user provided content type header to use for the
      check. The invalid configurations outlined in the content_type field
      apply to custom_content_type, as well as the following: 1. content_type
      is URL_ENCODED and custom_content_type is set. 2. content_type is
      USER_PROVIDED and custom_content_type is not set.
    headers: The list of headers to send as part of the Uptime check request.
      If two headers have the same key and different values, they should be
      entered as a single header, with the value being a comma-separated list
      of all the desired values as described at
      https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two
      separate headers with the same key in a Create call will cause the first
      to be overwritten by the second. The maximum number of headers allowed
      is 100.
    maskHeaders: Boolean specifying whether to encrypt the header information.
      Encryption should be specified for any headers related to authentication
      that you do not wish to be seen when retrieving the configuration. The
      server will be responsible for encrypting the headers. On Get/List
      calls, if mask_headers is set to true then the headers will be obscured
      with ******.
    path: Optional (defaults to "/"). The path to the page against which to
      run the check. Will be combined with the host (specified within the
      monitored_resource) and port to construct the full URL. If the provided
      path does not begin with "/", a "/" will be prepended automatically.
    pingConfig: Contains information needed to add pings to an HTTP check.
    port: Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl
      is true). The TCP port on the HTTP server against which to run the
      check. Will be combined with host (specified within the
      monitored_resource) and path to construct the full URL.
    requestMethod: The HTTP request method to use for the check. If set to
      METHOD_UNSPECIFIED then request_method defaults to GET.
    serviceAgentAuthentication: If specified, Uptime will generate and attach
      an OIDC JWT token for the Monitoring service agent service account as an
      Authorization header in the HTTP request when probing.
    useSsl: If true, use HTTPS instead of HTTP to run the check.
    validateSsl: Boolean specifying whether to include SSL certificate
      validation as a part of the Uptime check. Only applies to checks where
      monitored_resource is set to uptime_url. If use_ssl is false, setting
      validate_ssl to true has no effect.
  """

  class ContentTypeValueValuesEnum(_messages.Enum):
    r"""The content type header to use for the check. The following
    configurations result in errors: 1. Content type is specified in both the
    headers field and the content_type field. 2. Request method is GET and
    content_type is not TYPE_UNSPECIFIED 3. Request method is POST and
    content_type is TYPE_UNSPECIFIED. 4. Request method is POST and a
    "Content-Type" header is provided via headers field. The content_type
    field should be used instead.

    Values:
      TYPE_UNSPECIFIED: No content type specified.
      URL_ENCODED: body is in URL-encoded form. Equivalent to setting the
        Content-Type to application/x-www-form-urlencoded in the HTTP request.
      USER_PROVIDED: body is in custom_content_type form. Equivalent to
        setting the Content-Type to the contents of custom_content_type in the
        HTTP request.
    """
    TYPE_UNSPECIFIED = 0
    URL_ENCODED = 1
    USER_PROVIDED = 2

  class RequestMethodValueValuesEnum(_messages.Enum):
    r"""The HTTP request method to use for the check. If set to
    METHOD_UNSPECIFIED then request_method defaults to GET.

    Values:
      METHOD_UNSPECIFIED: No request method specified.
      GET: GET request.
      POST: POST request.
    """
    METHOD_UNSPECIFIED = 0
    GET = 1
    POST = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HeadersValue(_messages.Message):
    r"""The list of headers to send as part of the Uptime check request. If
    two headers have the same key and different values, they should be entered
    as a single header, with the value being a comma-separated list of all the
    desired values as described at
    https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two
    separate headers with the same key in a Create call will cause the first
    to be overwritten by the second. The maximum number of headers allowed is
    100.

    Messages:
      AdditionalProperty: An additional property for a HeadersValue object.

    Fields:
      additionalProperties: Additional properties of type HeadersValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HeadersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  acceptedResponseStatusCodes = _messages.MessageField('ResponseStatusCode', 1, repeated=True)
  authInfo = _messages.MessageField('BasicAuthentication', 2)
  body = _messages.BytesField(3)
  contentType = _messages.EnumField('ContentTypeValueValuesEnum', 4)
  customContentType = _messages.StringField(5)
  headers = _messages.MessageField('HeadersValue', 6)
  maskHeaders = _messages.BooleanField(7)
  path = _messages.StringField(8)
  pingConfig = _messages.MessageField('PingConfig', 9)
  port = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  requestMethod = _messages.EnumField('RequestMethodValueValuesEnum', 11)
  serviceAgentAuthentication = _messages.MessageField('ServiceAgentAuthentication', 12)
  useSsl = _messages.BooleanField(13)
  validateSsl = _messages.BooleanField(14)
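

# Illustrative sketch: an HTTPS POST Uptime check that follows the
# content_type rules documented above (POST requires a non-unspecified
# content type, and Content-Type must not also be supplied via headers).
# Header, path, and body values are placeholders.
def _example_https_post_check():
  return HttpCheck(
      # URL-encoded body, matching the URL_ENCODED content type below.
      body=b'name=probe',
      contentType=HttpCheck.ContentTypeValueValuesEnum.URL_ENCODED,
      headers=HttpCheck.HeadersValue(additionalProperties=[
          HttpCheck.HeadersValue.AdditionalProperty(
              key='X-Probe-Token', value='placeholder'),
      ]),
      path='/healthz',
      port=443,
      requestMethod=HttpCheck.RequestMethodValueValuesEnum.POST,
      useSsl=True,
      validateSsl=True)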


class InternalChecker(_messages.Message):
  r"""An internal checker allows Uptime checks to run on private/internal GCP
  resources.

  Enums:
    StateValueValuesEnum: The current operational state of the internal
      checker.

  Fields:
    displayName: The checker's human-readable name. The display name should be
      unique within a Cloud Monitoring Metrics Scope in order to make it
      easier to identify; however, uniqueness is not enforced.
    gcpZone: The GCP zone the Uptime check should egress from. Only respected
      for internal Uptime checks, where internal_network is specified.
    name: A unique resource name for this InternalChecker. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID]
      [PROJECT_ID_OR_NUMBER] is the Cloud Monitoring Metrics Scope project for
      the Uptime check config associated with the internal checker.
    network: The GCP VPC network (https://cloud.google.com/vpc/docs/vpc) where
      the internal resource lives (ex: "default").
    peerProjectId: The GCP project ID where the internal checker lives. Not
      necessarily the same as the Metrics Scope project.
    state: The current operational state of the internal checker.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""The current operational state of the internal checker.

    Values:
      UNSPECIFIED: An internal checker should never be in the unspecified
        state.
      CREATING: The checker is being created, provisioned, and configured. A
        checker in this state can be returned by ListInternalCheckers or
        GetInternalChecker, as well as by examining the long running Operation
        (https://cloud.google.com/apis/design/design_patterns#long_running_ope
        rations) that created it.
      RUNNING: The checker is running and available for use. A checker in this
        state can be returned by ListInternalCheckers or GetInternalChecker as
        well as by examining the long running Operation (https://cloud.google.
        com/apis/design/design_patterns#long_running_operations) that created
        it. If a checker is being torn down, it is neither visible nor usable,
        so there is no "deleting" or "down" state.
    """
    UNSPECIFIED = 0
    CREATING = 1
    RUNNING = 2

  displayName = _messages.StringField(1)
  gcpZone = _messages.StringField(2)
  name = _messages.StringField(3)
  network = _messages.StringField(4)
  peerProjectId = _messages.StringField(5)
  state = _messages.EnumField('StateValueValuesEnum', 6)


class IstioCanonicalService(_messages.Message):
  r"""Canonical service scoped to an Istio mesh. Anthos clusters running ASM
  >= 1.6.8 will have their services ingested as this type.

  Fields:
    canonicalService: The name of the canonical service underlying this
      service. Corresponds to the destination_canonical_service_name metric
      label in Istio metrics
      (https://cloud.google.com/monitoring/api/metrics_istio).
    canonicalServiceNamespace: The namespace of the canonical service
      underlying this service. Corresponds to the
      destination_canonical_service_namespace metric label in Istio metrics
      (https://cloud.google.com/monitoring/api/metrics_istio).
    meshUid: Identifier for the Istio mesh in which this canonical service is
      defined. Corresponds to the mesh_uid metric label in Istio metrics
      (https://cloud.google.com/monitoring/api/metrics_istio).
  """

  canonicalService = _messages.StringField(1)
  canonicalServiceNamespace = _messages.StringField(2)
  meshUid = _messages.StringField(3)


class JsonPathMatcher(_messages.Message):
  r"""Information needed to perform a JSONPath content match. Used for
  ContentMatcherOption::MATCHES_JSON_PATH and
  ContentMatcherOption::NOT_MATCHES_JSON_PATH.

  Enums:
    JsonMatcherValueValuesEnum: The type of JSONPath match that will be
      applied to the JSON output (ContentMatcher.content)

  Fields:
    jsonMatcher: The type of JSONPath match that will be applied to the JSON
      output (ContentMatcher.content)
    jsonPath: JSONPath within the response output pointing to the expected
      ContentMatcher::content to match against.
  """

  class JsonMatcherValueValuesEnum(_messages.Enum):
    r"""The type of JSONPath match that will be applied to the JSON output
    (ContentMatcher.content)

    Values:
      JSON_PATH_MATCHER_OPTION_UNSPECIFIED: No JSONPath matcher type specified
        (not valid).
      EXACT_MATCH: Selects 'exact string' matching. The match succeeds if the
        content at the json_path within the output is exactly the same as the
        content string.
      REGEX_MATCH: Selects regular-expression matching. The match succeeds if
        the content at the json_path within the output matches the regular
        expression specified in the content string.
    """
    JSON_PATH_MATCHER_OPTION_UNSPECIFIED = 0
    EXACT_MATCH = 1
    REGEX_MATCH = 2

  jsonMatcher = _messages.EnumField('JsonMatcherValueValuesEnum', 1)
  jsonPath = _messages.StringField(2)


class LabelDescriptor(_messages.Message):
  r"""A description of a label.

  Enums:
    ValueTypeValueValuesEnum: The type of data that can be assigned to the
      label.

  Fields:
    description: A human-readable description for the label.
    key: The key for this label. The key must meet the following criteria:
      Does not exceed 100 characters. Matches the following regular
      expression: [a-zA-Z][a-zA-Z0-9_]* The first character must be an upper-
      or lower-case letter. The remaining characters must be letters, digits,
      or underscores.
    valueType: The type of data that can be assigned to the label.
  """

  class ValueTypeValueValuesEnum(_messages.Enum):
    r"""The type of data that can be assigned to the label.

    Values:
      STRING: A variable-length string, not to exceed 1,024 characters. This
        is the default value type.
      BOOL: Boolean; true or false.
      INT64: A 64-bit signed integer.
    """
    STRING = 0
    BOOL = 1
    INT64 = 2

  description = _messages.StringField(1)
  key = _messages.StringField(2)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 3)
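

# Illustrative sketch: validating a label key against the constraints in the
# key field documentation above (at most 100 characters, matching the regular
# expression [a-zA-Z][a-zA-Z0-9_]*).
def _example_is_valid_label_key(key):
  import re  # Local import to keep this sketch self-contained.
  return (len(key) <= 100 and
          re.match(r'^[a-zA-Z][a-zA-Z0-9_]*$', key) is not None)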


class LabelValue(_messages.Message):
  r"""A label value.

  Fields:
    boolValue: A bool label value.
    int64Value: An int64 label value.
    stringValue: A string label value.
  """

  boolValue = _messages.BooleanField(1)
  int64Value = _messages.IntegerField(2)
  stringValue = _messages.StringField(3)


class LatencyCriteria(_messages.Message):
  r"""Parameters for a latency threshold SLI.

  Fields:
    threshold: Good service is defined to be the count of requests made to
      this service that return in no more than the specified threshold.
  """

  threshold = _messages.StringField(1)


class Linear(_messages.Message):
  r"""Specifies a linear sequence of buckets that all have the same width
  (except overflow and underflow). Each bucket represents a constant absolute
  uncertainty on the specific value in the bucket.There are num_finite_buckets
  + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i
  < N-1): offset + (width * i).Lower bound (1 <= i < N): offset + (width * (i
  - 1)).

  Fields:
    numFiniteBuckets: Must be greater than 0.
    offset: Lower bound of the first bucket.
    width: Must be greater than 0.
  """

  numFiniteBuckets = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  offset = _messages.FloatField(2)
  width = _messages.FloatField(3)


class Link(_messages.Message):
  r"""Links to content such as playbooks, repositories, and other resources.

  Fields:
    displayName: A short display name for the link. The display name must not
      be empty or exceed 63 characters. Example: "playbook".
    url: The url of a webpage. A url can be templatized by using variables in
      the path or the query parameters. The total length of a URL should not
      exceed 2083 characters before and after variable expansion. Example:
      "https://my_domain.com/playbook?name=${resource.name}"
  """

  displayName = _messages.StringField(1)
  url = _messages.StringField(2)


class ListAlertPoliciesResponse(_messages.Message):
  r"""The protocol for the ListAlertPolicies response.

  Fields:
    alertPolicies: The returned alert policies.
    nextPageToken: If there might be more results than were returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    totalSize: The total number of alert policies in all pages. This number is
      only an estimate, and may change in subsequent pages.
      https://aip.dev/158
  """

  alertPolicies = _messages.MessageField('AlertPolicy', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class ListAlertsResponse(_messages.Message):
  r"""The ListAlerts response.

  Fields:
    alerts: The list of alerts.
    nextPageToken: If not empty, indicates that there may be more results that
      match the request. Use the value in the page_token field in a subsequent
      request to fetch the next set of results. The token is encrypted and
      only guaranteed to return correct results for 72 hours after it is
      created. If empty, all results have been returned.
    totalSize: The estimated total number of matching results for this query.
  """

  alerts = _messages.MessageField('Alert', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class ListGroupMembersResponse(_messages.Message):
  r"""The ListGroupMembers response.

  Fields:
    members: A set of monitored resources in the group.
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    totalSize: The total number of elements matching this request.
  """

  members = _messages.MessageField('MonitoredResource', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class ListGroupsResponse(_messages.Message):
  r"""The ListGroups response.

  Fields:
    group: The groups that match the specified filters.
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
  """

  group = _messages.MessageField('Group', 1, repeated=True)
  nextPageToken = _messages.StringField(2)


class ListMetricDescriptorsResponse(_messages.Message):
  r"""The ListMetricDescriptors response.

  Fields:
    metricDescriptors: The metric descriptors that are available to the
      project and that match the value of filter, if present.
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
  """

  metricDescriptors = _messages.MessageField('MetricDescriptor', 1, repeated=True)
  nextPageToken = _messages.StringField(2)


class ListMonitoredResourceDescriptorsResponse(_messages.Message):
  r"""The ListMonitoredResourceDescriptors response.

  Fields:
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    resourceDescriptors: The monitored resource descriptors that are available
      to this project and that match filter, if present.
  """

  nextPageToken = _messages.StringField(1)
  resourceDescriptors = _messages.MessageField('MonitoredResourceDescriptor', 2, repeated=True)


class ListNotificationChannelDescriptorsResponse(_messages.Message):
  r"""The ListNotificationChannelDescriptors response.

  Fields:
    channelDescriptors: The notification channel descriptors supported for the
      specified project, optionally filtered.
    nextPageToken: If not empty, indicates that there may be more results that
      match the request. Use the value in the page_token field in a subsequent
      request to fetch the next set of results. If empty, all results have
      been returned.
  """

  channelDescriptors = _messages.MessageField('NotificationChannelDescriptor', 1, repeated=True)
  nextPageToken = _messages.StringField(2)


class ListNotificationChannelsResponse(_messages.Message):
  r"""The ListNotificationChannels response.

  Fields:
    nextPageToken: If not empty, indicates that there may be more results that
      match the request. Use the value in the page_token field in a subsequent
      request to fetch the next set of results. If empty, all results have
      been returned.
    notificationChannels: The notification channels defined for the specified
      project.
    totalSize: The total number of notification channels in all pages. This
      number is only an estimate, and may change in subsequent pages.
      https://aip.dev/158
  """

  nextPageToken = _messages.StringField(1)
  notificationChannels = _messages.MessageField('NotificationChannel', 2, repeated=True)
  totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class ListServiceLevelObjectivesResponse(_messages.Message):
  r"""The ListServiceLevelObjectives response.

  Fields:
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    serviceLevelObjectives: The ServiceLevelObjectives matching the specified
      filter.
  """

  nextPageToken = _messages.StringField(1)
  serviceLevelObjectives = _messages.MessageField('ServiceLevelObjective', 2, repeated=True)


class ListServicesResponse(_messages.Message):
  r"""The ListServices response.

  Fields:
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    services: The Services matching the specified filter.
  """

  nextPageToken = _messages.StringField(1)
  services = _messages.MessageField('Service', 2, repeated=True)


class ListSnoozesResponse(_messages.Message):
  r"""The results of a successful ListSnoozes call, containing the matching
  Snoozes.

  Fields:
    nextPageToken: Page token for repeated calls to ListSnoozes, to fetch
      additional pages of results. If this is empty or missing, there are no
      more pages.
    snoozes: Snoozes matching this list call.
  """

  nextPageToken = _messages.StringField(1)
  snoozes = _messages.MessageField('Snooze', 2, repeated=True)


class ListTimeSeriesResponse(_messages.Message):
  r"""The ListTimeSeries response.

  Fields:
    executionErrors: Query execution errors that may have caused the time
      series data returned to be incomplete.
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    timeSeries: One or more time series that match the filter included in the
      request.
    unit: The unit in which all time_series point values are reported. unit
      follows the UCUM format for units as seen in
      https://unitsofmeasure.org/ucum.html. If different time_series have
      different units (for example, because they come from different metric
      types, or a unit is absent), then unit will be "{not_a_unit}".
    unreachable: Cloud regions that were unreachable which may have caused
      incomplete data to be returned.
  """

  executionErrors = _messages.MessageField('Status', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  timeSeries = _messages.MessageField('TimeSeries', 3, repeated=True)
  unit = _messages.StringField(4)
  unreachable = _messages.StringField(5, repeated=True)
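

# Illustrative sketch: draining a paginated ListTimeSeries result by
# following the nextPageToken contract described above. fetch_page is a
# hypothetical callable (not part of this module) that takes a page token
# (or None for the first page) and returns a ListTimeSeriesResponse.
def _example_collect_all_time_series(fetch_page):
  all_series = []
  page_token = None
  while True:
    response = fetch_page(page_token)
    all_series.extend(response.timeSeries)
    page_token = response.nextPageToken
    if not page_token:
      return all_series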


class ListUptimeCheckConfigsResponse(_messages.Message):
  r"""The protocol for the ListUptimeCheckConfigs response.

  Fields:
    nextPageToken: This field represents the pagination token to retrieve the
      next page of results. If the value is empty, it means no further results
      for the request. To retrieve the next page of results, the value of the
      next_page_token is passed to the subsequent List method call (in the
      request message's page_token field).
    totalSize: The total number of Uptime check configurations for the
      project, irrespective of any pagination.
    uptimeCheckConfigs: The returned Uptime check configurations.
  """

  nextPageToken = _messages.StringField(1)
  totalSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  uptimeCheckConfigs = _messages.MessageField('UptimeCheckConfig', 3, repeated=True)


class ListUptimeCheckIpsResponse(_messages.Message):
  r"""The protocol for the ListUptimeCheckIps response.

  Fields:
    nextPageToken: This field represents the pagination token to retrieve the
      next page of results. If the value is empty, it means no further results
      for the request. To retrieve the next page of results, the value of the
      next_page_token is passed to the subsequent List method call (in the
      request message's page_token field). NOTE: this field is not yet
      implemented
    uptimeCheckIps: The returned list of IP addresses (including region and
      location) that the checkers run from.
  """

  nextPageToken = _messages.StringField(1)
  uptimeCheckIps = _messages.MessageField('UptimeCheckIp', 2, repeated=True)


class LogMatch(_messages.Message):
  r"""A condition type that checks whether a log message in the scoping
  project (https://cloud.google.com/monitoring/api/v3#project_name) satisfies
  the given filter. Logs from other projects in the metrics scope are not
  evaluated.

  Messages:
    LabelExtractorsValue: Optional. A map from a label key to an extractor
      expression, which is used to extract the value for this label key. Each
      entry in this map is a specification for how data should be extracted
      from log entries that match filter. Each combination of extracted values
      is treated as a separate rule for the purposes of triggering
      notifications. Label keys and corresponding values can be used in
      notifications generated by this condition.Please see the documentation
      on logs-based metric valueExtractors (https://cloud.google.com/logging/d
      ocs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extract
      or) for syntax and examples.

  Fields:
    filter: Required. A logs-based filter. See Advanced Logs Queries
      (https://cloud.google.com/logging/docs/view/advanced-queries) for how
      this filter should be constructed.
    labelExtractors: Optional. A map from a label key to an extractor
      expression, which is used to extract the value for this label key. Each
      entry in this map is a specification for how data should be extracted
      from log entries that match filter. Each combination of extracted values
      is treated as a separate rule for the purposes of triggering
      notifications. Label keys and corresponding values can be used in
      notifications generated by this condition.Please see the documentation
      on logs-based metric valueExtractors (https://cloud.google.com/logging/d
      ocs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extract
      or) for syntax and examples.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelExtractorsValue(_messages.Message):
    r"""Optional. A map from a label key to an extractor expression, which is
    used to extract the value for this label key. Each entry in this map is a
    specification for how data should be extracted from log entries that match
    filter. Each combination of extracted values is treated as a separate rule
    for the purposes of triggering notifications. Label keys and corresponding
    values can be used in notifications generated by this condition.Please see
    the documentation on logs-based metric valueExtractors (https://cloud.goog
    le.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS
    .value_extractor) for syntax and examples.

    Messages:
      AdditionalProperty: An additional property for a LabelExtractorsValue
        object.

    Fields:
      additionalProperties: Additional properties of type LabelExtractorsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelExtractorsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  filter = _messages.StringField(1)
  labelExtractors = _messages.MessageField('LabelExtractorsValue', 2)
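
# Illustrative usage (not part of the generated API surface): a minimal sketch
# of a LogMatch condition. The filter string, label key, and extractor
# expression below are hypothetical examples.
#
#   log_match = LogMatch(
#       filter='resource.type="gce_instance" AND severity>=ERROR',
#       labelExtractors=LogMatch.LabelExtractorsValue(additionalProperties=[
#           LogMatch.LabelExtractorsValue.AdditionalProperty(
#               key='instance', value='EXTRACT(resource.labels.instance_id)'),
#       ]))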


class LogMetadata(_messages.Message):
  r"""Information about the log for log-based alerts.

  Messages:
    ExtractedLabelsValue: The labels extracted from the log.

  Fields:
    extractedLabels: The labels extracted from the log.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ExtractedLabelsValue(_messages.Message):
    r"""The labels extracted from the log.

    Messages:
      AdditionalProperty: An additional property for an ExtractedLabelsValue
        object.

    Fields:
      additionalProperties: Additional properties of type ExtractedLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ExtractedLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  extractedLabels = _messages.MessageField('ExtractedLabelsValue', 1)


class MeshIstio(_messages.Message):
  r"""Istio service scoped to an Istio mesh. Anthos clusters running ASM <
  1.6.8 will have their services ingested as this type.

  Fields:
    meshUid: Identifier for the mesh in which this Istio service is defined.
      Corresponds to the mesh_uid metric label in Istio metrics.
    serviceName: The name of the Istio service underlying this service.
      Corresponds to the destination_service_name metric label in Istio
      metrics.
    serviceNamespace: The namespace of the Istio service underlying this
      service. Corresponds to the destination_service_namespace metric label
      in Istio metrics.
  """

  meshUid = _messages.StringField(1)
  serviceName = _messages.StringField(2)
  serviceNamespace = _messages.StringField(3)


class Metric(_messages.Message):
  r"""A specific metric, identified by specifying values for all of the labels
  of a MetricDescriptor.

  Messages:
    LabelsValue: The set of label values that uniquely identify this metric.
      All labels listed in the MetricDescriptor must be assigned values.

  Fields:
    labels: The set of label values that uniquely identify this metric. All
      labels listed in the MetricDescriptor must be assigned values.
    type: An existing metric type, see google.api.MetricDescriptor. For
      example, custom.googleapis.com/invoice/paid/amount.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""The set of label values that uniquely identify this metric. All labels
    listed in the MetricDescriptor must be assigned values.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  labels = _messages.MessageField('LabelsValue', 1)
  type = _messages.StringField(2)
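
# Illustrative usage (not part of the generated API surface): a minimal sketch
# of a Metric message identifying a time series of the custom metric type used
# as an example in the docstring above. The label key/value are hypothetical.
#
#   metric = Metric(
#       type='custom.googleapis.com/invoice/paid/amount',
#       labels=Metric.LabelsValue(additionalProperties=[
#           Metric.LabelsValue.AdditionalProperty(key='currency', value='USD'),
#       ]))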


class MetricAbsence(_messages.Message):
  r"""A condition type that checks that monitored resources are reporting
  data. The configuration defines a metric and a set of monitored resources.
  The predicate is considered in violation when a time series for the
  specified metric of a monitored resource does not include any data in the
  specified duration.

  Fields:
    aggregations: Specifies the alignment of data points in individual time
      series as well as how to combine the retrieved time series together
      (such as when aggregating multiple streams on each resource to a single
      stream for each resource or when aggregating streams across all members
      of a group of resources). Multiple aggregations are applied in the order
      specified.This field is similar to the one in the ListTimeSeries request
      (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSer
      ies/list). It is advisable to use the ListTimeSeries method when
      debugging this field.
    duration: Required. The amount of time that a time series must fail to
      report new data to be considered failing. The minimum value of this
      field is 120 seconds. Larger values that are a multiple of a minute--for
      example, 240 or 300 seconds--are supported. If an invalid value is
      given, an error will be returned.
    filter: Required. A filter
      (https://cloud.google.com/monitoring/api/v3/filters) that identifies
      which time series should be checked for absence of data.The filter is
      similar to the one that is specified in the ListTimeSeries request (http
      s://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/l
      ist) (that call is useful to verify the time series that will be
      retrieved / processed). The filter must specify the metric type and the
      resource type. Optionally, it can specify resource labels and metric
      labels. This field must not exceed 2048 Unicode characters in length.
    trigger: The number/percent of time series for which the comparison must
      hold in order for the condition to trigger. If unspecified, then the
      condition will trigger if the comparison is true for any of the time
      series that have been identified by filter and aggregations.
  """

  aggregations = _messages.MessageField('Aggregation', 1, repeated=True)
  duration = _messages.StringField(2)
  filter = _messages.StringField(3)
  trigger = _messages.MessageField('Trigger', 4)
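
# Illustrative usage (not part of the generated API surface): a sketch of a
# MetricAbsence condition that fires when a hypothetical custom metric stops
# reporting for five minutes. Duration values are encoded as strings, for
# example '300s'.
#
#   absence = MetricAbsence(
#       filter=('metric.type="custom.googleapis.com/invoice/paid/amount" '
#               'AND resource.type="gce_instance"'),
#       duration='300s')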


class MetricDescriptor(_messages.Message):
  r"""Defines a metric type and its schema. Once a metric descriptor is
  created, deleting or altering it stops data collection and makes the metric
  type's existing data unusable.

  Enums:
    LaunchStageValueValuesEnum: Optional. The launch stage of the metric
      definition.
    MetricKindValueValuesEnum: Whether the metric records instantaneous
      values, changes to a value, etc. Some combinations of metric_kind and
      value_type might not be supported.
    ValueTypeValueValuesEnum: Whether the measurement is an integer, a
      floating-point number, etc. Some combinations of metric_kind and
      value_type might not be supported.

  Fields:
    description: A detailed description of the metric, which can be used in
      documentation.
    displayName: A concise name for the metric, which can be displayed in user
      interfaces. Use sentence case without an ending period, for example
      "Request count". This field is optional but it is recommended to be set
      for any metrics associated with user-visible concepts, such as Quota.
    labels: The set of labels that can be used to describe a specific instance
      of this metric type. For example, the
      appengine.googleapis.com/http/server/response_latencies metric type has
      a label for the HTTP response code, response_code, so you can look at
      latencies for successful responses or just for responses that failed.
    launchStage: Optional. The launch stage of the metric definition.
    metadata: Optional. Metadata which can be used to guide usage of the
      metric.
    metricKind: Whether the metric records instantaneous values, changes to a
      value, etc. Some combinations of metric_kind and value_type might not be
      supported.
    monitoredResourceTypes: Read-only. If present, a time series that is
      associated with this metric type (a time series is identified partially
      by a metric type and a MonitoredResourceDescriptor) can only be
      associated with one of the monitored resource types listed here.
    name: The resource name of the metric descriptor.
    type: The metric type, including its DNS name prefix. The type is not URL-
      encoded. All user-defined metric types have the DNS name
      custom.googleapis.com or external.googleapis.com. Metric types should
      use a natural hierarchical grouping. For example:
      "custom.googleapis.com/invoice/paid/amount"
      "external.googleapis.com/prometheus/up"
      "appengine.googleapis.com/http/server/response_latencies"
    unit: The units in which the metric value is reported. It is only
      applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The unit
      defines the representation of the stored metric values.Different systems
      might scale the values to be more easily displayed (so a value of
      0.02kBy might be displayed as 20By, and a value of 3523kBy might be
      displayed as 3.5MBy). However, if the unit is kBy, then the value of the
      metric is always in thousands of bytes, no matter how it might be
      displayed.If you want a custom metric to record the exact number of CPU-
      seconds used by a job, you can create an INT64 CUMULATIVE metric whose
      unit is s{CPU} (or equivalently 1s{CPU} or just s). If the job uses
      12,005 CPU-seconds, then the value is written as 12005.Alternatively, if
      you want a custom metric to record data in a more granular way, you can
      create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write
      the value 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723
      (which is 12005/1024).The supported units are a subset of The Unified
      Code for Units of Measure (https://unitsofmeasure.org/ucum.html)
      standard:Basic units (UNIT) bit bit By byte s second min minute h hour d
      day 1 dimensionlessPrefixes (PREFIX) k kilo (10^3) M mega (10^6) G giga
      (10^9) T tera (10^12) P peta (10^15) E exa (10^18) Z zetta (10^21) Y
      yotta (10^24) m milli (10^-3) u micro (10^-6) n nano (10^-9) p pico
      (10^-12) f femto (10^-15) a atto (10^-18) z zepto (10^-21) y yocto
      (10^-24) Ki kibi (2^10) Mi mebi (2^20) Gi gibi (2^30) Ti tebi (2^40) Pi
      pebi (2^50)GrammarThe grammar also includes these connectors: / division
      or ratio (as an infix operator). For examples, kBy/{email} or MiBy/10ms
      (although you should almost never have /s in a metric unit; rates should
      always be computed at query time from the underlying cumulative or delta
      value). . multiplication or composition (as an infix operator). For
      examples, GBy.d or k{watt}.h.The grammar for a unit is as follows:
      Expression = Component { "." Component } { "/" Component } ; Component =
      ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation
      = "{" NAME "}" ; Notes: Annotation is just a comment if it follows a
      UNIT. If the annotation is used alone, then the unit is equivalent to 1.
      For examples, {request}/s == 1/s, By{transmitted}/s == By/s. NAME is a
      sequence of non-blank printable ASCII characters not containing { or }.
      1 represents a unitary dimensionless unit
      (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in
      1/s. It is typically used when none of the basic units are appropriate.
      For example, "new users per day" can be represented as 1/d or {new-
      users}/d (and a metric value 5 would mean "5 new users). Alternatively,
      "thousands of page views per day" would be represented as 1000/d or k1/d
      or k{page_views}/d (and a metric value of 5.3 would mean "5300 page
      views per day"). % represents dimensionless value of 1/100, and
      annotates values giving a percentage (so the metric values are typically
      in the range of 0..100, and a metric value 3 means "3 percent"). 10^2.%
      indicates a metric contains a ratio, typically in the range 0..1, that
      will be multiplied by 100 and displayed as a percentage (so a metric
      value 0.03 means "3 percent").
    valueType: Whether the measurement is an integer, a floating-point number,
      etc. Some combinations of metric_kind and value_type might not be
      supported.
  """

  class LaunchStageValueValuesEnum(_messages.Enum):
    r"""Optional. The launch stage of the metric definition.

    Values:
      LAUNCH_STAGE_UNSPECIFIED: Do not use this default value.
      UNIMPLEMENTED: The feature is not yet implemented. Users cannot use it.
      PRELAUNCH: Prelaunch features are hidden from users and are only visible
        internally.
      EARLY_ACCESS: Early Access features are limited to a closed group of
        testers. To use these features, you must sign up in advance and sign a
        Trusted Tester agreement (which includes confidentiality provisions).
        These features may be unstable, changed in backward-incompatible ways,
        and are not guaranteed to be released.
      ALPHA: Alpha is a limited availability test for releases before they are
        cleared for widespread use. By Alpha, all significant design issues
        are resolved and we are in the process of verifying functionality.
        Alpha customers need to apply for access, agree to applicable terms,
        and have their projects allowlisted. Alpha releases don't have to be
        feature complete, no SLAs are provided, and there are no technical
        support obligations, but they will be far enough along that customers
        can actually use them in test environments or for limited-use tests --
        just like they would in normal production cases.
      BETA: Beta is the point at which we are ready to open a release for any
        customer to use. There are no SLA or technical support obligations in
        a Beta release. Products will be complete from a feature perspective,
        but may have some open outstanding issues. Beta releases are suitable
        for limited production use cases.
      GA: GA features are open to all developers and are considered stable and
        fully qualified for production use.
      DEPRECATED: Deprecated features are scheduled to be shut down and
        removed. For more information, see the "Deprecation Policy" section of
        our Terms of Service (https://cloud.google.com/terms/) and the Google
        Cloud Platform Subject to the Deprecation Policy
        (https://cloud.google.com/terms/deprecation) documentation.
    """
    LAUNCH_STAGE_UNSPECIFIED = 0
    UNIMPLEMENTED = 1
    PRELAUNCH = 2
    EARLY_ACCESS = 3
    ALPHA = 4
    BETA = 5
    GA = 6
    DEPRECATED = 7

  class MetricKindValueValuesEnum(_messages.Enum):
    r"""Whether the metric records instantaneous values, changes to a value,
    etc. Some combinations of metric_kind and value_type might not be
    supported.

    Values:
      METRIC_KIND_UNSPECIFIED: Do not use this default value.
      GAUGE: An instantaneous measurement of a value.
      DELTA: The change in a value during a time interval.
      CUMULATIVE: A value accumulated over a time interval. Cumulative
        measurements in a time series should have the same start time and
        increasing end times, until an event resets the cumulative value to
        zero and sets a new start time for the following points.
    """
    METRIC_KIND_UNSPECIFIED = 0
    GAUGE = 1
    DELTA = 2
    CUMULATIVE = 3

  class ValueTypeValueValuesEnum(_messages.Enum):
    r"""Whether the measurement is an integer, a floating-point number, etc.
    Some combinations of metric_kind and value_type might not be supported.

    Values:
      VALUE_TYPE_UNSPECIFIED: Do not use this default value.
      BOOL: The value is a boolean. This value type can be used only if the
        metric kind is GAUGE.
      INT64: The value is a signed 64-bit integer.
      DOUBLE: The value is a double precision floating point number.
      STRING: The value is a text string. This value type can be used only if
        the metric kind is GAUGE.
      DISTRIBUTION: The value is a Distribution.
      MONEY: The value is money.
    """
    VALUE_TYPE_UNSPECIFIED = 0
    BOOL = 1
    INT64 = 2
    DOUBLE = 3
    STRING = 4
    DISTRIBUTION = 5
    MONEY = 6

  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  launchStage = _messages.EnumField('LaunchStageValueValuesEnum', 4)
  metadata = _messages.MessageField('MetricDescriptorMetadata', 5)
  metricKind = _messages.EnumField('MetricKindValueValuesEnum', 6)
  monitoredResourceTypes = _messages.StringField(7, repeated=True)
  name = _messages.StringField(8)
  type = _messages.StringField(9)
  unit = _messages.StringField(10)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 11)
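
# Illustrative usage (not part of the generated API surface): a sketch of a
# custom MetricDescriptor matching the CPU-seconds example in the unit
# documentation above. The metric type and display name are hypothetical.
#
#   descriptor = MetricDescriptor(
#       type='custom.googleapis.com/my_job/cpu_seconds',
#       displayName='CPU seconds used',
#       metricKind=MetricDescriptor.MetricKindValueValuesEnum.CUMULATIVE,
#       valueType=MetricDescriptor.ValueTypeValueValuesEnum.INT64,
#       unit='s{CPU}')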


class MetricDescriptorMetadata(_messages.Message):
  r"""Additional annotations that can be used to guide the usage of a metric.

  Enums:
    LaunchStageValueValuesEnum: Deprecated. Must use the
      MetricDescriptor.launch_stage instead.
    TimeSeriesResourceHierarchyLevelValueListEntryValuesEnum:

  Fields:
    ingestDelay: The delay of data points caused by ingestion. Data points
      older than this age are guaranteed to be ingested and available to be
      read, excluding data loss due to errors.
    launchStage: Deprecated. Must use the MetricDescriptor.launch_stage
      instead.
    samplePeriod: The sampling period of metric data points. For metrics which
      are written periodically, consecutive data points are stored at this
      time interval, excluding data loss due to errors. Metrics with a higher
      granularity have a smaller sampling period.
    timeSeriesResourceHierarchyLevel: The scope of the time series data of
      the metric.
  """

  class LaunchStageValueValuesEnum(_messages.Enum):
    r"""Deprecated. Must use the MetricDescriptor.launch_stage instead.

    Values:
      LAUNCH_STAGE_UNSPECIFIED: Do not use this default value.
      UNIMPLEMENTED: The feature is not yet implemented. Users cannot use it.
      PRELAUNCH: Prelaunch features are hidden from users and are only visible
        internally.
      EARLY_ACCESS: Early Access features are limited to a closed group of
        testers. To use these features, you must sign up in advance and sign a
        Trusted Tester agreement (which includes confidentiality provisions).
        These features may be unstable, changed in backward-incompatible ways,
        and are not guaranteed to be released.
      ALPHA: Alpha is a limited availability test for releases before they are
        cleared for widespread use. By Alpha, all significant design issues
        are resolved and we are in the process of verifying functionality.
        Alpha customers need to apply for access, agree to applicable terms,
        and have their projects allowlisted. Alpha releases don't have to be
        feature complete, no SLAs are provided, and there are no technical
        support obligations, but they will be far enough along that customers
        can actually use them in test environments or for limited-use tests --
        just like they would in normal production cases.
      BETA: Beta is the point at which we are ready to open a release for any
        customer to use. There are no SLA or technical support obligations in
        a Beta release. Products will be complete from a feature perspective,
        but may have some open outstanding issues. Beta releases are suitable
        for limited production use cases.
      GA: GA features are open to all developers and are considered stable and
        fully qualified for production use.
      DEPRECATED: Deprecated features are scheduled to be shut down and
        removed. For more information, see the "Deprecation Policy" section of
        our Terms of Service (https://cloud.google.com/terms/) and the Google
        Cloud Platform Subject to the Deprecation Policy
        (https://cloud.google.com/terms/deprecation) documentation.
    """
    LAUNCH_STAGE_UNSPECIFIED = 0
    UNIMPLEMENTED = 1
    PRELAUNCH = 2
    EARLY_ACCESS = 3
    ALPHA = 4
    BETA = 5
    GA = 6
    DEPRECATED = 7

  class TimeSeriesResourceHierarchyLevelValueListEntryValuesEnum(_messages.Enum):
    r"""TimeSeriesResourceHierarchyLevelValueListEntryValuesEnum enum type.

    Values:
      TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED: Do not use this
        default value.
      PROJECT: Scopes a metric to a project.
      ORGANIZATION: Scopes a metric to an organization.
      FOLDER: Scopes a metric to a folder.
    """
    TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED = 0
    PROJECT = 1
    ORGANIZATION = 2
    FOLDER = 3

  ingestDelay = _messages.StringField(1)
  launchStage = _messages.EnumField('LaunchStageValueValuesEnum', 2)
  samplePeriod = _messages.StringField(3)
  timeSeriesResourceHierarchyLevel = _messages.EnumField('TimeSeriesResourceHierarchyLevelValueListEntryValuesEnum', 4, repeated=True)


class MetricRange(_messages.Message):
  r"""A MetricRange is used when each window is good when the value x of a
  single TimeSeries satisfies range.min <= x <= range.max. The provided
  TimeSeries must have ValueType = INT64 or ValueType = DOUBLE and MetricKind
  = GAUGE.

  Fields:
    range: Range of values considered "good." For a one-sided range, set one
      bound to an infinite value.
    timeSeries: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying the
      TimeSeries to use for evaluating window quality.
  """

  range = _messages.MessageField('GoogleMonitoringV3Range', 1)
  timeSeries = _messages.StringField(2)
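
# Illustrative usage (not part of the generated API surface): a sketch of a
# MetricRange in which windows whose value lies between 0 and 500 are "good".
# This assumes GoogleMonitoringV3Range (defined elsewhere in this module)
# exposes numeric min and max fields; the filter string is hypothetical.
#
#   metric_range = MetricRange(
#       timeSeries=('metric.type='
#                   '"loadbalancing.googleapis.com/https/total_latencies"'),
#       range=GoogleMonitoringV3Range(min=0.0, max=500.0))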


class MetricThreshold(_messages.Message):
  r"""A condition type that compares a collection of time series against a
  threshold.

  Enums:
    ComparisonValueValuesEnum: The comparison to apply between the time series
      (indicated by filter and aggregation) and the threshold (indicated by
      threshold_value). The comparison is applied on each time series, with
      the time series on the left-hand side and the threshold on the right-
      hand side.Only COMPARISON_LT and COMPARISON_GT are supported currently.
    EvaluationMissingDataValueValuesEnum: A condition control that determines
      how metric-threshold conditions are evaluated when data stops arriving.
      To use this control, the value of the duration field must be greater
      than or equal to 60 seconds.

  Fields:
    aggregations: Specifies the alignment of data points in individual time
      series as well as how to combine the retrieved time series together
      (such as when aggregating multiple streams on each resource to a single
      stream for each resource or when aggregating streams across all members
      of a group of resources). Multiple aggregations are applied in the order
      specified.This field is similar to the one in the ListTimeSeries request
      (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSer
      ies/list). It is advisable to use the ListTimeSeries method when
      debugging this field.
    comparison: The comparison to apply between the time series (indicated by
      filter and aggregation) and the threshold (indicated by
      threshold_value). The comparison is applied on each time series, with
      the time series on the left-hand side and the threshold on the right-
      hand side.Only COMPARISON_LT and COMPARISON_GT are supported currently.
    denominatorAggregations: Specifies the alignment of data points in
      individual time series selected by denominatorFilter as well as how to
      combine the retrieved time series together (such as when aggregating
      multiple streams on each resource to a single stream for each resource
      or when aggregating streams across all members of a group of
      resources).When computing ratios, the aggregations and
      denominator_aggregations fields must use the same alignment period and
      produce time series that have the same periodicity and labels.
    denominatorFilter: A filter
      (https://cloud.google.com/monitoring/api/v3/filters) that identifies a
      time series that should be used as the denominator of a ratio that will
      be compared with the threshold. If a denominator_filter is specified,
      the time series specified by the filter field will be used as the
      numerator.The filter must specify the metric type and optionally may
      contain restrictions on resource type, resource labels, and metric
      labels. This field may not exceed 2048 Unicode characters in length.
    duration: Required. The amount of time that a time series must violate the
      threshold to be considered failing. Currently, only values that are a
      multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported.
      If an invalid value is given, an error will be returned. When choosing a
      duration, it is useful to keep in mind the frequency of the underlying
      time series data (which may also be affected by any alignments specified
      in the aggregations field); a good duration is long enough so that a
      single outlier does not generate spurious alerts, but short enough that
      unhealthy states are detected and alerted on quickly.
    evaluationMissingData: A condition control that determines how metric-
      threshold conditions are evaluated when data stops arriving. To use this
      control, the value of the duration field must be greater than or equal
      to 60 seconds.
    filter: Required. A filter
      (https://cloud.google.com/monitoring/api/v3/filters) that identifies
      which time series should be compared with the threshold.The filter is
      similar to the one that is specified in the ListTimeSeries request (http
      s://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/l
      ist) (that call is useful to verify the time series that will be
      retrieved / processed). The filter must specify the metric type and the
      resource type. Optionally, it can specify resource labels and metric
      labels. This field must not exceed 2048 Unicode characters in length.
    forecastOptions: When this field is present, the MetricThreshold condition
      forecasts whether the time series is predicted to violate the threshold
      within the forecast_horizon. When this field is not set, the
      MetricThreshold tests the current value of the time series against the
      threshold.
    thresholdValue: A value against which to compare the time series.
    trigger: The number/percent of time series for which the comparison must
      hold in order for the condition to trigger. If unspecified, then the
      condition will trigger if the comparison is true for any of the time
      series that have been identified by filter and aggregations, or by the
      ratio, if denominator_filter and denominator_aggregations are specified.
  """

  class ComparisonValueValuesEnum(_messages.Enum):
    r"""The comparison to apply between the time series (indicated by filter
    and aggregation) and the threshold (indicated by threshold_value). The
    comparison is applied on each time series, with the time series on the
    left-hand side and the threshold on the right-hand side.Only COMPARISON_LT
    and COMPARISON_GT are supported currently.

    Values:
      COMPARISON_UNSPECIFIED: No ordering relationship is specified.
      COMPARISON_GT: True if the left argument is greater than the right
        argument.
      COMPARISON_GE: True if the left argument is greater than or equal to the
        right argument.
      COMPARISON_LT: True if the left argument is less than the right
        argument.
      COMPARISON_LE: True if the left argument is less than or equal to the
        right argument.
      COMPARISON_EQ: True if the left argument is equal to the right argument.
      COMPARISON_NE: True if the left argument is not equal to the right
        argument.
    """
    COMPARISON_UNSPECIFIED = 0
    COMPARISON_GT = 1
    COMPARISON_GE = 2
    COMPARISON_LT = 3
    COMPARISON_LE = 4
    COMPARISON_EQ = 5
    COMPARISON_NE = 6

  class EvaluationMissingDataValueValuesEnum(_messages.Enum):
    r"""A condition control that determines how metric-threshold conditions
    are evaluated when data stops arriving. To use this control, the value of
    the duration field must be greater than or equal to 60 seconds.

    Values:
      EVALUATION_MISSING_DATA_UNSPECIFIED: An unspecified evaluation missing
        data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP.
      EVALUATION_MISSING_DATA_INACTIVE: If there is no data to evaluate the
        condition, then evaluate the condition as false.
      EVALUATION_MISSING_DATA_ACTIVE: If there is no data to evaluate the
        condition, then evaluate the condition as true.
      EVALUATION_MISSING_DATA_NO_OP: Do not evaluate the condition to any
        value if there is no data.
    """
    EVALUATION_MISSING_DATA_UNSPECIFIED = 0
    EVALUATION_MISSING_DATA_INACTIVE = 1
    EVALUATION_MISSING_DATA_ACTIVE = 2
    EVALUATION_MISSING_DATA_NO_OP = 3

  aggregations = _messages.MessageField('Aggregation', 1, repeated=True)
  comparison = _messages.EnumField('ComparisonValueValuesEnum', 2)
  denominatorAggregations = _messages.MessageField('Aggregation', 3, repeated=True)
  denominatorFilter = _messages.StringField(4)
  duration = _messages.StringField(5)
  evaluationMissingData = _messages.EnumField('EvaluationMissingDataValueValuesEnum', 6)
  filter = _messages.StringField(7)
  forecastOptions = _messages.MessageField('ForecastOptions', 8)
  thresholdValue = _messages.FloatField(9)
  trigger = _messages.MessageField('Trigger', 10)
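
# Illustrative usage (not part of the generated API surface): a sketch of a
# MetricThreshold condition that fires when a hypothetical CPU-utilization
# series stays above 0.9 for five minutes.
#
#   threshold = MetricThreshold(
#       filter=('metric.type="compute.googleapis.com/instance/cpu/utilization"'
#               ' AND resource.type="gce_instance"'),
#       comparison=MetricThreshold.ComparisonValueValuesEnum.COMPARISON_GT,
#       thresholdValue=0.9,
#       duration='300s')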


class Minutes(_messages.Message):
  r"""Used to schedule the query to run every so many minutes.

  Fields:
    periodicity: Required. Number of minutes between runs. The interval must
      be greater than or equal to 5 minutes and less than or equal to 1440
      minutes.
  """

  periodicity = _messages.IntegerField(1, variant=_messages.Variant.INT32)


class MonitoredResource(_messages.Message):
  r"""An object representing a resource that can be used for monitoring,
  logging, billing, or other purposes. Examples include virtual machine
  instances, databases, and storage devices such as disks. The type field
  identifies a MonitoredResourceDescriptor object that describes the
  resource's schema. Information in the labels field identifies the actual
  resource and its attributes according to the schema. For example, a
  particular Compute Engine VM instance could be represented by the following
  object, because the MonitoredResourceDescriptor for "gce_instance" has
  labels "project_id", "instance_id" and "zone": { "type": "gce_instance",
  "labels": { "project_id": "my-project", "instance_id": "12345678901234",
  "zone": "us-central1-a" }}

  Messages:
    LabelsValue: Required. Values for all of the labels listed in the
      associated monitored resource descriptor. For example, Compute Engine VM
      instances use the labels "project_id", "instance_id", and "zone".

  Fields:
    labels: Required. Values for all of the labels listed in the associated
      monitored resource descriptor. For example, Compute Engine VM instances
      use the labels "project_id", "instance_id", and "zone".
    type: Required. The monitored resource type. This field must match the
      type field of a MonitoredResourceDescriptor object. For example, the
      type of a Compute Engine VM instance is gce_instance. For a list of
      types, see Monitoring resource types
      (https://cloud.google.com/monitoring/api/resources) and Logging resource
      types (https://cloud.google.com/logging/docs/api/v2/resource-list).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Required. Values for all of the labels listed in the associated
    monitored resource descriptor. For example, Compute Engine VM instances
    use the labels "project_id", "instance_id", and "zone".

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  labels = _messages.MessageField('LabelsValue', 1)
  type = _messages.StringField(2)
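
# Illustrative usage (not part of the generated API surface): the gce_instance
# example from the docstring above, expressed with this class.
#
#   resource = MonitoredResource(
#       type='gce_instance',
#       labels=MonitoredResource.LabelsValue(additionalProperties=[
#           MonitoredResource.LabelsValue.AdditionalProperty(
#               key='project_id', value='my-project'),
#           MonitoredResource.LabelsValue.AdditionalProperty(
#               key='instance_id', value='12345678901234'),
#           MonitoredResource.LabelsValue.AdditionalProperty(
#               key='zone', value='us-central1-a'),
#       ]))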


class MonitoredResourceDescriptor(_messages.Message):
  r"""An object that describes the schema of a MonitoredResource object using
  a type name and a set of labels. For example, the monitored resource
  descriptor for Google Compute Engine VM instances has a type of
  "gce_instance" and specifies the use of the labels "instance_id" and "zone"
  to identify particular VM instances.Different APIs can support different
  monitored resource types. APIs generally provide a list method that returns
  the monitored resource descriptors used by the API.

  Enums:
    LaunchStageValueValuesEnum: Optional. The launch stage of the monitored
      resource definition.

  Fields:
    description: Optional. A detailed description of the monitored resource
      type that might be used in documentation.
    displayName: Optional. A concise name for the monitored resource type that
      might be displayed in user interfaces. It should be a Title Cased Noun
      Phrase, without any article or other determiners. For example, "Google
      Cloud SQL Database".
    labels: Required. A set of labels used to describe instances of this
      monitored resource type. For example, an individual Google Cloud SQL
      database is identified by values for the labels "database_id" and
      "zone".
    launchStage: Optional. The launch stage of the monitored resource
      definition.
    name: Optional. The resource name of the monitored resource descriptor:
      "projects/{project_id}/monitoredResourceDescriptors/{type}" where {type}
      is the value of the type field in this object and {project_id} is a
      project ID that provides API-specific context for accessing the type.
      APIs that do not use project information can use the resource name
      format "monitoredResourceDescriptors/{type}".
    type: Required. The monitored resource type. For example, the type
      "cloudsql_database" represents databases in Google Cloud SQL. For a list
      of types, see Monitored resource types
      (https://cloud.google.com/monitoring/api/resources) and Logging resource
      types (https://cloud.google.com/logging/docs/api/v2/resource-list).
  """

  class LaunchStageValueValuesEnum(_messages.Enum):
    r"""Optional. The launch stage of the monitored resource definition.

    Values:
      LAUNCH_STAGE_UNSPECIFIED: Do not use this default value.
      UNIMPLEMENTED: The feature is not yet implemented. Users cannot use it.
      PRELAUNCH: Prelaunch features are hidden from users and are only visible
        internally.
      EARLY_ACCESS: Early Access features are limited to a closed group of
        testers. To use these features, you must sign up in advance and sign a
        Trusted Tester agreement (which includes confidentiality provisions).
        These features may be unstable, changed in backward-incompatible ways,
        and are not guaranteed to be released.
      ALPHA: Alpha is a limited availability test for releases before they are
        cleared for widespread use. By Alpha, all significant design issues
        are resolved and we are in the process of verifying functionality.
        Alpha customers need to apply for access, agree to applicable terms,
        and have their projects allowlisted. Alpha releases don't have to be
        feature complete, no SLAs are provided, and there are no technical
        support obligations, but they will be far enough along that customers
        can actually use them in test environments or for limited-use tests --
        just like they would in normal production cases.
      BETA: Beta is the point at which we are ready to open a release for any
        customer to use. There are no SLA or technical support obligations in
        a Beta release. Products will be complete from a feature perspective,
        but may have some open outstanding issues. Beta releases are suitable
        for limited production use cases.
      GA: GA features are open to all developers and are considered stable and
        fully qualified for production use.
      DEPRECATED: Deprecated features are scheduled to be shut down and
        removed. For more information, see the "Deprecation Policy" section of
        our Terms of Service (https://cloud.google.com/terms/) and the Google
        Cloud Platform Subject to the Deprecation Policy
        (https://cloud.google.com/terms/deprecation) documentation.
    """
    LAUNCH_STAGE_UNSPECIFIED = 0
    UNIMPLEMENTED = 1
    PRELAUNCH = 2
    EARLY_ACCESS = 3
    ALPHA = 4
    BETA = 5
    GA = 6
    DEPRECATED = 7

  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  launchStage = _messages.EnumField('LaunchStageValueValuesEnum', 4)
  name = _messages.StringField(5)
  type = _messages.StringField(6)


class MonitoredResourceMetadata(_messages.Message):
  r"""Auxiliary metadata for a MonitoredResource object. MonitoredResource
  objects contain the minimum set of information to uniquely identify a
  monitored resource instance. There is some other useful auxiliary metadata.
  Monitoring and Logging use an ingestion pipeline to extract metadata for
  cloud resources of all types, and store the metadata in this message.

  Messages:
    SystemLabelsValue: Output only. Values for predefined system metadata
      labels. System labels are a kind of metadata extracted by Google,
      including "machine_image", "vpc", "subnet_id", "security_group", "name",
      etc. System label values can be only strings, Boolean values, or a list
      of strings. For example: { "name": "my-test-instance", "security_group":
      ["a", "b", "c"], "spot_instance": false }
    UserLabelsValue: Output only. A map of user-defined metadata labels.

  Fields:
    systemLabels: Output only. Values for predefined system metadata labels.
      System labels are a kind of metadata extracted by Google, including
      "machine_image", "vpc", "subnet_id", "security_group", "name", etc.
      System label values can be only strings, Boolean values, or a list of
      strings. For example: { "name": "my-test-instance", "security_group":
      ["a", "b", "c"], "spot_instance": false }
    userLabels: Output only. A map of user-defined metadata labels.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SystemLabelsValue(_messages.Message):
    r"""Output only. Values for predefined system metadata labels. System
    labels are a kind of metadata extracted by Google, including
    "machine_image", "vpc", "subnet_id", "security_group", "name", etc. System
    label values can be only strings, Boolean values, or a list of strings.
    For example: { "name": "my-test-instance", "security_group": ["a", "b",
    "c"], "spot_instance": false }

    Messages:
      AdditionalProperty: An additional property for a SystemLabelsValue
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SystemLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""Output only. A map of user-defined metadata labels.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  systemLabels = _messages.MessageField('SystemLabelsValue', 1)
  userLabels = _messages.MessageField('UserLabelsValue', 2)


class MonitoringFoldersTimeSeriesListRequest(_messages.Message):
  r"""A MonitoringFoldersTimeSeriesListRequest object.

  Enums:
    AggregationCrossSeriesReducerValueValuesEnum: The reduction operation to
      be used to combine time series into a single time series, where the
      value of each data point in the resulting series is a function of all
      the already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    AggregationPerSeriesAlignerValueValuesEnum: An Aligner describes how to
      bring the data points in a single time series into temporal alignment.
      Except for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    SecondaryAggregationCrossSeriesReducerValueValuesEnum: The reduction
      operation to be used to combine time series into a single time series,
      where the value of each data point in the resulting series is a function
      of all the already aligned values in the input time series.Not all
      reducer operations can be applied to all time series. The valid choices
      depend on the metric_kind and the value_type of the original time
      series. Reduction can yield a time series with a different metric_kind
      or value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    SecondaryAggregationPerSeriesAlignerValueValuesEnum: An Aligner describes
      how to bring the data points in a single time series into temporal
      alignment. Except for ALIGN_NONE, all alignments cause all the data
      points in an alignment_period to be mathematically grouped together,
      resulting in a single data point for each alignment_period with end
      timestamp at the end of the period.Not all alignment operations may be
      applied to all time series. The valid choices depend on the metric_kind
      and value_type of the original time series. Alignment can change the
      metric_kind or the value_type of the time series.Time series data must
      be aligned in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified and not equal to ALIGN_NONE and alignment_period must be
      specified; otherwise, an error is returned.
    ViewValueValuesEnum: Required. Specifies which information is returned
      about the time series.

  Fields:
    aggregation_alignmentPeriod: The alignment_period specifies a time
      interval, in seconds, that is used to divide the data in all the time
      series into consistent blocks of time. This will be done before the per-
      series aligner can be applied to the data.The value must be at least 60
      seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    aggregation_crossSeriesReducer: The reduction operation to be used to
      combine time series into a single time series, where the value of each
      data point in the resulting series is a function of all the already
      aligned values in the input time series.Not all reducer operations can
      be applied to all time series. The valid choices depend on the
      metric_kind and the value_type of the original time series. Reduction
      can yield a time series with a different metric_kind or value_type than
      the input time series.Time series data must first be aligned (see
      per_series_aligner) in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified, and must not be ALIGN_NONE. An alignment_period must also be
      specified; otherwise, an error is returned.
    aggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    aggregation_perSeriesAligner: An Aligner describes how to bring the data
      points in a single time series into temporal alignment. Except for
      ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    filter: Required. A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) that specifies
      which time series should be returned. The filter must specify a single
      metric type, and can additionally specify metric labels and other
      information. For example: metric.type =
      "compute.googleapis.com/instance/cpu/usage_time" AND
      metric.labels.instance_name = "my-instance-name"
    interval_endTime: Required. The end of the time interval.
    interval_startTime: Optional. The beginning of the time interval. The
      default value for the start time is the end time. The start time must
      not be later than the end time.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name), organization
      or folder on which to execute the request. The format is:
      projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID]
      folders/[FOLDER_ID]
    orderBy: Unsupported: must be left blank. The points in each time series
      are currently returned in reverse time order (most recent to oldest).
    pageSize: A positive number that is the maximum number of results to
      return. If page_size is empty or more than 100,000 results, the
      effective page_size is 100,000 results. If view is set to FULL, this is
      the maximum number of Points returned. If view is set to HEADERS, this
      is the maximum number of TimeSeries returned.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    secondaryAggregation_alignmentPeriod: The alignment_period specifies a
      time interval, in seconds, that is used to divide the data in all the
      time series into consistent blocks of time. This will be done before the
      per-series aligner can be applied to the data.The value must be at least
      60 seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    secondaryAggregation_crossSeriesReducer: The reduction operation to be
      used to combine time series into a single time series, where the value
      of each data point in the resulting series is a function of all the
      already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    secondaryAggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    secondaryAggregation_perSeriesAligner: An Aligner describes how to bring
      the data points in a single time series into temporal alignment. Except
      for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    view: Required. Specifies which information is returned about the time
      series.
  """

  class AggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class AggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class SecondaryAggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class SecondaryAggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class ViewValueValuesEnum(_messages.Enum):
    r"""Required. Specifies which information is returned about the time
    series.

    Values:
      FULL: Returns the identity of the metric(s), the time series, and the
        time series data.
      HEADERS: Returns the identity of the metric and the time series
        resource, but not the time series data.
    """
    FULL = 0
    HEADERS = 1

  aggregation_alignmentPeriod = _messages.StringField(1)
  aggregation_crossSeriesReducer = _messages.EnumField('AggregationCrossSeriesReducerValueValuesEnum', 2)
  aggregation_groupByFields = _messages.StringField(3, repeated=True)
  aggregation_perSeriesAligner = _messages.EnumField('AggregationPerSeriesAlignerValueValuesEnum', 4)
  filter = _messages.StringField(5)
  interval_endTime = _messages.StringField(6)
  interval_startTime = _messages.StringField(7)
  name = _messages.StringField(8, required=True)
  orderBy = _messages.StringField(9)
  pageSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(11)
  secondaryAggregation_alignmentPeriod = _messages.StringField(12)
  secondaryAggregation_crossSeriesReducer = _messages.EnumField('SecondaryAggregationCrossSeriesReducerValueValuesEnum', 13)
  secondaryAggregation_groupByFields = _messages.StringField(14, repeated=True)
  secondaryAggregation_perSeriesAligner = _messages.EnumField('SecondaryAggregationPerSeriesAlignerValueValuesEnum', 15)
  view = _messages.EnumField('ViewValueValuesEnum', 16)

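
# --- Illustrative sketch; not part of the generated API surface. ---
# The ALIGN_RATE aligner documented in the aligner enums above and below
# computes "delta over time": rate = (y1 - y0) / (t1 - t0) for each alignment
# period. The helper below is a plain-Python restatement of that formula for
# two sample points; the service performs the real alignment, so this is only
# a reading aid and the helper name is hypothetical.
def _example_align_rate(t0, y0, t1, y1):
  """Returns the slope (y1 - y0) / (t1 - t0) that ALIGN_RATE reports."""
  return (y1 - y0) / float(t1 - t0)

# For instance, a cumulative counter growing from 120 to 420 over a
# 300-second alignment period aligns to a rate of 1.0 units per second:
# _example_align_rate(0, 120, 300, 420) == 1.0
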

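# --- Illustrative sketch; not part of the generated API surface. ---
# A minimal example of building the organizations time-series list request
# defined below, combining a 5-minute mean alignment with a cross-series sum,
# as described in its Fields docstring. The resource name, filter, timestamps,
# alignment period, and group-by label are placeholder values, and the helper
# name is hypothetical.
def _example_build_organizations_list_request():
  """Builds a sample MonitoringOrganizationsTimeSeriesListRequest."""
  request_cls = MonitoringOrganizationsTimeSeriesListRequest
  return request_cls(
      name='organizations/[ORGANIZATION_ID]',  # placeholder resource name
      filter=('metric.type = '
              '"compute.googleapis.com/instance/cpu/usage_time"'),
      interval_startTime='2024-01-01T00:00:00Z',  # placeholder timestamps
      interval_endTime='2024-01-01T01:00:00Z',
      # Align each series into 5-minute buckets using the mean ...
      aggregation_alignmentPeriod='300s',
      aggregation_perSeriesAligner=(
          request_cls.AggregationPerSeriesAlignerValueValuesEnum.ALIGN_MEAN),
      # ... then sum the aligned series, preserving the (placeholder) zone
      # label so one series per zone is returned.
      aggregation_crossSeriesReducer=(
          request_cls.AggregationCrossSeriesReducerValueValuesEnum.REDUCE_SUM),
      aggregation_groupByFields=['resource.label.zone'],
      view=request_cls.ViewValueValuesEnum.FULL,
  )

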
class MonitoringOrganizationsTimeSeriesListRequest(_messages.Message):
  r"""A MonitoringOrganizationsTimeSeriesListRequest object.

  Enums:
    AggregationCrossSeriesReducerValueValuesEnum: The reduction operation to
      be used to combine time series into a single time series, where the
      value of each data point in the resulting series is a function of all
      the already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    AggregationPerSeriesAlignerValueValuesEnum: An Aligner describes how to
      bring the data points in a single time series into temporal alignment.
      Except for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    SecondaryAggregationCrossSeriesReducerValueValuesEnum: The reduction
      operation to be used to combine time series into a single time series,
      where the value of each data point in the resulting series is a function
      of all the already aligned values in the input time series.Not all
      reducer operations can be applied to all time series. The valid choices
      depend on the metric_kind and the value_type of the original time
      series. Reduction can yield a time series with a different metric_kind
      or value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    SecondaryAggregationPerSeriesAlignerValueValuesEnum: An Aligner describes
      how to bring the data points in a single time series into temporal
      alignment. Except for ALIGN_NONE, all alignments cause all the data
      points in an alignment_period to be mathematically grouped together,
      resulting in a single data point for each alignment_period with end
      timestamp at the end of the period.Not all alignment operations may be
      applied to all time series. The valid choices depend on the metric_kind
      and value_type of the original time series. Alignment can change the
      metric_kind or the value_type of the time series.Time series data must
      be aligned in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified and not equal to ALIGN_NONE and alignment_period must be
      specified; otherwise, an error is returned.
    ViewValueValuesEnum: Required. Specifies which information is returned
      about the time series.

  Fields:
    aggregation_alignmentPeriod: The alignment_period specifies a time
      interval, in seconds, that is used to divide the data in all the time
      series into consistent blocks of time. This will be done before the per-
      series aligner can be applied to the data.The value must be at least 60
      seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    aggregation_crossSeriesReducer: The reduction operation to be used to
      combine time series into a single time series, where the value of each
      data point in the resulting series is a function of all the already
      aligned values in the input time series.Not all reducer operations can
      be applied to all time series. The valid choices depend on the
      metric_kind and the value_type of the original time series. Reduction
      can yield a time series with a different metric_kind or value_type than
      the input time series.Time series data must first be aligned (see
      per_series_aligner) in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified, and must not be ALIGN_NONE. An alignment_period must also be
      specified; otherwise, an error is returned.
    aggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    aggregation_perSeriesAligner: An Aligner describes how to bring the data
      points in a single time series into temporal alignment. Except for
      ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    filter: Required. A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) that specifies
      which time series should be returned. The filter must specify a single
      metric type, and can additionally specify metric labels and other
      information. For example: metric.type =
      "compute.googleapis.com/instance/cpu/usage_time" AND
      metric.labels.instance_name = "my-instance-name"
    interval_endTime: Required. The end of the time interval.
    interval_startTime: Optional. The beginning of the time interval. The
      default value for the start time is the end time. The start time must
      not be later than the end time.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name), organization
      or folder on which to execute the request. The format is:
      projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID]
      folders/[FOLDER_ID]
    orderBy: Unsupported: must be left blank. The points in each time series
      are currently returned in reverse time order (most recent to oldest).
    pageSize: A positive number that is the maximum number of results to
      return. If page_size is empty or more than 100,000 results, the
      effective page_size is 100,000 results. If view is set to FULL, this is
      the maximum number of Points returned. If view is set to HEADERS, this
      is the maximum number of TimeSeries returned.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    secondaryAggregation_alignmentPeriod: The alignment_period specifies a
      time interval, in seconds, that is used to divide the data in all the
      time series into consistent blocks of time. This will be done before the
      per-series aligner can be applied to the data.The value must be at least
      60 seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    secondaryAggregation_crossSeriesReducer: The reduction operation to be
      used to combine time series into a single time series, where the value
      of each data point in the resulting series is a function of all the
      already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    secondaryAggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    secondaryAggregation_perSeriesAligner: An Aligner describes how to bring
      the data points in a single time series into temporal alignment. Except
      for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    view: Required. Specifies which information is returned about the time
      series.
  """

  class AggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class AggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class SecondaryAggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class SecondaryAggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range 0.0, 1.0 and has value_type DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as missing
        data points and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class ViewValueValuesEnum(_messages.Enum):
    r"""Required. Specifies which information is returned about the time
    series.

    Values:
      FULL: Returns the identity of the metric(s), the time series, and the
        time series data.
      HEADERS: Returns the identity of the metric and the time series
        resource, but not the time series data.
    """
    FULL = 0
    HEADERS = 1

  aggregation_alignmentPeriod = _messages.StringField(1)
  aggregation_crossSeriesReducer = _messages.EnumField('AggregationCrossSeriesReducerValueValuesEnum', 2)
  aggregation_groupByFields = _messages.StringField(3, repeated=True)
  aggregation_perSeriesAligner = _messages.EnumField('AggregationPerSeriesAlignerValueValuesEnum', 4)
  filter = _messages.StringField(5)
  interval_endTime = _messages.StringField(6)
  interval_startTime = _messages.StringField(7)
  name = _messages.StringField(8, required=True)
  orderBy = _messages.StringField(9)
  pageSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(11)
  secondaryAggregation_alignmentPeriod = _messages.StringField(12)
  secondaryAggregation_crossSeriesReducer = _messages.EnumField('SecondaryAggregationCrossSeriesReducerValueValuesEnum', 13)
  secondaryAggregation_groupByFields = _messages.StringField(14, repeated=True)
  secondaryAggregation_perSeriesAligner = _messages.EnumField('SecondaryAggregationPerSeriesAlignerValueValuesEnum', 15)
  view = _messages.EnumField('ViewValueValuesEnum', 16)


class MonitoringProjectsAlertPoliciesCreateRequest(_messages.Message):
  r"""A MonitoringProjectsAlertPoliciesCreateRequest object.

  Fields:
    alertPolicy: An AlertPolicy resource to be passed as the request body.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) in which to
      create the alerting policy. The format is:
      projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent
      container in which the alerting policy will be written, not the name of
      the created policy. |name| must be a host project of a Metrics Scope;
      otherwise, an INVALID_ARGUMENT error is returned. The alerting policy that
      is returned will have a name that contains a normalized representation
      of this name as a prefix but adds a suffix of the form
      /alertPolicies/[ALERT_POLICY_ID], identifying the policy in the
      container.
  """

  alertPolicy = _messages.MessageField('AlertPolicy', 1)
  name = _messages.StringField(2, required=True)
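

# Usage sketch (hypothetical values, not part of the generated API): building
# a create-alert-policy request. 'projects/my-project' is a placeholder parent
# project; a real AlertPolicy body would carry the display name, conditions,
# and combiner before the request is issued.
def _example_alert_policies_create_request():
  """Returns a sample MonitoringProjectsAlertPoliciesCreateRequest."""
  return MonitoringProjectsAlertPoliciesCreateRequest(
      # 'name' identifies the parent container, not the policy being created.
      name='projects/my-project',
      alertPolicy=AlertPolicy())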


class MonitoringProjectsAlertPoliciesDeleteRequest(_messages.Message):
  r"""A MonitoringProjectsAlertPoliciesDeleteRequest object.

  Fields:
    name: Required. The alerting policy to delete. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] For more
      information, see AlertPolicy.
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsAlertPoliciesGetRequest(_messages.Message):
  r"""A MonitoringProjectsAlertPoliciesGetRequest object.

  Fields:
    name: Required. The alerting policy to retrieve. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsAlertPoliciesListRequest(_messages.Message):
  r"""A MonitoringProjectsAlertPoliciesListRequest object.

  Fields:
    filter: Optional. If provided, this field specifies the criteria that must
      be met by alert policies to be included in the response.For more
      details, see sorting and filtering
      (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) whose alert
      policies are to be listed. The format is:
      projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent
      container in which the alerting policies to be listed are stored. To
      retrieve a single alerting policy by name, use the GetAlertPolicy
      operation, instead.
    orderBy: Optional. A comma-separated list of fields by which to sort the
      result. Supports the same set of field references as the filter field.
      Entries can be prefixed with a minus sign to sort by the field in
      descending order.For more details, see sorting and filtering
      (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
    pageSize: Optional. The maximum number of results to return in a single
      response.
    pageToken: Optional. If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return more results from the previous
      method call.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  orderBy = _messages.StringField(3)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
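

# Usage sketch (hypothetical filter and project, not part of the generated
# API): listing alert policies with filtering, ordering, and paging. The
# filter string is illustrative only; see the sorting-and-filtering docs for
# the fields that may be referenced.
def _example_alert_policies_list_request():
  """Returns a sample MonitoringProjectsAlertPoliciesListRequest."""
  return MonitoringProjectsAlertPoliciesListRequest(
      name='projects/my-project',
      filter='display_name = "High CPU"',  # Illustrative filter expression.
      orderBy='-display_name',             # Minus sign sorts descending.
      pageSize=50)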


class MonitoringProjectsAlertPoliciesPatchRequest(_messages.Message):
  r"""A MonitoringProjectsAlertPoliciesPatchRequest object.

  Fields:
    alertPolicy: An AlertPolicy resource to be passed as the request body.
    name: Identifier. Required if the policy exists. The resource name for
      this policy. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
      [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is
      created. When calling the alertPolicies.create method, do not include
      the name field in the alerting policy passed as part of the request.
    updateMask: Optional. A list of alerting policy field names. If this field
      is not empty, each listed field in the existing alerting policy is set
      to the value of the corresponding field in the supplied policy
      (alert_policy), or to the field's default value if the field is not in
      the supplied alerting policy. Fields not listed retain their previous
      value.Examples of valid field masks include display_name, documentation,
      documentation.content, documentation.mime_type, user_labels,
      user_label.nameofkey, enabled, conditions, combiner, etc.If this field
      is empty, then the supplied alerting policy replaces the existing
      policy. It is the same as deleting the existing policy and adding the
      supplied policy, except for the following: The new policy will have the
      same [ALERT_POLICY_ID] as the former policy. This gives you continuity
      with the former policy in your notifications and incidents. Conditions
      in the new policy will keep their former [CONDITION_ID] if the supplied
      condition includes the name field with that [CONDITION_ID]. If the
      supplied condition omits the name field, then a new [CONDITION_ID] is
      created.
  """

  alertPolicy = _messages.MessageField('AlertPolicy', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
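

# Usage sketch (hypothetical policy name): a partial update of an existing
# alert policy. Only the fields named in updateMask are read from the supplied
# policy; the mask below uses field names listed in the docstring above. The
# AlertPolicy body is left empty in this sketch and would normally carry the
# new values for the masked fields.
def _example_alert_policies_patch_request():
  """Returns a sample MonitoringProjectsAlertPoliciesPatchRequest."""
  return MonitoringProjectsAlertPoliciesPatchRequest(
      name='projects/my-project/alertPolicies/1234567890',  # Placeholder ID.
      updateMask='display_name,enabled',
      alertPolicy=AlertPolicy())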


class MonitoringProjectsAlertsGetRequest(_messages.Message):
  r"""A MonitoringProjectsAlertsGetRequest object.

  Fields:
    name: Required. The name of the alert.The format is:
      projects/[PROJECT_ID_OR_NUMBER]/alerts/[ALERT_ID] The [ALERT_ID] is a
      system-assigned unique identifier for the alert.
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsAlertsListRequest(_messages.Message):
  r"""A MonitoringProjectsAlertsListRequest object.

  Fields:
    filter: Optional. An alert is returned if there is a match on any fields
      belonging to the alert or its subfields.
    orderBy: Optional. A comma-separated list of fields in Alert to use for
      sorting. The default sort direction is ascending. To specify descending
      order for a field, add a desc modifier. The following fields are
      supported: open_time, close_time. For example, "close_time desc,
      open_time" will return the alerts closed most recently, with ties broken
      in the order of older alerts listed first. If the field is not set, the
      results are sorted by open_time desc.
    pageSize: Optional. The maximum number of results to return in a single
      response. If not set to a positive number, at most 50 alerts will be
      returned. The maximum value is 1000; values above 1000 will be coerced
      to 1000.
    pageToken: Optional. If non-empty, page_token must contain a value
      returned as the next_page_token in a previous response to request the
      next set of results.
    parent: Required. The name of the project to list alerts for.
  """

  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
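

# Usage sketch (hypothetical project): listing alerts sorted by most recently
# closed, reusing the orderBy example from the docstring above. pageSize is
# capped at 1000 by the service.
def _example_alerts_list_request():
  """Returns a sample MonitoringProjectsAlertsListRequest."""
  return MonitoringProjectsAlertsListRequest(
      parent='projects/my-project',
      orderBy='close_time desc, open_time',
      pageSize=50)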


class MonitoringProjectsCollectdTimeSeriesCreateRequest(_messages.Message):
  r"""A MonitoringProjectsCollectdTimeSeriesCreateRequest object.

  Fields:
    createCollectdTimeSeriesRequest: A CreateCollectdTimeSeriesRequest
      resource to be passed as the request body.
    name: The project
      (https://cloud.google.com/monitoring/api/v3#project_name) in which to
      create the time series. The format is: projects/[PROJECT_ID_OR_NUMBER]
  """

  createCollectdTimeSeriesRequest = _messages.MessageField('CreateCollectdTimeSeriesRequest', 1)
  name = _messages.StringField(2, required=True)


class MonitoringProjectsGroupsCreateRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsCreateRequest object.

  Fields:
    group: A Group resource to be passed as the request body.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) in which to
      create the group. The format is: projects/[PROJECT_ID_OR_NUMBER]
    validateOnly: If true, validate this request but do not create the group.
  """

  group = _messages.MessageField('Group', 1)
  name = _messages.StringField(2, required=True)
  validateOnly = _messages.BooleanField(3)
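

# Usage sketch (hypothetical project): validating a group definition without
# creating it, using the validateOnly flag described above. The Group body is
# left empty here; a real request would populate it before sending.
def _example_groups_create_request():
  """Returns a sample MonitoringProjectsGroupsCreateRequest."""
  return MonitoringProjectsGroupsCreateRequest(
      name='projects/my-project',
      group=Group(),
      validateOnly=True)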


class MonitoringProjectsGroupsDeleteRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsDeleteRequest object.

  Fields:
    name: Required. The group to delete. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
    recursive: If this field is true, then the request means to delete a group
      with all its descendants. Otherwise, the request means to delete a group
      only when it has no descendants. The default value is false.
  """

  name = _messages.StringField(1, required=True)
  recursive = _messages.BooleanField(2)


class MonitoringProjectsGroupsGetRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsGetRequest object.

  Fields:
    name: Required. The group to retrieve. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsGroupsListRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsListRequest object.

  Fields:
    ancestorsOfGroup: A group name. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups that
      are ancestors of the specified group. The groups are returned in order,
      starting with the immediate parent and ending with the most distant
      ancestor. If the specified group has no immediate parent, the results
      are empty.
    childrenOfGroup: A group name. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups whose
      parent_name field contains the group name. If no groups have this
      parent, the results are empty.
    descendantsOfGroup: A group name. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns the
      descendants of the specified group. This is a superset of the results
      returned by the children_of_group filter, and includes children-of-
      children, and so forth.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) whose groups
      are to be listed. The format is: projects/[PROJECT_ID_OR_NUMBER]
    pageSize: A positive number that is the maximum number of results to
      return.
    pageToken: If this field is not empty then it must contain the
      next_page_token value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
  """

  ancestorsOfGroup = _messages.StringField(1)
  childrenOfGroup = _messages.StringField(2)
  descendantsOfGroup = _messages.StringField(3)
  name = _messages.StringField(4, required=True)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
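

# Usage sketch (hypothetical group name): listing the direct children of a
# group. Only one of ancestorsOfGroup, childrenOfGroup, or descendantsOfGroup
# would normally be set on a single request.
def _example_groups_list_request():
  """Returns a sample MonitoringProjectsGroupsListRequest."""
  return MonitoringProjectsGroupsListRequest(
      name='projects/my-project',
      childrenOfGroup='projects/my-project/groups/my-group-id',
      pageSize=100)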


class MonitoringProjectsGroupsMembersListRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsMembersListRequest object.

  Fields:
    filter: An optional list filter
      (https://cloud.google.com/monitoring/api/learn_more#filtering)
      describing the members to be returned. The filter may reference the
      type, labels, and metadata of monitored resources that comprise the
      group. For example, to return only resources representing Compute Engine
      VM instances, use this filter: `resource.type = "gce_instance"`
    interval_endTime: Required. The end of the time interval.
    interval_startTime: Optional. The beginning of the time interval. The
      default value for the start time is the end time. The start time must
      not be later than the end time.
    name: Required. The group whose members are listed. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
    pageSize: A positive number that is the maximum number of results to
      return.
    pageToken: If this field is not empty then it must contain the
      next_page_token value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
  """

  filter = _messages.StringField(1)
  interval_endTime = _messages.StringField(2)
  interval_startTime = _messages.StringField(3)
  name = _messages.StringField(4, required=True)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
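

# Usage sketch (hypothetical group and timestamp): listing the Compute Engine
# VM instances in a group, reusing the example filter from the docstring
# above. The interval end time is an RFC 3339 timestamp string.
def _example_groups_members_list_request():
  """Returns a sample MonitoringProjectsGroupsMembersListRequest."""
  return MonitoringProjectsGroupsMembersListRequest(
      name='projects/my-project/groups/my-group-id',
      filter='resource.type = "gce_instance"',
      interval_endTime='2024-01-01T00:00:00Z',
      pageSize=100)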


class MonitoringProjectsGroupsUpdateRequest(_messages.Message):
  r"""A MonitoringProjectsGroupsUpdateRequest object.

  Fields:
    group: A Group resource to be passed as the request body.
    name: Output only. The name of this group. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group,
      this field is ignored and a new name is created consisting of the
      project specified in the call to CreateGroup and a unique [GROUP_ID]
      that is generated automatically.
    validateOnly: If true, validate this request but do not update the
      existing group.
  """

  group = _messages.MessageField('Group', 1)
  name = _messages.StringField(2, required=True)
  validateOnly = _messages.BooleanField(3)


class MonitoringProjectsMetricDescriptorsDeleteRequest(_messages.Message):
  r"""A MonitoringProjectsMetricDescriptorsDeleteRequest object.

  Fields:
    name: Required. The metric descriptor on which to execute the request. The
      format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
      An example of [METRIC_ID] is: "custom.googleapis.com/my_test_metric".
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsMetricDescriptorsGetRequest(_messages.Message):
  r"""A MonitoringProjectsMetricDescriptorsGetRequest object.

  Fields:
    name: Required. The metric descriptor on which to execute the request. The
      format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
      An example value of [METRIC_ID] is
      "compute.googleapis.com/instance/disk/read_bytes_count".
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsMetricDescriptorsListRequest(_messages.Message):
  r"""A MonitoringProjectsMetricDescriptorsListRequest object.

  Fields:
    activeOnly: Optional. If true, only metrics and monitored resource types
      that have recent data (within roughly 25 hours) will be included in the
      response. - If a metric descriptor enumerates monitored resource types,
      only the monitored resource types for which the metric type has recent
      data will be included in the returned metric descriptor, and if none of
      them have recent data, the metric descriptor will not be returned. - If
      a metric descriptor does not enumerate the compatible monitored resource
      types, it will be returned only if the metric type has recent data for
      some monitored resource type. The returned descriptor will not enumerate
      any monitored resource types.
    filter: Optional. If this field is empty, all custom and system-defined
      metric descriptors are returned. Otherwise, the filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifies which
      metric descriptors are to be returned. For example, the following filter
      matches all custom metrics
      (https://cloud.google.com/monitoring/custom-metrics):
      metric.type = starts_with("custom.googleapis.com/")
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]
    pageSize: Optional. A positive number that is the maximum number of
      results to return. The default and maximum value is 10,000. If a
      page_size <= 0 or > 10,000 is submitted, the method will instead return
      a maximum of 10,000 results.
    pageToken: Optional. If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
  """

  activeOnly = _messages.BooleanField(1)
  filter = _messages.StringField(2)
  name = _messages.StringField(3, required=True)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
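

# Usage sketch (hypothetical project): listing only custom metric descriptors
# that have recent data, reusing the example filter from the docstring above.
def _example_metric_descriptors_list_request():
  """Returns a sample MonitoringProjectsMetricDescriptorsListRequest."""
  return MonitoringProjectsMetricDescriptorsListRequest(
      name='projects/my-project',
      filter='metric.type = starts_with("custom.googleapis.com/")',
      activeOnly=True,
      pageSize=100)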


class MonitoringProjectsMonitoredResourceDescriptorsGetRequest(_messages.Message):
  r"""A MonitoringProjectsMonitoredResourceDescriptorsGetRequest object.

  Fields:
    name: Required. The monitored resource descriptor to get. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]
      The [RESOURCE_TYPE] is a predefined type, such as cloudsql_database.
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsMonitoredResourceDescriptorsListRequest(_messages.Message):
  r"""A MonitoringProjectsMonitoredResourceDescriptorsListRequest object.

  Fields:
    filter: An optional filter
      (https://cloud.google.com/monitoring/api/v3/filters) describing the
      descriptors to be returned. The filter can reference the descriptor's
      type and labels. For example, the following filter returns only Google
      Compute Engine descriptors that have an id label: resource.type =
      starts_with("gce_") AND resource.label:id
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]
    pageSize: A positive number that is the maximum number of results to
      return.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
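

# Usage sketch (hypothetical project): listing Compute Engine monitored
# resource descriptors that carry an id label, reusing the example filter
# from the docstring above.
def _example_monitored_resource_descriptors_list_request():
  """Returns a sample list request for monitored resource descriptors."""
  return MonitoringProjectsMonitoredResourceDescriptorsListRequest(
      name='projects/my-project',
      filter='resource.type = starts_with("gce_") AND resource.label:id',
      pageSize=100)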


class MonitoringProjectsNotificationChannelDescriptorsGetRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelDescriptorsGetRequest object.

  Fields:
    name: Required. The channel type for which to execute the request. The
      format is:
      projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsNotificationChannelDescriptorsListRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelDescriptorsListRequest object.

  Fields:
    name: Required. The REST resource name of the parent from which to
      retrieve the notification channel descriptors. The expected syntax is:
      projects/[PROJECT_ID_OR_NUMBER] Note that this names
      (https://cloud.google.com/monitoring/api/v3#project_name) the parent
      container in which to look for the descriptors; to retrieve a single
      descriptor by name, use the GetNotificationChannelDescriptor operation,
      instead.
    pageSize: The maximum number of results to return in a single response. If
      not set to a positive number, a reasonable value will be chosen by the
      service.
    pageToken: If non-empty, page_token must contain a value returned as the
      next_page_token in a previous response to request the next set of
      results.
  """

  name = _messages.StringField(1, required=True)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)


class MonitoringProjectsNotificationChannelsCreateRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsCreateRequest object.

  Fields:
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This
      names the container into which the channel will be written; it does
      not name the newly created channel. The resulting channel's name will
      have a normalized version of this field as a prefix, but will add
      /notificationChannels/[CHANNEL_ID] to identify the channel.
    notificationChannel: A NotificationChannel resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  notificationChannel = _messages.MessageField('NotificationChannel', 2)


class MonitoringProjectsNotificationChannelsDeleteRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsDeleteRequest object.

  Fields:
    force: If true, the notification channel will be deleted regardless of its
      use in alert policies (the policies will be updated to remove the
      channel). If false, this operation will fail if the notification channel
      is referenced by existing alerting policies.
    name: Required. The channel for which to execute the request. The format
      is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
  """

  force = _messages.BooleanField(1)
  name = _messages.StringField(2, required=True)


class MonitoringProjectsNotificationChannelsGetRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsGetRequest object.

  Fields:
    name: Required. The channel for which to execute the request. The format
      is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsNotificationChannelsGetVerificationCodeRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsGetVerificationCodeRequest
  object.

  Fields:
    getNotificationChannelVerificationCodeRequest: A
      GetNotificationChannelVerificationCodeRequest resource to be passed as
      the request body.
    name: Required. The notification channel for which a verification code is
      to be generated and retrieved. This must name a channel that is already
      verified; if the specified channel is not verified, the request will
      fail.
  """

  getNotificationChannelVerificationCodeRequest = _messages.MessageField('GetNotificationChannelVerificationCodeRequest', 1)
  name = _messages.StringField(2, required=True)


class MonitoringProjectsNotificationChannelsListRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsListRequest object.

  Fields:
    filter: Optional. If provided, this field specifies the criteria that must
      be met by notification channels to be included in the response.For more
      details, see sorting and filtering
      (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This
      names the container in which to look for the notification channels; it
      does not name a specific channel. To query a specific channel by REST
      resource name, use the GetNotificationChannel operation.
    orderBy: Optional. A comma-separated list of fields by which to sort the
      result. Supports the same set of fields as in filter. Entries can be
      prefixed with a minus sign to sort in descending rather than ascending
      order.For more details, see sorting and filtering
      (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
    pageSize: Optional. The maximum number of results to return in a single
      response. If not set to a positive number, a reasonable value will be
      chosen by the service.
    pageToken: Optional. If non-empty, page_token must contain a value
      returned as the next_page_token in a previous response to request the
      next set of results.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  orderBy = _messages.StringField(3)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
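

# Usage sketch (hypothetical filter and project): listing notification
# channels sorted by display name. The filter expression is illustrative
# only; see the sorting-and-filtering docs for the fields that may be
# referenced.
def _example_notification_channels_list_request():
  """Returns a sample MonitoringProjectsNotificationChannelsListRequest."""
  return MonitoringProjectsNotificationChannelsListRequest(
      name='projects/my-project',
      filter='type = "email"',  # Illustrative filter expression.
      orderBy='display_name',
      pageSize=50)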


class MonitoringProjectsNotificationChannelsPatchRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsPatchRequest object.

  Fields:
    name: Identifier. The full REST resource name for this channel. The format
      is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
      The [CHANNEL_ID] is automatically assigned by the server on creation.
    notificationChannel: A NotificationChannel resource to be passed as the
      request body.
    updateMask: Optional. The fields to update.
  """

  name = _messages.StringField(1, required=True)
  notificationChannel = _messages.MessageField('NotificationChannel', 2)
  updateMask = _messages.StringField(3)


class MonitoringProjectsNotificationChannelsSendVerificationCodeRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsSendVerificationCodeRequest
  object.

  Fields:
    name: Required. The notification channel to which to send a verification
      code.
    sendNotificationChannelVerificationCodeRequest: A
      SendNotificationChannelVerificationCodeRequest resource to be passed as
      the request body.
  """

  name = _messages.StringField(1, required=True)
  sendNotificationChannelVerificationCodeRequest = _messages.MessageField('SendNotificationChannelVerificationCodeRequest', 2)


class MonitoringProjectsNotificationChannelsVerifyRequest(_messages.Message):
  r"""A MonitoringProjectsNotificationChannelsVerifyRequest object.

  Fields:
    name: Required. The notification channel to verify.
    verifyNotificationChannelRequest: A VerifyNotificationChannelRequest
      resource to be passed as the request body.
  """

  name = _messages.StringField(1, required=True)
  verifyNotificationChannelRequest = _messages.MessageField('VerifyNotificationChannelRequest', 2)


class MonitoringProjectsSnoozesCreateRequest(_messages.Message):
  r"""A MonitoringProjectsSnoozesCreateRequest object.

  Fields:
    parent: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) in which a
      Snooze should be created. The format is: projects/[PROJECT_ID_OR_NUMBER]
    snooze: A Snooze resource to be passed as the request body.
  """

  parent = _messages.StringField(1, required=True)
  snooze = _messages.MessageField('Snooze', 2)


class MonitoringProjectsSnoozesGetRequest(_messages.Message):
  r"""A MonitoringProjectsSnoozesGetRequest object.

  Fields:
    name: Required. The ID of the Snooze to retrieve. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsSnoozesListRequest(_messages.Message):
  r"""A MonitoringProjectsSnoozesListRequest object.

  Fields:
    filter: Optional. A filter to restrict results to the given criteria. The
      following fields are supported: interval.start_time, interval.end_time.
      For example: interval.start_time > "2022-03-11T00:00:00-08:00" AND
      interval.end_time < "2022-03-12T00:00:00-08:00"
    pageSize: Optional. The maximum number of results to return for a single
      query. The server may further constrain the maximum number of results
      returned in a single page. The value should be in the range 1, 1000. If
      the value given is outside this range, the server will decide the number
      of results to be returned.
    pageToken: Optional. The next_page_token from a previous call to
      ListSnoozesRequest to get the next page of results.
    parent: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) whose Snoozes
      should be listed. The format is: projects/[PROJECT_ID_OR_NUMBER]
  """

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4, required=True)
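

# Usage sketch (hypothetical project): listing Snoozes whose interval falls
# within a one-day window, reusing the filter fields and timestamps shown in
# the docstring above.
def _example_snoozes_list_request():
  """Returns a sample MonitoringProjectsSnoozesListRequest."""
  return MonitoringProjectsSnoozesListRequest(
      parent='projects/my-project',
      filter='interval.start_time > "2022-03-11T00:00:00-08:00" AND '
             'interval.end_time < "2022-03-12T00:00:00-08:00"',
      pageSize=100)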


class MonitoringProjectsSnoozesPatchRequest(_messages.Message):
  r"""A MonitoringProjectsSnoozesPatchRequest object.

  Fields:
    name: Required. Identifier. The name of the Snooze. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the Snooze
      will be generated by the system.
    snooze: A Snooze resource to be passed as the request body.
    updateMask: Required. The fields to update.For each field listed in
      update_mask: If the Snooze object supplied in the UpdateSnoozeRequest
      has a value for that field, the value of the field in the existing
      Snooze will be set to the value of the field in the supplied Snooze. If
      the field does not have a value in the supplied Snooze, the field in the
      existing Snooze is set to its default value.Fields not listed retain
      their existing value.The following are the field names that are accepted
      in update_mask: display_name, interval.start_time, and
      interval.end_time. That said, the start time and end time of the Snooze
      determine which fields can legally be updated. Before attempting an
      update, users should consult the documentation for UpdateSnoozeRequest,
      which talks about which fields can be updated.
  """

  name = _messages.StringField(1, required=True)
  snooze = _messages.MessageField('Snooze', 2)
  updateMask = _messages.StringField(3)
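

# Usage sketch (hypothetical Snooze name): updating only the display name and
# end time of an existing Snooze, using field names accepted in update_mask
# per the docstring above. The Snooze body is left empty in this sketch and
# would normally carry the new values for the masked fields.
def _example_snoozes_patch_request():
  """Returns a sample MonitoringProjectsSnoozesPatchRequest."""
  return MonitoringProjectsSnoozesPatchRequest(
      name='projects/my-project/snoozes/1234567890',  # Placeholder ID.
      updateMask='display_name,interval.end_time',
      snooze=Snooze())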


class MonitoringProjectsTimeSeriesCreateRequest(_messages.Message):
  r"""A MonitoringProjectsTimeSeriesCreateRequest object.

  Fields:
    createTimeSeriesRequest: A CreateTimeSeriesRequest resource to be passed
      as the request body.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]
  """

  createTimeSeriesRequest = _messages.MessageField('CreateTimeSeriesRequest', 1)
  name = _messages.StringField(2, required=True)


class MonitoringProjectsTimeSeriesCreateServiceRequest(_messages.Message):
  r"""A MonitoringProjectsTimeSeriesCreateServiceRequest object.

  Fields:
    createTimeSeriesRequest: A CreateTimeSeriesRequest resource to be passed
      as the request body.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]
  """

  createTimeSeriesRequest = _messages.MessageField('CreateTimeSeriesRequest', 1)
  name = _messages.StringField(2, required=True)


class MonitoringProjectsTimeSeriesListRequest(_messages.Message):
  r"""A MonitoringProjectsTimeSeriesListRequest object.

  Enums:
    AggregationCrossSeriesReducerValueValuesEnum: The reduction operation to
      be used to combine time series into a single time series, where the
      value of each data point in the resulting series is a function of all
      the already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    AggregationPerSeriesAlignerValueValuesEnum: An Aligner describes how to
      bring the data points in a single time series into temporal alignment.
      Except for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    SecondaryAggregationCrossSeriesReducerValueValuesEnum: The reduction
      operation to be used to combine time series into a single time series,
      where the value of each data point in the resulting series is a function
      of all the already aligned values in the input time series.Not all
      reducer operations can be applied to all time series. The valid choices
      depend on the metric_kind and the value_type of the original time
      series. Reduction can yield a time series with a different metric_kind
      or value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    SecondaryAggregationPerSeriesAlignerValueValuesEnum: An Aligner describes
      how to bring the data points in a single time series into temporal
      alignment. Except for ALIGN_NONE, all alignments cause all the data
      points in an alignment_period to be mathematically grouped together,
      resulting in a single data point for each alignment_period with end
      timestamp at the end of the period.Not all alignment operations may be
      applied to all time series. The valid choices depend on the metric_kind
      and value_type of the original time series. Alignment can change the
      metric_kind or the value_type of the time series.Time series data must
      be aligned in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified and not equal to ALIGN_NONE and alignment_period must be
      specified; otherwise, an error is returned.
    ViewValueValuesEnum: Required. Specifies which information is returned
      about the time series.

  Fields:
    aggregation_alignmentPeriod: The alignment_period specifies a time
      interval, in seconds, that is used to divide the data in all the time
      series into consistent blocks of time. This will be done before the per-
      series aligner can be applied to the data.The value must be at least 60
      seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    aggregation_crossSeriesReducer: The reduction operation to be used to
      combine time series into a single time series, where the value of each
      data point in the resulting series is a function of all the already
      aligned values in the input time series.Not all reducer operations can
      be applied to all time series. The valid choices depend on the
      metric_kind and the value_type of the original time series. Reduction
      can yield a time series with a different metric_kind or value_type than
      the input time series.Time series data must first be aligned (see
      per_series_aligner) in order to perform cross-time series reduction. If
      cross_series_reducer is specified, then per_series_aligner must be
      specified, and must not be ALIGN_NONE. An alignment_period must also be
      specified; otherwise, an error is returned.
    aggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    aggregation_perSeriesAligner: An Aligner describes how to bring the data
      points in a single time series into temporal alignment. Except for
      ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    filter: Required. A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) that specifies
      which time series should be returned. The filter must specify a single
      metric type, and can additionally specify metric labels and other
      information. For example: metric.type =
      "compute.googleapis.com/instance/cpu/usage_time" AND
      metric.labels.instance_name = "my-instance-name"
    interval_endTime: Required. The end of the time interval.
    interval_startTime: Optional. The beginning of the time interval. The
      default value for the start time is the end time. The start time must
      not be later than the end time.
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name), organization
      or folder on which to execute the request. The format is:
      projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID]
      folders/[FOLDER_ID]
    orderBy: Unsupported: must be left blank. The points in each time series
      are currently returned in reverse time order (most recent to oldest).
    pageSize: A positive number that is the maximum number of results to
      return. If page_size is empty or greater than 100,000, the effective
      page_size is 100,000 results. If view is set to FULL, this is
      the maximum number of Points returned. If view is set to HEADERS, this
      is the maximum number of TimeSeries returned.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    secondaryAggregation_alignmentPeriod: The alignment_period specifies a
      time interval, in seconds, that is used to divide the data in all the
      time series into consistent blocks of time. This will be done before the
      per-series aligner can be applied to the data.The value must be at least
      60 seconds. If a per-series aligner other than ALIGN_NONE is specified,
      this field is required or an error is returned. If no per-series aligner
      is specified, or the aligner ALIGN_NONE is specified, then this field is
      ignored.The maximum value of the alignment_period is 104 weeks (2 years)
      for charts, and 90,000 seconds (25 hours) for alerting policies.
    secondaryAggregation_crossSeriesReducer: The reduction operation to be
      used to combine time series into a single time series, where the value
      of each data point in the resulting series is a function of all the
      already aligned values in the input time series.Not all reducer
      operations can be applied to all time series. The valid choices depend
      on the metric_kind and the value_type of the original time series.
      Reduction can yield a time series with a different metric_kind or
      value_type than the input time series.Time series data must first be
      aligned (see per_series_aligner) in order to perform cross-time series
      reduction. If cross_series_reducer is specified, then per_series_aligner
      must be specified, and must not be ALIGN_NONE. An alignment_period must
      also be specified; otherwise, an error is returned.
    secondaryAggregation_groupByFields: The set of fields to preserve when
      cross_series_reducer is specified. The group_by_fields determine how the
      time series are partitioned into subsets prior to applying the
      aggregation operation. Each subset contains time series that have the
      same value for each of the grouping fields. Each individual time series
      is a member of exactly one subset. The cross_series_reducer is applied
      to each subset of time series. It is not possible to reduce across
      different resource types, so this field implicitly contains
      resource.type. Fields not specified in group_by_fields are aggregated
      away. If group_by_fields is not specified and all the time series have
      the same resource type, then the time series are aggregated into a
      single output time series. If cross_series_reducer is not defined, this
      field is ignored.
    secondaryAggregation_perSeriesAligner: An Aligner describes how to bring
      the data points in a single time series into temporal alignment. Except
      for ALIGN_NONE, all alignments cause all the data points in an
      alignment_period to be mathematically grouped together, resulting in a
      single data point for each alignment_period with end timestamp at the
      end of the period.Not all alignment operations may be applied to all
      time series. The valid choices depend on the metric_kind and value_type
      of the original time series. Alignment can change the metric_kind or the
      value_type of the time series.Time series data must be aligned in order
      to perform cross-time series reduction. If cross_series_reducer is
      specified, then per_series_aligner must be specified and not equal to
      ALIGN_NONE and alignment_period must be specified; otherwise, an error
      is returned.
    view: Required. Specifies which information is returned about the time
      series.
  """

  class AggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range 0.0, 1.0 and has
        value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value_type of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class AggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type
        DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class SecondaryAggregationCrossSeriesReducerValueValuesEnum(_messages.Enum):
    r"""The reduction operation to be used to combine time series into a
    single time series, where the value of each data point in the resulting
    series is a function of all the already aligned values in the input time
    series.Not all reducer operations can be applied to all time series. The
    valid choices depend on the metric_kind and the value_type of the original
    time series. Reduction can yield a time series with a different
    metric_kind or value_type than the input time series.Time series data must
    first be aligned (see per_series_aligner) in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified, and must not be ALIGN_NONE. An
    alignment_period must also be specified; otherwise, an error is returned.

    Values:
      REDUCE_NONE: No cross-time series reduction. The output of the Aligner
        is returned.
      REDUCE_MEAN: Reduce by computing the mean value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric or distribution values. The value_type of the
        output is DOUBLE.
      REDUCE_MIN: Reduce by computing the minimum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_MAX: Reduce by computing the maximum value across time series for
        each alignment period. This reducer is valid for DELTA and GAUGE
        metrics with numeric values. The value_type of the output is the same
        as the value_type of the input.
      REDUCE_SUM: Reduce by computing the sum across time series for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics
        with numeric and distribution values. The value_type of the output is
        the same as the value_type of the input.
      REDUCE_STDDEV: Reduce by computing the standard deviation across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics with numeric or distribution values. The value_type of
        the output is DOUBLE.
      REDUCE_COUNT: Reduce by computing the number of data points across time
        series for each alignment period. This reducer is valid for DELTA and
        GAUGE metrics of numeric, Boolean, distribution, and string
        value_type. The value_type of the output is INT64.
      REDUCE_COUNT_TRUE: Reduce by computing the number of True-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_COUNT_FALSE: Reduce by computing the number of False-valued data
        points across time series for each alignment period. This reducer is
        valid for DELTA and GAUGE metrics of Boolean value_type. The
        value_type of the output is INT64.
      REDUCE_FRACTION_TRUE: Reduce by computing the ratio of the number of
        True-valued data points to the total number of data points for each
        alignment period. This reducer is valid for DELTA and GAUGE metrics of
        Boolean value_type. The output value is in the range [0.0, 1.0] and
        has value_type DOUBLE.
      REDUCE_PERCENTILE_99: Reduce by computing the 99th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value_type of the
        output is DOUBLE.
      REDUCE_PERCENTILE_95: Reduce by computing the 95th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value_type of the
        output is DOUBLE.
      REDUCE_PERCENTILE_50: Reduce by computing the 50th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value_type of the
        output is DOUBLE.
      REDUCE_PERCENTILE_05: Reduce by computing the 5th percentile
        (https://en.wikipedia.org/wiki/Percentile) of data points across time
        series for each alignment period. This reducer is valid for GAUGE and
        DELTA metrics of numeric and distribution type. The value_type of the
        output is DOUBLE.
    """
    REDUCE_NONE = 0
    REDUCE_MEAN = 1
    REDUCE_MIN = 2
    REDUCE_MAX = 3
    REDUCE_SUM = 4
    REDUCE_STDDEV = 5
    REDUCE_COUNT = 6
    REDUCE_COUNT_TRUE = 7
    REDUCE_COUNT_FALSE = 8
    REDUCE_FRACTION_TRUE = 9
    REDUCE_PERCENTILE_99 = 10
    REDUCE_PERCENTILE_95 = 11
    REDUCE_PERCENTILE_50 = 12
    REDUCE_PERCENTILE_05 = 13

  class SecondaryAggregationPerSeriesAlignerValueValuesEnum(_messages.Enum):
    r"""An Aligner describes how to bring the data points in a single time
    series into temporal alignment. Except for ALIGN_NONE, all alignments
    cause all the data points in an alignment_period to be mathematically
    grouped together, resulting in a single data point for each
    alignment_period with end timestamp at the end of the period.Not all
    alignment operations may be applied to all time series. The valid choices
    depend on the metric_kind and value_type of the original time series.
    Alignment can change the metric_kind or the value_type of the time
    series.Time series data must be aligned in order to perform cross-time
    series reduction. If cross_series_reducer is specified, then
    per_series_aligner must be specified and not equal to ALIGN_NONE and
    alignment_period must be specified; otherwise, an error is returned.

    Values:
      ALIGN_NONE: No alignment. Raw data is returned. Not valid if cross-
        series reduction is requested. The value_type of the result is the
        same as the value_type of the input.
      ALIGN_DELTA: Align and convert to DELTA. The output is delta = y1 -
        y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the
        selected alignment period results in periods with no data, then the
        aligned value for such a period is created by interpolation. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_RATE: Align and convert to a rate. The result is computed as rate
        = (y1 - y0)/(t1 - t0), or "delta over time". Think of this aligner as
        providing the slope of the line that passes through the value at the
        start and at the end of the alignment_period.This aligner is valid for
        CUMULATIVE and DELTA metrics with numeric values. If the selected
        alignment period results in periods with no data, then the aligned
        value for such a period is created by interpolation. The output is a
        GAUGE metric with value_type DOUBLE.If, by "rate", you mean
        "percentage change", see the ALIGN_PERCENT_CHANGE aligner instead.
      ALIGN_INTERPOLATE: Align by interpolating between adjacent points around
        the alignment period boundary. This aligner is valid for GAUGE metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_NEXT_OLDER: Align by moving the most recent data point before the
        end of the alignment period to the boundary at the end of the
        alignment period. This aligner is valid for GAUGE metrics. The
        value_type of the aligned result is the same as the value_type of the
        input.
      ALIGN_MIN: Align the time series by returning the minimum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MAX: Align the time series by returning the maximum value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is the same
        as the value_type of the input.
      ALIGN_MEAN: Align the time series by returning the mean value in each
        alignment period. This aligner is valid for GAUGE and DELTA metrics
        with numeric values. The value_type of the aligned result is DOUBLE.
      ALIGN_COUNT: Align the time series by returning the number of values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric or Boolean values. The value_type of the aligned
        result is INT64.
      ALIGN_SUM: Align the time series by returning the sum of the values in
        each alignment period. This aligner is valid for GAUGE and DELTA
        metrics with numeric and distribution values. The value_type of the
        aligned result is the same as the value_type of the input.
      ALIGN_STDDEV: Align the time series by returning the standard deviation
        of the values in each alignment period. This aligner is valid for
        GAUGE and DELTA metrics with numeric values. The value_type of the
        output is DOUBLE.
      ALIGN_COUNT_TRUE: Align the time series by returning the number of True
        values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_COUNT_FALSE: Align the time series by returning the number of
        False values in each alignment period. This aligner is valid for GAUGE
        metrics with Boolean values. The value_type of the output is INT64.
      ALIGN_FRACTION_TRUE: Align the time series by returning the ratio of the
        number of True values to the total number of values in each alignment
        period. This aligner is valid for GAUGE metrics with Boolean values.
        The output value is in the range [0.0, 1.0] and has value_type
        DOUBLE.
      ALIGN_PERCENTILE_99: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 99th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_95: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 95th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_50: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 50th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENTILE_05: Align the time series by using percentile
        aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting
        data point in each alignment period is the 5th percentile of all data
        points in the period. This aligner is valid for GAUGE and DELTA
        metrics with distribution values. The output is a GAUGE metric with
        value_type DOUBLE.
      ALIGN_PERCENT_CHANGE: Align and convert to a percentage change. This
        aligner is valid for GAUGE and DELTA metrics with numeric values. This
        alignment returns ((current - previous)/previous) * 100, where the
        value of previous is determined based on the alignment_period.If the
        values of current and previous are both 0, then the returned value is
        0. If only previous is 0, the returned value is infinity.A 10-minute
        moving mean is computed at each point of the alignment period prior to
        the above calculation to smooth the metric and prevent false positives
        from very short-lived spikes. The moving mean is only applicable for
        data whose values are >= 0. Any values < 0 are treated as a missing
        datapoint, and are ignored. While DELTA metrics are accepted by this
        alignment, special care should be taken that the values for the metric
        will always be positive. The output is a GAUGE metric with value_type
        DOUBLE.
    """
    ALIGN_NONE = 0
    ALIGN_DELTA = 1
    ALIGN_RATE = 2
    ALIGN_INTERPOLATE = 3
    ALIGN_NEXT_OLDER = 4
    ALIGN_MIN = 5
    ALIGN_MAX = 6
    ALIGN_MEAN = 7
    ALIGN_COUNT = 8
    ALIGN_SUM = 9
    ALIGN_STDDEV = 10
    ALIGN_COUNT_TRUE = 11
    ALIGN_COUNT_FALSE = 12
    ALIGN_FRACTION_TRUE = 13
    ALIGN_PERCENTILE_99 = 14
    ALIGN_PERCENTILE_95 = 15
    ALIGN_PERCENTILE_50 = 16
    ALIGN_PERCENTILE_05 = 17
    ALIGN_PERCENT_CHANGE = 18

  class ViewValueValuesEnum(_messages.Enum):
    r"""Required. Specifies which information is returned about the time
    series.

    Values:
      FULL: Returns the identity of the metric(s), the time series, and the
        time series data.
      HEADERS: Returns the identity of the metric and the time series
        resource, but not the time series data.
    """
    FULL = 0
    HEADERS = 1

  aggregation_alignmentPeriod = _messages.StringField(1)
  aggregation_crossSeriesReducer = _messages.EnumField('AggregationCrossSeriesReducerValueValuesEnum', 2)
  aggregation_groupByFields = _messages.StringField(3, repeated=True)
  aggregation_perSeriesAligner = _messages.EnumField('AggregationPerSeriesAlignerValueValuesEnum', 4)
  filter = _messages.StringField(5)
  interval_endTime = _messages.StringField(6)
  interval_startTime = _messages.StringField(7)
  name = _messages.StringField(8, required=True)
  orderBy = _messages.StringField(9)
  pageSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(11)
  secondaryAggregation_alignmentPeriod = _messages.StringField(12)
  secondaryAggregation_crossSeriesReducer = _messages.EnumField('SecondaryAggregationCrossSeriesReducerValueValuesEnum', 13)
  secondaryAggregation_groupByFields = _messages.StringField(14, repeated=True)
  secondaryAggregation_perSeriesAligner = _messages.EnumField('SecondaryAggregationPerSeriesAlignerValueValuesEnum', 15)
  view = _messages.EnumField('ViewValueValuesEnum', 16)
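
# Illustrative sketch, not part of the generated module: building a
# list-time-series request that aligns each series to 5-minute means and then
# averages across series per zone. The enclosing message name
# (MonitoringProjectsTimeSeriesListRequest) and the metric filter are
# assumptions; the field and enum names come from the definitions above.
#
#   _list_request = MonitoringProjectsTimeSeriesListRequest(  # assumed name
#       name='projects/my-project',
#       filter='metric.type = "compute.googleapis.com/instance/cpu/utilization"',
#       interval_startTime='2024-01-01T00:00:00Z',
#       interval_endTime='2024-01-01T01:00:00Z',
#       aggregation_alignmentPeriod='300s',
#       aggregation_perSeriesAligner=(MonitoringProjectsTimeSeriesListRequest
#           .AggregationPerSeriesAlignerValueValuesEnum.ALIGN_MEAN),
#       aggregation_crossSeriesReducer=(MonitoringProjectsTimeSeriesListRequest
#           .AggregationCrossSeriesReducerValueValuesEnum.REDUCE_MEAN),
#       aggregation_groupByFields=['resource.labels.zone'],
#       view=MonitoringProjectsTimeSeriesListRequest.ViewValueValuesEnum.FULL,
#   )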


class MonitoringProjectsTimeSeriesQueryRequest(_messages.Message):
  r"""A MonitoringProjectsTimeSeriesQueryRequest object.

  Fields:
    name: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) on which to
      execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]
    queryTimeSeriesRequest: A QueryTimeSeriesRequest resource to be passed as
      the request body.
  """

  name = _messages.StringField(1, required=True)
  queryTimeSeriesRequest = _messages.MessageField('QueryTimeSeriesRequest', 2)
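
# Illustrative sketch, not part of the generated module: pairing the wrapper
# above with an MQL query body. The assumption that QueryTimeSeriesRequest
# exposes the query text in a field named "query", and the query string
# itself, are not taken from this excerpt.
#
#   _query_request = MonitoringProjectsTimeSeriesQueryRequest(
#       name='projects/my-project',
#       queryTimeSeriesRequest=QueryTimeSeriesRequest(  # assumed "query" field
#           query='fetch gce_instance'
#                 '::compute.googleapis.com/instance/cpu/utilization'
#                 ' | every 1m'),
#   )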


class MonitoringProjectsUptimeCheckConfigsCreateRequest(_messages.Message):
  r"""A MonitoringProjectsUptimeCheckConfigsCreateRequest object.

  Fields:
    parent: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) in which to
      create the Uptime check. The format is: projects/[PROJECT_ID_OR_NUMBER]
    uptimeCheckConfig: A UptimeCheckConfig resource to be passed as the
      request body.
  """

  parent = _messages.StringField(1, required=True)
  uptimeCheckConfig = _messages.MessageField('UptimeCheckConfig', 2)


class MonitoringProjectsUptimeCheckConfigsDeleteRequest(_messages.Message):
  r"""A MonitoringProjectsUptimeCheckConfigsDeleteRequest object.

  Fields:
    name: Required. The Uptime check configuration to delete. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsUptimeCheckConfigsGetRequest(_messages.Message):
  r"""A MonitoringProjectsUptimeCheckConfigsGetRequest object.

  Fields:
    name: Required. The Uptime check configuration to retrieve. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringProjectsUptimeCheckConfigsListRequest(_messages.Message):
  r"""A MonitoringProjectsUptimeCheckConfigsListRequest object.

  Fields:
    filter: If provided, this field specifies the criteria that must be met by
      uptime checks to be included in the response.For more details, see
      Filtering syntax
      (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax).
    pageSize: The maximum number of results to return in a single response.
      The server may further constrain the maximum number of results returned
      in a single page. If the page_size is <=0, the server will decide the
      number of results to be returned.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return more results from the previous
      method call.
    parent: Required. The project
      (https://cloud.google.com/monitoring/api/v3#project_name) whose Uptime
      check configurations are listed. The format is:
      projects/[PROJECT_ID_OR_NUMBER]
  """

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4, required=True)
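
# Illustrative sketch, not part of the generated module: the usual page-token
# loop for this request. The client handle and the response message (assumed
# to carry uptimeCheckConfigs and nextPageToken fields) are not defined in
# this excerpt; only the request fields come from the definition above.
#
#   _req = MonitoringProjectsUptimeCheckConfigsListRequest(
#       parent='projects/my-project', pageSize=50)
#   while True:
#       _resp = client.projects_uptimeCheckConfigs.List(_req)  # assumed client
#       for _config in _resp.uptimeCheckConfigs:
#           pass  # process each UptimeCheckConfig
#       if not _resp.nextPageToken:
#           break
#       _req.pageToken = _resp.nextPageToken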


class MonitoringProjectsUptimeCheckConfigsPatchRequest(_messages.Message):
  r"""A MonitoringProjectsUptimeCheckConfigsPatchRequest object.

  Fields:
    name: Identifier. A unique resource name for this Uptime check
      configuration. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
      [PROJECT_ID_OR_NUMBER] is the Workspace host project associated with the
      Uptime check.This field should be omitted when creating the Uptime check
      configuration; on create, the resource name is assigned by the server
      and included in the response.
    updateMask: Optional. If present, only the listed fields in the current
      Uptime check configuration are updated with values from the new
      configuration. If this field is empty, then the current configuration is
      completely replaced with the new configuration.
    uptimeCheckConfig: A UptimeCheckConfig resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  updateMask = _messages.StringField(2)
  uptimeCheckConfig = _messages.MessageField('UptimeCheckConfig', 3)
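
# Illustrative sketch, not part of the generated module: patching only the
# display name of an existing Uptime check. The UptimeCheckConfig.displayName
# field and the camelCase mask path are assumptions; per the field description
# above, an empty updateMask would replace the whole configuration instead.
#
#   _patch_request = MonitoringProjectsUptimeCheckConfigsPatchRequest(
#       name='projects/my-project/uptimeCheckConfigs/my-check-id',
#       updateMask='displayName',  # mask path naming is an assumption
#       uptimeCheckConfig=UptimeCheckConfig(displayName='Renamed check'),
#   )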


class MonitoringQueryLanguageCondition(_messages.Message):
  r"""A condition type that allows alerting policies to be defined using
  Monitoring Query Language (https://cloud.google.com/monitoring/mql).

  Enums:
    EvaluationMissingDataValueValuesEnum: A condition control that determines
      how metric-threshold conditions are evaluated when data stops arriving.

  Fields:
    duration: Optional. The amount of time that a time series must violate the
      threshold to be considered failing. Currently, only values that are a
      multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported.
      If an invalid value is given, an error will be returned. When choosing a
      duration, it is useful to keep in mind the frequency of the underlying
      time series data (which may also be affected by any alignments specified
      in the aggregations field); a good duration is long enough so that a
      single outlier does not generate spurious alerts, but short enough that
      unhealthy states are detected and alerted on quickly. The default value
      is zero.
    evaluationMissingData: A condition control that determines how metric-
      threshold conditions are evaluated when data stops arriving.
    query: Monitoring Query Language (https://cloud.google.com/monitoring/mql)
      query that outputs a boolean stream.
    trigger: The number/percent of time series for which the comparison must
      hold in order for the condition to trigger. If unspecified, then the
      condition will trigger if the comparison is true for any of the time
      series that have been identified by filter and aggregations, or by the
      ratio, if denominator_filter and denominator_aggregations are specified.
  """

  class EvaluationMissingDataValueValuesEnum(_messages.Enum):
    r"""A condition control that determines how metric-threshold conditions
    are evaluated when data stops arriving.

    Values:
      EVALUATION_MISSING_DATA_UNSPECIFIED: An unspecified evaluation missing
        data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP.
      EVALUATION_MISSING_DATA_INACTIVE: If there is no data to evaluate the
        condition, then evaluate the condition as false.
      EVALUATION_MISSING_DATA_ACTIVE: If there is no data to evaluate the
        condition, then evaluate the condition as true.
      EVALUATION_MISSING_DATA_NO_OP: Do not evaluate the condition to any
        value if there is no data.
    """
    EVALUATION_MISSING_DATA_UNSPECIFIED = 0
    EVALUATION_MISSING_DATA_INACTIVE = 1
    EVALUATION_MISSING_DATA_ACTIVE = 2
    EVALUATION_MISSING_DATA_NO_OP = 3

  duration = _messages.StringField(1)
  evaluationMissingData = _messages.EnumField('EvaluationMissingDataValueValuesEnum', 2)
  query = _messages.StringField(3)
  trigger = _messages.MessageField('Trigger', 4)
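
# Illustrative sketch, not part of the generated module: an MQL-based alerting
# condition. The query text and the Trigger.count field are assumptions;
# duration uses one of the whole-minute values called out in the field
# description above.
#
#   _mql_condition = MonitoringQueryLanguageCondition(
#       query='fetch gce_instance'
#             '::compute.googleapis.com/instance/cpu/utilization'
#             ' | every 1m | condition val() > 0.9',
#       duration='300s',
#       trigger=Trigger(count=1),  # assumed Trigger field
#       evaluationMissingData=(MonitoringQueryLanguageCondition
#           .EvaluationMissingDataValueValuesEnum
#           .EVALUATION_MISSING_DATA_INACTIVE),
#   )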


class MonitoringServicesCreateRequest(_messages.Message):
  r"""A MonitoringServicesCreateRequest object.

  Fields:
    parent: Required. Resource name
      (https://cloud.google.com/monitoring/api/v3#project_name) of the parent
      Metrics Scope. The format is: projects/[PROJECT_ID_OR_NUMBER]
    service: A Service resource to be passed as the request body.
    serviceId: Optional. The Service id to use for this Service. If omitted,
      an id will be generated instead. Must match the pattern [a-z0-9\-]+
  """

  parent = _messages.StringField(1, required=True)
  service = _messages.MessageField('Service', 2)
  serviceId = _messages.StringField(3)


class MonitoringServicesDeleteRequest(_messages.Message):
  r"""A MonitoringServicesDeleteRequest object.

  Fields:
    name: Required. Resource name of the Service to delete. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringServicesGetRequest(_messages.Message):
  r"""A MonitoringServicesGetRequest object.

  Fields:
    name: Required. Resource name of the Service. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
  """

  name = _messages.StringField(1, required=True)


class MonitoringServicesListRequest(_messages.Message):
  r"""A MonitoringServicesListRequest object.

  Fields:
    filter: A filter specifying what Services to return. The filter supports
      filtering on a particular service-identifier type or one of its
      attributes.To filter on a particular service-identifier type, the
      identifier_case refers to which option in the identifier field is
      populated. For example, the filter identifier_case = "CUSTOM" would
      match all services with a value for the custom field. Valid options
      include "CUSTOM", "APP_ENGINE", "MESH_ISTIO", and the other options
      listed at
      https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
      To filter on an attribute of a service-identifier type, apply
      the filter name by using the snake case of the service-identifier type
      and the attribute of that service-identifier type, and join the two with
      a period. For example, to filter by the meshUid field of the MeshIstio
      service-identifier type, you must filter on mesh_istio.mesh_uid = "123"
      to match all services with mesh UID "123". Service-identifier types and
      their attributes are described at
      https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
    pageSize: A non-negative number that is the maximum number of results to
      return. When 0, use default page size.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    parent: Required. Resource name of the parent containing the listed
      services, either a project
      (https://cloud.google.com/monitoring/api/v3#project_name) or a
      Monitoring Metrics Scope. The formats are:
      projects/[PROJECT_ID_OR_NUMBER] workspaces/[HOST_PROJECT_ID_OR_NUMBER]
  """

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4, required=True)
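
# Illustrative sketch, not part of the generated module: the two filter forms
# described above, one matching a service-identifier type and one matching an
# attribute of that type. The parent project is a placeholder.
#
#   _by_type = MonitoringServicesListRequest(
#       parent='projects/my-project',
#       filter='identifier_case = "CUSTOM"')
#   _by_attribute = MonitoringServicesListRequest(
#       parent='projects/my-project',
#       filter='mesh_istio.mesh_uid = "123"')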


class MonitoringServicesPatchRequest(_messages.Message):
  r"""A MonitoringServicesPatchRequest object.

  Fields:
    name: Identifier. Resource name for this Service. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
    service: A Service resource to be passed as the request body.
    updateMask: A set of field paths defining which fields to use for the
      update.
  """

  name = _messages.StringField(1, required=True)
  service = _messages.MessageField('Service', 2)
  updateMask = _messages.StringField(3)


class MonitoringServicesServiceLevelObjectivesCreateRequest(_messages.Message):
  r"""A MonitoringServicesServiceLevelObjectivesCreateRequest object.

  Fields:
    parent: Required. Resource name of the parent Service. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
    serviceLevelObjective: A ServiceLevelObjective resource to be passed as
      the request body.
    serviceLevelObjectiveId: Optional. The ServiceLevelObjective id to use for
      this ServiceLevelObjective. If omitted, an id will be generated instead.
      Must match the pattern ^[a-zA-Z0-9-_:.]+$
  """

  parent = _messages.StringField(1, required=True)
  serviceLevelObjective = _messages.MessageField('ServiceLevelObjective', 2)
  serviceLevelObjectiveId = _messages.StringField(3)


class MonitoringServicesServiceLevelObjectivesDeleteRequest(_messages.Message):
  r"""A MonitoringServicesServiceLevelObjectivesDeleteRequest object.

  Fields:
    name: Required. Resource name of the ServiceLevelObjective to delete. The
      format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
  """

  name = _messages.StringField(1, required=True)


class MonitoringServicesServiceLevelObjectivesGetRequest(_messages.Message):
  r"""A MonitoringServicesServiceLevelObjectivesGetRequest object.

  Enums:
    ViewValueValuesEnum: View of the ServiceLevelObjective to return. If
      DEFAULT, return the ServiceLevelObjective as originally defined. If
      EXPLICIT and the ServiceLevelObjective is defined in terms of a
      BasicSli, replace the BasicSli with a RequestBasedSli spelling out how
      the SLI is computed.

  Fields:
    name: Required. Resource name of the ServiceLevelObjective to get. The
      format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
    view: View of the ServiceLevelObjective to return. If DEFAULT, return the
      ServiceLevelObjective as originally defined. If EXPLICIT and the
      ServiceLevelObjective is defined in terms of a BasicSli, replace the
      BasicSli with a RequestBasedSli spelling out how the SLI is computed.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""View of the ServiceLevelObjective to return. If DEFAULT, return the
    ServiceLevelObjective as originally defined. If EXPLICIT and the
    ServiceLevelObjective is defined in terms of a BasicSli, replace the
    BasicSli with a RequestBasedSli spelling out how the SLI is computed.

    Values:
      VIEW_UNSPECIFIED: Same as FULL.
      FULL: Return the embedded ServiceLevelIndicator in the form in which it
        was defined. If it was defined using a BasicSli, return that BasicSli.
      EXPLICIT: For ServiceLevelIndicators using BasicSli articulation,
        instead return the ServiceLevelIndicator with its mode of computation
        fully spelled out as a RequestBasedSli. For ServiceLevelIndicators
        using RequestBasedSli or WindowsBasedSli, return the
        ServiceLevelIndicator as it was provided.
    """
    VIEW_UNSPECIFIED = 0
    FULL = 1
    EXPLICIT = 2

  name = _messages.StringField(1, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 2)
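
# Illustrative sketch, not part of the generated module: requesting an SLO
# with any BasicSli expanded into an explicit RequestBasedSli, per the view
# description above. The resource path is a placeholder.
#
#   _slo_request = MonitoringServicesServiceLevelObjectivesGetRequest(
#       name='projects/my-project/services/my-service/'
#            'serviceLevelObjectives/availability-slo',
#       view=(MonitoringServicesServiceLevelObjectivesGetRequest
#           .ViewValueValuesEnum.EXPLICIT),
#   )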


class MonitoringServicesServiceLevelObjectivesListRequest(_messages.Message):
  r"""A MonitoringServicesServiceLevelObjectivesListRequest object.

  Enums:
    ViewValueValuesEnum: View of the ServiceLevelObjectives to return. If
      DEFAULT, return each ServiceLevelObjective as originally defined. If
      EXPLICIT and the ServiceLevelObjective is defined in terms of a
      BasicSli, replace the BasicSli with a RequestBasedSli spelling out how
      the SLI is computed.

  Fields:
    filter: A filter specifying what ServiceLevelObjectives to return.
    pageSize: A non-negative number that is the maximum number of results to
      return. When 0, use default page size.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    parent: Required. Resource name of the parent containing the listed SLOs,
      either a project or a Monitoring Metrics Scope. The formats are:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
      workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-
    view: View of the ServiceLevelObjectives to return. If DEFAULT, return
      each ServiceLevelObjective as originally defined. If EXPLICIT and the
      ServiceLevelObjective is defined in terms of a BasicSli, replace the
      BasicSli with a RequestBasedSli spelling out how the SLI is computed.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""View of the ServiceLevelObjectives to return. If DEFAULT, return each
    ServiceLevelObjective as originally defined. If EXPLICIT and the
    ServiceLevelObjective is defined in terms of a BasicSli, replace the
    BasicSli with a RequestBasedSli spelling out how the SLI is computed.

    Values:
      VIEW_UNSPECIFIED: Same as FULL.
      FULL: Return the embedded ServiceLevelIndicator in the form in which it
        was defined. If it was defined using a BasicSli, return that BasicSli.
      EXPLICIT: For ServiceLevelIndicators using BasicSli articulation,
        instead return the ServiceLevelIndicator with its mode of computation
        fully spelled out as a RequestBasedSli. For ServiceLevelIndicators
        using RequestBasedSli or WindowsBasedSli, return the
        ServiceLevelIndicator as it was provided.
    """
    VIEW_UNSPECIFIED = 0
    FULL = 1
    EXPLICIT = 2

  filter = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  parent = _messages.StringField(4, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 5)


class MonitoringServicesServiceLevelObjectivesPatchRequest(_messages.Message):
  r"""A MonitoringServicesServiceLevelObjectivesPatchRequest object.

  Fields:
    name: Identifier. Resource name for this ServiceLevelObjective. The format
      is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
    serviceLevelObjective: A ServiceLevelObjective resource to be passed as
      the request body.
    updateMask: A set of field paths defining which fields to use for the
      update.
  """

  name = _messages.StringField(1, required=True)
  serviceLevelObjective = _messages.MessageField('ServiceLevelObjective', 2)
  updateMask = _messages.StringField(3)


class MonitoringUptimeCheckIpsListRequest(_messages.Message):
  r"""A MonitoringUptimeCheckIpsListRequest object.

  Fields:
    pageSize: The maximum number of results to return in a single response.
      The server may further constrain the maximum number of results returned
      in a single page. If the page_size is <=0, the server will decide the
      number of results to be returned. NOTE: this field is not yet
      implemented
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return more results from the previous
      method call. NOTE: this field is not yet implemented
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)


class MutationRecord(_messages.Message):
  r"""Describes a change made to a configuration.

  Fields:
    mutateTime: When the change occurred.
    mutatedBy: The email address of the user making the change.
  """

  mutateTime = _messages.StringField(1)
  mutatedBy = _messages.StringField(2)


class NotificationChannel(_messages.Message):
  r"""A NotificationChannel is a medium through which an alert is delivered
  when a policy violation is detected. Examples of channels include email,
  SMS, and third-party messaging applications. Fields containing sensitive
  information like authentication tokens or contact info are only partially
  populated on retrieval.

  Enums:
    VerificationStatusValueValuesEnum: Indicates whether this channel has been
      verified or not. On a ListNotificationChannels or GetNotificationChannel
      operation, this field is expected to be populated.If the value is
      UNVERIFIED, then it indicates that the channel is non-functioning (it
      both requires verification and lacks verification); otherwise, it is
      assumed that the channel works.If the channel is neither VERIFIED nor
      UNVERIFIED, it implies that the channel is of a type that does not
      require verification or that this specific channel has been exempted
      from verification because it was created prior to verification being
      required for channels of this type.This field cannot be modified using a
      standard UpdateNotificationChannel operation. To change the value of
      this field, you must call VerifyNotificationChannel.

  Messages:
    LabelsValue: Configuration fields that define the channel and its
      behavior. The permissible and required labels are specified in the
      NotificationChannelDescriptor.labels of the
      NotificationChannelDescriptor corresponding to the type field.
    UserLabelsValue: User-supplied key/value data that does not need to
      conform to the corresponding NotificationChannelDescriptor's schema,
      unlike the labels field. This field is intended to be used for
      organizing and identifying the NotificationChannel objects.The field can
      contain up to 64 entries. Each key and value is limited to 63 Unicode
      characters or 128 bytes, whichever is smaller. Labels and values can
      contain only lowercase letters, numerals, underscores, and dashes. Keys
      must begin with a letter.

  Fields:
    creationRecord: Record of the creation of this channel.
    description: An optional human-readable description of this notification
      channel. This description may provide additional details, beyond the
      display name, for the channel. This may not exceed 1024 Unicode
      characters.
    displayName: An optional human-readable name for this notification
      channel. It is recommended that you specify a non-empty and unique name
      in order to make it easier to identify the channels in your project,
      though this is not enforced. The display name is limited to 512 Unicode
      characters.
    enabled: Whether notifications are forwarded to the described channel.
      This makes it possible to disable delivery of notifications to a
      particular channel without removing the channel from all alerting
      policies that reference the channel. This is a more convenient approach
      when the change is temporary and you want to receive notifications from
      the same set of alerting policies on the channel at some point in the
      future.
    labels: Configuration fields that define the channel and its behavior. The
      permissible and required labels are specified in the
      NotificationChannelDescriptor.labels of the
      NotificationChannelDescriptor corresponding to the type field.
    mutationRecords: Records of the modification of this channel.
    name: Identifier. The full REST resource name for this channel. The format
      is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
      The [CHANNEL_ID] is automatically assigned by the server on creation.
    type: The type of the notification channel. This field matches the value
      of the NotificationChannelDescriptor.type field.
    userLabels: User-supplied key/value data that does not need to conform to
      the corresponding NotificationChannelDescriptor's schema, unlike the
      labels field. This field is intended to be used for organizing and
      identifying the NotificationChannel objects.The field can contain up to
      64 entries. Each key and value is limited to 63 Unicode characters or
      128 bytes, whichever is smaller. Labels and values can contain only
      lowercase letters, numerals, underscores, and dashes. Keys must begin
      with a letter.
    verificationStatus: Indicates whether this channel has been verified or
      not. On a ListNotificationChannels or GetNotificationChannel operation,
      this field is expected to be populated.If the value is UNVERIFIED, then
      it indicates that the channel is non-functioning (it both requires
      verification and lacks verification); otherwise, it is assumed that the
      channel works.If the channel is neither VERIFIED nor UNVERIFIED, it
      implies that the channel is of a type that does not require verification
      or that this specific channel has been exempted from verification
      because it was created prior to verification being required for channels
      of this type.This field cannot be modified using a standard
      UpdateNotificationChannel operation. To change the value of this field,
      you must call VerifyNotificationChannel.
  """

  class VerificationStatusValueValuesEnum(_messages.Enum):
    r"""Indicates whether this channel has been verified or not. On a
    ListNotificationChannels or GetNotificationChannel operation, this field
    is expected to be populated.If the value is UNVERIFIED, then it indicates
    that the channel is non-functioning (it both requires verification and
    lacks verification); otherwise, it is assumed that the channel works.If
    the channel is neither VERIFIED nor UNVERIFIED, it implies that the
    channel is of a type that does not require verification or that this
    specific channel has been exempted from verification because it was
    created prior to verification being required for channels of this
    type.This field cannot be modified using a standard
    UpdateNotificationChannel operation. To change the value of this field,
    you must call VerifyNotificationChannel.

    Values:
      VERIFICATION_STATUS_UNSPECIFIED: Sentinel value used to indicate that
        the state is unknown, omitted, or is not applicable (as in the case of
        channels that neither support nor require verification in order to
        function).
      UNVERIFIED: The channel has yet to be verified and requires verification
        to function. Note that this state also applies to the case where the
        verification process has been initiated by sending a verification code
        but where the verification code has not been submitted to complete the
        process.
      VERIFIED: It has been proven that notifications can be received on this
        notification channel and that someone on the project has access to
        messages that are delivered to that channel.
    """
    VERIFICATION_STATUS_UNSPECIFIED = 0
    UNVERIFIED = 1
    VERIFIED = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Configuration fields that define the channel and its behavior. The
    permissible and required labels are specified in the
    NotificationChannelDescriptor.labels of the NotificationChannelDescriptor
    corresponding to the type field.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""User-supplied key/value data that does not need to conform to the
    corresponding NotificationChannelDescriptor's schema, unlike the labels
    field. This field is intended to be used for organizing and identifying
    the NotificationChannel objects.The field can contain up to 64 entries.
    Each key and value is limited to 63 Unicode characters or 128 bytes,
    whichever is smaller. Labels and values can contain only lowercase
    letters, numerals, underscores, and dashes. Keys must begin with a letter.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  creationRecord = _messages.MessageField('MutationRecord', 1)
  description = _messages.StringField(2)
  displayName = _messages.StringField(3)
  enabled = _messages.BooleanField(4)
  labels = _messages.MessageField('LabelsValue', 5)
  mutationRecords = _messages.MessageField('MutationRecord', 6, repeated=True)
  name = _messages.StringField(7)
  type = _messages.StringField(8)
  userLabels = _messages.MessageField('UserLabelsValue', 9)
  verificationStatus = _messages.EnumField('VerificationStatusValueValuesEnum', 10)
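
# Illustrative sketch, not part of the generated module: populating the
# map-valued labels field through the generated LabelsValue/AdditionalProperty
# wrappers defined above. The "email" channel type and its "email_address"
# label key are assumptions about the corresponding
# NotificationChannelDescriptor.
#
#   _channel = NotificationChannel(
#       type='email',
#       displayName='On-call email',
#       enabled=True,
#       labels=NotificationChannel.LabelsValue(additionalProperties=[
#           NotificationChannel.LabelsValue.AdditionalProperty(
#               key='email_address', value='oncall@example.com'),
#       ]),
#   )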


class NotificationChannelDescriptor(_messages.Message):
  r"""A description of a notification channel. The descriptor includes the
  properties of the channel and the set of labels or fields that must be
  specified to configure channels of a given type.

  Enums:
    LaunchStageValueValuesEnum: The product launch stage for channels of this
      type.
    SupportedTiersValueListEntryValuesEnum:

  Fields:
    description: A human-readable description of the notification channel
      type. The description may include a description of the properties of the
      channel and pointers to external documentation.
    displayName: A human-readable name for the notification channel type. This
      form of the name is suitable for a user interface.
    labels: The set of labels that must be defined to identify a particular
      channel of the corresponding type. Each label includes a description for
      how that field should be populated.
    launchStage: The product launch stage for channels of this type.
    name: The full REST resource name for this descriptor. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] In
      the above, [TYPE] is the value of the type field.
    supportedTiers: The tiers that support this notification channel; the
      project service tier must be one of the supported_tiers.
    type: The type of notification channel, such as "email" and "sms". To view
      the full list of channels, see Channel descriptors
      (https://cloud.google.com/monitoring/alerts/using-channels-api#ncd).
      Notification channel types are globally unique.
  """

  class LaunchStageValueValuesEnum(_messages.Enum):
    r"""The product launch stage for channels of this type.

    Values:
      LAUNCH_STAGE_UNSPECIFIED: Do not use this default value.
      UNIMPLEMENTED: The feature is not yet implemented. Users can not use it.
      PRELAUNCH: Prelaunch features are hidden from users and are only visible
        internally.
      EARLY_ACCESS: Early Access features are limited to a closed group of
        testers. To use these features, you must sign up in advance and sign a
        Trusted Tester agreement (which includes confidentiality provisions).
        These features may be unstable, changed in backward-incompatible ways,
        and are not guaranteed to be released.
      ALPHA: Alpha is a limited availability test for releases before they are
        cleared for widespread use. By Alpha, all significant design issues
        are resolved and we are in the process of verifying functionality.
        Alpha customers need to apply for access, agree to applicable terms,
        and have their projects allowlisted. Alpha releases don't have to be
        feature complete, no SLAs are provided, and there are no technical
        support obligations, but they will be far enough along that customers
        can actually use them in test environments or for limited-use tests --
        just like they would in normal production cases.
      BETA: Beta is the point at which we are ready to open a release for any
        customer to use. There are no SLA or technical support obligations in
        a Beta release. Products will be complete from a feature perspective,
        but may have some open outstanding issues. Beta releases are suitable
        for limited production use cases.
      GA: GA features are open to all developers and are considered stable and
        fully qualified for production use.
      DEPRECATED: Deprecated features are scheduled to be shut down and
        removed. For more information, see the "Deprecation Policy" section of
        our Terms of Service (https://cloud.google.com/terms/) and the Google
        Cloud Platform Subject to the Deprecation Policy
        (https://cloud.google.com/terms/deprecation) documentation.
    """
    LAUNCH_STAGE_UNSPECIFIED = 0
    UNIMPLEMENTED = 1
    PRELAUNCH = 2
    EARLY_ACCESS = 3
    ALPHA = 4
    BETA = 5
    GA = 6
    DEPRECATED = 7

  class SupportedTiersValueListEntryValuesEnum(_messages.Enum):
    r"""SupportedTiersValueListEntryValuesEnum enum type.

    Values:
      SERVICE_TIER_UNSPECIFIED: An invalid sentinel value, used to indicate
        that a tier has not been provided explicitly.
      SERVICE_TIER_BASIC: The Cloud Monitoring Basic tier, a free tier of
        service that provides basic features, a moderate allotment of logs,
        and access to built-in metrics. A number of features are not available
        in this tier. For more details, see the service tiers documentation
        (https://cloud.google.com/monitoring/workspaces/tiers).
      SERVICE_TIER_PREMIUM: The Cloud Monitoring Premium tier, a higher, more
        expensive tier of service that provides access to all Cloud Monitoring
        features, lets you use Cloud Monitoring with AWS accounts, and has
        larger allotments for logs and metrics. For more details, see the
        service tiers documentation
        (https://cloud.google.com/monitoring/workspaces/tiers).
    """
    SERVICE_TIER_UNSPECIFIED = 0
    SERVICE_TIER_BASIC = 1
    SERVICE_TIER_PREMIUM = 2

  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  launchStage = _messages.EnumField('LaunchStageValueValuesEnum', 4)
  name = _messages.StringField(5)
  supportedTiers = _messages.EnumField('SupportedTiersValueListEntryValuesEnum', 6, repeated=True)
  type = _messages.StringField(7)


class NotificationChannelStrategy(_messages.Message):
  r"""Control over how the notification channels in notification_channels are
  notified when this alert fires, on a per-channel basis.

  Fields:
    notificationChannelNames: The full REST resource name for the notification
      channels that these settings apply to. Each of these corresponds to the
      name field in one of the NotificationChannel objects referenced in the
      notification_channels field of this AlertPolicy. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
    renotifyInterval: The frequency at which to send reminder notifications
      for open incidents.
  """

  notificationChannelNames = _messages.StringField(1, repeated=True)
  renotifyInterval = _messages.StringField(2)


class NotificationRateLimit(_messages.Message):
  r"""Control over the rate of notifications sent to this alerting policy's
  notification channels.

  Fields:
    period: Not more than one notification per period.
  """

  period = _messages.StringField(1)
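

# Illustrative sketch: pacing reminders for a specific channel while capping
# the overall notification rate. The channel name is hypothetical, and the
# duration strings assume the 's'-suffixed JSON duration encoding commonly
# used for these string fields.
def _example_notification_pacing():
  strategy = NotificationChannelStrategy(
      notificationChannelNames=[
          'projects/my-project/notificationChannels/1234567890'],
      renotifyInterval='1800s')  # remind every 30 minutes while open
  rate_limit = NotificationRateLimit(period='300s')  # at most one per 5 min
  return strategy, rate_limit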


class OperationMetadata(_messages.Message):
  r"""Contains metadata for longrunning operation for the edit Metrics Scope
  endpoints.

  Enums:
    StateValueValuesEnum: Current state of the batch operation.

  Fields:
    createTime: The time when the batch request was received.
    state: Current state of the batch operation.
    updateTime: The time when the operation result was last updated.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Current state of the batch operation.

    Values:
      STATE_UNSPECIFIED: Invalid.
      CREATED: Request has been received.
      RUNNING: Request is actively being processed.
      DONE: The batch processing is done.
      CANCELLED: The batch processing was cancelled.
    """
    STATE_UNSPECIFIED = 0
    CREATED = 1
    RUNNING = 2
    DONE = 3
    CANCELLED = 4

  createTime = _messages.StringField(1)
  state = _messages.EnumField('StateValueValuesEnum', 2)
  updateTime = _messages.StringField(3)


class Option(_messages.Message):
  r"""A protocol buffer option, which can be attached to a message, field,
  enumeration, etc.New usages of this message as an alternative to
  FileOptions, MessageOptions, FieldOptions, EnumOptions, EnumValueOptions,
  ServiceOptions, or MethodOptions are strongly discouraged.

  Messages:
    ValueValue: The option's value packed in an Any message. If the value is a
      primitive, the corresponding wrapper type defined in
      google/protobuf/wrappers.proto should be used. If the value is an enum,
      it should be stored as an int32 value using the
      google.protobuf.Int32Value type.

  Fields:
    name: The option's name. For protobuf built-in options (options defined in
      descriptor.proto), this is the short name. For example, "map_entry". For
      custom options, it should be the fully-qualified name. For example,
      "google.api.http".
    value: The option's value packed in an Any message. If the value is a
      primitive, the corresponding wrapper type defined in
      google/protobuf/wrappers.proto should be used. If the value is an enum,
      it should be stored as an int32 value using the
      google.protobuf.Int32Value type.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ValueValue(_messages.Message):
    r"""The option's value packed in an Any message. If the value is a
    primitive, the corresponding wrapper type defined in
    google/protobuf/wrappers.proto should be used. If the value is an enum, it
    should be stored as an int32 value using the google.protobuf.Int32Value
    type.

    Messages:
      AdditionalProperty: An additional property for a ValueValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ValueValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  name = _messages.StringField(1)
  value = _messages.MessageField('ValueValue', 2)
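

# Illustrative sketch: packing a primitive option value as an Any, as the
# ValueValue docstring describes. The '@type' URL and the JSON rendering of
# google.protobuf.BoolValue used here are assumptions about how such an Any
# is serialized; only the map-with-'@type' shape comes from this message.
def _example_option():
  packed_value = Option.ValueValue(additionalProperties=[
      Option.ValueValue.AdditionalProperty(
          key='@type',
          value=extra_types.JsonValue(
              string_value='type.googleapis.com/google.protobuf.BoolValue')),
      Option.ValueValue.AdditionalProperty(
          key='value',
          value=extra_types.JsonValue(boolean_value=True)),
  ])
  return Option(name='deprecated', value=packed_value)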


class PerformanceThreshold(_messages.Message):
  r"""A PerformanceThreshold is used when each window is good when that window
  has a sufficiently high performance.

  Fields:
    basicSliPerformance: BasicSli to evaluate to judge window quality.
    performance: RequestBasedSli to evaluate to judge window quality.
    threshold: If window performance >= threshold, the window is counted as
      good.
  """

  basicSliPerformance = _messages.MessageField('BasicSli', 1)
  performance = _messages.MessageField('RequestBasedSli', 2)
  threshold = _messages.FloatField(3)
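

# Illustrative sketch: a window counts as good when the request-based ratio
# below is at least 0.95. The monitoring filters are hypothetical.
def _example_performance_threshold():
  return PerformanceThreshold(
      performance=RequestBasedSli(
          goodTotalRatio=TimeSeriesRatio(
              goodServiceFilter='metric.labels.response_code="200"',
              totalServiceFilter='metric.type="example.com/request_count"')),
      threshold=0.95)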


class PingConfig(_messages.Message):
  r"""Information involved in sending ICMP pings alongside public HTTP/TCP
  checks. For HTTP, the pings are performed for each part of the redirect
  chain.

  Fields:
    pingsCount: Number of ICMP pings. A maximum of 3 ICMP pings is currently
      supported.
  """

  pingsCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)


class Point(_messages.Message):
  r"""A single data point in a time series.

  Fields:
    interval: The time interval to which the data point applies. For GAUGE
      metrics, the start time is optional, but if it is supplied, it must
      equal the end time. For DELTA metrics, the start and end time should
      specify a non-zero interval, with subsequent points specifying
      contiguous and non-overlapping intervals. For CUMULATIVE metrics, the
      start and end time should specify a non-zero interval, with subsequent
      points specifying the same start time and increasing end times, until an
      event resets the cumulative value to zero and sets a new start time for
      the following points.
    value: The value of the data point.
  """

  interval = _messages.MessageField('TimeInterval', 1)
  value = _messages.MessageField('TypedValue', 2)
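

# Illustrative sketch: a data point for a GAUGE metric. For GAUGE metrics the
# start time may be omitted (it defaults to the end time); the timestamp is a
# sample RFC 3339 value and the measurement is hypothetical.
def _example_gauge_point():
  return Point(
      interval=TimeInterval(endTime='2024-01-01T00:00:00Z'),
      value=TypedValue(doubleValue=3.14))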


class PointData(_messages.Message):
  r"""A point's value columns and time interval. Each point has one or more
  point values corresponding to the entries in point_descriptors field in the
  TimeSeriesDescriptor associated with this object.

  Fields:
    timeInterval: The time interval associated with the point.
    values: The values that make up the point.
  """

  timeInterval = _messages.MessageField('TimeInterval', 1)
  values = _messages.MessageField('TypedValue', 2, repeated=True)


class PolicySnapshot(_messages.Message):
  r"""The state of the policy at the time the alert was generated.

  Enums:
    SeverityValueValuesEnum: The severity of the alert policy.

  Messages:
    UserLabelsValue: The user labels for the alert policy.

  Fields:
    displayName: The display name of the alert policy.
    name: The name of the alert policy resource. In the form of
      "projects/PROJECT_ID_OR_NUMBER/alertPolicies/ALERT_POLICY_ID".
    severity: The severity of the alert policy.
    userLabels: The user labels for the alert policy.
  """

  class SeverityValueValuesEnum(_messages.Enum):
    r"""The severity of the alert policy.

    Values:
      SEVERITY_UNSPECIFIED: No severity is specified. This is the default
        value.
      CRITICAL: This is the highest severity level. Use this if the problem
        could cause significant damage or downtime.
      ERROR: This is the medium severity level. Use this if the problem could
        cause minor damage or downtime.
      WARNING: This is the lowest severity level. Use this if the problem is
        not causing any damage or downtime, but could potentially lead to a
        problem in the future.
    """
    SEVERITY_UNSPECIFIED = 0
    CRITICAL = 1
    ERROR = 2
    WARNING = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""The user labels for the alert policy.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  displayName = _messages.StringField(1)
  name = _messages.StringField(2)
  severity = _messages.EnumField('SeverityValueValuesEnum', 3)
  userLabels = _messages.MessageField('UserLabelsValue', 4)


class PrometheusQueryLanguageCondition(_messages.Message):
  r"""A condition type that allows alerting policies to be defined using
  Prometheus Query Language (PromQL)
  (https://prometheus.io/docs/prometheus/latest/querying/basics/).The
  PrometheusQueryLanguageCondition message contains information from a
  Prometheus alerting rule and its associated rule group.A Prometheus alerting
  rule is described here (https://prometheus.io/docs/prometheus/latest/configu
  ration/alerting_rules/). The semantics of a Prometheus alerting rule is
  described here (https://prometheus.io/docs/prometheus/latest/configuration/r
  ecording_rules/#rule).A Prometheus rule group is described here (https://pro
  metheus.io/docs/prometheus/latest/configuration/recording_rules/). The
  semantics of a Prometheus rule group is described here (https://prometheus.i
  o/docs/prometheus/latest/configuration/recording_rules/#rule_group).Because
  Cloud Alerting has no representation of a Prometheus rule group resource, we
  must embed the information of the parent rule group inside each of the
  conditions that refer to it. We must also update the contents of all
  Prometheus alerts in case the information of their rule group changes.The
  PrometheusQueryLanguageCondition protocol buffer combines the information of
  the corresponding rule group and alerting rule. The structure of the
  PrometheusQueryLanguageCondition protocol buffer does NOT mimic the
  structure of the Prometheus rule group and alerting rule YAML declarations.
  The PrometheusQueryLanguageCondition protocol buffer may change in the
  future to support future rule group and/or alerting rule features. There are
  no new such features at the present time (2023-06-26).

  Messages:
    LabelsValue: Optional. Labels to add to or overwrite in the PromQL query
      result. Label names must be valid
      (https://prometheus.io/docs/concepts/data_model/#metric-names-and-
      labels). Label values can be templatized by using variables
      (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). The
      only available variable names are the names of the labels in the PromQL
      result, including "__name__" and "value". "labels" may be empty.

  Fields:
    alertRule: Optional. The alerting rule name of this alert in the
      corresponding Prometheus configuration file.Some external tools may
      require this field to be populated correctly in order to refer to the
      original Prometheus configuration file. The rule group name and the
      alert name are necessary to update the relevant AlertPolicies in case
      the definition of the rule group changes in the future.This field is
      optional. If this field is not empty, then it must be a valid Prometheus
      label name (https://prometheus.io/docs/concepts/data_model/#metric-
      names-and-labels). This field may not exceed 2048 Unicode characters in
      length.
    disableMetricValidation: Optional. Whether to disable metric existence
      validation for this condition.This allows alerting policies to be
      defined on metrics that do not yet exist, improving advanced customer
      workflows such as configuring alerting policies using Terraform.Users
      with the monitoring.alertPolicyViewer role are able to see the name of
      the non-existent metric in the alerting policy condition.
    duration: Optional. Alerts are considered firing once their PromQL
      expression has evaluated to "true" for this long. Alerts whose PromQL
      expression has not evaluated to "true" for long enough are considered
      pending. Must be a non-negative duration or missing. This field is
      optional. Its default value is zero.
    evaluationInterval: Optional. How often this rule should be evaluated.
      Must be a positive multiple of 30 seconds or missing. This field is
      optional. Its default value is 30 seconds. If this
      PrometheusQueryLanguageCondition was generated from a Prometheus
      alerting rule, then this value should be taken from the enclosing rule
      group.
    labels: Optional. Labels to add to or overwrite in the PromQL query
      result. Label names must be valid
      (https://prometheus.io/docs/concepts/data_model/#metric-names-and-
      labels). Label values can be templatized by using variables
      (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). The
      only available variable names are the names of the labels in the PromQL
      result, including "__name__" and "value". "labels" may be empty.
    query: Required. The PromQL expression to evaluate. Every evaluation cycle
      this expression is evaluated at the current time, and all resultant time
      series become pending/firing alerts. This field must not be empty.
    ruleGroup: Optional. The rule group name of this alert in the
      corresponding Prometheus configuration file.Some external tools may
      require this field to be populated correctly in order to refer to the
      original Prometheus configuration file. The rule group name and the
      alert name are necessary to update the relevant AlertPolicies in case
      the definition of the rule group changes in the future.This field is
      optional. If this field is not empty, then it must contain a valid UTF-8
      string. This field may not exceed 2048 Unicode characters in length.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. Labels to add to or overwrite in the PromQL query result.
    Label names must be valid
    (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
    Label values can be templatized by using variables
    (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). The
    only available variable names are the names of the labels in the PromQL
    result, including "__name__" and "value". "labels" may be empty.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  alertRule = _messages.StringField(1)
  disableMetricValidation = _messages.BooleanField(2)
  duration = _messages.StringField(3)
  evaluationInterval = _messages.StringField(4)
  labels = _messages.MessageField('LabelsValue', 5)
  query = _messages.StringField(6)
  ruleGroup = _messages.StringField(7)
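

# Illustrative sketch: a PromQL condition that fires after its expression has
# been true for 10 minutes, evaluated every 30 seconds. The query, rule group,
# alert rule name, and label are hypothetical.
def _example_promql_condition():
  return PrometheusQueryLanguageCondition(
      query='up{job="my-service"} == 0',
      duration='600s',
      evaluationInterval='30s',
      alertRule='InstanceDown',
      ruleGroup='availability',
      labels=PrometheusQueryLanguageCondition.LabelsValue(
          additionalProperties=[
              PrometheusQueryLanguageCondition.LabelsValue.AdditionalProperty(
                  key='team', value='sre')]))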


class QueryTimeSeriesRequest(_messages.Message):
  r"""The QueryTimeSeries request. For information about the status of
  Monitoring Query Language (MQL), see the MQL deprecation notice
  (https://cloud.google.com/stackdriver/docs/deprecations/mql).

  Fields:
    pageSize: A positive number that is the maximum number of time_series_data
      to return.
    pageToken: If this field is not empty then it must contain the
      nextPageToken value returned by a previous call to this method. Using
      this field causes the method to return additional results from the
      previous method call.
    query: Required. The query in the Monitoring Query Language
      (https://cloud.google.com/monitoring/mql/reference) format. The default
      time zone is in UTC.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  query = _messages.StringField(3)


class QueryTimeSeriesResponse(_messages.Message):
  r"""The QueryTimeSeries response. For information about the status of
  Monitoring Query Language (MQL), see the MQL deprecation notice
  (https://cloud.google.com/stackdriver/docs/deprecations/mql).

  Fields:
    nextPageToken: If there are more results than have been returned, then
      this field is set to a non-empty value. To see the additional results,
      use that value as page_token in the next call to this method.
    partialErrors: Query execution errors that may have caused the time series
      data returned to be incomplete. Any data that could be retrieved is
      still included in the response.
    timeSeriesData: The time series data.
    timeSeriesDescriptor: The descriptor for the time series data.
  """

  nextPageToken = _messages.StringField(1)
  partialErrors = _messages.MessageField('Status', 2, repeated=True)
  timeSeriesData = _messages.MessageField('TimeSeriesData', 3, repeated=True)
  timeSeriesDescriptor = _messages.MessageField('TimeSeriesDescriptor', 4)
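

# Illustrative sketch: paging through QueryTimeSeries results. query_fn is a
# hypothetical callable that sends a QueryTimeSeriesRequest and returns a
# QueryTimeSeriesResponse; this module defines only the messages, not the
# transport.
def _example_query_all_pages(query_fn, mql_query):
  request = QueryTimeSeriesRequest(query=mql_query, pageSize=100)
  data = []
  while True:
    response = query_fn(request)
    data.extend(response.timeSeriesData)
    if not response.nextPageToken:
      return data
    request.pageToken = response.nextPageToken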


class Range(_messages.Message):
  r"""The range of the population values.

  Fields:
    max: The maximum of the population values.
    min: The minimum of the population values.
  """

  max = _messages.FloatField(1)
  min = _messages.FloatField(2)


class RequestBasedSli(_messages.Message):
  r"""Service Level Indicators for which atomic units of service are counted
  directly.

  Fields:
    distributionCut: distribution_cut is used when good_service is a count of
      values aggregated in a Distribution that fall into a good range. The
      total_service is the total count of all values aggregated in the
      Distribution.
    goodTotalRatio: good_total_ratio is used when the ratio of good_service to
      total_service is computed from two TimeSeries.
  """

  distributionCut = _messages.MessageField('DistributionCut', 1)
  goodTotalRatio = _messages.MessageField('TimeSeriesRatio', 2)


class ResourceGroup(_messages.Message):
  r"""The resource submessage for group checks. It can be used instead of a
  monitored resource, when multiple resources are being monitored.

  Enums:
    ResourceTypeValueValuesEnum: The resource type of the group members.

  Fields:
    groupId: The group of resources being monitored. Should be only the
      [GROUP_ID], and not the full-path
      projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID].
    resourceType: The resource type of the group members.
  """

  class ResourceTypeValueValuesEnum(_messages.Enum):
    r"""The resource type of the group members.

    Values:
      RESOURCE_TYPE_UNSPECIFIED: Default value (not valid).
      INSTANCE: A group of instances from Google Cloud Platform (GCP) or
        Amazon Web Services (AWS).
      AWS_ELB_LOAD_BALANCER: A group of Amazon ELB load balancers.
    """
    RESOURCE_TYPE_UNSPECIFIED = 0
    INSTANCE = 1
    AWS_ELB_LOAD_BALANCER = 2

  groupId = _messages.StringField(1)
  resourceType = _messages.EnumField('ResourceTypeValueValuesEnum', 2)


class ResponseStatusCode(_messages.Message):
  r"""A status to accept. Either a status code class like "2xx", or an integer
  status code like "200".

  Enums:
    StatusClassValueValuesEnum: A class of status codes to accept.

  Fields:
    statusClass: A class of status codes to accept.
    statusValue: A status code to accept.
  """

  class StatusClassValueValuesEnum(_messages.Enum):
    r"""A class of status codes to accept.

    Values:
      STATUS_CLASS_UNSPECIFIED: Default value that matches no status codes.
      STATUS_CLASS_1XX: The class of status codes between 100 and 199.
      STATUS_CLASS_2XX: The class of status codes between 200 and 299.
      STATUS_CLASS_3XX: The class of status codes between 300 and 399.
      STATUS_CLASS_4XX: The class of status codes between 400 and 499.
      STATUS_CLASS_5XX: The class of status codes between 500 and 599.
      STATUS_CLASS_ANY: The class of all status codes.
    """
    STATUS_CLASS_UNSPECIFIED = 0
    STATUS_CLASS_1XX = 1
    STATUS_CLASS_2XX = 2
    STATUS_CLASS_3XX = 3
    STATUS_CLASS_4XX = 4
    STATUS_CLASS_5XX = 5
    STATUS_CLASS_ANY = 6

  statusClass = _messages.EnumField('StatusClassValueValuesEnum', 1)
  statusValue = _messages.IntegerField(2, variant=_messages.Variant.INT32)
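

# Illustrative sketch: the two ways to accept a response status, either a
# whole class of codes or one specific code.
def _example_accepted_statuses():
  status_class_enum = ResponseStatusCode.StatusClassValueValuesEnum
  return [
      ResponseStatusCode(statusClass=status_class_enum.STATUS_CLASS_2XX),
      ResponseStatusCode(statusValue=200),
  ]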


class RowCountTest(_messages.Message):
  r"""A test that checks if the number of rows in the result set violates some
  threshold.

  Enums:
    ComparisonValueValuesEnum: Required. The comparison to apply between the
      number of rows returned by the query and the threshold.

  Fields:
    comparison: Required. The comparison to apply between the number of rows
      returned by the query and the threshold.
    threshold: Required. The value against which to compare the row count.
  """

  class ComparisonValueValuesEnum(_messages.Enum):
    r"""Required. The comparison to apply between the number of rows returned
    by the query and the threshold.

    Values:
      COMPARISON_UNSPECIFIED: No ordering relationship is specified.
      COMPARISON_GT: True if the left argument is greater than the right
        argument.
      COMPARISON_GE: True if the left argument is greater than or equal to the
        right argument.
      COMPARISON_LT: True if the left argument is less than the right
        argument.
      COMPARISON_LE: True if the left argument is less than or equal to the
        right argument.
      COMPARISON_EQ: True if the left argument is equal to the right argument.
      COMPARISON_NE: True if the left argument is not equal to the right
        argument.
    """
    COMPARISON_UNSPECIFIED = 0
    COMPARISON_GT = 1
    COMPARISON_GE = 2
    COMPARISON_LT = 3
    COMPARISON_LE = 4
    COMPARISON_EQ = 5
    COMPARISON_NE = 6

  comparison = _messages.EnumField('ComparisonValueValuesEnum', 1)
  threshold = _messages.IntegerField(2)


class SendNotificationChannelVerificationCodeRequest(_messages.Message):
  r"""The SendNotificationChannelVerificationCode request."""


class Service(_messages.Message):
  r"""A Service is a discrete, autonomous, and network-accessible unit,
  designed to solve an individual concern (Wikipedia
  (https://en.wikipedia.org/wiki/Service-orientation)). In Cloud Monitoring, a
  Service acts as the root resource under which operational aspects of the
  service are accessible.

  Messages:
    UserLabelsValue: Labels which have been used to annotate the service.
      Label keys must start with a letter. Label keys and values may contain
      lowercase letters, numbers, underscores, and dashes. Label keys and
      values have a maximum length of 63 characters, and must be less than 128
      bytes in size. Up to 64 label entries may be stored. For labels which do
      not have a semantic value, the empty string may be supplied for the
      label value.

  Fields:
    appEngine: Type used for App Engine services.
    basicService: Message that contains the service type and service labels of
      this service if it is a basic service. Documentation and examples here
      (https://cloud.google.com/stackdriver/docs/solutions/slo-
      monitoring/api/api-structures#basic-svc-w-basic-sli).
    cloudEndpoints: Type used for Cloud Endpoints services.
    cloudRun: Type used for Cloud Run services.
    clusterIstio: Type used for Istio services that live in a Kubernetes
      cluster.
    custom: Custom service type.
    displayName: Name used for UI elements listing this Service.
    gkeNamespace: Type used for GKE Namespaces.
    gkeService: Type used for GKE Services (the Kubernetes concept of a
      service).
    gkeWorkload: Type used for GKE Workloads.
    istioCanonicalService: Type used for canonical services scoped to an Istio
      mesh. Metrics for Istio are documented here
      (https://istio.io/latest/docs/reference/config/metrics/)
    meshIstio: Type used for Istio services scoped to an Istio mesh.
    name: Identifier. Resource name for this Service. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
    telemetry: Configuration for how to query telemetry on a Service.
    userLabels: Labels which have been used to annotate the service. Label
      keys must start with a letter. Label keys and values may contain
      lowercase letters, numbers, underscores, and dashes. Label keys and
      values have a maximum length of 63 characters, and must be less than 128
      bytes in size. Up to 64 label entries may be stored. For labels which do
      not have a semantic value, the empty string may be supplied for the
      label value.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""Labels which have been used to annotate the service. Label keys must
    start with a letter. Label keys and values may contain lowercase letters,
    numbers, underscores, and dashes. Label keys and values have a maximum
    length of 63 characters, and must be less than 128 bytes in size. Up to 64
    label entries may be stored. For labels which do not have a semantic
    value, the empty string may be supplied for the label value.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  appEngine = _messages.MessageField('AppEngine', 1)
  basicService = _messages.MessageField('BasicService', 2)
  cloudEndpoints = _messages.MessageField('CloudEndpoints', 3)
  cloudRun = _messages.MessageField('CloudRun', 4)
  clusterIstio = _messages.MessageField('ClusterIstio', 5)
  custom = _messages.MessageField('Custom', 6)
  displayName = _messages.StringField(7)
  gkeNamespace = _messages.MessageField('GkeNamespace', 8)
  gkeService = _messages.MessageField('GkeService', 9)
  gkeWorkload = _messages.MessageField('GkeWorkload', 10)
  istioCanonicalService = _messages.MessageField('IstioCanonicalService', 11)
  meshIstio = _messages.MessageField('MeshIstio', 12)
  name = _messages.StringField(13)
  telemetry = _messages.MessageField('Telemetry', 14)
  userLabels = _messages.MessageField('UserLabelsValue', 15)
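

# Illustrative sketch: a Service annotated with a user label. The resource
# name, display name, and label are hypothetical.
def _example_service():
  return Service(
      name='projects/my-project/services/my-service',
      displayName='My Service',
      userLabels=Service.UserLabelsValue(additionalProperties=[
          Service.UserLabelsValue.AdditionalProperty(
              key='env', value='prod')]))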


class ServiceAgentAuthentication(_messages.Message):
  r"""Contains information needed for generating either an OpenID Connect
  token (https://developers.google.com/identity/protocols/OpenIDConnect) or
  OAuth token (https://developers.google.com/identity/protocols/oauth2). The
  token will be generated for the Monitoring service agent service account.

  Enums:
    TypeValueValuesEnum: Type of authentication.

  Fields:
    type: Type of authentication.
  """

  class TypeValueValuesEnum(_messages.Enum):
    r"""Type of authentication.

    Values:
      SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED: Default value, will
        result in OIDC Authentication.
      OIDC_TOKEN: OIDC Authentication
    """
    SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED = 0
    OIDC_TOKEN = 1

  type = _messages.EnumField('TypeValueValuesEnum', 1)


class ServiceLevelIndicator(_messages.Message):
  r"""A Service-Level Indicator (SLI) describes the "performance" of a
  service. For some services, the SLI is well-defined. In such cases, the SLI
  can be described easily by referencing the well-known SLI and providing the
  needed parameters. Alternatively, a "custom" SLI can be defined with a query
  to the underlying metric store. An SLI is defined to be good_service /
  total_service over any queried time interval. The value of performance
  always falls into the range 0 <= performance <= 1. A custom SLI describes
  how to compute this ratio, whether this is by dividing values from a pair of
  time series, cutting a Distribution into good and bad counts, or counting
  time windows in which the service complies with a criterion. For separation
  of concerns, a single Service-Level Indicator measures performance for only
  one aspect of service quality, such as fraction of successful queries or
  fast-enough queries.

  Fields:
    basicSli: Basic SLI on a well-known service type.
    requestBased: Request-based SLIs
    windowsBased: Windows-based SLIs
  """

  basicSli = _messages.MessageField('BasicSli', 1)
  requestBased = _messages.MessageField('RequestBasedSli', 2)
  windowsBased = _messages.MessageField('WindowsBasedSli', 3)


class ServiceLevelObjective(_messages.Message):
  r"""A Service-Level Objective (SLO) describes a level of desired good
  service. It consists of a service-level indicator (SLI), a performance goal,
  and a period over which the objective is to be evaluated against that goal.
  The SLO can use SLIs defined in a number of different manners. Typical SLOs
  might include "99% of requests in each rolling week have latency below 200
  milliseconds" or "99.5% of requests in each calendar month return
  successfully."

  Enums:
    CalendarPeriodValueValuesEnum: A calendar period, semantically "since the
      start of the current calendar period". At this time, only DAY, WEEK,
      FORTNIGHT, and MONTH are supported.

  Messages:
    UserLabelsValue: Labels which have been used to annotate the service-level
      objective. Label keys must start with a letter. Label keys and values
      may contain lowercase letters, numbers, underscores, and dashes. Label
      keys and values have a maximum length of 63 characters, and must be less
      than 128 bytes in size. Up to 64 label entries may be stored. For labels
      which do not have a semantic value, the empty string may be supplied for
      the label value.

  Fields:
    calendarPeriod: A calendar period, semantically "since the start of the
      current calendar period". At this time, only DAY, WEEK, FORTNIGHT, and
      MONTH are supported.
    displayName: Name used for UI elements listing this SLO.
    goal: The fraction of service that must be good in order for this
      objective to be met. 0 < goal <= 0.9999.
    name: Identifier. Resource name for this ServiceLevelObjective. The format
      is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelOb
      jectives/[SLO_NAME]
    rollingPeriod: A rolling time period, semantically "in the past rolling
      period". Must be an integer multiple of 1 day no larger than 30 days.
    serviceLevelIndicator: The definition of good service, used to measure and
      calculate the quality of the Service's performance with respect to a
      single aspect of service quality.
    userLabels: Labels which have been used to annotate the service-level
      objective. Label keys must start with a letter. Label keys and values
      may contain lowercase letters, numbers, underscores, and dashes. Label
      keys and values have a maximum length of 63 characters, and must be less
      than 128 bytes in size. Up to 64 label entries may be stored. For labels
      which do not have a semantic value, the empty string may be supplied for
      the label value.
  """

  class CalendarPeriodValueValuesEnum(_messages.Enum):
    r"""A calendar period, semantically "since the start of the current ". At
    this time, only DAY, WEEK, FORTNIGHT, and MONTH are supported.

    Values:
      CALENDAR_PERIOD_UNSPECIFIED: Undefined period, raises an error.
      DAY: A day.
      WEEK: A week. Weeks begin on Monday, following ISO 8601
        (https://en.wikipedia.org/wiki/ISO_week_date).
      FORTNIGHT: A fortnight. The first calendar fortnight of the year begins
        at the start of week 1 according to ISO 8601
        (https://en.wikipedia.org/wiki/ISO_week_date).
      MONTH: A month.
      QUARTER: A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and
        1-Oct of each year.
      HALF: A half-year. Half-years start on dates 1-Jan and 1-Jul.
      YEAR: A year.
    """
    CALENDAR_PERIOD_UNSPECIFIED = 0
    DAY = 1
    WEEK = 2
    FORTNIGHT = 3
    MONTH = 4
    QUARTER = 5
    HALF = 6
    YEAR = 7

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""Labels which have been used to annotate the service-level objective.
    Label keys must start with a letter. Label keys and values may contain
    lowercase letters, numbers, underscores, and dashes. Label keys and values
    have a maximum length of 63 characters, and must be less than 128 bytes in
    size. Up to 64 label entries may be stored. For labels which do not have a
    semantic value, the empty string may be supplied for the label value.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  calendarPeriod = _messages.EnumField('CalendarPeriodValueValuesEnum', 1)
  displayName = _messages.StringField(2)
  goal = _messages.FloatField(3)
  name = _messages.StringField(4)
  rollingPeriod = _messages.StringField(5)
  serviceLevelIndicator = _messages.MessageField('ServiceLevelIndicator', 6)
  userLabels = _messages.MessageField('UserLabelsValue', 7)
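

# Illustrative sketch: "99% of requests over a rolling 7 days are good". The
# monitoring filters are hypothetical, and the rolling period is written as an
# 's'-suffixed duration string, which is an assumption about the wire format.
def _example_slo():
  sli = ServiceLevelIndicator(
      requestBased=RequestBasedSli(
          goodTotalRatio=TimeSeriesRatio(
              goodServiceFilter='metric.labels.response_code="200"',
              totalServiceFilter='metric.type="example.com/request_count"')))
  return ServiceLevelObjective(
      displayName='99% of requests are good (rolling 7 days)',
      goal=0.99,
      rollingPeriod='604800s',  # 7 days
      serviceLevelIndicator=sli)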


class Snooze(_messages.Message):
  r"""A Snooze will prevent any alerts from being opened, and close any that
  are already open. The Snooze will work on alerts that match the criteria
  defined in the Snooze. The Snooze will be active from interval.start_time
  through interval.end_time.

  Fields:
    criteria: Required. This defines the criteria for applying the Snooze. See
      Criteria for more information.
    displayName: Required. A display name for the Snooze. This can be, at
      most, 512 unicode characters.
    interval: Required. The Snooze will be active from interval.start_time
      through interval.end_time. interval.start_time cannot be in the past.
      There is a 15 second clock skew to account for the time it takes for a
      request to reach the API from the UI.
    name: Required. Identifier. The name of the Snooze. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the Snooze
      will be generated by the system.
  """

  criteria = _messages.MessageField('Criteria', 1)
  displayName = _messages.StringField(2)
  interval = _messages.MessageField('TimeInterval', 3)
  name = _messages.StringField(4)


class SourceContext(_messages.Message):
  r"""SourceContext represents information about the source of a protobuf
  element, like the file in which it is defined.

  Fields:
    fileName: The path-qualified name of the .proto file that contained the
      associated protobuf element. For example:
      "google/protobuf/source_context.proto".
  """

  fileName = _messages.StringField(1)


class SpanContext(_messages.Message):
  r"""The context of a span. This is attached to an Exemplar in Distribution
  values during aggregation.It contains the name of a span with format:
  projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]

  Fields:
    spanName: The resource name of the span. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
      [TRACE_ID] is a unique identifier for a trace within a project; it is a
      32-character hexadecimal encoding of a 16-byte array.[SPAN_ID] is a
      unique identifier for a span within a trace; it is a 16-character
      hexadecimal encoding of an 8-byte array.
  """

  spanName = _messages.StringField(1)


class SqlCondition(_messages.Message):
  r"""A condition that allows alerting policies to be defined using GoogleSQL.
  SQL conditions examine a sliding window of logs using GoogleSQL. Alert
  policies with SQL conditions may incur additional billing.

  Fields:
    booleanTest: Test the boolean value in the indicated column.
    daily: Schedule the query to execute every so many days.
    hourly: Schedule the query to execute every so many hours.
    minutes: Schedule the query to execute every so many minutes.
    query: Required. The Log Analytics SQL query to run, as a string. The
      query must conform to the required shape. Specifically, the query must
      not try to filter the input by time. A filter will automatically be
      applied to filter the input so that the query receives all rows received
      since the last time the query was run.For example, the following query
      extracts all log entries containing an HTTP request: SELECT timestamp,
      log_name, severity, http_request, resource, labels FROM
      my-project.global._Default._AllLogs WHERE http_request IS NOT NULL
    rowCountTest: Test the row count against a threshold.
  """

  booleanTest = _messages.MessageField('BooleanTest', 1)
  daily = _messages.MessageField('Daily', 2)
  hourly = _messages.MessageField('Hourly', 3)
  minutes = _messages.MessageField('Minutes', 4)
  query = _messages.StringField(5)
  rowCountTest = _messages.MessageField('RowCountTest', 6)
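

# Illustrative sketch: a SQL condition that alerts whenever the query returns
# at least one row. The query is hypothetical, and one of the schedule fields
# (daily, hourly, or minutes) would typically also be set; it is omitted here
# because those message types are defined elsewhere in this module.
def _example_sql_condition():
  return SqlCondition(
      query=('SELECT log_name, severity FROM '
             'my-project.global._Default._AllLogs WHERE severity = "ERROR"'),
      rowCountTest=RowCountTest(
          comparison=RowCountTest.ComparisonValueValuesEnum.COMPARISON_GT,
          threshold=0))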


class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)


class Status(_messages.Message):
  r"""The Status type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by gRPC (https://github.com/grpc). Each Status message contains three
  pieces of data: error code, error message, and error details.You can find
  out more about this error model and how to work with it in the API Design
  Guide (https://cloud.google.com/apis/design/errors).

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)


class SyntheticMonitorTarget(_messages.Message):
  r"""Describes a Synthetic Monitor to be invoked by Uptime.

  Fields:
    cloudFunctionV2: Target a Synthetic Monitor GCFv2 instance.
  """

  cloudFunctionV2 = _messages.MessageField('CloudFunctionV2Target', 1)


class TcpCheck(_messages.Message):
  r"""Information required for a TCP Uptime check request.

  Fields:
    pingConfig: Contains information needed to add pings to a TCP check.
    port: The TCP port on the server against which to run the check. Will be
      combined with host (specified within the monitored_resource) to
      construct the full URL. Required.
  """

  pingConfig = _messages.MessageField('PingConfig', 1)
  port = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class Telemetry(_messages.Message):
  r"""Configuration for how to query telemetry on a Service.

  Fields:
    resourceName: The full name of the resource that defines this service.
      Formatted as described in
      https://cloud.google.com/apis/design/resource_names.
  """

  resourceName = _messages.StringField(1)


class TimeInterval(_messages.Message):
  r"""Describes a time interval: Reads: A half-open time interval. It includes
  the end time but excludes the start time: (startTime, endTime]. The start
  time must be specified, must be earlier than the end time, and should be no
  older than the data retention period for the metric. Writes: A closed time
  interval. It extends from the start time to the end time, and includes both:
  [startTime, endTime]. Valid time intervals depend on the MetricKind (https:/
  /cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#M
  etricKind) of the metric value. The end time must not be earlier than the
  start time, and the end time must not be more than 25 hours in the past or
  more than five minutes in the future. For GAUGE metrics, the startTime value
  is technically optional; if no value is specified, the start time defaults
  to the value of the end time, and the interval represents a single point in
  time. If both start and end times are specified, they must be identical.
  Such an interval is valid only for GAUGE metrics, which are point-in-time
  measurements. The end time of a new interval must be at least a millisecond
  after the end time of the previous interval. For DELTA metrics, the start
  time and end time must specify a non-zero interval, with subsequent points
  specifying contiguous and non-overlapping intervals. For DELTA metrics, the
  start time of the next interval must be at least a millisecond after the end
  time of the previous interval. For CUMULATIVE metrics, the start time and
  end time must specify a non-zero interval, with subsequent points specifying
  the same start time and increasing end times, until an event resets the
  cumulative value to zero and sets a new start time for the following points.
  The new start time must be at least a millisecond after the end time of the
  previous interval. The start time of a new interval must be at least a
  millisecond after the end time of the previous interval because intervals
  are closed. If the start time of a new interval is the same as the end time
  of the previous interval, then data written at the new start time could
  overwrite data written at the previous end time.

  Fields:
    endTime: Required. The end of the time interval.
    startTime: Optional. The beginning of the time interval. The default value
      for the start time is the end time. The start time must not be later
      than the end time.
  """

  endTime = _messages.StringField(1)
  startTime = _messages.StringField(2)
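

# Illustrative sketch: successive intervals for a CUMULATIVE metric, following
# the rules above: the start time stays fixed while end times increase, and
# each new end time is later than the previous one. The timestamps are sample
# RFC 3339 values.
def _example_cumulative_intervals():
  start = '2024-01-01T00:00:00Z'
  return [
      TimeInterval(startTime=start, endTime='2024-01-01T00:01:00Z'),
      TimeInterval(startTime=start, endTime='2024-01-01T00:02:00Z'),
      TimeInterval(startTime=start, endTime='2024-01-01T00:03:00Z'),
  ]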


class TimeOfDay(_messages.Message):
  r"""Represents a time of day. The date and time zone are either not
  significant or are specified elsewhere. An API may choose to allow leap
  seconds. Related types are google.type.Date and google.protobuf.Timestamp.

  Fields:
    hours: Hours of a day in 24 hour format. Must be greater than or equal to
      0 and typically must be less than or equal to 23. An API may choose to
      allow the value "24:00:00" for scenarios like business closing time.
    minutes: Minutes of an hour. Must be greater than or equal to 0 and less
      than or equal to 59.
    nanos: Fractions of seconds, in nanoseconds. Must be greater than or equal
      to 0 and less than or equal to 999,999,999.
    seconds: Seconds of a minute. Must be greater than or equal to 0 and
      typically must be less than or equal to 59. An API may allow the value
      60 if it allows leap-seconds.
  """

  hours = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  minutes = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  nanos = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  seconds = _messages.IntegerField(4, variant=_messages.Variant.INT32)


class TimeSeries(_messages.Message):
  r"""A collection of data points that describes the time-varying values of a
  metric. A time series is identified by a combination of a fully-specified
  monitored resource and a fully-specified metric. This type is used for both
  listing and creating time series.

  Enums:
    MetricKindValueValuesEnum: The metric kind of the time series. When
      listing time series, this metric kind might be different from the metric
      kind of the associated metric if this time series is an alignment or
      reduction of other time series.When creating a time series, this field
      is optional. If present, it must be the same as the metric kind of the
      associated metric. If the associated metric's descriptor must be auto-
      created, then this field specifies the metric kind of the new descriptor
      and must be either GAUGE (the default) or CUMULATIVE.
    ValueTypeValueValuesEnum: The value type of the time series. When listing
      time series, this value type might be different from the value type of
      the associated metric if this time series is an alignment or reduction
      of other time series.When creating a time series, this field is
      optional. If present, it must be the same as the type of the data in the
      points field.

  Fields:
    description: Input only. A detailed description of the time series that
      will be associated with the google.api.MetricDescriptor for the metric.
      Once set, this field cannot be changed through CreateTimeSeries.
    metadata: Output only. The associated monitored resource metadata. When
      reading a time series, this field will include metadata labels that are
      explicitly named in the reduction. When creating a time series, this
      field is ignored.
    metric: The associated metric. A fully-specified metric used to identify
      the time series.
    metricKind: The metric kind of the time series. When listing time series,
      this metric kind might be different from the metric kind of the
      associated metric if this time series is an alignment or reduction of
      other time series.When creating a time series, this field is optional.
      If present, it must be the same as the metric kind of the associated
      metric. If the associated metric's descriptor must be auto-created, then
      this field specifies the metric kind of the new descriptor and must be
      either GAUGE (the default) or CUMULATIVE.
    points: The data points of this time series. When listing time series,
      points are returned in reverse time order.When creating a time series,
      this field must contain exactly one point and the point's type must be
      the same as the value type of the associated metric. If the associated
      metric's descriptor must be auto-created, then the value type of the
      descriptor is determined by the point's type, which must be BOOL, INT64,
      DOUBLE, or DISTRIBUTION.
    resource: The associated monitored resource. Custom metrics can use only
      certain monitored resource types in their time series data. For more
      information, see Monitored resources for custom metrics
      (https://cloud.google.com/monitoring/custom-metrics/creating-
      metrics#custom-metric-resources).
    unit: The units in which the metric value is reported. It is only
      applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The unit
      defines the representation of the stored metric values. This field can
      only be changed through CreateTimeSeries when it is empty.
    valueType: The value type of the time series. When listing time series,
      this value type might be different from the value type of the associated
      metric if this time series is an alignment or reduction of other time
      series.When creating a time series, this field is optional. If present,
      it must be the same as the type of the data in the points field.
  """

  class MetricKindValueValuesEnum(_messages.Enum):
    r"""The metric kind of the time series. When listing time series, this
    metric kind might be different from the metric kind of the associated
    metric if this time series is an alignment or reduction of other time
    series.When creating a time series, this field is optional. If present, it
    must be the same as the metric kind of the associated metric. If the
    associated metric's descriptor must be auto-created, then this field
    specifies the metric kind of the new descriptor and must be either GAUGE
    (the default) or CUMULATIVE.

    Values:
      METRIC_KIND_UNSPECIFIED: Do not use this default value.
      GAUGE: An instantaneous measurement of a value.
      DELTA: The change in a value during a time interval.
      CUMULATIVE: A value accumulated over a time interval. Cumulative
        measurements in a time series should have the same start time and
        increasing end times, until an event resets the cumulative value to
        zero and sets a new start time for the following points.
    """
    METRIC_KIND_UNSPECIFIED = 0
    GAUGE = 1
    DELTA = 2
    CUMULATIVE = 3

  class ValueTypeValueValuesEnum(_messages.Enum):
    r"""The value type of the time series. When listing time series, this
    value type might be different from the value type of the associated metric
    if this time series is an alignment or reduction of other time series.When
    creating a time series, this field is optional. If present, it must be the
    same as the type of the data in the points field.

    Values:
      VALUE_TYPE_UNSPECIFIED: Do not use this default value.
      BOOL: The value is a boolean. This value type can be used only if the
        metric kind is GAUGE.
      INT64: The value is a signed 64-bit integer.
      DOUBLE: The value is a double precision floating point number.
      STRING: The value is a text string. This value type can be used only if
        the metric kind is GAUGE.
      DISTRIBUTION: The value is a Distribution.
      MONEY: The value is money.
    """
    VALUE_TYPE_UNSPECIFIED = 0
    BOOL = 1
    INT64 = 2
    DOUBLE = 3
    STRING = 4
    DISTRIBUTION = 5
    MONEY = 6

  description = _messages.StringField(1)
  metadata = _messages.MessageField('MonitoredResourceMetadata', 2)
  metric = _messages.MessageField('Metric', 3)
  metricKind = _messages.EnumField('MetricKindValueValuesEnum', 4)
  points = _messages.MessageField('Point', 5, repeated=True)
  resource = _messages.MessageField('MonitoredResource', 6)
  unit = _messages.StringField(7)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 8)
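

# Illustrative sketch: a time series shaped for a write, which must carry
# exactly one point. Metric and MonitoredResource are defined elsewhere in
# this module; only their 'type' fields are assumed here, and the metric
# type, resource type, timestamp, and value are hypothetical.
def _example_write_time_series():
  return TimeSeries(
      metric=Metric(type='custom.googleapis.com/store/queue_depth'),
      resource=MonitoredResource(type='global'),
      metricKind=TimeSeries.MetricKindValueValuesEnum.GAUGE,
      valueType=TimeSeries.ValueTypeValueValuesEnum.INT64,
      points=[Point(
          interval=TimeInterval(endTime='2024-01-01T00:00:00Z'),
          value=TypedValue(int64Value=42))])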


class TimeSeriesData(_messages.Message):
  r"""Represents the values of a time series associated with a
  TimeSeriesDescriptor.

  Fields:
    labelValues: The values of the labels in the time series identifier, given
      in the same order as the label_descriptors field of the
      TimeSeriesDescriptor associated with this object. Each value must have a
      value of the type given in the corresponding entry of label_descriptors.
    pointData: The points in the time series.
  """

  labelValues = _messages.MessageField('LabelValue', 1, repeated=True)
  pointData = _messages.MessageField('PointData', 2, repeated=True)


class TimeSeriesDescriptor(_messages.Message):
  r"""A descriptor for the labels and points in a time series.

  Fields:
    labelDescriptors: Descriptors for the labels.
    pointDescriptors: Descriptors for the point data value columns.
  """

  labelDescriptors = _messages.MessageField('LabelDescriptor', 1, repeated=True)
  pointDescriptors = _messages.MessageField('ValueDescriptor', 2, repeated=True)


class TimeSeriesRatio(_messages.Message):
  r"""A TimeSeriesRatio specifies two TimeSeries to use for computing the
  good_service / total_service ratio. The specified TimeSeries must have
  ValueType = DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or
  MetricKind = CUMULATIVE. The TimeSeriesRatio must specify exactly two of
  good, bad, and total, and the relationship good_service + bad_service =
  total_service will be assumed.

  Fields:
    badServiceFilter: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying a
      TimeSeries quantifying bad service, either demanded service that was not
      provided or demanded service that was of inadequate quality. Must have
      ValueType = DOUBLE or ValueType = INT64 and must have MetricKind = DELTA
      or MetricKind = CUMULATIVE.
    goodServiceFilter: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying a
      TimeSeries quantifying good service provided. Must have ValueType =
      DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or
      MetricKind = CUMULATIVE.
    totalServiceFilter: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying a
      TimeSeries quantifying total demanded service. Must have ValueType =
      DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or
      MetricKind = CUMULATIVE.
  """

  badServiceFilter = _messages.StringField(1)
  goodServiceFilter = _messages.StringField(2)
  totalServiceFilter = _messages.StringField(3)
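

# Illustrative sketch (not part of the generated API): a ratio SLI built from
# exactly two of the three filters. Here good and total are given, so
# bad_service is implied by good_service + bad_service = total_service. The
# filter strings are placeholders, not real metric types.
def _example_time_series_ratio():
  return TimeSeriesRatio(
      goodServiceFilter=(
          'metric.type="example.googleapis.com/request_count" '
          'metric.labels.response_code="200"'),
      totalServiceFilter='metric.type="example.googleapis.com/request_count"')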


class Trigger(_messages.Message):
  r"""Specifies how many time series must fail a predicate to trigger a
  condition. If not specified, then a {count: 1} trigger is used.

  Fields:
    count: The absolute number of time series that must fail the predicate for
      the condition to be triggered.
    percent: The percentage of time series that must fail the predicate for
      the condition to be triggered.
  """

  count = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  percent = _messages.FloatField(2)
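

# Illustrative sketch (not part of the generated API): count and percent are
# alternatives; omitting both is equivalent to a {count: 1} trigger.
def _example_trigger():
  return Trigger(count=3)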


class Type(_messages.Message):
  r"""A protocol buffer message type.New usages of this message as an
  alternative to DescriptorProto are strongly discouraged. This message does
  not reliability preserve all information necessary to model the schema and
  preserve semantics. Instead make use of FileDescriptorSet which preserves
  the necessary information.

  Enums:
    SyntaxValueValuesEnum: The source syntax.

  Fields:
    edition: The source edition string, only valid when syntax is
      SYNTAX_EDITIONS.
    fields: The list of fields.
    name: The fully qualified message name.
    oneofs: The list of types appearing in oneof definitions in this type.
    options: The protocol buffer options.
    sourceContext: The source context.
    syntax: The source syntax.
  """

  class SyntaxValueValuesEnum(_messages.Enum):
    r"""The source syntax.

    Values:
      SYNTAX_PROTO2: Syntax proto2.
      SYNTAX_PROTO3: Syntax proto3.
      SYNTAX_EDITIONS: Syntax editions.
    """
    SYNTAX_PROTO2 = 0
    SYNTAX_PROTO3 = 1
    SYNTAX_EDITIONS = 2

  edition = _messages.StringField(1)
  fields = _messages.MessageField('Field', 2, repeated=True)
  name = _messages.StringField(3)
  oneofs = _messages.StringField(4, repeated=True)
  options = _messages.MessageField('Option', 5, repeated=True)
  sourceContext = _messages.MessageField('SourceContext', 6)
  syntax = _messages.EnumField('SyntaxValueValuesEnum', 7)


class TypedValue(_messages.Message):
  r"""A single strongly-typed value.

  Fields:
    boolValue: A Boolean value: true or false.
    distributionValue: A distribution value.
    doubleValue: A 64-bit double-precision floating-point number. Its
      magnitude is approximately ±10^±300 and it has 16 significant digits of
      precision.
    int64Value: A 64-bit integer. Its range is approximately ±9.2x10^18.
    stringValue: A variable-length string value.
  """

  boolValue = _messages.BooleanField(1)
  distributionValue = _messages.MessageField('Distribution', 2)
  doubleValue = _messages.FloatField(3)
  int64Value = _messages.IntegerField(4)
  stringValue = _messages.StringField(5)
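

# Illustrative sketch (not part of the generated API): a TypedValue carries
# exactly one of its value fields, chosen to match the metric's value_type.
def _example_typed_value():
  return TypedValue(doubleValue=123.45)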


class UptimeCheckConfig(_messages.Message):
  r"""This message configures which resources and services to monitor for
  availability.

  Enums:
    CheckerTypeValueValuesEnum: The type of checkers to use to execute the
      Uptime check.
    SelectedRegionsValueListEntryValuesEnum:

  Messages:
    UserLabelsValue: User-supplied key/value data to be used for organizing
      and identifying the UptimeCheckConfig objects.The field can contain up
      to 64 entries. Each key and value is limited to 63 Unicode characters or
      128 bytes, whichever is smaller. Labels and values can contain only
      lowercase letters, numerals, underscores, and dashes. Keys must begin
      with a letter.

  Fields:
    checkerType: The type of checkers to use to execute the Uptime check.
    contentMatchers: The content that is expected to appear in the data
      returned by the target server against which the check is run. Currently,
      only the first entry in the content_matchers list is supported, and
      additional entries will be ignored. This field is optional and should
      only be specified if a content match is required as part of the Uptime
      check.
    disabled: Whether the check is disabled or not.
    displayName: A human-friendly name for the Uptime check configuration. The
      display name should be unique within a Cloud Monitoring Workspace in
      order to make it easier to identify; however, uniqueness is not
      enforced. Required.
    httpCheck: Contains information needed to make an HTTP or HTTPS check.
    internalCheckers: The internal checkers that this check will egress from.
      If is_internal is true and this list is empty, the check will egress
      from all the InternalCheckers configured for the project that owns this
      UptimeCheckConfig.
    isInternal: If this is true, then checks are made only from the
      'internal_checkers'. If it is false, then checks are made only from the
      'selected_regions'. It is an error to provide 'selected_regions' when
      is_internal is true, or to provide 'internal_checkers' when is_internal
      is false.
    logCheckFailures: Whether to log the results of failed probes to Cloud
      Logging.
    monitoredResource: The monitored resource
      (https://cloud.google.com/monitoring/api/resources) associated with the
      configuration. The following monitored resource types are valid for this
      field: uptime_url, gce_instance, gae_app, aws_ec2_instance,
      aws_elb_load_balancer, k8s_service, servicedirectory_service,
      cloud_run_revision.
    name: Identifier. A unique resource name for this Uptime check
      configuration. The format is:
      projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
      where [PROJECT_ID_OR_NUMBER] is the Workspace host project associated
      with the Uptime check.This field should be omitted when creating the
      Uptime check configuration; on create, the resource name is assigned by
      the server and included in the response.
    period: How often, in seconds, the Uptime check is performed. Currently,
      the only supported values are 60s (1 minute), 300s (5 minutes), 600s (10
      minutes), and 900s (15 minutes). Optional, defaults to 60s.
    resourceGroup: The group resource associated with the configuration.
    selectedRegions: The list of regions from which the check will be run.
      Some regions contain one location, and others contain more than one. If
      this field is specified, enough regions must be provided to include a
      minimum of 3 locations. Not specifying this field will result in Uptime
      checks running from all available regions.
    syntheticMonitor: Specifies a Synthetic Monitor to invoke.
    tcpCheck: Contains information needed to make a TCP check.
    timeout: The maximum amount of time to wait for the request to complete
      (must be between 1 and 60 seconds). Required.
    userLabels: User-supplied key/value data to be used for organizing and
      identifying the UptimeCheckConfig objects.The field can contain up to 64
      entries. Each key and value is limited to 63 Unicode characters or 128
      bytes, whichever is smaller. Labels and values can contain only
      lowercase letters, numerals, underscores, and dashes. Keys must begin
      with a letter.
  """

  class CheckerTypeValueValuesEnum(_messages.Enum):
    r"""The type of checkers to use to execute the Uptime check.

    Values:
      CHECKER_TYPE_UNSPECIFIED: The default checker type. Currently converted
        to STATIC_IP_CHECKERS on creation; the default conversion behavior may
        change in the future.
      STATIC_IP_CHECKERS: STATIC_IP_CHECKERS are used for uptime checks that
        perform egress across the public internet. STATIC_IP_CHECKERS use the
        static IP addresses returned by ListUptimeCheckIps.
      VPC_CHECKERS: VPC_CHECKERS are used for uptime checks that perform
        egress using Service Directory and private network access. When using
        VPC_CHECKERS, the monitored resource type must be
        servicedirectory_service.
    """
    CHECKER_TYPE_UNSPECIFIED = 0
    STATIC_IP_CHECKERS = 1
    VPC_CHECKERS = 2

  class SelectedRegionsValueListEntryValuesEnum(_messages.Enum):
    r"""SelectedRegionsValueListEntryValuesEnum enum type.

    Values:
      REGION_UNSPECIFIED: Default value if no region is specified. Will result
        in Uptime checks running from all regions.
      USA: Allows checks to run from locations within the United States of
        America.
      EUROPE: Allows checks to run from locations within the continent of
        Europe.
      SOUTH_AMERICA: Allows checks to run from locations within the continent
        of South America.
      ASIA_PACIFIC: Allows checks to run from locations within the Asia
        Pacific area (ex: Singapore).
      USA_OREGON: Allows checks to run from locations within the western
        United States of America.
      USA_IOWA: Allows checks to run from locations within the central United
        States of America.
      USA_VIRGINIA: Allows checks to run from locations within the eastern
        United States of America.
    """
    REGION_UNSPECIFIED = 0
    USA = 1
    EUROPE = 2
    SOUTH_AMERICA = 3
    ASIA_PACIFIC = 4
    USA_OREGON = 5
    USA_IOWA = 6
    USA_VIRGINIA = 7

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserLabelsValue(_messages.Message):
    r"""User-supplied key/value data to be used for organizing and identifying
    the UptimeCheckConfig objects.The field can contain up to 64 entries. Each
    key and value is limited to 63 Unicode characters or 128 bytes, whichever
    is smaller. Labels and values can contain only lowercase letters,
    numerals, underscores, and dashes. Keys must begin with a letter.

    Messages:
      AdditionalProperty: An additional property for a UserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type UserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  checkerType = _messages.EnumField('CheckerTypeValueValuesEnum', 1)
  contentMatchers = _messages.MessageField('ContentMatcher', 2, repeated=True)
  disabled = _messages.BooleanField(3)
  displayName = _messages.StringField(4)
  httpCheck = _messages.MessageField('HttpCheck', 5)
  internalCheckers = _messages.MessageField('InternalChecker', 6, repeated=True)
  isInternal = _messages.BooleanField(7)
  logCheckFailures = _messages.BooleanField(8)
  monitoredResource = _messages.MessageField('MonitoredResource', 9)
  name = _messages.StringField(10)
  period = _messages.StringField(11)
  resourceGroup = _messages.MessageField('ResourceGroup', 12)
  selectedRegions = _messages.EnumField('SelectedRegionsValueListEntryValuesEnum', 13, repeated=True)
  syntheticMonitor = _messages.MessageField('SyntheticMonitorTarget', 14)
  tcpCheck = _messages.MessageField('TcpCheck', 15)
  timeout = _messages.StringField(16)
  userLabels = _messages.MessageField('UserLabelsValue', 17)
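

# Illustrative sketch (not part of the generated API): a minimal
# UptimeCheckConfig using only fields defined in this module. A real check
# would also attach an httpCheck or tcpCheck and a monitoredResource, whose
# fields are defined elsewhere in this file. The display name, label, and
# region choices below are placeholders.
def _example_uptime_check_config():
  regions = UptimeCheckConfig.SelectedRegionsValueListEntryValuesEnum
  labels = UptimeCheckConfig.UserLabelsValue(
      additionalProperties=[
          UptimeCheckConfig.UserLabelsValue.AdditionalProperty(
              key='team', value='sre'),
      ])
  return UptimeCheckConfig(
      displayName='example-homepage-check',
      period='300s',
      timeout='10s',
      selectedRegions=[regions.USA, regions.EUROPE, regions.ASIA_PACIFIC],
      userLabels=labels)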


class UptimeCheckIp(_messages.Message):
  r"""Contains the region, location, and list of IP addresses where checkers
  in the location run from.

  Enums:
    RegionValueValuesEnum: A broad region category in which the IP address is
      located.

  Fields:
    ipAddress: The IP address from which the Uptime check originates. This is
      a fully specified IP address (not an IP address range). Most IP
      addresses, as of this publication, are in IPv4 format; however, one
      should not rely on the IP addresses being in IPv4 format indefinitely,
      and should support interpreting this field in either IPv4 or IPv6
      format.
    location: A more specific location within the region that typically
      encodes a particular city/town/metro (and its containing state/province
      or country) within the broader umbrella region category.
    region: A broad region category in which the IP address is located.
  """

  class RegionValueValuesEnum(_messages.Enum):
    r"""A broad region category in which the IP address is located.

    Values:
      REGION_UNSPECIFIED: Default value if no region is specified. Will result
        in Uptime checks running from all regions.
      USA: Allows checks to run from locations within the United States of
        America.
      EUROPE: Allows checks to run from locations within the continent of
        Europe.
      SOUTH_AMERICA: Allows checks to run from locations within the continent
        of South America.
      ASIA_PACIFIC: Allows checks to run from locations within the Asia
        Pacific area (ex: Singapore).
      USA_OREGON: Allows checks to run from locations within the western
        United States of America.
      USA_IOWA: Allows checks to run from locations within the central United
        States of America.
      USA_VIRGINIA: Allows checks to run from locations within the eastern
        United States of America.
    """
    REGION_UNSPECIFIED = 0
    USA = 1
    EUROPE = 2
    SOUTH_AMERICA = 3
    ASIA_PACIFIC = 4
    USA_OREGON = 5
    USA_IOWA = 6
    USA_VIRGINIA = 7

  ipAddress = _messages.StringField(1)
  location = _messages.StringField(2)
  region = _messages.EnumField('RegionValueValuesEnum', 3)


class ValueDescriptor(_messages.Message):
  r"""A descriptor for the value columns in a data point.

  Enums:
    MetricKindValueValuesEnum: The value stream kind.
    ValueTypeValueValuesEnum: The value type.

  Fields:
    key: The value key.
    metricKind: The value stream kind.
    unit: The unit in which time_series point values are reported. unit
      follows the UCUM format for units as seen in
      https://unitsofmeasure.org/ucum.html. unit is only valid if value_type
      is INT64, DOUBLE, or DISTRIBUTION.
    valueType: The value type.
  """

  class MetricKindValueValuesEnum(_messages.Enum):
    r"""The value stream kind.

    Values:
      METRIC_KIND_UNSPECIFIED: Do not use this default value.
      GAUGE: An instantaneous measurement of a value.
      DELTA: The change in a value during a time interval.
      CUMULATIVE: A value accumulated over a time interval. Cumulative
        measurements in a time series should have the same start time and
        increasing end times, until an event resets the cumulative value to
        zero and sets a new start time for the following points.
    """
    METRIC_KIND_UNSPECIFIED = 0
    GAUGE = 1
    DELTA = 2
    CUMULATIVE = 3

  class ValueTypeValueValuesEnum(_messages.Enum):
    r"""The value type.

    Values:
      VALUE_TYPE_UNSPECIFIED: Do not use this default value.
      BOOL: The value is a boolean. This value type can be used only if the
        metric kind is GAUGE.
      INT64: The value is a signed 64-bit integer.
      DOUBLE: The value is a double precision floating point number.
      STRING: The value is a text string. This value type can be used only if
        the metric kind is GAUGE.
      DISTRIBUTION: The value is a Distribution.
      MONEY: The value is money.
    """
    VALUE_TYPE_UNSPECIFIED = 0
    BOOL = 1
    INT64 = 2
    DOUBLE = 3
    STRING = 4
    DISTRIBUTION = 5
    MONEY = 6

  key = _messages.StringField(1)
  metricKind = _messages.EnumField('MetricKindValueValuesEnum', 2)
  unit = _messages.StringField(3)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 4)


class VerifyNotificationChannelRequest(_messages.Message):
  r"""The VerifyNotificationChannel request.

  Fields:
    code: Required. The verification code that was delivered to the channel as
      a result of invoking the SendNotificationChannelVerificationCode API
      method or that was retrieved from a verified channel via
      GetNotificationChannelVerificationCode. For example, one might have
      "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
      guaranteed that the code is valid UTF-8; one should not make any
      assumptions regarding the structure or format of the code).
  """

  code = _messages.StringField(1)


class WindowsBasedSli(_messages.Message):
  r"""A WindowsBasedSli defines good_service as the count of time windows for
  which the provided service was of good quality. Criteria for determining if
  service was good are embedded in the window_criterion.

  Fields:
    goodBadMetricFilter: A monitoring filter
      (https://cloud.google.com/monitoring/api/v3/filters) specifying a
      TimeSeries with ValueType = BOOL. The window is good if any true values
      appear in the window.
    goodTotalRatioThreshold: A window is good if its performance is high
      enough.
    metricMeanInRange: A window is good if the metric's value is in a good
      range, averaged across returned streams.
    metricSumInRange: A window is good if the metric's value is in a good
      range, summed across returned streams.
    windowPeriod: Duration over which window quality is evaluated. Must be an
      integer fraction of a day and at least 60s.
  """

  goodBadMetricFilter = _messages.StringField(1)
  goodTotalRatioThreshold = _messages.MessageField('PerformanceThreshold', 2)
  metricMeanInRange = _messages.MessageField('MetricRange', 3)
  metricSumInRange = _messages.MessageField('MetricRange', 4)
  windowPeriod = _messages.StringField(5)
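

# Illustrative sketch (not part of the generated API): a window-based SLI in
# which a five-minute window is good if any true value appears in the boolean
# stream selected by the filter. The filter string is a placeholder.
def _example_windows_based_sli():
  return WindowsBasedSli(
      goodBadMetricFilter='metric.type="example.googleapis.com/check_passed"',
      windowPeriod='300s')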


encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'aggregation_alignmentPeriod', 'aggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'aggregation_crossSeriesReducer', 'aggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'aggregation_groupByFields', 'aggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'aggregation_perSeriesAligner', 'aggregation.perSeriesAligner')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'interval_endTime', 'interval.endTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'interval_startTime', 'interval.startTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'secondaryAggregation_alignmentPeriod', 'secondaryAggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'secondaryAggregation_crossSeriesReducer', 'secondaryAggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'secondaryAggregation_groupByFields', 'secondaryAggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringFoldersTimeSeriesListRequest, 'secondaryAggregation_perSeriesAligner', 'secondaryAggregation.perSeriesAligner')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'aggregation_alignmentPeriod', 'aggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'aggregation_crossSeriesReducer', 'aggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'aggregation_groupByFields', 'aggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'aggregation_perSeriesAligner', 'aggregation.perSeriesAligner')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'interval_endTime', 'interval.endTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'interval_startTime', 'interval.startTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'secondaryAggregation_alignmentPeriod', 'secondaryAggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'secondaryAggregation_crossSeriesReducer', 'secondaryAggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'secondaryAggregation_groupByFields', 'secondaryAggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringOrganizationsTimeSeriesListRequest, 'secondaryAggregation_perSeriesAligner', 'secondaryAggregation.perSeriesAligner')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsGroupsMembersListRequest, 'interval_endTime', 'interval.endTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsGroupsMembersListRequest, 'interval_startTime', 'interval.startTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'aggregation_alignmentPeriod', 'aggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'aggregation_crossSeriesReducer', 'aggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'aggregation_groupByFields', 'aggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'aggregation_perSeriesAligner', 'aggregation.perSeriesAligner')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'interval_endTime', 'interval.endTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'interval_startTime', 'interval.startTime')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'secondaryAggregation_alignmentPeriod', 'secondaryAggregation.alignmentPeriod')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'secondaryAggregation_crossSeriesReducer', 'secondaryAggregation.crossSeriesReducer')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'secondaryAggregation_groupByFields', 'secondaryAggregation.groupByFields')
encoding.AddCustomJsonFieldMapping(
    MonitoringProjectsTimeSeriesListRequest, 'secondaryAggregation_perSeriesAligner', 'secondaryAggregation.perSeriesAligner')
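

# Illustrative sketch (not part of the generated API): the mappings registered
# above rename flattened request fields to their dotted wire names, so the
# aggregation_alignmentPeriod field below serializes under the key
# "aggregation.alignmentPeriod" when encoded to JSON. The interval values are
# placeholders.
def _example_custom_field_mapping():
  request = MonitoringProjectsTimeSeriesListRequest(
      aggregation_alignmentPeriod='60s',
      interval_startTime='2024-01-01T00:00:00Z',
      interval_endTime='2024-01-01T01:00:00Z')
  return encoding.MessageToJson(request)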
