
                           S r SSKJr  SSKJr  SSKJr  SSKJr  Sr	 " S S\R                  5      r " S	 S
\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S \R                  5      r " S! S"\R                  5      r " S# S$\R                  5      r " S% S&\R                  5      r " S' S(\R                  5      r " S) S*\R                  5      r " S+ S,\R                  5      r " S- S.\R                  5      r " S/ S0\R                  5      r " S1 S2\R                  5      r  " S3 S4\R                  5      r! " S5 S6\R                  5      r" " S7 S8\R                  5      r# " S9 S:\R                  5      r$ " S; S<\R                  5      r% " S= S>\R                  5      r& " S? S@\R                  5      r' " SA SB\R                  5      r( " SC SD\R                  5      r) " SE SF\R                  5      r* " SG SH\R                  5      r+ " SI SJ\R                  5      r, " SK SL\R                  5      r- " SM SN\R                  5      r. " SO SP\R                  5      r/ " SQ SR\R                  5      r0 " SS ST\R                  5      r1 " SU SV\R                  5      r2 " SW SX\R                  5      r3 " SY SZ\R                  5      r4 " S[ S\\R                  5      r5 " S] S^\R                  5      r6 " S_ S`\R                  5      r7 " Sa Sb\R                  5      r8 " Sc Sd\R                  5      r9 " Se Sf\R                  5      r: " Sg Sh\R                  5      r; " Si Sj\R                  5      r< " Sk Sl\R                  5      r= " Sm Sn\R                  5      r> " So Sp\R                  5      r? " Sq Sr\R                  5      r@ " Ss St\R                  5      rA " Su Sv\R                  5      rB " Sw Sx\R                  5      rC " Sy Sz\R                  5      rD " S{ S|\R                  5      rE " S} S~\R                  5      rF " S S\R                  5      rG " S S\R                  5      rH " S S\R                  5      rI " S S\R                  5      rJ " S S\R                  5      rK " S S\R                  5      rL " S S\R                  5      rM " S S\R                  5      rN " S S\R                  5      rO " S S\R                  5      rP " S S\R                  5      rQ " S S\R                  5      rR " S S\R                  5      rS " S S\R                  5      rT " S S\R                  5      rU " S S\R                  5      rV " S S\R                  5      rW " S S\R                  5      rX " S S\R                  5      rY " S S\R                  5      rZ " S S\R                  5      r[ " S S\R                  5      r\ " S S\R                  5      r] " S S\R                  5      r^ " S S\R                  5      r_ " S S\R                  5      r` " S S\R                  5      ra " S
 S\R                  5      rb " S S\R                  5      rc " S S\R                  5      rd " S S\R                  5      re\R                  " \XSS5        \R                  " \XR                  SS5        \R                  " \XR                  SS5        g)zuGenerated message classes for speech version v2.

Converts audio to text by applying powerful neural network models.
    )absolute_import)messages)encoding)extra_typesspeechc                   h    \ rS rSrSr " S S\R                  5      r\R                  " SS5      r	Sr
g)AccessMetadata   aR  The access metadata for a particular region. This can be applied if the
org policy for the given project disallows a particular region.

Enums:
  ConstraintTypeValueValuesEnum: Describes the different types of
    constraints that are applied.

Fields:
  constraintType: Describes the different types of constraints that are
    applied.
c                        \ rS rSrSrSrSrSrg),AccessMetadata.ConstraintTypeValueValuesEnum   zDescribes the different types of constraints that are applied.

Values:
  CONSTRAINT_TYPE_UNSPECIFIED: Unspecified constraint applied.
  RESOURCE_LOCATIONS_ORG_POLICY_CREATE_CONSTRAINT: The project's org
    policy disallows the given region.
r       N)__name__
__module____qualname____firstlineno____doc__CONSTRAINT_TYPE_UNSPECIFIED/RESOURCE_LOCATIONS_ORG_POLICY_CREATE_CONSTRAINT__static_attributes__r       Ilib/googlecloudsdk/generated_clients/apis/speech/v2/speech_v2_messages.pyConstraintTypeValueValuesEnumr      s     #$673r   r   r   r   N)r   r   r   r   r   	_messagesEnumr   	EnumFieldconstraintTyper   r   r   r   r	   r	      s,    
	8inn 	8 &&'FJ.r   r	   c                   b    \ rS rSrSr\R                  " SS5      r\R                  " S5      r	Sr
g)AdaptationPhraseSet,   aT  A biasing PhraseSet, which can be either a string referencing the name
of an existing PhraseSets resource, or an inline definition of a PhraseSet.

Fields:
  inlinePhraseSet: An inline defined PhraseSet.
  phraseSet: The name of an existing PhraseSet resource. The user must have
    read access to the resource and it must not be deleted.
	PhraseSetr      r   N)r   r   r   r   r   r   MessageFieldinlinePhraseSetStringField	phraseSetr   r   r   r   r    r    ,   s+     **;:/##A&)r   r    c                       \ rS rSrSrSrg)AutoDetectDecodingConfig:   a   Automatically detected decoding parameters. Supported for the following
encodings: * WAV_LINEAR16: 16-bit signed little-endian PCM samples in a WAV
container. * WAV_MULAW: 8-bit companded mulaw samples in a WAV container. *
WAV_ALAW: 8-bit companded alaw samples in a WAV container. * RFC4867_5_AMR:
AMR frames with an rfc4867.5 header. * RFC4867_5_AMRWB: AMR-WB frames with
an rfc4867.5 header. * FLAC: FLAC frames in the "native FLAC" container
format. * MP3: MPEG audio frames with optional (ignored) ID3 metadata. *
OGG_OPUS: Opus audio frames in an Ogg container. * WEBM_OPUS: Opus audio
frames in a WebM container. * MP4_AAC: AAC audio frames in an MP4 container.
* M4A_AAC: AAC audio frames in an M4A container. * MOV_AAC: AAC audio frames
in an MOV container.
r   Nr   r   r   r   r   r   r   r   r   r)   r)   :   s    r   r)   c                       \ rS rSrSr\R                  " SS5      r\R                  " S5      r	\R                  " S5      r
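

# Illustrative usage sketch (not part of the generated API surface): an
# AdaptationPhraseSet either references an existing PhraseSet resource by
# name, as below, or carries an inline PhraseSet definition in
# `inlinePhraseSet`. The resource path is a hypothetical placeholder.
_EXAMPLE_ADAPTATION_PHRASE_SET = AdaptationPhraseSet(
    phraseSet='projects/my-project/locations/global/phraseSets/my-phrase-set')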
Srg)	BatchRecognizeFileMetadataJ   a  Metadata about a single file in a batch for BatchRecognize.

Fields:
  config: Features and audio metadata to use for the Automatic Speech
    Recognition. This field in combination with the config_mask field can be
    used to override parts of the default_recognition_config of the
    Recognizer resource as well as the config at the request level.
  configMask: The list of fields in config that override the values in the
    default_recognition_config of the recognizer during this recognition
    request. If no mask is provided, all non-default valued fields in config
    override the values in the recognizer for this recognition request. If a
    mask is provided, only the fields listed in the mask override the config
    in the recognizer for this recognition request. If a wildcard (`*`) is
    provided, config completely overrides and replaces the config in the
    recognizer for this recognition request.
  uri: Cloud Storage URI for the audio file.
RecognitionConfigr   r#      r   N)r   r   r   r   r   r   r$   configr&   
configMaskurir   r   r   r   r-   r-   J   s<    $ !!"5q9&$$Q'*a #r   r-   c                       \ rS rSrSr\R                  " SS5      r\R                  " SS5      r\R                  " SS5      r	\R                  " S	S
5      r
\R                  " SS5      r\R                  " S5      rSrg)BatchRecognizeFileResultb   a/  Final results for a single file.

Fields:
  cloudStorageResult: Recognition results written to Cloud Storage. This is
    populated only when GcsOutputConfig is set in the
    RecognitionOutputConfig.
  error: Error if one was encountered.
  inlineResult: Recognition results. This is populated only when
    InlineOutputConfig is set in the RecognitionOutputConfig.
  metadata: A RecognitionResponseMetadata attribute.
  transcript: Deprecated. Use `inline_result.transcript` instead.
  uri: Deprecated. Use `cloud_storage_result.native_format_uri` instead.
CloudStorageResultr   Statusr#   InlineResultr0   RecognitionResponseMetadata   BatchRecognizeResults      r   N)r   r   r   r   r   r   r$   cloudStorageResulterrorinlineResultmetadata
transcriptr&   r3   r   r   r   r   r5   r5   b   sw     !--.BAF

 
 1
-%'':,##$A1E(%%&=qA*a #r   r5   c                       \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " SS5      rSrg)	BatchRecognizeMetadatay   a
  Operation metadata for BatchRecognize.

Messages:
  TranscriptionMetadataValue: Map from provided filename to the
    transcription metadata for that file.

Fields:
  transcriptionMetadata: Map from provided filename to the transcription
    metadata for that file.
additionalPropertiesc                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
1BatchRecognizeMetadata.TranscriptionMetadataValue   a  Map from provided filename to the transcription metadata for that
file.

Messages:
  AdditionalProperty: An additional property for a
    TranscriptionMetadataValue object.

Fields:
  additionalProperties: Additional properties of type
    TranscriptionMetadataValue
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)DBatchRecognizeMetadata.TranscriptionMetadataValue.AdditionalProperty   zAn additional property for a TranscriptionMetadataValue object.

Fields:
  key: Name of the additional property.
  value: A BatchRecognizeTranscriptionMetadata attribute.
r   #BatchRecognizeTranscriptionMetadatar#   r   Nr   r   r   r   r   r   r&   keyr$   valuer   r   r   r   AdditionalPropertyrL      s,    
 !!!$c$$%JANer   rR   r   Trepeatedr   Nr   r   r   r   r   r   MessagerR   r$   rG   r   r   r   r   TranscriptionMetadataValuerI      s4    
	OY.. 	O %112FTXYr   rW   r   r   N)r   r   r   r   r   r   MapUnrecognizedFieldsr   rV   rW   r$   transcriptionMetadatar   r   r   r   rE   rE   y   sM    	 !!"89Z9#4#4 Z :Z4 $001MqQr   rE   c                       \ rS rSrSr " S S\R                  5      r\R                  " SS5      r	\R                  " S5      r\R                  " SS	S
S9r\R                  " SS5      r\R                  " SS5      r\R                  " S5      rSrg)BatchRecognizeRequest   a  Request message for the BatchRecognize method.

Enums:
  ProcessingStrategyValueValuesEnum: Processing strategy to use for this
    request.

Fields:
  config: Features and audio metadata to use for the Automatic Speech
    Recognition. This field in combination with the config_mask field can be
    used to override parts of the default_recognition_config of the
    Recognizer resource.
  configMask: The list of fields in config that override the values in the
    default_recognition_config of the recognizer during this recognition
    request. If no mask is provided, all given fields in config override the
    values in the recognizer for this recognition request. If a mask is
    provided, only the fields listed in the mask override the config in the
    recognizer for this recognition request. If a wildcard (`*`) is
    provided, config completely overrides and replaces the config in the
    recognizer for this recognition request.
  files: Audio files with file metadata for ASR. The maximum number of files
    allowed to be specified is 15.
  processingStrategy: Processing strategy to use for this request.
  recognitionOutputConfig: Configuration options for where to output the
    transcripts of each file.
  recognizer: Required. The name of the Recognizer to use during
    recognition. The expected format is
    `projects/{project}/locations/{location}/recognizers/{recognizer}`. The
    {recognizer} segment may be set to `_` to use an empty implicit
    Recognizer.
c                        \ rS rSrSrSrSrSrg)7BatchRecognizeRequest.ProcessingStrategyValueValuesEnum   aZ  Processing strategy to use for this request.

Values:
  PROCESSING_STRATEGY_UNSPECIFIED: Default value for the processing
    strategy. The request is processed as soon as its received.
  DYNAMIC_BATCHING: If selected, processes the request during lower
    utilization periods for a price discount. The request is fulfilled
    within 24 hours.
r   r   r   N)r   r   r   r   r   PROCESSING_STRATEGY_UNSPECIFIEDDYNAMIC_BATCHINGr   r   r   r   !ProcessingStrategyValueValuesEnumr^      s     '(#r   rb   r/   r   r#   r-   r0   TrS   r;   RecognitionOutputConfigr=   r>   r   N)r   r   r   r   r   r   r   rb   r$   r1   r&   r2   filesr   processingStrategyrecognitionOutputConfig
recognizerr   r   r   r   r[   r[      s    >)..  !!"5q9&$$Q'*

 
 !=q4
P% **+NPQR%223LaP$$Q'*r   r[   c                       \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " SS5      r\R                  " S5      rSrg	)
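

# Illustrative usage sketch (not part of the generated API surface): a minimal
# BatchRecognizeRequest that relies on the implicit `_` recognizer and opts in
# to dynamic batching. The project, location, and Cloud Storage URI are
# hypothetical placeholders; a real request would normally also populate
# `config` and `recognitionOutputConfig`.
_EXAMPLE_BATCH_RECOGNIZE_REQUEST = BatchRecognizeRequest(
    recognizer='projects/my-project/locations/global/recognizers/_',
    files=[BatchRecognizeFileMetadata(uri='gs://my-bucket/audio.wav')],
    processingStrategy=(
        BatchRecognizeRequest.ProcessingStrategyValueValuesEnum
        .DYNAMIC_BATCHING))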


class BatchRecognizeResponse(_messages.Message):
  r"""Response message for BatchRecognize that is packaged into a longrunning
  Operation.

  Messages:
    ResultsValue: Map from filename to the final result for that file.

  Fields:
    results: Map from filename to the final result for that file.
    totalBilledDuration: When available, billed audio seconds for the
      corresponding request.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResultsValue(_messages.Message):
    r"""Map from filename to the final result for that file.

    Messages:
      AdditionalProperty: An additional property for a ResultsValue object.

    Fields:
      additionalProperties: Additional properties of type ResultsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResultsValue object.

      Fields:
        key: Name of the additional property.
        value: A BatchRecognizeFileResult attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('BatchRecognizeFileResult', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  results = _messages.MessageField('ResultsValue', 1)
  totalBilledDuration = _messages.StringField(2)


class BatchRecognizeResults(_messages.Message):
  r"""Output type for Cloud Storage of BatchRecognize transcripts. Though this
  proto isn't returned in this API anywhere, the Cloud Storage transcripts
  will be this proto serialized and should be parsed as such.

  Fields:
    metadata: Metadata about the recognition.
    results: Sequential list of transcription results corresponding to
      sequential portions of audio.
  """

  metadata = _messages.MessageField('RecognitionResponseMetadata', 1)
  results = _messages.MessageField('SpeechRecognitionResult', 2, repeated=True)


class BatchRecognizeTranscriptionMetadata(_messages.Message):
  r"""Metadata about transcription for a single file (for example, progress
  percent).

  Fields:
    error: Error if one was encountered.
    progressPercent: How much of the file has been transcribed so far.
    uri: The Cloud Storage URI to which recognition results will be written.
  """

  error = _messages.MessageField('Status', 1)
  progressPercent = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  uri = _messages.StringField(3)


class ClassItem(_messages.Message):
  r"""An item of the class.

  Fields:
    value: The class item's value.
  """

  value = _messages.StringField(1)


class CloudStorageResult(_messages.Message):
  r"""Final results written to Cloud Storage.

  Fields:
    srtFormatUri: The Cloud Storage URI to which recognition results were
      written as SRT formatted captions. This is populated only when `SRT`
      output is requested.
    uri: The Cloud Storage URI to which recognition results were written.
    vttFormatUri: The Cloud Storage URI to which recognition results were
      written as VTT formatted captions. This is populated only when `VTT`
      output is requested.
  """

  srtFormatUri = _messages.StringField(1)
  uri = _messages.StringField(2)
  vttFormatUri = _messages.StringField(3)


class Config(_messages.Message):
  r"""Message representing the config for the Speech-to-Text API. This
  includes an optional [KMS key](https://cloud.google.com/kms/docs/resource-
  hierarchy#keys) with which incoming data will be encrypted.

  Fields:
    kmsKeyName: Optional. An optional [KMS key
      name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) that if
      present, will be used to encrypt Speech-to-Text resources at-rest.
      Updating this key will not encrypt existing resources using this key;
      only new resources will be encrypted using this key. The expected format
      is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKe
      ys/{crypto_key}`.
    name: Output only. Identifier. The name of the config resource. There is
      exactly one config resource per project per location. The expected
      format is `projects/{project}/locations/{location}/config`.
    updateTime: Output only. The most recent time this resource was modified.
  """

  kmsKeyName = _messages.StringField(1)
  name = _messages.StringField(2)
  updateTime = _messages.StringField(3)
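

# Illustrative sketch (not part of the generated API surface): per the
# BatchRecognizeResults docstring above, transcripts written to Cloud Storage
# are that message serialized. Assuming the downloaded payload is the JSON
# serialization, it can be turned back into a message with the apitools
# encoding helpers already imported by this module.
def _example_parse_cloud_storage_transcript(results_json):
  """Parses a JSON-serialized BatchRecognizeResults payload (sketch)."""
  return encoding.JsonToMessage(BatchRecognizeResults, results_json)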


class CreateCustomClassRequest(_messages.Message):
  r"""Request message for the CreateCustomClass method.

  Fields:
    customClass: Required. The CustomClass to create.
    customClassId: The ID to use for the CustomClass, which will become the
      final component of the CustomClass's resource name. This value should be
      4-63 characters, and valid characters are /a-z-/.
    parent: Required. The project and location where this CustomClass will be
      created. The expected format is
      `projects/{project}/locations/{location}`.
    validateOnly: If set, validate the request and preview the CustomClass,
      but do not actually create it.
  """

  customClass = _messages.MessageField('CustomClass', 1)
  customClassId = _messages.StringField(2)
  parent = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)
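

# Illustrative sketch (not part of the generated API surface): the Create*
# request messages pair a parent location with a resource ID and an optional
# validate-only flag. The parent path and ID are hypothetical placeholders,
# and a real request would also set `customClass`.
_EXAMPLE_CREATE_CUSTOM_CLASS_REQUEST = CreateCustomClassRequest(
    parent='projects/my-project/locations/global',
    customClassId='my-custom-class',
    validateOnly=True)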


class CreatePhraseSetRequest(_messages.Message):
  r"""Request message for the CreatePhraseSet method.

  Fields:
    parent: Required. The project and location where this PhraseSet will be
      created. The expected format is
      `projects/{project}/locations/{location}`.
    phraseSet: Required. The PhraseSet to create.
    phraseSetId: The ID to use for the PhraseSet, which will become the final
      component of the PhraseSet's resource name. This value should be 4-63
      characters, and valid characters are /a-z-/.
    validateOnly: If set, validate the request and preview the PhraseSet, but
      do not actually create it.
  """

  parent = _messages.StringField(1)
  phraseSet = _messages.MessageField('PhraseSet', 2)
  phraseSetId = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class CreateRecognizerRequest(_messages.Message):
  r"""Request message for the CreateRecognizer method.

  Fields:
    parent: Required. The project and location where this Recognizer will be
      created. The expected format is
      `projects/{project}/locations/{location}`.
    recognizer: Required. The Recognizer to create.
    recognizerId: The ID to use for the Recognizer, which will become the
      final component of the Recognizer's resource name. This value should be
      4-63 characters, and valid characters are /a-z-/.
    validateOnly: If set, validate the request and preview the Recognizer, but
      do not actually create it.
  """

  parent = _messages.StringField(1)
  recognizer = _messages.MessageField('Recognizer', 2)
  recognizerId = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class CustomClass(_messages.Message):
  r"""CustomClass for biasing in speech recognition. Used to define a set of
words or phrases that represents a common concept or theme likely to appear
in your audio, for example a list of passenger ship names.

Enums:
  StateValueValuesEnum: Output only. The CustomClass lifecycle state.

Messages:
  AnnotationsValue: Optional. Allows users to store small amounts of
    arbitrary data. Both the key and the value must be 63 characters or less
    each. At most 100 annotations.

Fields:
  annotations: Optional. Allows users to store small amounts of arbitrary
    data. Both the key and the value must be 63 characters or less each. At
    most 100 annotations.
  createTime: Output only. Creation time.
  deleteTime: Output only. The time at which this resource was requested for
    deletion.
  displayName: Optional. User-settable, human-readable name for the
    CustomClass. Must be 63 characters or less.
  etag: Output only. This checksum is computed by the server based on the
    value of other fields. This may be sent on update, undelete, and delete
    requests to ensure the client has an up-to-date value before proceeding.
  expireTime: Output only. The time at which this resource will be purged.
  items: A collection of class items.
  kmsKeyName: Output only. The [KMS key
    name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with
    which the CustomClass is encrypted. The expected format is `projects/{pr
    oject}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`
    .
  kmsKeyVersionName: Output only. The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which the CustomClass is encrypted. The expected format is `project
    s/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_
    key}/cryptoKeyVersions/{crypto_key_version}`.
  name: Output only. Identifier. The resource name of the CustomClass.
    Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`.
  reconciling: Output only. Whether or not this CustomClass is in the
    process of being updated.
  state: Output only. The CustomClass lifecycle state.
  uid: Output only. System-assigned unique identifier for the CustomClass.
  updateTime: Output only. The most recent time this resource was modified.
c                   $    \ rS rSrSrSrSrSrSrg) CustomClass.StateValueValuesEnumi  zOutput only. The CustomClass lifecycle state.

Values:
  STATE_UNSPECIFIED: Unspecified state. This is only used/useful for
    distinguishing unset values.
  ACTIVE: The normal and active state.
  DELETED: This CustomClass has been deleted.
r   r   r#   r   N	r   r   r   r   r   STATE_UNSPECIFIEDACTIVEDELETEDr   r   r   r   StateValueValuesEnumr          FGr   r   rG   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
CustomClass.AnnotationsValuei  aD  Optional. Allows users to store small amounts of arbitrary data. Both
the key and the value must be 63 characters or less each. At most 100
annotations.

Messages:
  AdditionalProperty: An additional property for a AnnotationsValue
    object.

Fields:
  additionalProperties: Additional properties of type AnnotationsValue
c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      rSr	g)/CustomClass.AnnotationsValue.AdditionalPropertyi  An additional property for a AnnotationsValue object.

Fields:
  key: Name of the additional property.
  value: A string attribute.
r   r#   r   N
r   r   r   r   r   r   r&   rP   rQ   r   r   r   r   rR   r     )    
 !!!$c##A&er   rR   r   TrS   r   NrU   r   r   r   AnnotationsValuer     s2    
	'Y.. 	' %112FTXYr   r   r   r#   r0   r;   r=   r>   r      TrS      	   
               r   N)r   r   r   r   r   r   r   r   r   rX   rV   r   r$   annotationsr&   
createTime
deleteTimedisplayNameetag
expireTimeitemsr   kmsKeyVersionNamer   r   reconcilingr   stateuidr   r   r   r   r   r   r     s5   ,\Y^^  !!"89Z** Z :Z4 &&'91=+$$Q'*$$Q'*%%a(+			q	!$$$Q'*

 
 a$
?%$$Q'*++A.			r	"$&&r*+


4b
9%b!#$$R(*r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " S5      r	\R                  " S5      r
\R                  " S5      rSrg)	DeleteCustomClassRequesti  a  Request message for the DeleteCustomClass method.

Fields:
  allowMissing: If set to true, and the CustomClass is not found, the
    request will succeed and be a no-op (no Operation is recorded in this
    case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the CustomClass to delete. Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`
  validateOnly: If set, validate the request and preview the deleted
    CustomClass, but do not actually delete it.
r   r#   r0   r;   r   Nr   r   r   r   r   r   r   allowMissingr&   r   r   r   r   r   r   r   r   r     sI     ''*,			q	!$			q	!$''*,r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " S5      r	\R                  " S5      r
\R                  " S5      rSrg)	DeletePhraseSetRequesti  a  Request message for the DeletePhraseSet method.

Fields:
  allowMissing: If set to true, and the PhraseSet is not found, the request
    will succeed and be a no-op (no Operation is recorded in this case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the PhraseSet to delete. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
  validateOnly: If set, validate the request and preview the deleted
    PhraseSet, but do not actually delete it.
r   r#   r0   r;   r   Nr   r   r   r   r   r     I     ''*,			q	!$			q	!$''*,r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " S5      r	\R                  " S5      r
\R                  " S5      rSrg)	DeleteRecognizerRequesti$  a  Request message for the DeleteRecognizer method.

Fields:
  allowMissing: If set to true, and the Recognizer is not found, the request
    will succeed and be a no-op (no Operation is recorded in this case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the Recognizer to delete. Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`
  validateOnly: If set, validate the request and preview the deleted
    Recognizer, but do not actually delete it.
r   r#   r0   r;   r   Nr   r   r   r   r   r   $  r   r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " S\R                  R                  S9rSrg)DenoiserConfigi9  a  Denoiser config. May not be supported for all models and may have no
effect.

Fields:
  denoiseAudio: Denoise audio before sending to the transcription model.
  snrThreshold: Signal-to-Noise Ratio (SNR) threshold for the denoiser. Here
    SNR means the loudness of the speech signal. Audio with an SNR below
    this threshold, meaning the speech is too quiet, will be prevented from
    being sent to the transcription model. If snr_threshold=0, no filtering
    will be applied.
r   r#   rx   r   N)r   r   r   r   r   r   r   denoiseAudio
FloatFieldr{   FLOATsnrThresholdr   r   r   r   r   r   9  s7    
 ''*,%%a1B1B1H1HI,r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " S5      r	\R                  " S5      r
Srg)EntryiJ  zA single replacement configuration.

Fields:
  caseSensitive: Whether the search is case sensitive.
  replace: What to replace with. Max length is 100 characters.
  search: What to replace. Max length is 100 characters.
r   r#   r0   r   N)r   r   r   r   r   r   r   caseSensitiver&   replacesearchr   r   r   r   r   r   J  s9     ((+-!!!$'  #&r   r   c                       \ rS rSrSr " S S\R                  5      r\R                  " S\R                  R                  S9r\R                  " SS5      r\R                  " S\R                  R                  S9rS	rg


class ExplicitDecodingConfig(_messages.Message):
  r"""Explicitly specified decoding parameters.

  Enums:
    EncodingValueValuesEnum: Required. Encoding of the audio data sent for
      recognition.

  Fields:
    audioChannelCount: Optional. Number of channels present in the audio data
      sent for recognition. Note that this field is marked as OPTIONAL for
      backward compatibility reasons. It is (and has always been) effectively
      REQUIRED. The maximum allowed value is 8.
    encoding: Required. Encoding of the audio data sent for recognition.
    sampleRateHertz: Optional. Sample rate in Hertz of the audio data sent for
      recognition. Valid values are: 8000-48000, and 16000 is optimal. For
      best results, set the sampling rate of the audio source to 16000 Hz. If
      that's not possible, use the native sample rate of the audio source
      (instead of resampling). Note that this field is marked as OPTIONAL for
      backward compatibility reasons. It is (and has always been) effectively
      REQUIRED.
  """

  class EncodingValueValuesEnum(_messages.Enum):
    r"""Required. Encoding of the audio data sent for recognition.

    Values:
      AUDIO_ENCODING_UNSPECIFIED: Default value. This value is unused.
      LINEAR16: Headerless 16-bit signed little-endian PCM samples.
      MULAW: Headerless 8-bit companded mulaw samples.
      ALAW: Headerless 8-bit companded alaw samples.
      AMR: AMR frames with an rfc4867.5 header.
      AMR_WB: AMR-WB frames with an rfc4867.5 header.
      FLAC: FLAC frames in the "native FLAC" container format.
      MP3: MPEG audio frames with optional (ignored) ID3 metadata.
      OGG_OPUS: Opus audio frames in an Ogg container.
      WEBM_OPUS: Opus audio frames in a WebM container.
      MP4_AAC: AAC audio frames in an MP4 container.
      M4A_AAC: AAC audio frames in an M4A container.
      MOV_AAC: AAC audio frames in an MOV container.
    """
    AUDIO_ENCODING_UNSPECIFIED = 0
    LINEAR16 = 1
    MULAW = 2
    ALAW = 3
    AMR = 4
    AMR_WB = 5
    FLAC = 6
    MP3 = 7
    OGG_OPUS = 8
    WEBM_OPUS = 9
    MP4_AAC = 10
    M4A_AAC = 11
    MOV_AAC = 12

  audioChannelCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  encoding = _messages.EnumField('EncodingValueValuesEnum', 2)
  sampleRateHertz = _messages.IntegerField(3, variant=_messages.Variant.INT32)


class GcsOutputConfig(_messages.Message):
  r"""Output configurations for Cloud Storage.

  Fields:
    uri: The Cloud Storage URI prefix with which recognition results will be
      written.
  """

  uri = _messages.StringField(1)


class InlineOutputConfig(_messages.Message):
  r"""Output configurations for inline response."""
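

# Illustrative sketch (not part of the generated API surface): an explicit
# decoding config for headerless 16-bit linear PCM, mono, at the recommended
# 16 kHz sample rate. The values are examples only; see the field docstrings
# above for the allowed ranges.
_EXAMPLE_EXPLICIT_DECODING_CONFIG = ExplicitDecodingConfig(
    encoding=ExplicitDecodingConfig.EncodingValueValuesEnum.LINEAR16,
    sampleRateHertz=16000,
    audioChannelCount=1)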
Srg)	r9   i  a  Final results returned inline in the recognition response.

Fields:
  srtCaptions: The transcript for the audio file as SRT formatted captions.
    This is populated only when `SRT` output is requested.
  transcript: The transcript for the audio file.
  vttCaptions: The transcript for the audio file as VTT formatted captions.
    This is populated only when `VTT` output is requested.
r   r<   r#   r0   r   N)r   r   r   r   r   r   r&   srtCaptionsr$   rC   vttCaptionsr   r   r   r   r9   r9     s<     %%a(+%%&=qA*%%a(+r   r9   c                       \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " SS5      rSrg)	LanguageMetadatai  zThe metadata about locales available in a given region. Currently this
is just the models that are available for each locale

Messages:
  ModelsValue: Map of locale (language code) -> models

Fields:
  models: Map of locale (language code) -> models
rG   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
LanguageMetadata.ModelsValuei  zMap of locale (language code) -> models

Messages:
  AdditionalProperty: An additional property for a ModelsValue object.

Fields:
  additionalProperties: Additional properties of type ModelsValue
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)/LanguageMetadata.ModelsValue.AdditionalPropertyi  zAn additional property for a ModelsValue object.

Fields:
  key: Name of the additional property.
  value: A ModelMetadata attribute.
r   ModelMetadatar#   r   NrO   r   r   r   rR   r     +    
 !!!$c$$_a8er   rR   r   TrS   r   NrU   r   r   r   ModelsValuer     s2    	9Y.. 	9 %112FTXYr   r   r   r   N)r   r   r   r   r   r   rX   r   rV   r   r$   modelsr   r   r   r   r   r     sK     !!"89ZI%% Z :Z. !!-3&r   r   c                   `    \ rS rSrSr\R                  " SSSS9r\R                  " S5      r	Sr
g	)
ListCustomClassesResponsei  a+  Response message for the ListCustomClasses method.

Fields:
  customClasses: The list of requested CustomClasses.
  nextPageToken: A token, which can be sent as page_token to retrieve the
    next page. If this field is omitted, there are no subsequent pages. This
    token expires after 72 hours.
r   r   TrS   r#   r   N)r   r   r   r   r   r   r$   customClassesr&   nextPageTokenr   r   r   r   r   r     s-     ((DI-''*-r   r   c                   `    \ rS rSrSr\R                  " SSSS9r\R                  " S5      r	Sr
g	)
ListLocationsResponsei  zThe response message for Locations.ListLocations.

Fields:
  locations: A list of locations that matches the specified filter in the
    request.
  nextPageToken: The standard List next-page token.
Locationr   TrS   r#   r   N)r   r   r   r   r   r   r$   	locationsr&   r   r   r   r   r   r   r     s-     $$ZTB)''*-r   r   c                       \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	\R                  " SSS9r
S	rg
)ListOperationsResponsei  a  The response message for Operations.ListOperations.

Fields:
  nextPageToken: The standard List next-page token.
  operations: A list of operations that matches the specified filter in the
    request.
  unreachable: Unordered list. Unreachable resources. Populated when the
    request sets `ListOperationsRequest.return_partial_success` and reads
    across collections e.g. when attempting to list all resources across all
    supported locations.
r   	Operationr#   TrS   r0   r   N)r   r   r   r   r   r   r&   r   r$   
operationsunreachabler   r   r   r   r   r     s?    
 ''*-%%k1tD*%%a$7+r   r   c                   `    \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	Sr
g	)
ListPhraseSetsResponsei  a"  Response message for the ListPhraseSets method.

Fields:
  nextPageToken: A token, which can be sent as page_token to retrieve the
    next page. If this field is omitted, there are no subsequent pages. This
    token expires after 72 hours.
  phraseSets: The list of requested PhraseSets.
r   r"   r#   TrS   r   N)r   r   r   r   r   r   r&   r   r$   
phraseSetsr   r   r   r   r  r    s-     ''*-%%k1tD*r   r  c                   `    \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	Sr
g	)
ListRecognizersResponsei  a%  Response message for the ListRecognizers method.

Fields:
  nextPageToken: A token, which can be sent as page_token to retrieve the
    next page. If this field is omitted, there are no subsequent pages. This
    token expires after 72 hours.
  recognizers: The list of requested Recognizers.
r   r   r#   TrS   r   N)r   r   r   r   r   r   r&   r   r$   recognizersr   r   r   r   r  r    s-     ''*-&&|QF+r   r  c                   x   \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " S5       " S S\R                  5      5       r
\R                  " S5      r\R                  " SS	5      r\R                  " S
5      r\R                  " SS5      r\R                  " S5      rSrg)r   i!  aM  A resource that represents a Google Cloud location.

Messages:
  LabelsValue: Cross-service attributes for the location. For example
    {"cloud.googleapis.com/region": "us-east1"}
  MetadataValue: Service-specific metadata. For example the available
    capacity at the given location.

Fields:
  displayName: The friendly name for this location, typically a nearby city
    name. For example, "Tokyo".
  labels: Cross-service attributes for the location. For example
    {"cloud.googleapis.com/region": "us-east1"}
  locationId: The canonical id for this location. For example: `"us-east1"`.
  metadata: Service-specific metadata. For example the available capacity at
    the given location.
  name: Resource name for the location, which may vary between
    implementations. For example: `"projects/example-project/locations/us-
    east1"`
rG   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Location.LabelsValuei7  a   Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}

Messages:
  AdditionalProperty: An additional property for a LabelsValue object.

Fields:
  additionalProperties: Additional properties of type LabelsValue
c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      rSr	g)'Location.LabelsValue.AdditionalPropertyiC  zAn additional property for a LabelsValue object.

Fields:
  key: Name of the additional property.
  value: A string attribute.
r   r#   r   Nr   r   r   r   rR   r  C  r   r   rR   r   TrS   r   NrU   r   r   r   LabelsValuer  7  s2    	'Y.. 	' %112FTXYr   r  c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Location.MetadataValueiP  a  Service-specific metadata. For example the available capacity at the
given location.

Messages:
  AdditionalProperty: An additional property for a MetadataValue object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g))Location.MetadataValue.AdditionalPropertyi]  An additional property for a MetadataValue object.

Fields:
  key: Name of the additional property.
  value: A extra_types.JsonValue attribute.
r   extra_types.JsonValuer#   r   NrO   r   r   r   rR   r  ]  ,    
 !!!$c$$%<a@er   rR   r   TrS   r   NrU   r   r   r   MetadataValuer  P  4    		AY.. 	A %112FTXYr   r  r   r#   r0   r;   r=   r   N)r   r   r   r   r   r   rX   r   rV   r  r  r&   r   r$   labels
locationIdrB   r   r   r   r   r   r   r   !  s    * !!"89ZI%% Z :Z0 !!"89Zi'' Z :Z2 %%a(+!!-3&$$Q'*##OQ7(			q	!$r   r   c                   d    \ rS rSrSr\R                  " SS5      r\R                  " SS5      rSr	g)	LocationsMetadataiq  au  Main metadata for the Locations API for STT V2. Currently this is just
the metadata about locales, models, and features

Fields:
  accessMetadata: Information about access metadata for the region and given
    project.
  languages: Information about available locales, models, and features
    represented in the hierarchical structure of locales -> models ->
    features
r	   r   r   r#   r   N)
r   r   r   r   r   r   r$   accessMetadata	languagesr   r   r   r   r  r  q  s/    	 ))*:A>.$$%7;)r   r  c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      rSr	g)ModelFeaturei  a  Represents a singular feature of a model. If the feature is
`recognizer`, the release_state of the feature represents the release_state
of the model

Fields:
  feature: The name of the feature (Note: the feature can be `recognizer`)
  releaseState: The release state of the feature
r   r#   r   N)
r   r   r   r   r   r   r&   featurereleaseStater   r   r   r   r   r     s)     !!!$'&&q),r   r   c                   <    \ rS rSrSr\R                  " SSSS9rSrg)	ModelFeaturesi  zRepresents the collection of features belonging to a model

Fields:
  modelFeature: Repeated field that contains all features of the model
r   r   TrS   r   N)	r   r   r   r   r   r   r$   modelFeaturer   r   r   r   r$  r$    s     ''DI,r   r$  c                       \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " SS5      rSrg)	r   i  a  The metadata about the models in a given region for a specific locale.
Currently this is just the features of the model

Messages:
  ModelFeaturesValue: Map of the model name -> features of that model

Fields:
  modelFeatures: Map of the model name -> features of that model
rG   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
 ModelMetadata.ModelFeaturesValuei  zMap of the model name -> features of that model

Messages:
  AdditionalProperty: An additional property for a ModelFeaturesValue
    object.

Fields:
  additionalProperties: Additional properties of type ModelFeaturesValue
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)3ModelMetadata.ModelFeaturesValue.AdditionalPropertyi  zAn additional property for a ModelFeaturesValue object.

Fields:
  key: Name of the additional property.
  value: A ModelFeatures attribute.
r   r$  r#   r   NrO   r   r   r   rR   r*    r   r   rR   r   TrS   r   NrU   r   r   r   ModelFeaturesValuer(    s2    	9Y.. 	9 %112FTXYr   r+  r   r   N)r   r   r   r   r   r   rX   r   rV   r+  r$   modelFeaturesr   r   r   r   r   r     sL     !!"89Z9,, Z :Z0 (()=qA-r   r   c                       \ rS rSrSrSrg)NativeOutputFileFormatConfigi  zDOutput configurations for serialized `BatchRecognizeResults` protos.r   Nr+   r   r   r   r.  r.    s    Mr   r.  c                   z   \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " S5       " S S\R                  5      5       r
\R                  " S5      r\R                  " S	S
5      r\R                  " SS5      r\R                   " S5      r\R                  " SS5      rSrg)r  i  a  This resource represents a long-running operation that is the result of
a network API call.

Messages:
  MetadataValue: Service-specific metadata associated with the operation. It
    typically contains progress information and common metadata such as
    create time. Some services might not provide such metadata. Any method
    that returns a long-running operation should document the metadata type,
    if any.
  ResponseValue: The normal, successful response of the operation. If the
    original method returns no data on success, such as `Delete`, the
    response is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

Fields:
  done: If the value is `false`, it means the operation is still in
    progress. If `true`, the operation is completed, and either `error` or
    `response` is available.
  error: The error result of the operation in case of failure or
    cancellation.
  metadata: Service-specific metadata associated with the operation. It
    typically contains progress information and common metadata such as
    create time. Some services might not provide such metadata. Any method
    that returns a long-running operation should document the metadata type,
    if any.
  name: The server-assigned name, which is only unique within the same
    service that originally returns it. If you use the default HTTP mapping,
    the `name` should be a resource name ending with
    `operations/{unique_id}`.
  response: The normal, successful response of the operation. If the
    original method returns no data on success, such as `Delete`, the
    response is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
rG   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Operation.MetadataValuei  a  Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.

Messages:
  AdditionalProperty: An additional property for a MetadataValue object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)*Operation.MetadataValue.AdditionalPropertyi  r  r   r  r#   r   NrO   r   r   r   rR   r3    r  r   rR   r   TrS   r   NrU   r   r   r   r  r1    s4    	AY.. 	A %112FTXYr   r  c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Operation.ResponseValuei
  a  The normal, successful response of the operation. If the original
method returns no data on success, such as `Delete`, the response is
`google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

Messages:
  AdditionalProperty: An additional property for a ResponseValue object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)*Operation.ResponseValue.AdditionalPropertyi  zAn additional property for a ResponseValue object.

Fields:
  key: Name of the additional property.
  value: A extra_types.JsonValue attribute.
r   r  r#   r   NrO   r   r   r   rR   r7    r  r   rR   r   TrS   r   NrU   r   r   r   ResponseValuer5  
  s4     	AY.. 	A %112FTXYr   r8  r   r8   r#   r0   r;   r=   r   N)r   r   r   r   r   r   rX   r   rV   r  r8  r   doner$   r@   rB   r&   r   responser   r   r   r   r  r    s    'R !!"89Zi'' Z :Z6 !!"89Zi'' Z :Z< 
			"$

 
 1
-%##OQ7(			q	!$##OQ7(r   r  c                   t   \ rS rSrSr\R                  " SS5      r\R                  " SS5      r\R                  " SS5      r	\R                  " S	S
5      r
\R                  " SS5      r\R                  " S5      r\R                  " SS5      r\R                  " SS5      r\R                  " SS5      r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R(                  " S\R*                  R,                  S9r\R                  " S5      r\R                  " SS5      r\R                  " SS5      r\R                  " SS5      r\R                  " S S!5      r\R                  " S"S#5      r\R                  " S$S%5      r\R                  " S&S'5      r\R                  " S(5      r S)r!g*)+OperationMetadatai0  a
	  Represents the metadata of a long-running operation.

Fields:
  batchRecognizeMetadata: Metadata specific to the BatchRecognize method.
  batchRecognizeRequest: The BatchRecognizeRequest that spawned the
    Operation.
  createCustomClassRequest: The CreateCustomClassRequest that spawned the
    Operation.
  createPhraseSetRequest: The CreatePhraseSetRequest that spawned the
    Operation.
  createRecognizerRequest: The CreateRecognizerRequest that spawned the
    Operation.
  createTime: The time the operation was created.
  deleteCustomClassRequest: The DeleteCustomClassRequest that spawned the
    Operation.
  deletePhraseSetRequest: The DeletePhraseSetRequest that spawned the
    Operation.
  deleteRecognizerRequest: The DeleteRecognizerRequest that spawned the
    Operation.
  kmsKeyName: The [KMS key name](https://cloud.google.com/kms/docs/resource-
    hierarchy#keys) with which the content of the Operation is encrypted.
    The expected format is `projects/{project}/locations/{location}/keyRings
    /{key_ring}/cryptoKeys/{crypto_key}`.
  kmsKeyVersionName: The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which content of the Operation is encrypted. The expected format is
    `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/
    {crypto_key}/cryptoKeyVersions/{crypto_key_version}`.
  method: The method that triggered the operation.
  progressPercent: The percent progress of the Operation. Values can range
    from 0-100. If the value is 100, then the operation is finished.
  resource: The resource path for the target of the operation.
  undeleteCustomClassRequest: The UndeleteCustomClassRequest that spawned
    the Operation.
  undeletePhraseSetRequest: The UndeletePhraseSetRequest that spawned the
    Operation.
  undeleteRecognizerRequest: The UndeleteRecognizerRequest that spawned the
    Operation.
  updateConfigRequest: The UpdateConfigRequest that spawned the Operation.
  updateCustomClassRequest: The UpdateCustomClassRequest that spawned the
    Operation.
  updatePhraseSetRequest: The UpdatePhraseSetRequest that spawned the
    Operation.
  updateRecognizerRequest: The UpdateRecognizerRequest that spawned the
    Operation.
  updateTime: The time the operation was last updated.
  """

  batchRecognizeMetadata = _messages.MessageField('BatchRecognizeMetadata', 1)
  batchRecognizeRequest = _messages.MessageField('BatchRecognizeRequest', 2)
  createCustomClassRequest = _messages.MessageField('CreateCustomClassRequest', 3)
  createPhraseSetRequest = _messages.MessageField('CreatePhraseSetRequest', 4)
  createRecognizerRequest = _messages.MessageField('CreateRecognizerRequest', 5)
  createTime = _messages.StringField(6)
  deleteCustomClassRequest = _messages.MessageField('DeleteCustomClassRequest', 7)
  deletePhraseSetRequest = _messages.MessageField('DeletePhraseSetRequest', 8)
  deleteRecognizerRequest = _messages.MessageField('DeleteRecognizerRequest', 9)
  kmsKeyName = _messages.StringField(10)
  kmsKeyVersionName = _messages.StringField(11)
  method = _messages.StringField(12)
  progressPercent = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  resource = _messages.StringField(14)
  undeleteCustomClassRequest = _messages.MessageField('UndeleteCustomClassRequest', 15)
  undeletePhraseSetRequest = _messages.MessageField('UndeletePhraseSetRequest', 16)
  undeleteRecognizerRequest = _messages.MessageField('UndeleteRecognizerRequest', 17)
  updateConfigRequest = _messages.MessageField('UpdateConfigRequest', 18)
  updateCustomClassRequest = _messages.MessageField('UpdateCustomClassRequest', 19)
  updatePhraseSetRequest = _messages.MessageField('UpdatePhraseSetRequest', 20)
  updateRecognizerRequest = _messages.MessageField('UpdateRecognizerRequest', 21)
  updateTime = _messages.StringField(22)
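
# Illustrative sketch (not part of the generated API surface): summarizing an
# Operation message from the documented fields above. The wording of the
# summary string is an arbitrary choice.
def _example_describe_operation(operation):
  """Returns a one-line, human-readable summary of an Operation message."""
  return '%s on %s: %d%% complete' % (
      operation.method, operation.resource, operation.progressPercent or 0)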


class OutputFormatConfig(_messages.Message):
  r"""Configuration for the format of the results stored to `output`.

Fields:
  native: Configuration for the native output format. If this field is set
    or if no other output format field is set, then transcripts will be
    written to the sink in the native format.
  srt: Configuration for the SRT output format. If this field is set, then
    transcripts will be written to the sink in the SRT format.
  vtt: Configuration for the VTT output format. If this field is set, then
    transcripts will be written to the sink in the VTT format.
  """

  native = _messages.MessageField('NativeOutputFileFormatConfig', 1)
  srt = _messages.MessageField('SrtOutputFileFormatConfig', 2)
  vtt = _messages.MessageField('VttOutputFileFormatConfig', 3)


class Phrase(_messages.Message):
  r"""A Phrase contains words and phrase "hints" so that the speech
recognition is more likely to recognize them. This can be used to improve
the accuracy for specific words and phrases, for example, if specific
commands are typically spoken by the user. This can also be used to add
additional words to the vocabulary of the recognizer. List items can also
include CustomClass references containing groups of words that represent
common concepts that occur in natural language.

Fields:
  boost: Hint Boost. Overrides the boost set at the phrase set level.
    Positive value will increase the probability that a specific phrase will
    be recognized over other similar sounding phrases. The higher the boost,
    the higher the chance of false positive recognition as well. Negative
    boost values would correspond to anti-biasing. Anti-biasing is not
    enabled, so negative boost values will return an error. Boost values
    must be between 0 and 20. Any values outside that range will return an
    error. We recommend using a binary search approach to finding the
    optimal value for your use case as well as adding phrases both with and
    without boost to your requests.
  value: The phrase itself.
  """

  boost = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  value = _messages.StringField(2)


class PhraseSet(_messages.Message):
  r"""PhraseSet for biasing in speech recognition. A PhraseSet is used to
provide "hints" to the speech recognizer to favor specific words and phrases
in the results.

Enums:
  StateValueValuesEnum: Output only. The PhraseSet lifecycle state.

Messages:
  AnnotationsValue: Allows users to store small amounts of arbitrary data.
    Both the key and the value must be 63 characters or less each. At most
    100 annotations.

Fields:
  annotations: Allows users to store small amounts of arbitrary data. Both
    the key and the value must be 63 characters or less each. At most 100
    annotations.
  boost: Hint Boost. Positive value will increase the probability that a
    specific phrase will be recognized over other similar sounding phrases.
    The higher the boost, the higher the chance of false positive
    recognition as well. Valid `boost` values are between 0 (exclusive) and
    20. We recommend using a binary search approach to finding the optimal
    value for your use case as well as adding phrases both with and without
    boost to your requests.
  createTime: Output only. Creation time.
  deleteTime: Output only. The time at which this resource was requested for
    deletion.
  displayName: User-settable, human-readable name for the PhraseSet. Must be
    63 characters or less.
  etag: Output only. This checksum is computed by the server based on the
    value of other fields. This may be sent on update, undelete, and delete
    requests to ensure the client has an up-to-date value before proceeding.
  expireTime: Output only. The time at which this resource will be purged.
  kmsKeyName: Output only. The [KMS key
    name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with
    which the PhraseSet is encrypted. The expected format is `projects/{proj
    ect}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
  kmsKeyVersionName: Output only. The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which the PhraseSet is encrypted. The expected format is `projects/
    {project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_ke
    y}/cryptoKeyVersions/{crypto_key_version}`.
  name: Output only. Identifier. The resource name of the PhraseSet. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
  phrases: A list of words and phrases.
  reconciling: Output only. Whether or not this PhraseSet is in the process
    of being updated.
  state: Output only. The PhraseSet lifecycle state.
  uid: Output only. System-assigned unique identifier for the PhraseSet.
  updateTime: Output only. The most recent time this resource was modified.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The PhraseSet lifecycle state.

Values:
  STATE_UNSPECIFIED: Unspecified state. This is only used/useful for
    distinguishing unset values.
  ACTIVE: The normal and active state.
  DELETED: This PhraseSet has been deleted.
    """
    STATE_UNSPECIFIED = 0
    ACTIVE = 1
    DELETED = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class AnnotationsValue(_messages.Message):
    r"""Allows users to store small amounts of arbitrary data. Both the key
and the value must be 63 characters or less each. At most 100 annotations.

Messages:
  AdditionalProperty: An additional property for an AnnotationsValue
    object.

Fields:
  additionalProperties: Additional properties of type AnnotationsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for an AnnotationsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  annotations = _messages.MessageField('AnnotationsValue', 1)
  boost = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
  createTime = _messages.StringField(3)
  deleteTime = _messages.StringField(4)
  displayName = _messages.StringField(5)
  etag = _messages.StringField(6)
  expireTime = _messages.StringField(7)
  kmsKeyName = _messages.StringField(8)
  kmsKeyVersionName = _messages.StringField(9)
  name = _messages.StringField(10)
  phrases = _messages.MessageField('Phrase', 11, repeated=True)
  reconciling = _messages.BooleanField(12)
  state = _messages.EnumField('StateValueValuesEnum', 13)
  uid = _messages.StringField(14)
  updateTime = _messages.StringField(15)
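
# Illustrative sketch (not part of the generated API surface): a PhraseSet
# with inline phrases for biasing. The phrase values, display name, and boost
# are example data only.
def _example_build_phrase_set():
  return PhraseSet(
      displayName='menu items',
      phrases=[
          Phrase(value='matcha latte', boost=10.0),
          Phrase(value='oat milk'),
      ])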


class RecognitionConfig(_messages.Message):
  r"""Provides information to the Recognizer that specifies how to process the
recognition request.

Fields:
  adaptation: Speech adaptation context that weights recognizer predictions
    for specific words and phrases.
  autoDecodingConfig: Automatically detect decoding parameters. Preferred
    for supported formats.
  denoiserConfig: Optional. Denoiser configuration. May not be supported
    for all models and may have no effect.
  explicitDecodingConfig: Explicitly specified decoding parameters. Required
    if using headerless PCM audio (linear16, mulaw, alaw).
  features: Speech recognition features to enable.
  languageCodes: Optional. The language of the supplied audio as a
    [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    Language tags are normalized to BCP-47 before they are used, e.g. "en-us"
    becomes "en-US". Supported languages for each model are listed in the
    [Table of Supported Models](https://cloud.google.com/speech-to-
    text/v2/docs/speech-to-text-supported-languages). If additional
    languages are provided, recognition result will contain recognition in
    the most likely language detected. The recognition result will include
    the language tag of the language detected in the audio.
  model: Optional. Which model to use for recognition requests. Select the
    model best suited to your domain to get best results. Guidance for
    choosing which model to use can be found in the [Transcription Models
    Documentation](https://cloud.google.com/speech-to-
    text/v2/docs/transcription-model) and the models supported in each
    region can be found in the [Table Of Supported
    Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-
    supported-languages).
  transcriptNormalization: Optional. Use transcription normalization to
    automatically replace parts of the transcript with phrases of your
    choosing. For StreamingRecognize, this normalization only applies to
    stable partial transcripts (stability > 0.8) and final transcripts.
  translationConfig: Optional. Configuration used to automatically
    run translation on the given audio to the desired language for supported
    models.
  """

  adaptation = _messages.MessageField('SpeechAdaptation', 1)
  autoDecodingConfig = _messages.MessageField('AutoDetectDecodingConfig', 2)
  denoiserConfig = _messages.MessageField('DenoiserConfig', 3)
  explicitDecodingConfig = _messages.MessageField('ExplicitDecodingConfig', 4)
  features = _messages.MessageField('RecognitionFeatures', 5)
  languageCodes = _messages.StringField(6, repeated=True)
  model = _messages.StringField(7)
  transcriptNormalization = _messages.MessageField('TranscriptNormalization', 8)
  translationConfig = _messages.MessageField('TranslationConfig', 9)


class RecognitionFeatures(_messages.Message):
  r"""Available recognition features.

Enums:
  MultiChannelModeValueValuesEnum: Mode for recognizing multi-channel audio.

Fields:
  diarizationConfig: Configuration to enable speaker diarization. To enable
    diarization, set this field to an empty SpeakerDiarizationConfig
    message.
  enableAutomaticPunctuation: If `true`, adds punctuation to recognition
    result hypotheses. This feature is only available in select languages.
    The default `false` value does not add punctuation to result hypotheses.
  enableSpokenEmojis: The spoken emoji behavior for the call. If `true`,
    adds spoken emoji formatting for the request. This will replace spoken
    emojis with the corresponding Unicode symbols in the final transcript.
    If `false`, spoken emojis are not replaced.
  enableSpokenPunctuation: The spoken punctuation behavior for the call. If
    `true`, replaces spoken punctuation with the corresponding symbols in
    the request. For example, "how are you question mark" becomes "how are
    you?". See https://cloud.google.com/speech-to-text/docs/spoken-
    punctuation for support. If `false`, spoken punctuation is not replaced.
  enableWordConfidence: If `true`, the top result includes a list of words
    and the confidence for those words. If `false`, no word-level confidence
    information is returned. The default is `false`.
  enableWordTimeOffsets: If `true`, the top result includes a list of words
    and the start and end time offsets (timestamps) for those words. If
    `false`, no word-level time offset information is returned. The default
    is `false`.
  maxAlternatives: Maximum number of recognition hypotheses to be returned.
    The server may return fewer than `max_alternatives`. Valid values are
    `0`-`30`. A value of `0` or `1` will return a maximum of one. If
    omitted, will return a maximum of one.
  multiChannelMode: Mode for recognizing multi-channel audio.
  profanityFilter: If set to `true`, the server will attempt to filter out
    profanities, replacing all but the initial character in each filtered
    word with asterisks, for instance, "f***". If set to `false` or omitted,
    profanities won't be filtered out.
c                        \ rS rSrSrSrSrSrg)3RecognitionFeatures.MultiChannelModeValueValuesEnumim  a  Mode for recognizing multi-channel audio.

Values:
  MULTI_CHANNEL_MODE_UNSPECIFIED: Default value for the multi-channel
    mode. If the audio contains multiple channels, only the first channel
    will be transcribed; other channels will be ignored.
  SEPARATE_RECOGNITION_PER_CHANNEL: If selected, each channel in the
    provided audio is transcribed independently. This cannot be selected
    if the selected model is `latest_short`.
    """
    MULTI_CHANNEL_MODE_UNSPECIFIED = 0
    SEPARATE_RECOGNITION_PER_CHANNEL = 1

  diarizationConfig = _messages.MessageField('SpeakerDiarizationConfig', 1)
  enableAutomaticPunctuation = _messages.BooleanField(2)
  enableSpokenEmojis = _messages.BooleanField(3)
  enableSpokenPunctuation = _messages.BooleanField(4)
  enableWordConfidence = _messages.BooleanField(5)
  enableWordTimeOffsets = _messages.BooleanField(6)
  maxAlternatives = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  multiChannelMode = _messages.EnumField('MultiChannelModeValueValuesEnum', 8)
  profanityFilter = _messages.BooleanField(9)
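
# Illustrative sketch (not part of the generated API surface): a
# RecognitionConfig that auto-detects the audio encoding and enables some of
# the features above. The language code and model name are example values,
# and AutoDetectDecodingConfig is the (empty) message defined earlier in this
# module.
def _example_build_recognition_config():
  return RecognitionConfig(
      autoDecodingConfig=AutoDetectDecodingConfig(),
      languageCodes=['en-US'],
      model='long',
      features=RecognitionFeatures(
          enableAutomaticPunctuation=True,
          enableWordTimeOffsets=True))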


class RecognitionOutputConfig(_messages.Message):
  r"""Configuration options for the output(s) of recognition.

Fields:
  gcsOutputConfig: If this message is populated, recognition results are
    written to the provided Google Cloud Storage URI.
  inlineResponseConfig: If this message is populated, recognition results
    are provided in the BatchRecognizeResponse message of the Operation when
    completed. This is only supported when calling BatchRecognize with just
    one audio file.
  outputFormatConfig: Optional. Configuration for the format of the results
    stored to `output`. If unspecified transcripts will be written in the
    `NATIVE` format only.
  """

  gcsOutputConfig = _messages.MessageField('GcsOutputConfig', 1)
  inlineResponseConfig = _messages.MessageField('InlineOutputConfig', 2)
  outputFormatConfig = _messages.MessageField('OutputFormatConfig', 3)


class RecognitionResponseMetadata(_messages.Message):
  r"""Metadata about the recognition request and response.

Fields:
  requestId: Global request identifier auto-generated by the API.
  totalBilledDuration: When available, billed audio seconds for the
    corresponding request.
  """

  requestId = _messages.StringField(1)
  totalBilledDuration = _messages.StringField(2)


class RecognizeRequest(_messages.Message):
  r"""Request message for the Recognize method. Either `content` or `uri` must
be supplied. Supplying both or neither returns INVALID_ARGUMENT. See
[content limits](https://cloud.google.com/speech-to-text/quotas#content).

Fields:
  config: Features and audio metadata to use for the Automatic Speech
    Recognition. This field in combination with the config_mask field can be
    used to override parts of the default_recognition_config of the
    Recognizer resource.
  configMask: The list of fields in config that override the values in the
    default_recognition_config of the recognizer during this recognition
    request. If no mask is provided, all non-default valued fields in config
    override the values in the recognizer for this recognition request. If a
    mask is provided, only the fields listed in the mask override the config
    in the recognizer for this recognition request. If a wildcard (`*`) is
    provided, config completely overrides and replaces the config in the
    recognizer for this recognition request.
  content: The audio data bytes encoded as specified in RecognitionConfig.
    As with all bytes fields, proto buffers use a pure binary
    representation, whereas JSON representations use base64.
  uri: URI that points to a file that contains audio data bytes as specified
    in RecognitionConfig. The file must not be compressed (for example,
    gzip). Currently, only Google Cloud Storage URIs are supported, which
    must be specified in the following format:
    `gs://bucket_name/object_name` (other URI formats return
    INVALID_ARGUMENT). For more information, see [Request
    URIs](https://cloud.google.com/storage/docs/reference-uris).
  """

  config = _messages.MessageField('RecognitionConfig', 1)
  configMask = _messages.StringField(2)
  content = _messages.BytesField(3)
  uri = _messages.StringField(4)
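
# Illustrative sketch (not part of the generated API surface): a
# RecognizeRequest carries either inline `content` bytes or a Cloud Storage
# `uri`, never both. The configuration and URI below are example values.
def _example_build_recognize_request(audio_bytes=None):
  config = RecognitionConfig(
      autoDecodingConfig=AutoDetectDecodingConfig(), languageCodes=['en-US'])
  if audio_bytes is not None:
    return RecognizeRequest(config=config, content=audio_bytes)
  return RecognizeRequest(config=config, uri='gs://example-bucket/audio.wav')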


class RecognizeResponse(_messages.Message):
  r"""Response message for the Recognize method.

Fields:
  metadata: Metadata about the recognition.
  results: Sequential list of transcription results corresponding to
    sequential portions of audio.
  """

  metadata = _messages.MessageField('RecognitionResponseMetadata', 1)
  results = _messages.MessageField('SpeechRecognitionResult', 2, repeated=True)


class Recognizer(_messages.Message):
  r"""A Recognizer message. Stores recognition configuration and metadata.

Enums:
  StateValueValuesEnum: Output only. The Recognizer lifecycle state.

Messages:
  AnnotationsValue: Allows users to store small amounts of arbitrary data.
    Both the key and the value must be 63 characters or less each. At most
    100 annotations.

Fields:
  annotations: Allows users to store small amounts of arbitrary data. Both
    the key and the value must be 63 characters or less each. At most 100
    annotations.
  createTime: Output only. Creation time.
  defaultRecognitionConfig: Default configuration to use for requests with
    this Recognizer. This can be overwritten by inline configuration in the
    RecognizeRequest.config field.
  deleteTime: Output only. The time at which this Recognizer was requested
    for deletion.
  displayName: User-settable, human-readable name for the Recognizer. Must
    be 63 characters or less.
  etag: Output only. This checksum is computed by the server based on the
    value of other fields. This may be sent on update, undelete, and delete
    requests to ensure the client has an up-to-date value before proceeding.
  expireTime: Output only. The time at which this Recognizer will be purged.
  kmsKeyName: Output only. The [KMS key
    name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with
    which the Recognizer is encrypted. The expected format is `projects/{pro
    ject}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
  kmsKeyVersionName: Output only. The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which the Recognizer is encrypted. The expected format is `projects
    /{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_k
    ey}/cryptoKeyVersions/{crypto_key_version}`.
  languageCodes: Optional. This field is now deprecated. Prefer the
    `language_codes` field in the `RecognitionConfig` message. The language
    of the supplied audio as a [BCP-47](https://www.rfc-
    editor.org/rfc/bcp/bcp47.txt) language tag. Supported languages for each
    model are listed in the [Table of Supported
    Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-
    supported-languages). If additional languages are provided, recognition
    result will contain recognition in the most likely language detected.
    The recognition result will include the language tag of the language
    detected in the audio. When you create or update a Recognizer, these
    values are stored in normalized BCP-47 form. For example, "en-us" is
    stored as "en-US".
  model: Optional. This field is now deprecated. Prefer the `model` field in
    the `RecognitionConfig` message. Which model to use for recognition
    requests. Select the model best suited to your domain to get best
    results. Guidance for choosing which model to use can be found in the
    [Transcription Models Documentation](https://cloud.google.com/speech-to-
    text/v2/docs/transcription-model) and the models supported in each
    region can be found in the [Table Of Supported
    Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-
    supported-languages).
  name: Output only. Identifier. The resource name of the Recognizer.
    Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`.
  reconciling: Output only. Whether or not this Recognizer is in the process
    of being updated.
  state: Output only. The Recognizer lifecycle state.
  uid: Output only. System-assigned unique identifier for the Recognizer.
  updateTime: Output only. The most recent time this Recognizer was
    modified.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The Recognizer lifecycle state.

Values:
  STATE_UNSPECIFIED: The default value. This value is used if the state is
    omitted.
  ACTIVE: The Recognizer is active and ready for use.
  DELETED: This Recognizer has been deleted.
    """
    STATE_UNSPECIFIED = 0
    ACTIVE = 1
    DELETED = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class AnnotationsValue(_messages.Message):
    r"""Allows users to store small amounts of arbitrary data. Both the key
    and the value must be 63 characters or less each. At most 100 annotations.

    Messages:
      AdditionalProperty: An additional property for an AnnotationsValue
        object.

    Fields:
      additionalProperties: Additional properties of type AnnotationsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for an AnnotationsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  annotations = _messages.MessageField('AnnotationsValue', 1)
  createTime = _messages.StringField(2)
  defaultRecognitionConfig = _messages.MessageField('RecognitionConfig', 3)
  deleteTime = _messages.StringField(4)
  displayName = _messages.StringField(5)
  etag = _messages.StringField(6)
  expireTime = _messages.StringField(7)
  kmsKeyName = _messages.StringField(8)
  kmsKeyVersionName = _messages.StringField(9)
  languageCodes = _messages.StringField(10, repeated=True)
  model = _messages.StringField(11)
  name = _messages.StringField(12)
  reconciling = _messages.BooleanField(13)
  state = _messages.EnumField('StateValueValuesEnum', 14)
  uid = _messages.StringField(15)
  updateTime = _messages.StringField(16)
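
# Illustrative sketch (not part of the generated API surface): a Recognizer
# carrying a default configuration that later RecognizeRequests can override
# through `config`/`configMask`. Output-only fields (name, state, uid, ...)
# are left unset because the service populates them.
def _example_build_recognizer():
  return Recognizer(
      displayName='example recognizer',
      defaultRecognitionConfig=RecognitionConfig(
          autoDecodingConfig=AutoDetectDecodingConfig(),
          languageCodes=['en-US'],
          model='long'))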


class SpeakerDiarizationConfig(_messages.Message):
  r"""Configuration to enable speaker diarization.

Fields:
  maxSpeakerCount: Optional. The system automatically determines the number
    of speakers. This value is not currently used.
  minSpeakerCount: Optional. The system automatically determines the number
    of speakers. This value is not currently used.
r   rx   r#   r   N)r   r   r   r   r   r   rz   r{   r|   maxSpeakerCountminSpeakerCountr   r   r   r   r  r  U  sE     **1i6G6G6M6MN/**1i6G6G6M6MN/r   r  c                   `    \ rS rSrSr\R                  " SSSS9r\R                  " SSSS9rS	r	g
  """

  maxSpeakerCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  minSpeakerCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)


class SpeechAdaptation(_messages.Message):
  r"""Provides "hints" to the speech recognizer to favor specific words and
or a reference to an existing PhraseSet resource.

Fields:
  customClasses: A list of inline CustomClasses. Existing CustomClass
    resources can be referenced directly in a PhraseSet.
  phraseSets: A list of inline or referenced PhraseSets.
r   r   TrS   r    r#   r   N)
  """

  customClasses = _messages.MessageField('CustomClass', 1, repeated=True)
  phraseSets = _messages.MessageField('AdaptationPhraseSet', 2, repeated=True)
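
# Illustrative sketch (not part of the generated API surface): adaptation with
# one inline PhraseSet. AdaptationPhraseSet is defined earlier in this module;
# its `inlinePhraseSet`/`phraseSet` field names are assumed from the v2 API,
# and the phrase data below is an example only.
def _example_build_adaptation():
  return SpeechAdaptation(
      phraseSets=[
          AdaptationPhraseSet(
              inlinePhraseSet=PhraseSet(
                  phrases=[Phrase(value='matcha latte', boost=10.0)]))
      ])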

Fields:
  name: Required. The name of the config to retrieve. There is exactly one
    config resource per project per location. The expected format is
    `projects/{project}/locations/{location}/config`.
r   Trequiredr   N	r   r   r   r   r   r   r&   r   r   r   r   r   r  r  r       
		q4	0$r   r  c                       \ rS rSrSr\R                  " SS5      r\R                  " SSS9r	\R                  " S5      r
S	rg
)*SpeechProjectsLocationsConfigUpdateRequesti~  a~  A SpeechProjectsLocationsConfigUpdateRequest object.

Fields:
  config: A Config resource to be passed as the request body.
  name: Output only. Identifier. The name of the config resource. There is
    exactly one config resource per project per location. The expected
    format is `projects/{project}/locations/{location}/config`.
  updateMask: The list of fields to be updated.
  """

  config = _messages.MessageField('Config', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)


class SpeechProjectsLocationsCustomClassesCreateRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesCreateRequest object.

Fields:
  customClass: A CustomClass resource to be passed as the request body.
  customClassId: The ID to use for the CustomClass, which will become the
    final component of the CustomClass's resource name. This value should be
    4-63 characters, and valid characters are /a-z-/.
  parent: Required. The project and location where this CustomClass will be
    created. The expected format is
    `projects/{project}/locations/{location}`.
  validateOnly: If set, validate the request and preview the CustomClass,
    but do not actually create it.
  """

  customClass = _messages.MessageField('CustomClass', 1)
  customClassId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsCustomClassesDeleteRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesDeleteRequest object.

Fields:
  allowMissing: If set to true, and the CustomClass is not found, the
    request will succeed and be a no-op (no Operation is recorded in this
    case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the CustomClass to delete. Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`
  validateOnly: If set, validate the request and preview the deleted
    CustomClass, but do not actually delete it.
  """

  allowMissing = _messages.BooleanField(1)
  etag = _messages.StringField(2)
  name = _messages.StringField(3, required=True)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsCustomClassesGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesGetRequest object.

Fields:
  name: Required. The name of the CustomClass to retrieve. The expected
    format is
    `projects/{project}/locations/{location}/customClasses/{custom_class}`.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsCustomClassesListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesListRequest object.

Fields:
  pageSize: Number of results per requests. A valid page_size ranges from 0
    to 100 inclusive. If the page_size is zero or unspecified, a page size
    of 5 will be chosen. If the page size exceeds 100, it will be coerced
    down to 100. Note that a call might return fewer results than the
    requested page size.
  pageToken: A page token, received from a previous ListCustomClasses call.
    Provide this to retrieve the subsequent page. When paginating, all other
    parameters provided to ListCustomClasses must match the call that
    provided the page token.
  parent: Required. The project and location of CustomClass resources to
    list. The expected format is `projects/{project}/locations/{location}`.
  showDeleted: Whether or not to show resources that have been deleted.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  showDeleted = _messages.BooleanField(4)
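
# Illustrative sketch (not part of the generated API surface): paging through
# CustomClasses with this request message. `client` stands for an apitools
# service client whose `projects_locations_customClasses.List` method name is
# assumed here, not generated in this module; ListCustomClassesResponse (see
# earlier in this file) exposes `customClasses` and `nextPageToken`.
def _example_list_all_custom_classes(client, parent):
  request = SpeechProjectsLocationsCustomClassesListRequest(
      parent=parent, pageSize=100)
  while True:
    response = client.projects_locations_customClasses.List(request)
    for custom_class in response.customClasses:
      yield custom_class
    if not response.nextPageToken:
      break
    request.pageToken = response.nextPageToken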
rg)0SpeechProjectsLocationsCustomClassesPatchRequesti  a  A SpeechProjectsLocationsCustomClassesPatchRequest object.

Fields:
  customClass: A CustomClass resource to be passed as the request body.
  name: Output only. Identifier. The resource name of the CustomClass.
    Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`.
  updateMask: The list of fields to be updated. If empty, all fields are
    considered for update.
  validateOnly: If set, validate the request and preview the updated
    CustomClass, but do not actually update it.
  """

  customClass = _messages.MessageField('CustomClass', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsGetRequest object.

Fields:
  name: Resource name for the location.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsListRequest object.

Fields:
  extraLocationTypes: Optional. Do not use this field. It is unsupported and
    is ignored unless explicitly documented otherwise. This is primarily for
    internal usage.
  filter: A filter to narrow down results to a preferred subset. The
    filtering language accepts strings like `"displayName=tokyo"`, and is
    documented in more detail in [AIP-160](https://google.aip.dev/160).
  name: The resource that owns the locations collection, if applicable.
  pageSize: The maximum number of results to return. If not set, the service
    selects a default.
  pageToken: A page token received from the `next_page_token` field in the
    response. Send that page token to receive the subsequent page.
  """

  extraLocationTypes = _messages.StringField(1, repeated=True)
  filter = _messages.StringField(2)
  name = _messages.StringField(3, required=True)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)


class SpeechProjectsLocationsOperationsGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsOperationsGetRequest object.

Fields:
  name: The name of the operation resource.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsOperationsListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsOperationsListRequest object.

Fields:
  filter: The standard list filter.
  name: The name of the operation's parent resource.
  pageSize: The standard list page size.
  pageToken: The standard list page token.
  returnPartialSuccess: When set to `true`, operations that are reachable
    are returned as normal, and those that are unreachable are returned in
    the [ListOperationsResponse.unreachable] field. This can only be `true`
    when reading across collections e.g. when `parent` is set to
    `"projects/example/locations/-"`. This field is not by default supported
    and will result in an `UNIMPLEMENTED` error if set unless explicitly
    documented otherwise in service or product specific documentation.
r   r#   Tr  r0   rx   r;   r=   r   N)r   r   r   r   r   r   r&   r  r   rz   r{   r|   r  r  r   returnPartialSuccessr   r   r   r   r  r    sj        #&			q4	0$##Ay/@/@/F/FG(##A&)"//2r   r  c                       \ rS rSrSr\R                  " SSS9r\R                  " SS5      r	\R                  " S5      r
\R                  " S	5      rS
rg).SpeechProjectsLocationsPhraseSetsCreateRequesti5  aU  A SpeechProjectsLocationsPhraseSetsCreateRequest object.

Fields:
  parent: Required. The project and location where this PhraseSet will be
    created. The expected format is
    `projects/{project}/locations/{location}`.
  phraseSet: A PhraseSet resource to be passed as the request body.
  phraseSetId: The ID to use for the PhraseSet, which will become the final
    component of the PhraseSet's resource name. This value should be 4-63
    characters, and valid characters are /a-z-/.
  validateOnly: If set, validate the request and preview the PhraseSet, but
    do not actually create it.
  """

  parent = _messages.StringField(1, required=True)
  phraseSet = _messages.MessageField('PhraseSet', 2)
  phraseSetId = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsPhraseSetsDeleteRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsDeleteRequest object.

Fields:
  allowMissing: If set to true, and the PhraseSet is not found, the request
    will succeed and be a no-op (no Operation is recorded in this case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the PhraseSet to delete. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
  validateOnly: If set, validate the request and preview the deleted
    PhraseSet, but do not actually delete it.
  """

  allowMissing = _messages.BooleanField(1)
  etag = _messages.StringField(2)
  name = _messages.StringField(3, required=True)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsPhraseSetsGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsGetRequest object.

Fields:
  name: Required. The name of the PhraseSet to retrieve. The expected format
    is `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsPhraseSetsListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsListRequest object.

Fields:
  pageSize: The maximum number of PhraseSets to return. The service may
    return fewer than this value. If unspecified, at most 5 PhraseSets will
    be returned. The maximum value is 100; values above 100 will be coerced
    to 100.
  pageToken: A page token, received from a previous ListPhraseSets call.
    Provide this to retrieve the subsequent page. When paginating, all other
    parameters provided to ListPhraseSets must match the call that provided
    the page token.
  parent: Required. The project and location of PhraseSet resources to list.
    The expected format is `projects/{project}/locations/{location}`.
  showDeleted: Whether or not to show resources that have been deleted.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  showDeleted = _messages.BooleanField(4)


class SpeechProjectsLocationsPhraseSetsPatchRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsPatchRequest object.

Fields:
  name: Output only. Identifier. The resource name of the PhraseSet. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
  phraseSet: A PhraseSet resource to be passed as the request body.
  updateMask: The list of fields to update. If empty, all non-default valued
    fields are considered for update. Use `*` to update the entire PhraseSet
    resource.
  validateOnly: If set, validate the request and preview the updated
    PhraseSet, but do not actually update it.
  """

  name = _messages.StringField(1, required=True)
  phraseSet = _messages.MessageField('PhraseSet', 2)
  updateMask = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)
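
# Illustrative sketch (not part of the generated API surface): patching only
# the display name of an existing PhraseSet. The update mask lists PhraseSet
# field paths; `*` would replace the entire resource instead. The display name
# is example data.
def _example_build_phrase_set_patch(name):
  return SpeechProjectsLocationsPhraseSetsPatchRequest(
      name=name,
      phraseSet=PhraseSet(displayName='renamed set'),
      updateMask='displayName')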


class SpeechProjectsLocationsRecognizersCreateRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersCreateRequest object.

Fields:
  parent: Required. The project and location where this Recognizer will be
    created. The expected format is
    `projects/{project}/locations/{location}`.
  recognizer: A Recognizer resource to be passed as the request body.
  recognizerId: The ID to use for the Recognizer, which will become the
    final component of the Recognizer's resource name. This value should be
    4-63 characters, and valid characters are /a-z-/.
  validateOnly: If set, validate the request and preview the Recognizer, but
    do not actually create it.
  """

  parent = _messages.StringField(1, required=True)
  recognizer = _messages.MessageField('Recognizer', 2)
  recognizerId = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsRecognizersDeleteRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersDeleteRequest object.

Fields:
  allowMissing: If set to true, and the Recognizer is not found, the request
    will succeed and be a no-op (no Operation is recorded in this case).
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the Recognizer to delete. Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`
  validateOnly: If set, validate the request and preview the deleted
    Recognizer, but do not actually delete it.
  """

  allowMissing = _messages.BooleanField(1)
  etag = _messages.StringField(2)
  name = _messages.StringField(3, required=True)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsRecognizersGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersGetRequest object.

Fields:
  name: Required. The name of the Recognizer to retrieve. The expected
    format is
    `projects/{project}/locations/{location}/recognizers/{recognizer}`.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsRecognizersListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersListRequest object.

Fields:
  pageSize: The maximum number of Recognizers to return. The service may
    return fewer than this value. If unspecified, at most 5 Recognizers will
    be returned. The maximum value is 100; values above 100 will be coerced
    to 100.
  pageToken: A page token, received from a previous ListRecognizers call.
    Provide this to retrieve the subsequent page. When paginating, all other
    parameters provided to ListRecognizers must match the call that provided
    the page token.
  parent: Required. The project and location of Recognizers to list. The
    expected format is `projects/{project}/locations/{location}`.
  showDeleted: Whether or not to show resources that have been deleted.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  showDeleted = _messages.BooleanField(4)


class SpeechProjectsLocationsRecognizersPatchRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersPatchRequest object.

Fields:
  name: Output only. Identifier. The resource name of the Recognizer.
    Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`.
  recognizer: A Recognizer resource to be passed as the request body.
  updateMask: The list of fields to update. If empty, all non-default valued
    fields are considered for update. Use `*` to update the entire
    Recognizer resource.
  validateOnly: If set, validate the request and preview the updated
    Recognizer, but do not actually update it.
  """

  name = _messages.StringField(1, required=True)
  recognizer = _messages.MessageField('Recognizer', 2)
  updateMask = _messages.StringField(3)
  validateOnly = _messages.BooleanField(4)


class SpeechProjectsLocationsRecognizersRecognizeRequest(_messages.Message):
  r"""A SpeechProjectsLocationsRecognizersRecognizeRequest object.

Fields:
  recognizeRequest: A RecognizeRequest resource to be passed as the request
    body.
  recognizer: Required. The name of the Recognizer to use during
    recognition. The expected format is
    `projects/{project}/locations/{location}/recognizers/{recognizer}`. The
    {recognizer} segment may be set to `_` to use an empty implicit
    Recognizer.
  """

  recognizeRequest = _messages.MessageField('RecognizeRequest', 1)
  recognizer = _messages.StringField(2, required=True)
rg)SpeechRecognitionAlternativei  a  Alternative hypotheses (a.k.a. n-best list).

Fields:
  confidence: The confidence estimate between 0.0 and 1.0. A higher number
    indicates an estimated greater likelihood that the recognized words are
    correct. This field is set only for the top alternative of a non-
    streaming result or, of a streaming result where is_final is set to
    `true`. This field is not guaranteed to be accurate and users should not
    rely on it to be always provided. The default of 0.0 is a sentinel value
    indicating `confidence` was not set.
  transcript: Transcript text representing the words that the user spoke.
  words: A list of word-specific information for each recognized word. When
    the SpeakerDiarizationConfig is set, you will see all the words from the
    beginning of the audio.
r   rx   r#   WordInfor0   TrS   r   N)r   r   r   r   r   r   r   r{   r   
confidencer&   rC   r$   wordsr   r   r   r   r  r    sK      ##Ay/@/@/F/FG*$$Q'*

 
 Q
>%r   r  c                       \ rS rSrSr\R                  " SSSS9r\R                  " S\R                  R                  S9r\R                  " S	5      r\R                  " S
5      rSrg)ru   i  aW  A speech recognition result corresponding to a portion of the audio.

Fields:
  alternatives: May contain one or more recognition hypotheses. These
    alternatives are ordered in terms of accuracy, with the top (first)
    alternative being the most probable, as ranked by the recognizer.
  channelTag: For multi-channel audio, this is the channel number
    corresponding to the recognized result for the audio from that channel.
    For `audio_channel_count` = `N`, its output values can range from `1` to
    `N`.
  languageCode: Output only. The [BCP-47](https://www.rfc-
    editor.org/rfc/bcp/bcp47.txt) language tag of the language in this
    result. This language code was detected to have the most likelihood of
    being spoken in the audio.
  resultEndOffset: Time offset of the end of this result relative to the
    beginning of the audio.
r  r   TrS   r#   rx   r0   r;   r   N)r   r   r   r   r   r   r$   alternativesrz   r{   r|   
channelTagr&   languageCoderesultEndOffsetr   r   r   r   ru   ru     s]    $ ''(FTXY,%%a1B1B1H1HI*&&q),))!,/r   ru   c                       \ rS rSrSrSrg)r^  i7  z~Output configurations [SubRip
Text](https://www.matroska.org/technical/subtitles.html#srt-subtitles)
formatted subtitle file.
r   Nr+   r   r   r   r^  r^  7  s    r   r^  c                      \ rS rSrSr " S S\R                  5      r " S S\R                  5      r\R                  " SS5      r
\R                  " S5      r\R                  " SS	S
S9r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R$                  " SSS9r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R                  " S5      rSrg)StandardQueryParametersi?  a  Query parameters accepted by all methods.

Enums:
  FXgafvValueValuesEnum: V1 error format.
  AltValueValuesEnum: Data format for response.

Fields:
  f__xgafv: V1 error format.
  access_token: OAuth access token.
  alt: Data format for response.
  callback: JSONP
  fields: Selector specifying which fields to include in a partial response.
  key: API key. Your API key identifies your project and provides you with
    API access, quota, and reports. Required unless you provide an OAuth 2.0
    token.
  oauth_token: OAuth 2.0 token for the current user.
  prettyPrint: Returns response with indentations and line breaks.
  quotaUser: Available to use for quota purposes for server-side
    applications. Can be any arbitrary string assigned to a user, but should
    not exceed 40 characters.
  trace: A tracing token of the form "token:<tokenid>" to include in api
    requests.
  uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
  upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

Values:
  json: Responses with Content-Type of application/json
  media: Media download with context-dependent Content-Type
  proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

Values:
  _1: v1 error format
  _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)


class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).

Messages:
  DetailsValueListEntry: A DetailsValueListEntry object.

Fields:
  code: The status code, which should be an enum value of google.rpc.Code.
  details: A list of messages that carry the error details. There is a
    common set of message types for APIs to use.
  message: A developer-facing error message, which should be in English. Any
    user-facing error message should be localized and sent in the
    google.rpc.Status.details field, or localized by the client.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

Messages:
  AdditionalProperty: An additional property for a DetailsValueListEntry
    object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)/Status.DetailsValueListEntry.AdditionalPropertyi  zAn additional property for a DetailsValueListEntry object.

Fields:
  key: Name of the additional property.
  value: A extra_types.JsonValue attribute.
r   r  r#   r   NrO   r   r   r   rR   r
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)


class StreamingRecognitionResult(_messages.Message):
  r"""A streaming speech recognition result corresponding to a portion of the

Fields:
  alternatives: May contain one or more recognition hypotheses. These
    alternatives are ordered in terms of accuracy, with the top (first)
    alternative being the most probable, as ranked by the recognizer.
  channelTag: For multi-channel audio, this is the channel number
    corresponding to the recognized result for the audio from that channel.
    For `audio_channel_count` = `N`, its output values can range from `1` to
    `N`.
  isFinal: If `false`, this StreamingRecognitionResult represents an interim
    result that may change. If `true`, this is the final time the speech
    service will return this particular StreamingRecognitionResult, the
    recognizer will not return any further hypotheses for this portion of
    the transcript and corresponding audio.
  languageCode: Output only. The [BCP-47](https://www.rfc-
    editor.org/rfc/bcp/bcp47.txt) language tag of the language in this
    result. This language code was detected to have the most likelihood of
    being spoken in the audio.
  resultEndOffset: Time offset of the end of this result relative to the
    beginning of the audio.
  stability: An estimate of the likelihood that the recognizer will not
    change its guess about this interim result. Values range from 0.0
    (completely unstable) to 1.0 (completely stable). This field is only
    provided for interim results (is_final=`false`). The default of 0.0 is a
    sentinel value indicating `stability` was not set.
  """

  alternatives = _messages.MessageField('SpeechRecognitionAlternative', 1, repeated=True)
  channelTag = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  isFinal = _messages.BooleanField(3)
  languageCode = _messages.StringField(4)
  resultEndOffset = _messages.StringField(5)
  stability = _messages.FloatField(6, variant=_messages.Variant.FLOAT)


class TranscriptNormalization(_messages.Message):
  r"""Transcription normalization configuration. Use transcription
normalization to automatically replace parts of the transcript with phrases
of your choosing. For StreamingRecognize, this normalization only applies to
stable partial transcripts (stability > 0.8) and final transcripts.

Fields:
  entries: A list of replacement entries. We will perform replacement with
    one entry at a time. For example, the second entry in ["cat" => "dog",
    "mountain cat" => "mountain dog"] will never be applied because we will
    always process the first entry before it. At most 100 entries.
  """

  entries = _messages.MessageField('Entry', 1, repeated=True)


class TranslationConfig(_messages.Message):
  r"""Translation configuration. Use to translate the given audio into text
for the desired language.

Fields:
  targetLanguage: Required. The language code to translate to.
  """

  targetLanguage = _messages.StringField(1)
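
# Illustrative sketch (not part of the generated API surface): asking for the
# transcript to be translated by attaching a TranslationConfig to a
# RecognitionConfig. Whether translation is available depends on the chosen
# model; the language codes here are example values.
def _example_config_with_translation():
  return RecognitionConfig(
      autoDecodingConfig=AutoDetectDecodingConfig(),
      languageCodes=['es-ES'],
      translationConfig=TranslationConfig(targetLanguage='en-US'))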
Srg)r=  i  a  Request message for the UndeleteCustomClass method.

Fields:
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the CustomClass to undelete. Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`
  validateOnly: If set, validate the request and preview the undeleted
    CustomClass, but do not actually undelete it.
  """

  etag = _messages.StringField(1)
  name = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)
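
# Illustrative usage sketch; this helper is not part of the generated API
# surface. It relies only on the UndeleteCustomClassRequest fields documented
# above; the helper name and arguments are hypothetical.
def _example_build_undelete_request(custom_class_name, etag=''):
  """Returns an UndeleteCustomClassRequest that only previews the undelete."""
  request = UndeleteCustomClassRequest(
      name=custom_class_name,
      # validateOnly asks the server to validate and preview the undeleted
      # CustomClass without actually undeleting it.
      validateOnly=True)
  if etag:
    # Supplying the last-read etag lets the server reject the request if the
    # resource changed in the meantime.
    request.etag = etag
  return request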
 

class UndeletePhraseSetRequest(_messages.Message):
  r"""Request message for the UndeletePhraseSet method.

Fields:
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the PhraseSet to undelete. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
  validateOnly: If set, validate the request and preview the undeleted
    PhraseSet, but do not actually undelete it.
  """

  etag = _messages.StringField(1)
  name = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)


class UndeleteRecognizerRequest(_messages.Message):
  r"""Request message for the UndeleteRecognizer method.

Fields:
  etag: This checksum is computed by the server based on the value of other
    fields. This may be sent on update, undelete, and delete requests to
    ensure the client has an up-to-date value before proceeding.
  name: Required. The name of the Recognizer to undelete. Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`
  validateOnly: If set, validate the request and preview the undeleted
    Recognizer, but do not actually undelete it.
  """

  etag = _messages.StringField(1)
  name = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)


class UpdateConfigRequest(_messages.Message):
  r"""Request message for the UpdateConfig method.

Fields:
  config: Required. The config to update. The config's `name` field is used
    to identify the config to be updated. The expected format is
    `projects/{project}/locations/{location}/config`.
  updateMask: The list of fields to be updated.
  """

  config = _messages.MessageField('Config', 1)
  updateMask = _messages.StringField(2)
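
# Illustrative usage sketch; this helper is not part of the generated API
# surface. It relies only on the UpdateConfigRequest fields documented above;
# the helper name and arguments are hypothetical.
def _example_build_update_config_request(config, fields_to_update):
  """Returns an UpdateConfigRequest restricted to the given top-level fields."""
  # updateMask is a FieldMask rendered as a comma-separated list of field
  # names; the server only writes the named fields and leaves the rest of the
  # stored Config untouched.
  return UpdateConfigRequest(config=config, updateMask=','.join(fields_to_update))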


class UpdateCustomClassRequest(_messages.Message):
  r"""Request message for the UpdateCustomClass method.

Fields:
  customClass: Required. The CustomClass to update. The CustomClass's `name`
    field is used to identify the CustomClass to update. Format:
    `projects/{project}/locations/{location}/customClasses/{custom_class}`.
  updateMask: The list of fields to be updated. If empty, all fields are
    considered for update.
  validateOnly: If set, validate the request and preview the updated
    CustomClass, but do not actually update it.
  """

  customClass = _messages.MessageField('CustomClass', 1)
  updateMask = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)


class UpdatePhraseSetRequest(_messages.Message):
  r"""Request message for the UpdatePhraseSet method.

Fields:
  phraseSet: Required. The PhraseSet to update. The PhraseSet's `name` field
    is used to identify the PhraseSet to update. Format:
    `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
  updateMask: The list of fields to update. If empty, all non-default valued
    fields are considered for update. Use `*` to update the entire PhraseSet
    resource.
  validateOnly: If set, validate the request and preview the updated
    PhraseSet, but do not actually update it.
  """

  phraseSet = _messages.MessageField('PhraseSet', 1)
  updateMask = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)


class UpdateRecognizerRequest(_messages.Message):
  r"""Request message for the UpdateRecognizer method.

Fields:
  recognizer: Required. The Recognizer to update. The Recognizer's `name`
    field is used to identify the Recognizer to update. Format:
    `projects/{project}/locations/{location}/recognizers/{recognizer}`.
  updateMask: The list of fields to update. If empty, all non-default valued
    fields are considered for update. Use `*` to update the entire
    Recognizer resource.
  validateOnly: If set, validate the request and preview the updated
    Recognizer, but do not actually update it.
  """

  recognizer = _messages.MessageField('Recognizer', 1)
  updateMask = _messages.StringField(2)
  validateOnly = _messages.BooleanField(3)


class VttOutputFileFormatConfig(_messages.Message):
  r"""Output configurations for [WebVTT](https://www.w3.org/TR/webvtt1/)
formatted subtitle file.
  """


class WordInfo(_messages.Message):
  r"""Word-specific information for recognized words.
)r  iu	  a  Word-specific information for recognized words.

Fields:
  confidence: The confidence estimate between 0.0 and 1.0. A higher number
    indicates an estimated greater likelihood that the recognized words are
    correct. This field is set only for the top alternative of a non-
    streaming result or, of a streaming result where is_final is set to
    `true`. This field is not guaranteed to be accurate and users should not
    rely on it to be always provided. The default of 0.0 is a sentinel value
    indicating `confidence` was not set.
  endOffset: Time offset relative to the beginning of the audio, and
    corresponding to the end of the spoken word. This field is only set if
    enable_word_time_offsets is `true` and only in the top hypothesis. This
    is an experimental feature and the accuracy of the time offset can vary.
  speakerLabel: A distinct label is assigned for every speaker within the
    audio. This field specifies which one of those speakers was detected to
    have spoken this word. `speaker_label` is set if
    SpeakerDiarizationConfig is given and only in the top alternative.
  startOffset: Time offset relative to the beginning of the audio, and
    corresponding to the start of the spoken word. This field is only set if
    enable_word_time_offsets is `true` and only in the top hypothesis. This
    is an experimental feature and the accuracy of the time offset can vary.
  word: The word corresponding to this set of information.
r   rx   r#   r0   r;   r=   r   N)r   r   r   r   r   r   r   r{   r   r  r&   	endOffsetspeakerLabelstartOffsetwordr   r   r   r   r  r  u	  sg    2 ##Ay/@/@/F/FG*##A&)&&q),%%a(+			q	!$r   r  r  z$.xgafvr  1r  2N)ir   
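

# Illustrative usage sketch; this helper is not part of the generated API
# surface. It reads the WordInfo messages defined above and assumes the
# offsets use the usual Duration JSON form such as '1.500s', which is a
# simplifying assumption.
def _example_word_timings(word_infos):
  """Yields (word, start_seconds, end_seconds) tuples from WordInfo messages."""
  for info in word_infos:
    start = float((info.startOffset or '0s').rstrip('s'))
    end = float((info.endOffset or '0s').rstrip('s'))
    yield (info.word, start, end)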