
                           S r SSKJr  SSKJr  SSKJr  SSKJr  Sr	 " S S\R                  5      r " S	 S
\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r " S S \R                  5      r " S! S"\R                  5      r " S# S$\R                  5      r " S% S&\R                  5      r " S' S(\R                  5      r " S) S*\R                  5      r " S+ S,\R                  5      r " S- S.\R                  5      r " S/ S0\R                  5      r " S1 S2\R                  5      r  " S3 S4\R                  5      r! " S5 S6\R                  5      r" " S7 S8\R                  5      r# " S9 S:\R                  5      r$ " S; S<\R                  5      r% " S= S>\R                  5      r& " S? S@\R                  5      r' " SA SB\R                  5      r( " SC SD\R                  5      r) " SE SF\R                  5      r* " SG SH\R                  5      r+ " SI SJ\R                  5      r, " SK SL\R                  5      r- " SM SN\R                  5      r. " SO SP\R                  5      r/ " SQ SR\R                  5      r0 " SS ST\R                  5      r1 " SU SV\R                  5      r2 " SW SX\R                  5      r3 " SY SZ\R                  5      r4 " S[ S\\R                  5      r5 " S] S^\R                  5      r6\Rn                  " \2S_S`5        \Rp                  " \2Rr                  SaSb5        \Rp                  " \2Rr                  ScSd5        ge)fz|Generated message classes for speech version v1p1beta1.

Converts audio to text by applying powerful neural network models.
    )absolute_import)messages)encoding)extra_typesspeechc                   :    \ rS rSrSr\R                  " SSS9rSrg)ABNFGrammar   zA ABNFGrammar object.

Fields:
  abnfStrings: All declarations and rules of an ABNF grammar broken up into
    multiple strings that will end up concatenated.
   Trepeated N)	__name__
__module____qualname____firstlineno____doc__	_messagesStringFieldabnfStrings__static_attributes__r       Wlib/googlecloudsdk/generated_clients/apis/speech/v1p1beta1/speech_v1p1beta1_messages.pyr	   r	      s     %%a$7+r   r	   c                   <    \ rS rSrSr\R                  " S5      rSrg)	ClassItem   z@An item of the class.

Fields:
  value: The class item's value.
r   r   N)	r   r   r   r   r   r   r   valuer   r   r   r   r   r      s     


"%r   r   c                   b    \ rS rSrSr\R                  " SS5      r\R                  " S5      r	Sr
g)CreateCustomClassRequest&   a  Message sent by the client for the `CreateCustomClass` method.

Fields:
  customClass: Required. The custom class to create.
  customClassId: Required. The ID to use for the custom class, which will
    become the final component of the custom class' resource name. This
    value should restrict to letters, numbers, and hyphens, with the first
    character a letter, the last a letter or a number, and be 4-63
    characters.
CustomClassr      r   N)r   r   r   r   r   r   MessageFieldcustomClassr   customClassIdr   r   r   r   r   r   &   s+    	 &&}a8+''*-r   r   c                   b    \ rS rSrSr\R                  " SS5      r\R                  " S5      r	Sr
g)CreatePhraseSetRequest6   a  Message sent by the client for the `CreatePhraseSet` method.

Fields:
  phraseSet: Required. The phrase set to create.
  phraseSetId: Required. The ID to use for the phrase set, which will become
    the final component of the phrase set's resource name. This value should
    restrict to letters, numbers, and hyphens, with the first character a
    letter, the last a letter or a number, and be 4-63 characters.
	PhraseSetr   r"   r   N)r   r   r   r   r   r   r#   	phraseSetr   phraseSetIdr   r   r   r   r'   r'   6   s+     $$[!4)%%a(+r   r'   c                   n   \ rS rSrSr " S S\R                  5      r\R                  " S5       " S S\R                  5      5       r\R                  " SS5      r\R                  " S	5      r\R                  " S
5      r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R                  " SSSS9r\R                  " S5      r\R                  " S5      r\R                  " S5      r\R0                  " S5      r\R4                  " SS5      r\R                  " S5      rSrg)r!   E   a	  A set of words or phrases that represents a common concept likely to
appear in your audio, for example a list of passenger ship names.
CustomClass items can be substituted into placeholders that you set in
PhraseSet phrases.

Enums:
  StateValueValuesEnum: Output only. The CustomClass lifecycle state. This
    field is not used.

Messages:
  AnnotationsValue: Output only. Allows users to store small amounts of
    arbitrary data. Both the key and the value must be 63 characters or less
    each. At most 100 annotations. This field is not used.

Fields:
  annotations: Output only. Allows users to store small amounts of arbitrary
    data. Both the key and the value must be 63 characters or less each. At
    most 100 annotations. This field is not used.
  customClassId: If this custom class is a resource, the custom_class_id is
    the resource id of the CustomClass. Case sensitive.
  deleteTime: Output only. The time at which this resource was requested for
    deletion. This field is not used.
  displayName: Output only. User-settable, human-readable name for the
    CustomClass. Must be 63 characters or less. This field is not used.
  etag: Output only. This checksum is computed by the server based on the
    value of other fields. This may be sent on update, undelete, and delete
    requests to ensure the client has an up-to-date value before proceeding.
    This field is not used.
  expireTime: Output only. The time at which this resource will be purged.
    This field is not used.
  items: A collection of class items.
  kmsKeyName: Output only. The [KMS key
    name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with
    which the content of the ClassItem is encrypted. The expected format is
    `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/
    {crypto_key}`.
  kmsKeyVersionName: Output only. The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which content of the ClassItem is encrypted. The expected format is
    `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/
    {crypto_key}/cryptoKeyVersions/{crypto_key_version}`.
  name: The resource name of the custom class.
  reconciling: Output only. Whether or not this CustomClass is in the
    process of being updated. This field is not used.
  state: Output only. The CustomClass lifecycle state. This field is not
    used.
  uid: Output only. System-assigned unique identifier for the CustomClass.
    This field is not used.
c                   $    \ rS rSrSrSrSrSrSrg) CustomClass.StateValueValuesEnumx   
  Output only. The CustomClass lifecycle state. This field is not used.

Values:
  STATE_UNSPECIFIED: Unspecified state. This is only used/useful for
    distinguishing unset values.
  ACTIVE: The normal and active state.
  DELETED: This CustomClass has been deleted.
r   r   r"   r   N	r   r   r   r   r   STATE_UNSPECIFIEDACTIVEDELETEDr   r   r   r   StateValueValuesEnumr/   x        FGr   r6   additionalPropertiesc                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
CustomClass.AnnotationsValue   _  Output only. Allows users to store small amounts of arbitrary data.
Both the key and the value must be 63 characters or less each. At most 100
annotations. This field is not used.

Messages:
  AdditionalProperty: An additional property for a AnnotationsValue
    object.

Fields:
  additionalProperties: Additional properties of type AnnotationsValue
c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      rSr	g)/CustomClass.AnnotationsValue.AdditionalProperty   An additional property for a AnnotationsValue object.

Fields:
  key: Name of the additional property.
  value: A string attribute.
r   r"   r   N
r   r   r   r   r   r   r   keyr   r   r   r   r   AdditionalPropertyr>      )    
 !!!$c##A&er   rC   r   Tr   r   Nr   r   r   r   r   r   MessagerC   r#   r8   r   r   r   r   AnnotationsValuer:      2    
	'Y.. 	' %112FTXYr   rG   r   r"               r      Tr      	   
            r   N)r   r   r   r   r   r   Enumr6   r   MapUnrecognizedFieldsrF   rG   r#   annotationsr   r%   
deleteTimedisplayNameetag
expireTimeitems
kmsKeyNamekmsKeyVersionNamenameBooleanFieldreconciling	EnumFieldstateuidr   r   r   r   r!   r!   E   s%   0dY^^  !!"89Z** Z :Z4 &&'91=+''*-$$Q'*%%a(+			q	!$$$Q'*

 
 a$
?%$$Q'*++A.			r	"$&&r*+


4b
9%b!#r   r!   c                       \ rS rSrSrSrg)Empty   a  A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
r   N)r   r   r   r   r   r   r   r   r   re   re      s    r   re   c                       \ rS rSrSr\R                  " S5      r\R                  " S5      r	\R                  " S5      r
Srg)Entry   zA single replacement configuration.

Fields:
  caseSensitive: Whether the search is case sensitive.
  replace: What to replace with. Max length is 100 characters.
  search: What to replace. Max length is 100 characters.
r   r"   rI   r   N)r   r   r   r   r   r   r_   caseSensitiver   replacesearchr   r   r   r   rh   rh      s9     ((+-!!!$'  #&r   rh   c                   `    \ rS rSrSr\R                  " SSSS9r\R                  " S5      r	Sr
g	)
ListCustomClassesResponse   a  Message returned to the client by the `ListCustomClasses` method.

Fields:
  customClasses: The custom classes.
  nextPageToken: A token, which can be sent as `page_token` to retrieve the
    next page. If this field is omitted, there are no subsequent pages.
r!   r   Tr   r"   r   N)r   r   r   r   r   r   r#   customClassesr   nextPageTokenr   r   r   r   rn   rn      s-     ((DI-''*-r   rn   c                       \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	\R                  " SSS9r
S	rg
)ListOperationsResponse   a  The response message for Operations.ListOperations.

Fields:
  nextPageToken: The standard List next-page token.
  operations: A list of operations that matches the specified filter in the
    request.
  unreachable: Unordered list. Unreachable resources. Populated when the
    request sets `ListOperationsRequest.return_partial_success` and reads
    across collections e.g. when attempting to list all resources across all
    supported locations.
r   	Operationr"   Tr   rI   r   N)r   r   r   r   r   r   r   rq   r#   
operationsunreachabler   r   r   r   rs   rs      s?    
 ''*-%%k1tD*%%a$7+r   rs   c                   `    \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	Sr
g	)
ListPhraseSetResponse   zMessage returned to the client by the `ListPhraseSet` method.

Fields:
  nextPageToken: A token, which can be sent as `page_token` to retrieve the
    next page. If this field is omitted, there are no subsequent pages.
  phraseSets: The phrase set.
r   r)   r"   Tr   r   N)r   r   r   r   r   r   r   rq   r#   
phraseSetsr   r   r   r   ry   ry      s-     ''*-%%k1tD*r   ry   c                       \ rS rSrSr\R                  " S5      r\R                  " SS5      r	\R                  " S\R                  R                  S9r\R                  " S5      r\R                  " S	5      rS
rg)LongRunningRecognizeMetadata   a  Describes the progress of a long-running `LongRunningRecognize` call. It
is included in the `metadata` field of the `Operation` returned by the
`GetOperation` call of the `google::longrunning::Operations` service.

Fields:
  lastUpdateTime: Time of the most recent processing update.
  outputConfig: Output only. A copy of the TranscriptOutputConfig if it was
    set in the request.
  progressPercent: Approximate percentage of audio processed thus far.
    Guaranteed to be 100 when the audio is fully processed and the results
    are available.
  startTime: Time when the request was received.
  uri: Output only. The URI of the audio file being transcribed. Empty if
    the audio was sent as byte content.
r   TranscriptOutputConfigr"   rI   variantrJ   rK   r   N)r   r   r   r   r   r   r   lastUpdateTimer#   outputConfigIntegerFieldVariantINT32progressPercent	startTimeurir   r   r   r   r}   r}      sj      ((+.''(@!D,**1i6G6G6M6MN/##A&)a #r   r}   c                       \ rS rSrSr\R                  " SS5      r\R                  " SS5      r\R                  " SS5      r	S	r
g
)LongRunningRecognizeRequesti
  aT  The top-level message sent by the client for the `LongRunningRecognize`
method.

Fields:
  audio: Required. The audio data to be recognized.
  config: Required. Provides information to the recognizer that specifies
    how to process the request.
  outputConfig: Optional. Specifies an optional destination for the
    recognition results.
RecognitionAudior   RecognitionConfigr"   r   rI   r   N)r   r   r   r   r   r   r#   audioconfigr   r   r   r   r   r   r   
  sB    	 
 
 !3Q
7%!!"5q9&''(@!D,r   r   c                       \ rS rSrSr\R                  " SS5      r\R                  " SS5      r\R                  " S5      r
\R                  " SS	S
S9r\R                  " SS5      r\R                  " S5      rSrg)LongRunningRecognizeResponsei  aj  The only message returned to the client by the `LongRunningRecognize`
method. It contains the result as zero or more sequential
`SpeechRecognitionResult` messages. It is included in the `result.response`
field of the `Operation` returned by the `GetOperation` call of the
`google::longrunning::Operations` service.

Fields:
  outputConfig: Original output config if present in the request.
  outputError: If the transcript output fails this field contains the
    relevant error.
  requestId: The ID associated with the request. This is a unique ID
    specific only to the given request.
  results: Sequential list of transcription results corresponding to
    sequential portions of audio.
  speechAdaptationInfo: Provides information on speech adaptation behavior
    in response
  totalBilledTime: When available, billed audio seconds for the
    corresponding request.
r   r   Statusr"   rI   SpeechRecognitionResultrJ   Tr   SpeechAdaptationInforK   rL   r   N)r   r   r   r   r   r   r#   r   outputErrorr   	requestIdresultsspeechAdaptationInfor   totalBilledTimer   r   r   r   r   r     sw    ( ''(@!D,&&x3+$$Q')""#<a$O'"//0FJ))!,/r   r   c                   z   \ rS rSrSr\R                  " S5       " S S\R                  5      5       r	\R                  " S5       " S S\R                  5      5       r
\R                  " S5      r\R                  " S	S
5      r\R                  " SS5      r\R                   " S5      r\R                  " SS5      rSrg)ru   i8  a  This resource represents a long-running operation that is the result of
a network API call.

Messages:
  MetadataValue: Service-specific metadata associated with the operation. It
    typically contains progress information and common metadata such as
    create time. Some services might not provide such metadata. Any method
    that returns a long-running operation should document the metadata type,
    if any.
  ResponseValue: The normal, successful response of the operation. If the
    original method returns no data on success, such as `Delete`, the
    response is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

Fields:
  done: If the value is `false`, it means the operation is still in
    progress. If `true`, the operation is completed, and either `error` or
    `response` is available.
  error: The error result of the operation in case of failure or
    cancellation.
  metadata: Service-specific metadata associated with the operation. It
    typically contains progress information and common metadata such as
    create time. Some services might not provide such metadata. Any method
    that returns a long-running operation should document the metadata type,
    if any.
  name: The server-assigned name, which is only unique within the same
    service that originally returns it. If you use the default HTTP mapping,
    the `name` should be a resource name ending with
    `operations/{unique_id}`.
  response: The normal, successful response of the operation. If the
    original method returns no data on success, such as `Delete`, the
    response is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
r8   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Operation.MetadataValueib  a  Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.

Messages:
  AdditionalProperty: An additional property for a MetadataValue object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)*Operation.MetadataValue.AdditionalPropertyiq  zAn additional property for a MetadataValue object.

Fields:
  key: Name of the additional property.
  value: A extra_types.JsonValue attribute.
r   extra_types.JsonValuer"   r   Nr   r   r   r   r   r   r   rB   r#   r   r   r   r   r   rC   r   q  ,    
 !!!$c$$%<a@er   rC   r   Tr   r   NrE   r   r   r   MetadataValuer   b  s4    	AY.. 	A %112FTXYr   r   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
Operation.ResponseValuei~  a  The normal, successful response of the operation. If the original
method returns no data on success, such as `Delete`, the response is
`google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

Messages:
  AdditionalProperty: An additional property for a ResponseValue object.

Fields:
  additionalProperties: Properties of the object. Contains field @type
    with type URL.
c                   b    \ rS rSrSr\R                  " S5      r\R                  " SS5      r	Sr
g)*Operation.ResponseValue.AdditionalPropertyi  zAn additional property for a ResponseValue object.

Fields:
  key: Name of the additional property.
  value: A extra_types.JsonValue attribute.
r   r   r"   r   Nr   r   r   r   rC   r     r   r   rC   r   Tr   r   NrE   r   r   r   ResponseValuer   ~  s4     	AY.. 	A %112FTXYr   r   r   r   r"   rI   rJ   rK   r   N)r   r   r   r   r   r   rU   r   rF   r   r   r_   doner#   errormetadatar   r^   responser   r   r   r   ru   ru   8  s    'R !!"89Zi'' Z :Z6 !!"89Zi'' Z :Z< 
			"$

 
 1
-%##OQ7(			q	!$##OQ7(r   ru   c                       \ rS rSrSr\R                  " S\R                  R                  S9r	\R                  " S5      rSrg)Phrasei  a  A phrases containing words and phrase "hints" so that the speech
recognition is more likely to recognize them. This can be used to improve
the accuracy for specific words and phrases, for example, if specific
commands are typically spoken by the user. This can also be used to add
additional words to the vocabulary of the recognizer. See [usage
limits](https://cloud.google.com/speech-to-text/quotas#content). List items
can also include pre-built or custom classes containing groups of words that
represent common concepts that occur in natural language. For example,
rather than providing a phrase hint for every month of the year (e.g. "i was
born in january", "i was born in febuary", ...), use the pre-built `$MONTH`
class improves the likelihood of correctly transcribing audio that includes
months (e.g. "i was born in $month"). To refer to pre-built classes, use the
class' symbol prepended with `$` e.g. `$MONTH`. To refer to custom classes
that were defined inline in the request, set the class's `custom_class_id`
to a string unique to all class resources and inline classes. Then use the
class' id wrapped in $`{...}` e.g. "${my-months}". To refer to custom
classes resources, use the class' id wrapped in `${}` (e.g. `${my-months}`).
Speech-to-Text supports three locations: `global`, `us` (US North America),
and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint,
use the `global` location. To specify a region, use a [regional
endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with
matching `us` or `eu` location value.

Fields:
  boost: Hint Boost. Overrides the boost set at the phrase set level.
    Positive value will increase the probability that a specific phrase will
    be recognized over other similar sounding phrases. The higher the boost,
    the higher the chance of false positive recognition as well. Negative
    boost will simply be ignored. Though `boost` can accept a wide range of
    positive values, most use cases are best served with values between 0
    and 20. We recommend using a binary search approach to finding the
    optimal value for your use case as well as adding phrases both with and
    without boost to your requests.
  value: The phrase itself.
r   r   r"   r   N)r   r   r   r   r   r   
FloatFieldr   FLOATboostr   r   r   r   r   r   r   r     s8    "H 

q)*;*;*A*A
B%



"%r   r   c                      \ rS rSrSr " S S\R                  5      r\R                  " S5       " S S\R                  5      5       r\R                  " SS5      r\R                  " S	\R                  R                   S
9r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R                  " SSSS9r\R6                  " S5      r\R:                  " SS5      r\R$                  " S5      rSr g)r)   i  a  Provides "hints" to the speech recognizer to favor specific words and
phrases in the results.

Enums:
  StateValueValuesEnum: Output only. The CustomClass lifecycle state. This
    field is not used.

Messages:
  AnnotationsValue: Output only. Allows users to store small amounts of
    arbitrary data. Both the key and the value must be 63 characters or less
    each. At most 100 annotations. This field is not used.

Fields:
  annotations: Output only. Allows users to store small amounts of arbitrary
    data. Both the key and the value must be 63 characters or less each. At
    most 100 annotations. This field is not used.
  boost: Hint Boost. Positive value will increase the probability that a
    specific phrase will be recognized over other similar sounding phrases.
    The higher the boost, the higher the chance of false positive
    recognition as well. Negative boost values would correspond to anti-
    biasing. Anti-biasing is not enabled, so negative boost will simply be
    ignored. Though `boost` can accept a wide range of positive values, most
    use cases are best served with values between 0 (exclusive) and 20. We
    recommend using a binary search approach to finding the optimal value
    for your use case as well as adding phrases both with and without boost
    to your requests.
  deleteTime: Output only. The time at which this resource was requested for
    deletion. This field is not used.
  displayName: Output only. User-settable, human-readable name for the
    PhraseSet. Must be 63 characters or less. This field is not used.
  etag: Output only. This checksum is computed by the server based on the
    value of other fields. This may be sent on update, undelete, and delete
    requests to ensure the client has an up-to-date value before proceeding.
    This field is not used.
  expireTime: Output only. The time at which this resource will be purged.
    This field is not used.
  kmsKeyName: Output only. The [KMS key
    name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with
    which the content of the PhraseSet is encrypted. The expected format is
    `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/
    {crypto_key}`.
  kmsKeyVersionName: Output only. The [KMS key version
    name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
    with which content of the PhraseSet is encrypted. The expected format is
    `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/
    {crypto_key}/cryptoKeyVersions/{crypto_key_version}`.
  name: The resource name of the phrase set.
  phrases: A list of word and phrases.
  reconciling: Output only. Whether or not this PhraseSet is in the process
    of being updated. This field is not used.
  state: Output only. The CustomClass lifecycle state. This field is not
    used.
  uid: Output only. System-assigned unique identifier for the PhraseSet.
    This field is not used.
c                   $    \ rS rSrSrSrSrSrSrg)PhraseSet.StateValueValuesEnumi  r1   r   r   r"   r   Nr2   r   r   r   r6   r     r7   r   r6   r8   c                   f    \ rS rSrSr " S S\R                  5      r\R                  " SSSS9r	Sr
g	)
PhraseSet.AnnotationsValuei  r<   c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      rSr	g)-PhraseSet.AnnotationsValue.AdditionalPropertyi!  r@   r   r"   r   NrA   r   r   r   rC   r   !  rD   r   rC   r   Tr   r   NrE   r   r   r   rG   r     rH   r   rG   r   r"   r   rI   rJ   rK   rL   rM   rN   rO   r   rP   Tr   rQ   rR   rS   r   N)!r   r   r   r   r   r   rT   r6   r   rU   rF   rG   r#   rV   r   r   r   r   r   rW   rX   rY   rZ   r\   r]   r^   phrasesr_   r`   ra   rb   rc   r   r   r   r   r)   r)     s3   6pY^^  !!"89Z** Z :Z4 &&'91=+


q)*;*;*A*A
B%$$Q'*%%a(+			q	!$$$Q'*$$Q'*++A.			q	!$""8R$?'&&r*+


4b
9%b!#r   r)   c                   `    \ rS rSrSr\R                  " S5      r\R                  " S5      r	Sr
g)r   i=  a  Contains audio data in the encoding specified in the
`RecognitionConfig`. Either `content` or `uri` must be supplied. Supplying
both or neither returns google.rpc.Code.INVALID_ARGUMENT. See [content
limits](https://cloud.google.com/speech-to-text/quotas#content).

Fields:
  content: The audio data bytes encoded as specified in `RecognitionConfig`.
    Note: as with all bytes fields, proto buffers use a pure binary
    representation, whereas JSON representations use base64.
  uri: URI that points to a file that contains audio data bytes as specified
    in `RecognitionConfig`. The file must not be compressed (for example,
    gzip). Currently, only Google Cloud Storage URIs are supported, which
    must be specified in the following format:
    `gs://bucket_name/object_name` (other URI formats return
    google.rpc.Code.INVALID_ARGUMENT). For more information, see [Request
    URIs](https://cloud.google.com/storage/docs/reference-uris).
r   r"   r   N)r   r   r   r   r   r   
BytesFieldcontentr   r   r   r   r   r   r   r   =  s)    $   #'a #r   r   c                      \ rS rSrSr " S S\R                  5      r\R                  " SS5      r	\R                  " SSS	9r\R                  " S
\R                  R                  S9r\R                  " SS5      r\R                  " S\R                  R                  S9r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R$                  " S5      r\R4                  " SS5      r\R                  " S5      r\R                  " S\R                  R                  S9r\R                  " SS5      r\R                  " S5      r\R$                  " S5      r \R                  " S\R                  R                  S9r!\R                  " SSSS	9r"\R                  " S S!5      r#\R$                  " S"5      r$S#r%g$)%r   iT  a"  Provides information to the recognizer that specifies how to process the
request.

Enums:
  EncodingValueValuesEnum: Encoding of audio data sent in all
    `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV`
    audio files and required for all other audio formats. For details, see
    AudioEncoding.

Fields:
  adaptation: Speech adaptation configuration improves the accuracy of
    speech recognition. For more information, see the [speech
    adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
    documentation. When speech adaptation is set it supersedes the
    `speech_contexts` field.
  alternativeLanguageCodes: A list of up to 3 additional
    [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    listing possible alternative languages of the supplied audio. See
    [Language Support](https://cloud.google.com/speech-to-
    text/docs/languages) for a list of the currently supported language
    codes. If alternative languages are listed, recognition result will
    contain recognition in the most likely language detected including the
    main language_code. The recognition result will include the language tag
    of the language detected in the audio. Note: This feature is only
    supported for Voice Command and Voice Search use cases and performance
    may vary for other use cases (e.g., phone call transcription).
  audioChannelCount: The number of channels in the input audio data. ONLY
    set this for MULTI-CHANNEL recognition. Valid values for LINEAR16,
    OGG_OPUS and FLAC are `1`-`8`. Valid value for MULAW, AMR, AMR_WB and
    SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one
    channel (mono). Note: We only recognize the first channel by default. To
    perform independent recognition on each channel set
    `enable_separate_recognition_per_channel` to 'true'.
  diarizationConfig: Config to enable speaker diarization and set additional
    parameters to make diarization better suited for your application. Note:
    When this is enabled, we send all the words from the beginning of the
    audio for the top alternative in every consecutive STREAMING responses.
    This is done in order to improve our speaker tags as our models learn to
    identify the speakers in the conversation over time. For non-streaming
    requests, the diarization results will be provided only in the top
    alternative of the FINAL SpeechRecognitionResult.
  diarizationSpeakerCount: If set, specifies the estimated number of
    speakers in the conversation. Defaults to '2'. Ignored unless
    enable_speaker_diarization is set to true. Note: Use diarization_config
    instead.
  enableAutomaticPunctuation: If 'true', adds punctuation to recognition
    result hypotheses. This feature is only available in select languages.
    Setting this for requests in other languages has no effect at all. The
    default 'false' value does not add punctuation to result hypotheses.
  enableSeparateRecognitionPerChannel: This needs to be set to `true`
    explicitly and `audio_channel_count` > 1 to get each channel recognized
    separately. The recognition result will contain a `channel_tag` field to
    state which channel that result belongs to. If this is not true, we will
    only recognize the first channel. The request is billed cumulatively for
    all channels recognized: `audio_channel_count` multiplied by the length
    of the audio.
  enableSpeakerDiarization: If 'true', enables speaker detection for each
    recognized word in the top alternative of the recognition result using a
    speaker_label provided in the WordInfo. Note: Use diarization_config
    instead.
  enableSpokenEmojis: The spoken emoji behavior for the call If not set,
    uses default behavior based on model of choice If 'true', adds spoken
    emoji formatting for the request. This will replace spoken emojis with
    the corresponding Unicode symbols in the final transcript. If 'false',
    spoken emojis are not replaced.
  enableSpokenPunctuation: The spoken punctuation behavior for the call If
    not set, uses default behavior based on model of choice e.g.
    command_and_search will enable spoken punctuation by default If 'true',
    replaces spoken punctuation with the corresponding symbols in the
    request. For example, "how are you question mark" becomes "how are
    you?". See https://cloud.google.com/speech-to-text/docs/spoken-
    punctuation for support. If 'false', spoken punctuation is not replaced.
  enableWordConfidence: If `true`, the top result includes a list of words
    and the confidence for those words. If `false`, no word-level confidence
    information is returned. The default is `false`.
  enableWordTimeOffsets: If `true`, the top result includes a list of words
    and the start and end time offsets (timestamps) for those words. If
    `false`, no word-level time offset information is returned. The default
    is `false`.
  encoding: Encoding of audio data sent in all `RecognitionAudio` messages.
    This field is optional for `FLAC` and `WAV` audio files and required for
    all other audio formats. For details, see AudioEncoding.
  languageCode: Required. The language of the supplied audio as a
    [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    Example: "en-US". See [Language
    Support](https://cloud.google.com/speech-to-text/docs/languages) for a
    list of the currently supported language codes.
  maxAlternatives: Maximum number of recognition hypotheses to be returned.
    Specifically, the maximum number of `SpeechRecognitionAlternative`
    messages within each `SpeechRecognitionResult`. The server may return
    fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0`
    or `1` will return a maximum of one. If omitted, will return a maximum
    of one.
  metadata: Metadata regarding this request.
  model: Which model to select for the given request. Select the model best
    suited to your domain to get best results. If a model is not explicitly
    specified, then we auto-select a model based on the parameters in the
    RecognitionConfig. *Model* *Description* latest_long Best for long form
    content like media or conversation. latest_short Best for short form
    content like commands or single shot directed speech. command_and_search
    Best for short queries such as voice commands or voice search.
    phone_call Best for audio that originated from a phone call (typically
    recorded at an 8khz sampling rate). video Best for audio that originated
    from video or includes multiple speakers. Ideally the audio is recorded
    at a 16khz or greater sampling rate. This is a premium model that costs
    more than the standard rate. default Best for audio that is not one of
    the specific audio models. For example, long-form audio. Ideally the
    audio is high-fidelity, recorded at a 16khz or greater sampling rate.
    medical_conversation Best for audio that originated from a conversation
    between a medical provider and patient. medical_dictation Best for audio
    that originated from dictation notes by a medical provider.
  profanityFilter: If set to `true`, the server will attempt to filter out
    profanities, replacing all but the initial character in each filtered
    word with asterisks, e.g. "f***". If set to `false` or omitted,
    profanities won't be filtered out.
  sampleRateHertz: Sample rate in Hertz of the audio data sent in all
    `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
    optimal. For best results, set the sampling rate of the audio source to
    16000 Hz. If that's not possible, use the native sample rate of the
    audio source (instead of re-sampling). This field is optional for FLAC
    and WAV audio files, but is required for all other audio formats. For
    details, see AudioEncoding.
  speechContexts: Array of SpeechContext. A means to provide context to
    assist the speech recognition. For more information, see [speech
    adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
  transcriptNormalization: Optional. Use transcription normalization to
    automatically replace parts of the transcript with phrases of your
    choosing. For StreamingRecognize, this normalization only applies to
    stable partial transcripts (stability > 0.8) and final transcripts.
  useEnhanced: Set to true to use an enhanced model for speech recognition.
    If `use_enhanced` is set to true and the `model` field is not set, then
    an appropriate enhanced model is chosen if an enhanced model exists for
    the audio. If `use_enhanced` is true and an enhanced version of the
    specified model does not exist, then the speech is recognized using the
    standard version of the specified model.
c                   D    \ rS rSrSrSrSrSrSrSr	Sr
S	rS
rSrSrSrSrg))RecognitionConfig.EncodingValueValuesEnumi  a	  Encoding of audio data sent in all `RecognitionAudio` messages. This
field is optional for `FLAC` and `WAV` audio files and required for all
other audio formats. For details, see AudioEncoding.

Values:
  ENCODING_UNSPECIFIED: Not specified.
  LINEAR16: Uncompressed 16-bit signed little-endian samples (Linear PCM).
  FLAC: `FLAC` (Free Lossless Audio Codec) is the recommended encoding
    because it is lossless--therefore recognition is not compromised--and
    requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
    encoding supports 16-bit and 24-bit samples, however, not all fields
    in `STREAMINFO` are supported.
  MULAW: 8-bit samples that compand 14-bit audio samples using G.711
    PCMU/mu-law.
  AMR: Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be
    8000.
  AMR_WB: Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be
    16000.
  OGG_OPUS: Opus encoded audio frames in Ogg container
    ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must
    be one of 8000, 12000, 16000, 24000, or 48000.
  SPEEX_WITH_HEADER_BYTE: Although the use of lossy encodings is not
    recommended, if a very low bitrate encoding is required, `OGG_OPUS` is
    highly preferred over Speex encoding. The [Speex](https://speex.org/)
    encoding supported by Cloud Speech API has a header byte in each
    block, as in MIME type `audio/x-speex-with-header-byte`. It is a
    variant of the RTP Speex encoding defined in [RFC
    5574](https://tools.ietf.org/html/rfc5574). The stream is a sequence
    of blocks, one block per RTP packet. Each block starts with a byte
    containing the length of the block, in bytes, followed by one or more
    frames of Speex data, padded to an integral number of bytes (octets)
    as specified in RFC 5574. In other words, each RTP header is replaced
    with a single byte containing the block length. Only Speex wideband is
    supported. `sample_rate_hertz` must be 16000.
  MP3: MP3 audio. MP3 encoding is a Beta feature and only available in
    v1p1beta1. Support all standard MP3 bitrates (which range from 32-320
    kbps). When using this encoding, `sample_rate_hertz` has to match the
    sample rate of the file being used.
  WEBM_OPUS: Opus encoded audio frames in WebM container
    ([WebM](https://www.webmproject.org/docs/container/)).
    `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or
    48000.
  ALAW: 8-bit samples that compand 13-bit audio samples using G.711
    PCMU/a-law.
r   r   r"   rI   rJ   rK   rL   rM   rN   rO   rP   r   N)r   r   r   r   r   ENCODING_UNSPECIFIEDLINEAR16FLACMULAWAMRAMR_WBOGG_OPUSSPEEX_WITH_HEADER_BYTEMP3	WEBM_OPUSALAWr   r   r   r   EncodingValueValuesEnumr     sC    ,Z HDE
CFH
CIDr   r   SpeechAdaptationr   r"   Tr   rI   r   SpeakerDiarizationConfigrJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS         RecognitionMetadata   r
         SpeechContext   TranscriptNormalization      r   N)&r   r   r   r   r   r   rT   r   r#   
adaptationr   alternativeLanguageCodesr   r   r   audioChannelCountdiarizationConfigdiarizationSpeakerCountr_   enableAutomaticPunctuation#enableSeparateRecognitionPerChannelenableSpeakerDiarizationenableSpokenEmojisenableSpokenPunctuationenableWordConfidenceenableWordTimeOffsetsra   r   languageCodemaxAlternativesr   modelprofanityFiltersampleRateHertzspeechContextstranscriptNormalizationuseEnhancedr   r   r   r   r   r   T  s   GR8	 8t %%&8!<*&221tD,,Q	8I8I8O8OP,,-GK%221i>O>O>U>UV(55a8(1(>(>q(A%&33A6 --a0%2226"//3#004  !:B?(&&r*,**2y7H7H7N7NO/##$92>(



#%**2./**2y7H7H7N7NO/))/2M.%223LbQ&&r*+r   r   c                   2   \ rS rSrSr " S S\R                  5      r " S S\R                  5      r " S S\R                  5      r	 " S	 S
\R                  5      r
\R                  " S5      r\R                  " S\R                  R                  S9r\R"                  " SS5      r\R"                  " SS5      r\R                  " S5      r\R"                  " SS5      r\R                  " S5      r\R                  " S5      r\R"                  " S
S5      rSrg)r   i0  a  Description of audio data to be recognized.

Enums:
  InteractionTypeValueValuesEnum: The use case most closely describing the
    audio content to be recognized.
  MicrophoneDistanceValueValuesEnum: The audio type that most closely
    describes the audio being recognized.
  OriginalMediaTypeValueValuesEnum: The original media the speech was
    recorded on.
  RecordingDeviceTypeValueValuesEnum: The type of device the speech was
    recorded with.

Fields:
  audioTopic: Description of the content. Eg. "Recordings of federal supreme
    court hearings from 2012".
  industryNaicsCodeOfAudio: The industry vertical to which this speech
    recognition request most closely applies. This is most indicative of the
    topics contained in the audio. Use the 6-digit NAICS code to identify
    the industry vertical - see https://www.naics.com/search/.
  interactionType: The use case most closely describing the audio content to
    be recognized.
  microphoneDistance: The audio type that most closely describes the audio
    being recognized.
  obfuscatedId: Obfuscated (privacy-protected) ID of the user, to identify
    number of unique users using the service.
  originalMediaType: The original media the speech was recorded on.
  originalMimeType: Mime type of the original audio file. For example
    `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. A list of
    possible audio mime types is maintained at
    http://www.iana.org/assignments/media-types/media-types.xhtml#audio
  recordingDeviceName: The device used to make the recording. Examples
    'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
    'Cardioid Microphone'.
  recordingDeviceType: The type of device the speech was recorded with.
c                   <    \ rS rSrSrSrSrSrSrSr	Sr
S	rS
rSrSrg)2RecognitionMetadata.InteractionTypeValueValuesEnumiU  aM  The use case most closely describing the audio content to be
recognized.

Values:
  INTERACTION_TYPE_UNSPECIFIED: Use case is either unknown or is something
    other than one of the other values below.
  DISCUSSION: Multiple people in a conversation or discussion. For example
    in a meeting with two or more people actively participating. Typically
    all the primary people speaking would be in the same room (if not, see
    PHONE_CALL)
  PRESENTATION: One or more persons lecturing or presenting to others,
    mostly uninterrupted.
  PHONE_CALL: A phone-call or video-conference in which two or more
    people, who are not in the same room, are actively participating.
  VOICEMAIL: A recorded message intended for another person to listen to.
  PROFESSIONALLY_PRODUCED: Professionally produced audio (eg. TV Show,
    Podcast).
  VOICE_SEARCH: Transcribe spoken questions and queries into text.
  VOICE_COMMAND: Transcribe voice commands, such as for controlling a
    device.
  DICTATION: Transcribe speech to text to create a written document, such
    as a text-message, email or report.
r   r   r"   rI   rJ   rK   rL   rM   rN   r   N)r   r   r   r   r   INTERACTION_TYPE_UNSPECIFIED
DISCUSSIONPRESENTATION
PHONE_CALL	VOICEMAILPROFESSIONALLY_PRODUCEDVOICE_SEARCHVOICE_COMMAND	DICTATIONr   r   r   r   InteractionTypeValueValuesEnumr   U  s8    . $% JLJILMIr   r   c                   (    \ rS rSrSrSrSrSrSrSr	g)	5RecognitionMetadata.MicrophoneDistanceValueValuesEnumiw  a  The audio type that most closely describes the audio being recognized.

Values:
  MICROPHONE_DISTANCE_UNSPECIFIED: Audio type is not known.
  NEARFIELD: The audio was captured from a closely placed microphone. Eg.
    phone, dictaphone, or handheld microphone. Generally if there speaker
    is within 1 meter of the microphone.
  MIDFIELD: The speaker if within 3 meters of the microphone.
  FARFIELD: The speaker is more than 3 meters away from the microphone.
r   r   r"   rI   r   N)
r   r   r   r   r   MICROPHONE_DISTANCE_UNSPECIFIED	NEARFIELDMIDFIELDFARFIELDr   r   r   r   !MicrophoneDistanceValueValuesEnumr   w  s    	 '(#IHHr   r   c                   $    \ rS rSrSrSrSrSrSrg)4RecognitionMetadata.OriginalMediaTypeValueValuesEnumi  zThe original media the speech was recorded on.

Values:
  ORIGINAL_MEDIA_TYPE_UNSPECIFIED: Unknown original media type.
  AUDIO: The speech data is an audio recording.
  VIDEO: The speech data originally recorded on a video.
r   r   r"   r   N)	r   r   r   r   r   ORIGINAL_MEDIA_TYPE_UNSPECIFIEDAUDIOVIDEOr   r   r   r    OriginalMediaTypeValueValuesEnumr    s     '(#EEr   r  c                   4    \ rS rSrSrSrSrSrSrSr	Sr
S	rS
rg)6RecognitionMetadata.RecordingDeviceTypeValueValuesEnumi  a  The type of device the speech was recorded with.

Values:
  RECORDING_DEVICE_TYPE_UNSPECIFIED: The recording device is unknown.
  SMARTPHONE: Speech was recorded on a smartphone.
  PC: Speech was recorded using a personal computer or tablet.
  PHONE_LINE: Speech was recorded over a phone line.
  VEHICLE: Speech was recorded in a vehicle.
  OTHER_OUTDOOR_DEVICE: Speech was recorded outdoors.
  OTHER_INDOOR_DEVICE: Speech was recorded indoors.
r   r   r"   rI   rJ   rK   rL   r   N)r   r   r   r   r   !RECORDING_DEVICE_TYPE_UNSPECIFIED
SMARTPHONEPC
PHONE_LINEVEHICLEOTHER_OUTDOOR_DEVICEOTHER_INDOOR_DEVICEr   r   r   r   "RecordingDeviceTypeValueValuesEnumr    s/    
 )*%J	
BJGr   r  r   r"   r   rI   rJ   rK   rL   rM   rN   rO   r   N)r   r   r   r   r   r   rT   r   r   r  r  r   
audioTopicr   r   UINT32industryNaicsCodeOfAudiora   interactionTypemicrophoneDistanceobfuscatedIdoriginalMediaTypeoriginalMimeTyperecordingDeviceNamerecordingDeviceTyper   r   r   r   r   r   0  s    "H y~~  D)..  
 
9>> ( $$Q'*&33Ay?P?P?W?WX''(H!L/ **+NPQR''*,))*LaP**1-!--a0!++,PRSTr   r   c                   d    \ rS rSrSr\R                  " SS5      r\R                  " SS5      rSr	g)	RecognizeRequesti  zThe top-level message sent by the client for the `Recognize` method.

Fields:
  audio: Required. The audio data to be recognized.
  config: Required. Provides information to the recognizer that specifies
    how to process the request.
r   r   r   r"   r   N)
r   r   r   r   r   r   r#   r   r   r   r   r   r   r  r    s/     
 
 !3Q
7%!!"5q9&r   r  c                       \ rS rSrSr\R                  " S5      r\R                  " SSSS9r	\R                  " SS	5      r
\R                  " S
5      r\R                  " S5      rSrg)RecognizeResponsei  a  The only message returned to the client by the `Recognize` method. It
contains the result as zero or more sequential `SpeechRecognitionResult`
messages.

Fields:
  requestId: The ID associated with the request. This is a unique ID
    specific only to the given request.
  results: Sequential list of transcription results corresponding to
    sequential portions of audio.
  speechAdaptationInfo: Provides information on adaptation behavior in
    response
  totalBilledTime: When available, billed audio seconds for the
    corresponding request.
  usingLegacyModels: Whether request used legacy asr models (was not
    automatically migrated to use conformer models).
r   r   r"   Tr   r   rI   rJ   rK   r   N)r   r   r   r   r   r   r   r   r#   r   r   r   r   r_   usingLegacyModelsr   r   r   r   r  r    sc    " $$Q')""#<a$O'"//0FJ))!,/,,Q/r   r  c                      \ rS rSrSr\R                  " S5      r\R                  " S\R                  R                  S9r\R                  " S\R                  R                  S9r\R                  " S\R                  R                  S9rSrg	)
r   i  a  Config to enable speaker diarization.

Fields:
  enableSpeakerDiarization: If 'true', enables speaker detection for each
    recognized word in the top alternative of the recognition result using a
    speaker_label provided in the WordInfo.
  maxSpeakerCount: Maximum number of speakers in the conversation. This
    range gives you more flexibility by allowing the system to automatically
    determine the correct number of speakers. If not set, the default value
    is 6.
  minSpeakerCount: Minimum number of speakers in the conversation. This
    range gives you more flexibility by allowing the system to automatically
    determine the correct number of speakers. If not set, the default value
    is 2.
  speakerTag: Output only. Unused.
  """

  enableSpeakerDiarization = _messages.BooleanField(1)
  maxSpeakerCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  minSpeakerCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  speakerTag = _messages.IntegerField(4, variant=_messages.Variant.INT32)


class SpeechAdaptation(_messages.Message):
  r"""Speech adaptation configuration.

  Fields:
    abnfGrammar: Augmented Backus-Naur form (ABNF) is a standardized grammar
      notation comprised by a set of derivation rules. See specifications:
      https://www.w3.org/TR/speech-grammar
    customClasses: A collection of custom classes. To specify the classes
      inline, leave the class' `name` blank and fill in the rest of its
      fields, giving it a unique `custom_class_id`. Refer to the inline
      defined class in phrase hints by its `custom_class_id`.
    phraseSetReferences: A collection of phrase set resource names to use.
    phraseSets: A collection of phrase sets. To specify the hints inline,
      leave the phrase set's `name` blank and fill in the rest of its fields.
      Any phrase set can use any custom class.
  """

  abnfGrammar = _messages.MessageField('ABNFGrammar', 1)
  customClasses = _messages.MessageField('CustomClass', 2, repeated=True)
  phraseSetReferences = _messages.StringField(3, repeated=True)
  phraseSets = _messages.MessageField('PhraseSet', 4, repeated=True)


class SpeechAdaptationInfo(_messages.Message):
  r"""Information on speech adaptation use in results

  Fields:
    adaptationTimeout: Whether there was a timeout when applying speech
      adaptation. If true, adaptation had no effect in the response
      transcript.
    timeoutMessage: If set, returns a message specifying which part of the
      speech adaptation request timed out.
  """

  adaptationTimeout = _messages.BooleanField(1)
  timeoutMessage = _messages.StringField(2)


class SpeechContext(_messages.Message):
  r"""Provides "hints" to the speech recognizer to favor specific words and
  phrases in the results.

  Fields:
    boost: Hint Boost. Positive value will increase the probability that a
      specific phrase will be recognized over other similar sounding phrases.
      The higher the boost, the higher the chance of false positive
      recognition as well. Negative boost values would correspond to anti-
      biasing. Anti-biasing is not enabled, so negative boost will simply be
      ignored. Though `boost` can accept a wide range of positive values, most
      use cases are best served with values between 0 and 20. We recommend
      using a binary search approach to finding the optimal value for your use
      case.
    phrases: A list of strings containing words and phrases "hints" so that
      the speech recognition is more likely to recognize them. This can be
      used to improve the accuracy for specific words and phrases, for
      example, if specific commands are typically spoken by the user. This can
      also be used to add additional words to the vocabulary of the
      recognizer. See [usage limits](https://cloud.google.com/speech-to-
      text/quotas#content). List items can also be set to classes for groups
      of words that represent common concepts that occur in natural language.
      For example, rather than providing phrase hints for every month of the
      year, using the $MONTH class improves the likelihood of correctly
      transcribing audio that includes months.
  """

  boost = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  phrases = _messages.StringField(2, repeated=True)


class SpeechOperationsGetRequest(_messages.Message):
  r"""A SpeechOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  name = _messages.StringField(1, required=True)


class SpeechOperationsListRequest(_messages.Message):
  r"""A SpeechOperationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The name of the operation's parent resource.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
    returnPartialSuccess: When set to `true`, operations that are reachable
      are returned as normal, and those that are unreachable are returned in
      the [ListOperationsResponse.unreachable] field. This can only be `true`
      when reading across collections e.g. when `parent` is set to
      `"projects/example/locations/-"`. This field is not by default supported
      and will result in an `UNIMPLEMENTED` error if set unless explicitly
      documented otherwise in service or product specific documentation.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  returnPartialSuccess = _messages.BooleanField(5)


class SpeechProjectsLocationsCustomClassesCreateRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesCreateRequest object.

  Fields:
    createCustomClassRequest: A CreateCustomClassRequest resource to be passed
      as the request body.
    parent: Required. The parent resource where this custom class will be
      created. Format: `projects/{project}/locations/{location}/customClasses`
      Speech-to-Text supports three locations: `global`, `us` (US North
      America), and `eu` (Europe). If you are calling the
      `speech.googleapis.com` endpoint, use the `global` location. To specify
      a region, use a [regional endpoint](https://cloud.google.com/speech-to-
      text/docs/endpoints) with matching `us` or `eu` location value.
  """

  createCustomClassRequest = _messages.MessageField('CreateCustomClassRequest', 1)
  parent = _messages.StringField(2, required=True)


class SpeechProjectsLocationsCustomClassesDeleteRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesDeleteRequest object.

  Fields:
    name: Required. The name of the custom class to delete. Format:
      `projects/{project}/locations/{location}/customClasses/{custom_class}`
      Speech-to-Text supports three locations: `global`, `us` (US North
      America), and `eu` (Europe). If you are calling the
      `speech.googleapis.com` endpoint, use the `global` location. To specify
      a region, use a [regional endpoint](https://cloud.google.com/speech-to-
      text/docs/endpoints) with matching `us` or `eu` location value.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsCustomClassesGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesGetRequest object.

  Fields:
    name: Required. The name of the custom class to retrieve. Format:
      `projects/{project}/locations/{location}/customClasses/{custom_class}`
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsCustomClassesListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesListRequest object.

  Fields:
    pageSize: The maximum number of custom classes to return. The service may
      return fewer than this value. If unspecified, at most 50 custom classes
      will be returned. The maximum value is 1000; values above 1000 will be
      coerced to 1000.
    pageToken: A page token, received from a previous `ListCustomClass` call.
      Provide this to retrieve the subsequent page. When paginating, all other
      parameters provided to `ListCustomClass` must match the call that
      provided the page token.
    parent: Required. The parent, which owns this collection of custom
      classes. Format: `projects/{project}/locations/{location}/customClasses`
      Speech-to-Text supports three locations: `global`, `us` (US North
      America), and `eu` (Europe). If you are calling the
      `speech.googleapis.com` endpoint, use the `global` location. To specify
      a region, use a [regional endpoint](https://cloud.google.com/speech-to-
      text/docs/endpoints) with matching `us` or `eu` location value.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class SpeechProjectsLocationsCustomClassesPatchRequest(_messages.Message):
  r"""A SpeechProjectsLocationsCustomClassesPatchRequest object.

  Fields:
    customClass: A CustomClass resource to be passed as the request body.
    name: The resource name of the custom class.
    updateMask: The list of fields to be updated.
  """

  customClass = _messages.MessageField('CustomClass', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)


class SpeechProjectsLocationsPhraseSetsCreateRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsCreateRequest object.

  Fields:
    createPhraseSetRequest: A CreatePhraseSetRequest resource to be passed as
      the request body.
    parent: Required. The parent resource where this phrase set will be
      created. Format: `projects/{project}/locations/{location}` Speech-to-
      Text supports three locations: `global`, `us` (US North America), and
      `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint,
      use the `global` location. To specify a region, use a [regional
      endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with
      matching `us` or `eu` location value.
  """

  createPhraseSetRequest = _messages.MessageField('CreatePhraseSetRequest', 1)
  parent = _messages.StringField(2, required=True)


class SpeechProjectsLocationsPhraseSetsDeleteRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsDeleteRequest object.

  Fields:
    name: Required. The name of the phrase set to delete. Format:
      `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsPhraseSetsGetRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsGetRequest object.

  Fields:
    name: Required. The name of the phrase set to retrieve. Format:
      `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
      Speech-to-Text supports three locations: `global`, `us` (US North
      America), and `eu` (Europe). If you are calling the
      `speech.googleapis.com` endpoint, use the `global` location. To specify
      a region, use a [regional endpoint](https://cloud.google.com/speech-to-
      text/docs/endpoints) with matching `us` or `eu` location value.
  """

  name = _messages.StringField(1, required=True)


class SpeechProjectsLocationsPhraseSetsListRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsListRequest object.

  Fields:
    pageSize: The maximum number of phrase sets to return. The service may
      return fewer than this value. If unspecified, at most 50 phrase sets
      will be returned. The maximum value is 1000; values above 1000 will be
      coerced to 1000.
    pageToken: A page token, received from a previous `ListPhraseSet` call.
      Provide this to retrieve the subsequent page. When paginating, all other
      parameters provided to `ListPhraseSet` must match the call that provided
      the page token.
    parent: Required. The parent, which owns this collection of phrase set.
      Format: `projects/{project}/locations/{location}` Speech-to-Text
      supports three locations: `global`, `us` (US North America), and `eu`
      (Europe). If you are calling the `speech.googleapis.com` endpoint, use
      the `global` location. To specify a region, use a [regional
      endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with
      matching `us` or `eu` location value.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)


class SpeechProjectsLocationsPhraseSetsPatchRequest(_messages.Message):
  r"""A SpeechProjectsLocationsPhraseSetsPatchRequest object.

  Fields:
    name: The resource name of the phrase set.
    phraseSet: A PhraseSet resource to be passed as the request body.
    updateMask: The list of fields to be updated.
  """

  name = _messages.StringField(1, required=True)
  phraseSet = _messages.MessageField('PhraseSet', 2)
  updateMask = _messages.StringField(3)


class SpeechRecognitionAlternative(_messages.Message):
  r"""Alternative hypotheses (a.k.a. n-best list).

  Fields:
    confidence: The confidence estimate between 0.0 and 1.0. A higher number
      indicates an estimated greater likelihood that the recognized words are
      correct. This field is set only for the top alternative of a non-
      streaming result or, of a streaming result where `is_final=true`. This
      field is not guaranteed to be accurate and users should not rely on it
      to be always provided. The default of 0.0 is a sentinel value indicating
      `confidence` was not set.
    transcript: Transcript text representing the words that the user spoke. In
      languages that use spaces to separate words, the transcript might have a
      leading space if it isn't the first result. You can concatenate each
      result to obtain the full transcript without using a separator.
    words: A list of word-specific information for each recognized word. Note:
      When `enable_speaker_diarization` is true, you will see all the words
      from the beginning of the audio.
  """

  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  transcript = _messages.StringField(2)
  words = _messages.MessageField('WordInfo', 3, repeated=True)


class SpeechRecognitionResult(_messages.Message):
  r"""A speech recognition result corresponding to a portion of the audio.

  Fields:
    alternatives: May contain one or more recognition hypotheses (up to the
      maximum specified in `max_alternatives`). These alternatives are ordered
      in terms of accuracy, with the top (first) alternative being the most
      probable, as ranked by the recognizer.
    channelTag: For multi-channel audio, this is the channel number
      corresponding to the recognized result for the audio from that channel.
      For audio_channel_count = N, its output values can range from '1' to
      'N'.
    languageCode: Output only. The [BCP-47](https://www.rfc-
      editor.org/rfc/bcp/bcp47.txt) language tag of the language in this
      result. This language code was detected to have the most likelihood of
      being spoken in the audio.
    resultEndTime: Time offset of the end of this result relative to the
      beginning of the audio.
  """

  alternatives = _messages.MessageField('SpeechRecognitionAlternative', 1, repeated=True)
  channelTag = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  languageCode = _messages.StringField(3)
  resultEndTime = _messages.StringField(4)


class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)


class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors).

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)


class TranscriptNormalization(_messages.Message):
  r"""Transcription normalization configuration. Use transcription
  normalization to automatically replace parts of the transcript with phrases
  of your choosing. For StreamingRecognize, this normalization only applies to
  stable partial transcripts (stability > 0.8) and final transcripts.

  Fields:
    entries: A list of replacement entries. We will perform replacement with
      one entry at a time. For example, the second entry in ["cat" => "dog",
      "mountain cat" => "mountain dog"] will never be applied because we will
      always process the first entry before it. At most 100 entries.
  """

  entries = _messages.MessageField('Entry', 1, repeated=True)


class TranscriptOutputConfig(_messages.Message):
  r"""Specifies an optional destination for the recognition results.

  Fields:
    gcsUri: Specifies a Cloud Storage URI for the recognition results. Must be
      specified in the format: `gs://bucket_name/object_name`, and the bucket
      must already exist.
  """

  gcsUri = _messages.StringField(1)


class WordInfo(_messages.Message):
  r"""Word-specific information for recognized words.

  Fields:
    confidence: The confidence estimate between 0.0 and 1.0. A higher number
      indicates an estimated greater likelihood that the recognized words are
      correct. This field is set only for the top alternative of a non-
      streaming result or, of a streaming result where `is_final=true`. This
      field is not guaranteed to be accurate and users should not rely on it
      to be always provided. The default of 0.0 is a sentinel value indicating
      `confidence` was not set.
    endTime: Time offset relative to the beginning of the audio, and
      corresponding to the end of the spoken word. This field is only set if
      `enable_word_time_offsets=true` and only in the top hypothesis. This is
      an experimental feature and the accuracy of the time offset can vary.
    speakerLabel: Output only. A label value assigned for every unique speaker
      within the audio. This field specifies which speaker was detected to
      have spoken this word. For some models, like medical_conversation this
      can be actual speaker role, for example "patient" or "provider", but
      generally this would be a number identifying a speaker. This field is
      only set if enable_speaker_diarization = 'true' and only for the top
      alternative.
    speakerTag: Output only. A distinct integer value is assigned for every
      speaker within the audio. This field specifies which one of those
      speakers was detected to have spoken this word. Value ranges from '1' to
      diarization_speaker_count. speaker_tag is set if
      enable_speaker_diarization = 'true' and only for the top alternative.
      Note: Use speaker_label instead.
    startTime: Time offset relative to the beginning of the audio, and
      corresponding to the start of the spoken word. This field is only set if
      `enable_word_time_offsets=true` and only in the top hypothesis. This is
      an experimental feature and the accuracy of the time offset can vary.
    word: The word corresponding to this set of information.
  """

  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  endTime = _messages.StringField(2)
  speakerLabel = _messages.StringField(3)
  speakerTag = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  startTime = _messages.StringField(5)
  word = _messages.StringField(6)
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
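# NOTE: The commented example below is illustrative only and is not part of
# the generated API surface. It sketches how a caller might combine the
# message classes defined above when assembling a recognition request with
# speech adaptation, hints, and speaker diarization. The `PhraseSet` and
# `Phrase` constructor fields and the resource names shown are assumptions
# for the sake of the sketch, not guaranteed parts of this module.
#
#   adaptation = SpeechAdaptation(
#       phraseSetReferences=[
#           'projects/my-project/locations/global/phraseSets/my-phrase-set'],
#       phraseSets=[PhraseSet(phrases=[Phrase(value='weather', boost=10.0)])],
#   )
#   context = SpeechContext(phrases=['$MONTH'], boost=15.0)
#   diarization = SpeakerDiarizationConfig(
#       enableSpeakerDiarization=True, minSpeakerCount=2, maxSpeakerCount=6)
#
# These messages would then typically be attached to a RecognitionConfig
# (defined elsewhere in this module) before issuing a recognize request.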