// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.speech.v1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/cloud/speech/v1/resource.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";

option cc_enable_arenas = true;
option go_package = "cloud.google.com/go/speech/apiv1/speechpb;speechpb";
option java_multiple_files = true;
option java_outer_classname = "SpeechProto";
option java_package = "com.google.cloud.speech.v1";
option objc_class_prefix = "GCS";

// Service that implements Google Cloud Speech API.
service Speech {
  option (google.api.default_host) = "speech.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Performs synchronous speech recognition: receive results after all audio
  // has been sent and processed.
  rpc Recognize(RecognizeRequest) returns (RecognizeResponse) {
    option (google.api.http) = {
      post: "/v1/speech:recognize"
      body: "*"
    };
    option (google.api.method_signature) = "config,audio";
  }

  // Performs asynchronous speech recognition: receive results via the
  // google.longrunning.Operations interface. Returns either an
  // `Operation.error` or an `Operation.response` which contains
  // a `LongRunningRecognizeResponse` message.
  // For more information on asynchronous speech recognition, see the
  // [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
  rpc LongRunningRecognize(LongRunningRecognizeRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/speech:longrunningrecognize"
      body: "*"
    };
    option (google.api.method_signature) = "config,audio";
    option (google.longrunning.operation_info) = {
      response_type: "LongRunningRecognizeResponse"
      metadata_type: "LongRunningRecognizeMetadata"
    };
  }

  // Performs bidirectional streaming speech recognition: receive results while
  // sending audio. This method is only available via the gRPC API (not REST).
  rpc StreamingRecognize(stream StreamingRecognizeRequest)
      returns (stream StreamingRecognizeResponse) {}
}

// The top-level message sent by the client for the `Recognize` method.
message RecognizeRequest {
  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The audio data to be recognized.
  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];
}
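
// A minimal `RecognizeRequest`, shown in protobuf text format as a hedged
// illustration only; the bucket and object names are placeholders and the
// config values assume 16 kHz LINEAR16 audio:
//
//   config {
//     encoding: LINEAR16
//     sample_rate_hertz: 16000
//     language_code: "en-US"
//   }
//   audio {
//     uri: "gs://example-bucket/example-audio.raw"
//   }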

// The top-level message sent by the client for the `LongRunningRecognize`
// method.
message LongRunningRecognizeRequest {
  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The audio data to be recognized.
  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. Specifies an optional destination for the recognition results.
  TranscriptOutputConfig output_config = 4
      [(google.api.field_behavior) = OPTIONAL];
}

// Specifies an optional destination for the recognition results.
message TranscriptOutputConfig {
  oneof output_type {
    // Specifies a Cloud Storage URI for the recognition results. Must be
    // specified in the format: `gs://bucket_name/object_name`, and the bucket
    // must already exist.
    string gcs_uri = 1;
  }
}
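
// An illustrative `TranscriptOutputConfig` in protobuf text format; the
// bucket and object names below are hypothetical:
//
//   gcs_uri: "gs://example-bucket/transcripts/result.json"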

// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
message StreamingRecognizeRequest {
  // The streaming request, which is either a streaming config or audio content.
  oneof streaming_request {
    // Provides information to the recognizer that specifies how to process the
    // request. The first `StreamingRecognizeRequest` message must contain a
    // `streaming_config` message.
    StreamingRecognitionConfig streaming_config = 1;

    // The audio data to be recognized. Sequential chunks of audio data are sent
    // in sequential `StreamingRecognizeRequest` messages. The first
    // `StreamingRecognizeRequest` message must not contain `audio_content` data
    // and all subsequent `StreamingRecognizeRequest` messages must contain
    // `audio_content` data. The audio bytes must be encoded as specified in
    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    // pure binary representation (not base64). See
    // [content limits](https://cloud.google.com/speech-to-text/quotas#content).
    bytes audio_content = 2;
  }
}
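
// A sketch of one request stream, in protobuf text format, hedged as an
// illustration only; each item below is a separate `StreamingRecognizeRequest`
// message and the audio chunks are placeholders for raw bytes:
//
//   // First message: configuration only, no audio.
//   streaming_config {
//     config {
//       encoding: LINEAR16
//       sample_rate_hertz: 16000
//       language_code: "en-US"
//     }
//     interim_results: true
//   }
//   // Second and later messages: audio only, no configuration.
//   audio_content: "<first chunk of raw audio bytes>"
//   audio_content: "<next chunk of raw audio bytes>"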

// Provides information to the recognizer that specifies how to process the
// request.
message StreamingRecognitionConfig {
  // Events that a timeout can be set on for voice activity.
  message VoiceActivityTimeout {
    // Duration to timeout the stream if no speech begins.
    google.protobuf.Duration speech_start_timeout = 1;

    // Duration to timeout the stream after speech ends.
    google.protobuf.Duration speech_end_timeout = 2;
  }

  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // If `false` or omitted, the recognizer will perform continuous
  // recognition (continuing to wait for and process audio even if the user
  // pauses speaking) until the client closes the input stream (gRPC API) or
  // until the maximum time limit has been reached. May return multiple
  // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
  //
  // If `true`, the recognizer will detect a single spoken utterance. When it
  // detects that the user has paused or stopped speaking, it will return an
  // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
  // more than one `StreamingRecognitionResult` with the `is_final` flag set to
  // `true`.
  //
  // The `single_utterance` field can only be used with specified models;
  // otherwise an error is thrown. The `model` field in [`RecognitionConfig`][]
  // must be set to:
  //
  // * `command_and_search`
  // * `phone_call` AND additional field `useEnhanced`=`true`
  // * The `model` field is left undefined. In this case the API auto-selects
  //   a model based on any other parameters that you set in
  //   `RecognitionConfig`.
  bool single_utterance = 2;

  // If `true`, interim results (tentative hypotheses) may be
  // returned as they become available (these interim results are indicated with
  // the `is_final=false` flag).
  // If `false` or omitted, only `is_final=true` result(s) are returned.
  bool interim_results = 3;

  // If `true`, responses with voice activity speech events will be returned as
  // they are detected.
  bool enable_voice_activity_events = 5;

  // If set, the server will automatically close the stream after the specified
  // duration has elapsed after the last VOICE_ACTIVITY speech event has been
  // sent. The field `enable_voice_activity_events` must also be set to true.
  VoiceActivityTimeout voice_activity_timeout = 6;
}
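
// A hedged example of a streaming config, in protobuf text format, that
// enables voice activity events and sets voice activity timeouts; the
// five-second values are illustrative assumptions, not recommendations:
//
//   config {
//     encoding: LINEAR16
//     sample_rate_hertz: 16000
//     language_code: "en-US"
//   }
//   enable_voice_activity_events: true
//   voice_activity_timeout {
//     speech_start_timeout { seconds: 5 }
//     speech_end_timeout { seconds: 5 }
//   }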

// Provides information to the recognizer that specifies how to process the
// request.
message RecognitionConfig {
  // The encoding of the audio data sent in the request.
  //
  // All encodings support only 1 channel (mono) audio, unless the
  // `audio_channel_count` and `enable_separate_recognition_per_channel` fields
  // are set.
  //
  // For best results, the audio source should be captured and transmitted using
  // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
  // recognition can be reduced if lossy codecs are used to capture or transmit
  // audio, particularly if background noise is present. Lossy codecs include
  // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`,
  // and `WEBM_OPUS`.
  //
  // The `FLAC` and `WAV` audio file formats include a header that describes the
  // included audio content. You can request recognition for `WAV` files that
  // contain either `LINEAR16` or `MULAW` encoded audio.
  // If you send audio in the `FLAC` or `WAV` file format in
  // your request, you do not need to specify an `AudioEncoding`; the audio
  // encoding format is determined from the file header. If you specify
  // an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
  // encoding configuration must match the encoding described in the audio
  // header; otherwise the request returns an
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
  // code.
  enum AudioEncoding {
    // Not specified.
    ENCODING_UNSPECIFIED = 0;

    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1;

    // `FLAC` (Free Lossless Audio
    // Codec) is the recommended encoding because it is
    // lossless--therefore recognition is not compromised--and
    // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
    // encoding supports 16-bit and 24-bit samples; however, not all fields in
    // `STREAMINFO` are supported.
    FLAC = 2;

    // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3;

    // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
    AMR = 4;

    // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
    AMR_WB = 5;

    // Opus encoded audio frames in Ogg container
    // ([OggOpus](https://wiki.xiph.org/OggOpus)).
    // `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
    OGG_OPUS = 6;

    // Although the use of lossy encodings is not recommended, if a very low
    // bitrate encoding is required, `OGG_OPUS` is highly preferred over
    // Speex encoding. The [Speex](https://speex.org/) encoding supported by
    // Cloud Speech API has a header byte in each block, as in MIME type
    // `audio/x-speex-with-header-byte`.
    // It is a variant of the RTP Speex encoding defined in
    // [RFC 5574](https://tools.ietf.org/html/rfc5574).
    // The stream is a sequence of blocks, one block per RTP packet. Each block
    // starts with a byte containing the length of the block, in bytes, followed
    // by one or more frames of Speex data, padded to an integral number of
    // bytes (octets) as specified in RFC 5574. In other words, each RTP header
    // is replaced with a single byte containing the block length. Only Speex
    // wideband is supported. `sample_rate_hertz` must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7;

    // MP3 audio. MP3 encoding is a Beta feature and only available in
    // v1p1beta1. Supports all standard MP3 bitrates (which range from 32-320
    // kbps). When using this encoding, `sample_rate_hertz` has to match the
    // sample rate of the file being used.
    MP3 = 8;

    // Opus encoded audio frames in WebM container
    // ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
    // one of 8000, 12000, 16000, 24000, or 48000.
    WEBM_OPUS = 9;
  }

  // Encoding of audio data sent in all `RecognitionAudio` messages.
  // This field is optional for `FLAC` and `WAV` audio files and required
  // for all other audio formats. For details, see
  // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  AudioEncoding encoding = 1;

  // Sample rate in Hertz of the audio data sent in all
  // `RecognitionAudio` messages. Valid values are: 8000-48000.
  // 16000 is optimal. For best results, set the sampling rate of the audio
  // source to 16000 Hz. If that's not possible, use the native sample rate of
  // the audio source (instead of re-sampling).
  // This field is optional for FLAC and WAV audio files, but is
  // required for all other audio formats. For details, see
  // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  int32 sample_rate_hertz = 2;

  // The number of channels in the input audio data.
  // ONLY set this for MULTI-CHANNEL recognition.
  // Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
  // Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
  // If `0` or omitted, defaults to one channel (mono).
  // Note: We only recognize the first channel by default.
  // To perform independent recognition on each channel, set
  // `enable_separate_recognition_per_channel` to 'true'.
  int32 audio_channel_count = 7;

  // This needs to be set to `true` explicitly and `audio_channel_count` > 1
  // to get each channel recognized separately. The recognition result will
  // contain a `channel_tag` field to state which channel that result belongs
  // to. If this is not true, we will only recognize the first channel. The
  // request is billed cumulatively for all channels recognized:
  // `audio_channel_count` multiplied by the length of the audio.
  bool enable_separate_recognition_per_channel = 12;

  // Required. The language of the supplied audio as a
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  // Example: "en-US".
  // See [Language
  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
  // of the currently supported language codes.
  string language_code = 3 [(google.api.field_behavior) = REQUIRED];

  // A list of up to 3 additional
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
  // listing possible alternative languages of the supplied audio.
  // See [Language
  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
  // of the currently supported language codes. If alternative languages are
  // listed, the recognition result will contain recognition in the most likely
  // language detected, including the main language_code. The recognition result
  // will include the language tag of the language detected in the audio. Note:
  // This feature is only supported for Voice Command and Voice Search use cases
  // and performance may vary for other use cases (e.g., phone call
  // transcription).
  repeated string alternative_language_codes = 18;

  // Maximum number of recognition hypotheses to be returned.
  // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
  // within each `SpeechRecognitionResult`.
  // The server may return fewer than `max_alternatives`.
  // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
  // one. If omitted, will return a maximum of one.
  int32 max_alternatives = 4;

  // If set to `true`, the server will attempt to filter out
  // profanities, replacing all but the initial character in each filtered word
  // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
  // won't be filtered out.
  bool profanity_filter = 5;

  // Speech adaptation configuration improves the accuracy of speech
  // recognition. For more information, see the [speech
  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
  // documentation.
  // When speech adaptation is set, it supersedes the `speech_contexts` field.
  SpeechAdaptation adaptation = 20;

  // Optional. Use transcription normalization to automatically replace parts of
  // the transcript with phrases of your choosing. For StreamingRecognize, this
  // normalization only applies to stable partial transcripts (stability > 0.8)
  // and final transcripts.
  TranscriptNormalization transcript_normalization = 24
      [(google.api.field_behavior) = OPTIONAL];

  // Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
  // A means to provide context to assist the speech recognition. For more
  // information, see
  // [speech
  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
  repeated SpeechContext speech_contexts = 6;

  // If `true`, the top result includes a list of words and
  // the start and end time offsets (timestamps) for those words. If
  // `false`, no word-level time offset information is returned. The default is
  // `false`.
  bool enable_word_time_offsets = 8;

  // If `true`, the top result includes a list of words and the
  // confidence for those words. If `false`, no word-level confidence
  // information is returned. The default is `false`.
  bool enable_word_confidence = 15;

  // If 'true', adds punctuation to recognition result hypotheses.
  // This feature is only available in select languages. Setting this for
  // requests in other languages has no effect at all.
  // The default 'false' value does not add punctuation to result hypotheses.
  bool enable_automatic_punctuation = 11;

  // The spoken punctuation behavior for the call.
  // If not set, uses default behavior based on the model of choice;
  // e.g. command_and_search will enable spoken punctuation by default.
  // If 'true', replaces spoken punctuation with the corresponding symbols in
  // the request. For example, "how are you question mark" becomes "how are
  // you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
  // for support. If 'false', spoken punctuation is not replaced.
  google.protobuf.BoolValue enable_spoken_punctuation = 22;

  // The spoken emoji behavior for the call.
  // If not set, uses default behavior based on the model of choice.
  // If 'true', adds spoken emoji formatting for the request. This will replace
  // spoken emojis with the corresponding Unicode symbols in the final
  // transcript. If 'false', spoken emojis are not replaced.
  google.protobuf.BoolValue enable_spoken_emojis = 23;

  // Config to enable speaker diarization and set additional
  // parameters to make diarization better suited for your application.
  // Note: When this is enabled, we send all the words from the beginning of the
  // audio for the top alternative in every consecutive STREAMING response.
  // This is done in order to improve our speaker tags as our models learn to
  // identify the speakers in the conversation over time.
  // For non-streaming requests, the diarization results will be provided only
  // in the top alternative of the FINAL SpeechRecognitionResult.
  SpeakerDiarizationConfig diarization_config = 19;

  // Metadata regarding this request.
  RecognitionMetadata metadata = 9;

  // Which model to select for the given request. Select the model
  // best suited to your domain to get best results. If a model is not
  // explicitly specified, then we auto-select a model based on the parameters
  // in the RecognitionConfig.
  // <table>
  //   <tr>
  //     <td><b>Model</b></td>
  //     <td><b>Description</b></td>
  //   </tr>
  //   <tr>
  //     <td><code>latest_long</code></td>
  //     <td>Best for long form content like media or conversation.</td>
  //   </tr>
  //   <tr>
  //     <td><code>latest_short</code></td>
  //     <td>Best for short form content like commands or single shot directed
  //     speech.</td>
  //   </tr>
  //   <tr>
  //     <td><code>command_and_search</code></td>
  //     <td>Best for short queries such as voice commands or voice search.</td>
  //   </tr>
  //   <tr>
  //     <td><code>phone_call</code></td>
  //     <td>Best for audio that originated from a phone call (typically
  //     recorded at an 8khz sampling rate).</td>
  //   </tr>
  //   <tr>
  //     <td><code>video</code></td>
  //     <td>Best for audio that originated from video or includes multiple
  //         speakers. Ideally the audio is recorded at a 16khz or greater
  //         sampling rate. This is a premium model that costs more than the
  //         standard rate.</td>
  //   </tr>
  //   <tr>
  //     <td><code>default</code></td>
  //     <td>Best for audio that is not one of the specific audio models.
  //         For example, long-form audio. Ideally the audio is high-fidelity,
  //         recorded at a 16khz or greater sampling rate.</td>
  //   </tr>
  //   <tr>
  //     <td><code>medical_conversation</code></td>
  //     <td>Best for audio that originated from a conversation between a
  //         medical provider and patient.</td>
  //   </tr>
  //   <tr>
  //     <td><code>medical_dictation</code></td>
  //     <td>Best for audio that originated from dictation notes by a medical
  //         provider.</td>
  //   </tr>
  // </table>
  string model = 13;

  // Set to true to use an enhanced model for speech recognition.
  // If `use_enhanced` is set to true and the `model` field is not set, then
  // an appropriate enhanced model is chosen if an enhanced model exists for
  // the audio.
  //
  // If `use_enhanced` is true and an enhanced version of the specified model
  // does not exist, then the speech is recognized using the standard version
  // of the specified model.
  bool use_enhanced = 14;
}
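
// A hedged illustration of a multi-channel `RecognitionConfig` in protobuf
// text format; the channel count and model below are example values, not
// recommendations:
//
//   encoding: LINEAR16
//   sample_rate_hertz: 16000
//   language_code: "en-US"
//   audio_channel_count: 2
//   enable_separate_recognition_per_channel: true
//   model: "phone_call"
//   use_enhanced: true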

// Config to enable speaker diarization.
message SpeakerDiarizationConfig {
  // If 'true', enables speaker detection for each recognized word in
  // the top alternative of the recognition result using a speaker_label
  // provided in the WordInfo.
  bool enable_speaker_diarization = 1;

  // Minimum number of speakers in the conversation. This range gives you more
  // flexibility by allowing the system to automatically determine the correct
  // number of speakers. If not set, the default value is 2.
  int32 min_speaker_count = 2;

  // Maximum number of speakers in the conversation. This range gives you more
  // flexibility by allowing the system to automatically determine the correct
  // number of speakers. If not set, the default value is 6.
  int32 max_speaker_count = 3;

  // Output only. Unused.
  int32 speaker_tag = 5
      [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
}
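
// A minimal diarization configuration sketch in protobuf text format; the
// speaker count bounds are illustrative assumptions for a two-to-four person
// conversation:
//
//   enable_speaker_diarization: true
//   min_speaker_count: 2
//   max_speaker_count: 4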

// Description of audio data to be recognized.
message RecognitionMetadata {
  option deprecated = true;

  // Use case categories that the audio recognition request can be described
  // by.
  enum InteractionType {
    // Use case is either unknown or is something other than one of the other
    // values below.
    INTERACTION_TYPE_UNSPECIFIED = 0;

    // Multiple people in a conversation or discussion. For example in a
    // meeting with two or more people actively participating. Typically
    // all the primary people speaking would be in the same room (if not,
    // see PHONE_CALL).
    DISCUSSION = 1;

    // One or more persons lecturing or presenting to others, mostly
    // uninterrupted.
    PRESENTATION = 2;

    // A phone-call or video-conference in which two or more people, who are
    // not in the same room, are actively participating.
    PHONE_CALL = 3;

    // A recorded message intended for another person to listen to.
    VOICEMAIL = 4;

    // Professionally produced audio (e.g. TV show, podcast).
    PROFESSIONALLY_PRODUCED = 5;

    // Transcribe spoken questions and queries into text.
    VOICE_SEARCH = 6;

    // Transcribe voice commands, such as for controlling a device.
    VOICE_COMMAND = 7;

    // Transcribe speech to text to create a written document, such as a
    // text-message, email or report.
    DICTATION = 8;
  }

  // Enumerates the types of capture settings describing an audio file.
  enum MicrophoneDistance {
    // Audio type is not known.
    MICROPHONE_DISTANCE_UNSPECIFIED = 0;

    // The audio was captured from a closely placed microphone, e.g. a phone,
    // dictaphone, or handheld microphone. Generally the speaker is within
    // 1 meter of the microphone.
    NEARFIELD = 1;

    // The speaker is within 3 meters of the microphone.
    MIDFIELD = 2;

    // The speaker is more than 3 meters away from the microphone.
    FARFIELD = 3;
  }

  // The original media the speech was recorded on.
  enum OriginalMediaType {
    // Unknown original media type.
    ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0;

    // The speech data is an audio recording.
    AUDIO = 1;

    // The speech data was originally recorded on a video.
    VIDEO = 2;
  }

  // The type of device the speech was recorded with.
  enum RecordingDeviceType {
    // The recording device is unknown.
    RECORDING_DEVICE_TYPE_UNSPECIFIED = 0;

    // Speech was recorded on a smartphone.
    SMARTPHONE = 1;

    // Speech was recorded using a personal computer or tablet.
    PC = 2;

    // Speech was recorded over a phone line.
    PHONE_LINE = 3;

    // Speech was recorded in a vehicle.
    VEHICLE = 4;

    // Speech was recorded outdoors.
    OTHER_OUTDOOR_DEVICE = 5;

    // Speech was recorded indoors.
    OTHER_INDOOR_DEVICE = 6;
  }

  // The use case most closely describing the audio content to be recognized.
  InteractionType interaction_type = 1;

  // The industry vertical to which this speech recognition request most
  // closely applies. This is most indicative of the topics contained
  // in the audio. Use the 6-digit NAICS code to identify the industry
  // vertical - see https://www.naics.com/search/.
  uint32 industry_naics_code_of_audio = 3;

  // The audio type that most closely describes the audio being recognized.
  MicrophoneDistance microphone_distance = 4;

  // The original media the speech was recorded on.
  OriginalMediaType original_media_type = 5;

  // The type of device the speech was recorded with.
  RecordingDeviceType recording_device_type = 6;

  // The device used to make the recording. Examples: 'Nexus 5X',
  // 'Polycom SoundStation IP 6000', 'POTS', 'VoIP', or
  // 'Cardioid Microphone'.
  string recording_device_name = 7;

  // MIME type of the original audio file. For example `audio/m4a`,
  // `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
  // A list of possible audio MIME types is maintained at
  // http://www.iana.org/assignments/media-types/media-types.xhtml#audio
  string original_mime_type = 8;

  // Description of the content, e.g. "Recordings of federal supreme court
  // hearings from 2012".
  string audio_topic = 10;
}

// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
message SpeechContext {
  // A list of strings containing word and phrase "hints" so that
  // the speech recognition is more likely to recognize them. This can be used
  // to improve the accuracy for specific words and phrases, for example, if
  // specific commands are typically spoken by the user. This can also be used
  // to add additional words to the vocabulary of the recognizer. See
  // [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
  //
  // List items can also be set to classes for groups of words that represent
  // common concepts that occur in natural language. For example, rather than
  // providing phrase hints for every month of the year, using the $MONTH class
  // improves the likelihood of correctly transcribing audio that includes
  // months.
  repeated string phrases = 1;

  // Hint Boost. A positive value will increase the probability that a specific
  // phrase will be recognized over other similar sounding phrases. The higher
  // the boost, the higher the chance of false positive recognition as well.
  // Negative boost values would correspond to anti-biasing. Anti-biasing is not
  // enabled, so negative boost will simply be ignored. Though `boost` can
  // accept a wide range of positive values, most use cases are best served with
  // values between 0 and 20. We recommend using a binary search approach to
  // finding the optimal value for your use case.
  float boost = 4;
}
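
// An illustrative `SpeechContext` in protobuf text format; the phrases and
// the boost value are examples only, not recommended settings:
//
//   phrases: "weather in Boston"
//   phrases: "$MONTH"
//   boost: 10.0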

// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
// See [content limits](https://cloud.google.com/speech-to-text/quotas#content).
message RecognitionAudio {
  // The audio source, which is either inline content or a Google Cloud
  // Storage uri.
  oneof audio_source {
    // The audio data bytes encoded as specified in
    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    // pure binary representation, whereas JSON representations use base64.
    bytes content = 1;

    // URI that points to a file that contains audio data bytes as specified in
    // `RecognitionConfig`. The file must not be compressed (for example, gzip).
    // Currently, only Google Cloud Storage URIs are
    // supported, which must be specified in the following format:
    // `gs://bucket_name/object_name` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
    // For more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/reference-uris).
    string uri = 2;
  }
}

// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
message RecognizeResponse {
  // Sequential list of transcription results corresponding to
  // sequential portions of audio.
  repeated SpeechRecognitionResult results = 2;

  // When available, billed audio seconds for the corresponding request.
  google.protobuf.Duration total_billed_time = 3;

  // Provides information on adaptation behavior in the response.
  SpeechAdaptationInfo speech_adaptation_info = 7;

  // The ID associated with the request. This is a unique ID specific only to
  // the given request.
  int64 request_id = 8;
}
699// The only message returned to the client by the `LongRunningRecognize` method.
700// It contains the result as zero or more sequential `SpeechRecognitionResult`
701// messages. It is included in the `result.response` field of the `Operation`
702// returned by the `GetOperation` call of the `google::longrunning::Operations`
703// service.
704message LongRunningRecognizeResponse {
705  // Sequential list of transcription results corresponding to
706  // sequential portions of audio.
707  repeated SpeechRecognitionResult results = 2;
708
709  // When available, billed audio seconds for the corresponding request.
710  google.protobuf.Duration total_billed_time = 3;
711
712  // Original output config if present in the request.
713  TranscriptOutputConfig output_config = 6;
714
715  // If the transcript output fails this field contains the relevant error.
716  google.rpc.Status output_error = 7;
717
718  // Provides information on speech adaptation behavior in response
719  SpeechAdaptationInfo speech_adaptation_info = 8;
720
721  // The ID associated with the request. This is a unique ID specific only to
722  // the given request.
723  int64 request_id = 9;
724}
725
726// Describes the progress of a long-running `LongRunningRecognize` call. It is
727// included in the `metadata` field of the `Operation` returned by the
728// `GetOperation` call of the `google::longrunning::Operations` service.
729message LongRunningRecognizeMetadata {
730  // Approximate percentage of audio processed thus far. Guaranteed to be 100
731  // when the audio is fully processed and the results are available.
732  int32 progress_percent = 1;
733
734  // Time when the request was received.
735  google.protobuf.Timestamp start_time = 2;
736
737  // Time of the most recent processing update.
738  google.protobuf.Timestamp last_update_time = 3;
739
740  // Output only. The URI of the audio file being transcribed. Empty if the
741  // audio was sent as byte content.
742  string uri = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
743}
744
// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
// messages are streamed back to the client. If there is no recognizable
// audio, and `single_utterance` is set to false, then no messages are streamed
// back to the client.
//
// Here's an example of a series of `StreamingRecognizeResponse`s that might be
// returned while processing audio:
//
// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
//
// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
//
// 4. results { alternatives { transcript: "to be or not to be"
//                             confidence: 0.92 }
//              alternatives { transcript: "to bee or not to bee" }
//              is_final: true }
//
// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
//
// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
//    results { alternatives { transcript: " the question" } stability: 0.01 }
//
// 7. results { alternatives { transcript: " that is the question"
//                             confidence: 0.98 }
//              alternatives { transcript: " that was the question" }
//              is_final: true }
//
// Notes:
//
// - Only two of the above responses (#4 and #7) contain final results; they are
//   indicated by `is_final: true`. Concatenating these together generates the
//   full transcript: "to be or not to be that is the question".
//
// - The others contain interim `results`. #3 and #6 contain two interim
//   `results`: the first portion has a high stability and is less likely to
//   change; the second portion has a low stability and is very likely to
//   change. A UI designer might choose to show only high stability `results`.
//
// - The specific `stability` and `confidence` values shown above are only for
//   illustrative purposes. Actual values may vary.
//
// - In each response, only one of these fields will be set:
//     `error`,
//     `speech_event_type`, or
//     one or more (repeated) `results`.
message StreamingRecognizeResponse {
  // Indicates the type of speech event.
  enum SpeechEventType {
    // No speech event specified.
    SPEECH_EVENT_UNSPECIFIED = 0;

    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection. This event is only sent if
    // `single_utterance` was set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 1;

    // This event indicates that the server has detected the beginning of human
    // voice activity in the stream. This event can be returned multiple times
    // if speech starts and stops repeatedly throughout the stream. This event
    // is only sent if `enable_voice_activity_events` is set to true.
    SPEECH_ACTIVITY_BEGIN = 2;

    // This event indicates that the server has detected the end of human voice
    // activity in the stream. This event can be returned multiple times if
    // speech starts and stops repeatedly throughout the stream. This event is
    // only sent if `enable_voice_activity_events` is set to true.
    SPEECH_ACTIVITY_END = 3;

    // This event indicates that the user-set timeout for speech activity begin
    // or end has been exceeded. Upon receiving this event, the client is
    // expected to send a half close. Further audio will not be processed.
    SPEECH_ACTIVITY_TIMEOUT = 4;
  }

  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
  // specifies the error for the operation.
  google.rpc.Status error = 1;

  // This repeated list contains zero or more results that
  // correspond to consecutive portions of the audio currently being processed.
  // It contains zero or one `is_final=true` result (the newly settled portion),
  // followed by zero or more `is_final=false` results (the interim results).
  repeated StreamingRecognitionResult results = 2;

  // Indicates the type of speech event.
  SpeechEventType speech_event_type = 4;

  // Time offset between the beginning of the audio and event emission.
  google.protobuf.Duration speech_event_time = 8;

  // When available, billed audio seconds for the stream.
  // Set only if this is the last response in the stream.
  google.protobuf.Duration total_billed_time = 5;

  // Provides information on adaptation behavior in the response.
  SpeechAdaptationInfo speech_adaptation_info = 9;

  // The ID associated with the request. This is a unique ID specific only to
  // the given request.
  int64 request_id = 10;
}

// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
message StreamingRecognitionResult {
  // May contain one or more recognition hypotheses (up to the
  // maximum specified in `max_alternatives`).
  // These alternatives are ordered in terms of accuracy, with the top (first)
  // alternative being the most probable, as ranked by the recognizer.
  repeated SpeechRecognitionAlternative alternatives = 1;

  // If `false`, this `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, this is the final time the
  // speech service will return this particular `StreamingRecognitionResult`;
  // the recognizer will not return any further hypotheses for this portion of
  // the transcript and the corresponding audio.
  bool is_final = 2;

  // An estimate of the likelihood that the recognizer will not
  // change its guess about this interim result. Values range from 0.0
  // (completely unstable) to 1.0 (completely stable).
  // This field is only provided for interim results (`is_final=false`).
  // The default of 0.0 is a sentinel value indicating `stability` was not set.
  float stability = 3;

  // Time offset of the end of this result relative to the
  // beginning of the audio.
  google.protobuf.Duration result_end_time = 4;

  // For multi-channel audio, this is the channel number corresponding to the
  // recognized result for the audio from that channel.
  // For audio_channel_count = N, its output values can range from '1' to 'N'.
  int32 channel_tag = 5;

  // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag of the language in this result. This language code was
  // detected to have the most likelihood of being spoken in the audio.
  string language_code = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// A speech recognition result corresponding to a portion of the audio.
message SpeechRecognitionResult {
  // May contain one or more recognition hypotheses (up to the
  // maximum specified in `max_alternatives`).
  // These alternatives are ordered in terms of accuracy, with the top (first)
  // alternative being the most probable, as ranked by the recognizer.
  repeated SpeechRecognitionAlternative alternatives = 1;

  // For multi-channel audio, this is the channel number corresponding to the
  // recognized result for the audio from that channel.
  // For audio_channel_count = N, its output values can range from '1' to 'N'.
  int32 channel_tag = 2;

  // Time offset of the end of this result relative to the
  // beginning of the audio.
  google.protobuf.Duration result_end_time = 4;

  // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag of the language in this result. This language code was
  // detected to have the most likelihood of being spoken in the audio.
  string language_code = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
}
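
// A hedged sketch of a final `SpeechRecognitionResult` in protobuf text
// format; the transcript, confidence, and timing values are illustrative:
//
//   alternatives {
//     transcript: "how old is the Brooklyn Bridge"
//     confidence: 0.98
//   }
//   result_end_time { seconds: 4 nanos: 500000000 }
//   language_code: "en-US"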

// Alternative hypotheses (a.k.a. n-best list).
message SpeechRecognitionAlternative {
  // Transcript text representing the words that the user spoke.
  // In languages that use spaces to separate words, the transcript might have a
  // leading space if it isn't the first result. You can concatenate each result
  // to obtain the full transcript without using a separator.
  string transcript = 1;

  // The confidence estimate between 0.0 and 1.0. A higher number
  // indicates an estimated greater likelihood that the recognized words are
  // correct. This field is set only for the top alternative of a non-streaming
  // result or of a streaming result where `is_final=true`.
  // This field is not guaranteed to be accurate and users should not rely on it
  // to be always provided.
  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
  float confidence = 2;

  // A list of word-specific information for each recognized word.
  // Note: When `enable_speaker_diarization` is true, you will see all the words
  // from the beginning of the audio.
  repeated WordInfo words = 3;
}

// Word-specific information for recognized words.
message WordInfo {
  // Time offset relative to the beginning of the audio,
  // and corresponding to the start of the spoken word.
  // This field is only set if `enable_word_time_offsets=true` and only
  // in the top hypothesis.
  // This is an experimental feature and the accuracy of the time offset can
  // vary.
  google.protobuf.Duration start_time = 1;

  // Time offset relative to the beginning of the audio,
  // and corresponding to the end of the spoken word.
  // This field is only set if `enable_word_time_offsets=true` and only
  // in the top hypothesis.
  // This is an experimental feature and the accuracy of the time offset can
  // vary.
  google.protobuf.Duration end_time = 2;

  // The word corresponding to this set of information.
  string word = 3;

  // The confidence estimate between 0.0 and 1.0. A higher number
  // indicates an estimated greater likelihood that the recognized words are
  // correct. This field is set only for the top alternative of a non-streaming
  // result or of a streaming result where `is_final=true`.
  // This field is not guaranteed to be accurate and users should not rely on it
  // to be always provided.
  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
  float confidence = 4;

  // Output only. A distinct integer value is assigned for every speaker within
  // the audio. This field specifies which one of those speakers was detected to
  // have spoken this word. Value ranges from '1' to diarization_speaker_count.
  // speaker_tag is set if enable_speaker_diarization = 'true' and only for the
  // top alternative.
  // Note: Use speaker_label instead.
  int32 speaker_tag = 5
      [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. A label value assigned for every unique speaker within the
  // audio. This field specifies which speaker was detected to have spoken this
  // word. For some models, like medical_conversation, this can be the actual
  // speaker role, for example "patient" or "provider", but generally this would
  // be a number identifying a speaker. This field is only set if
  // enable_speaker_diarization = 'true' and only for the top alternative.
  string speaker_label = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
}
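
// A hedged example of a `WordInfo` entry in protobuf text format; the timing,
// confidence, and speaker values are illustrative only:
//
//   start_time { seconds: 1 nanos: 300000000 }
//   end_time { seconds: 1 nanos: 700000000 }
//   word: "bridge"
//   confidence: 0.97
//   speaker_label: "1"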

// Information on speech adaptation use in results.
message SpeechAdaptationInfo {
  // Whether there was a timeout when applying speech adaptation. If true,
  // adaptation had no effect in the response transcript.
  bool adaptation_timeout = 1;

  // If set, returns a message specifying which part of the speech adaptation
  // request timed out.
  string timeout_message = 4;
}