// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.speech.v1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/cloud/speech/v1/resource.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";

option cc_enable_arenas = true;
option go_package = "cloud.google.com/go/speech/apiv1/speechpb;speechpb";
option java_multiple_files = true;
option java_outer_classname = "SpeechProto";
option java_package = "com.google.cloud.speech.v1";
option objc_class_prefix = "GCS";

// Service that implements Google Cloud Speech API.
service Speech {
  option (google.api.default_host) = "speech.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Performs synchronous speech recognition: receive results after all audio
  // has been sent and processed.
  rpc Recognize(RecognizeRequest) returns (RecognizeResponse) {
    option (google.api.http) = {
      post: "/v1/speech:recognize"
      body: "*"
    };
    option (google.api.method_signature) = "config,audio";
  }

  // Performs asynchronous speech recognition: receive results via the
  // google.longrunning.Operations interface. Returns either an
  // `Operation.error` or an `Operation.response` which contains
  // a `LongRunningRecognizeResponse` message.
  // For more information on asynchronous speech recognition, see the
  // [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
  rpc LongRunningRecognize(LongRunningRecognizeRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/speech:longrunningrecognize"
      body: "*"
    };
    option (google.api.method_signature) = "config,audio";
    option (google.longrunning.operation_info) = {
      response_type: "LongRunningRecognizeResponse"
      metadata_type: "LongRunningRecognizeMetadata"
    };
  }

  // Performs bidirectional streaming speech recognition: receive results while
  // sending audio. This method is only available via the gRPC API (not REST).
  rpc StreamingRecognize(stream StreamingRecognizeRequest)
      returns (stream StreamingRecognizeResponse) {}
}

// The top-level message sent by the client for the `Recognize` method.
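//
// For example, a minimal request, sketched in proto text format (the field
// values are illustrative, not defaults):
//
//     config {
//       encoding: LINEAR16
//       sample_rate_hertz: 16000
//       language_code: "en-US"
//     }
//     audio {
//       uri: "gs://bucket_name/object_name"
//     }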
message RecognizeRequest {
  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The audio data to be recognized.
  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];
}

// The top-level message sent by the client for the `LongRunningRecognize`
// method.
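//
// In addition to `config` and `audio`, the request may name a destination for
// the results; a sketch in proto text format (the URI is illustrative):
//
//     output_config {
//       gcs_uri: "gs://bucket_name/object_name"
//     }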
message LongRunningRecognizeRequest {
  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The audio data to be recognized.
  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. Specifies an optional destination for the recognition results.
  TranscriptOutputConfig output_config = 4
      [(google.api.field_behavior) = OPTIONAL];
}

// Specifies an optional destination for the recognition results.
message TranscriptOutputConfig {
  oneof output_type {
    // Specifies a Cloud Storage URI for the recognition results. Must be
    // specified in the format: `gs://bucket_name/object_name`, and the bucket
    // must already exist.
    string gcs_uri = 1;
  }
}

// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
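//
// For example, a plausible request sequence, sketched in proto text format
// (the config values and audio payloads are illustrative):
//
//     1. streaming_config {
//          config {
//            encoding: LINEAR16
//            sample_rate_hertz: 16000
//            language_code: "en-US"
//          }
//          interim_results: true
//        }
//     2. audio_content: <first chunk of audio bytes>
//     3. audio_content: <next chunk of audio bytes>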
message StreamingRecognizeRequest {
  // The streaming request, which is either a streaming config or audio content.
  oneof streaming_request {
    // Provides information to the recognizer that specifies how to process the
    // request. The first `StreamingRecognizeRequest` message must contain a
    // `streaming_config` message.
    StreamingRecognitionConfig streaming_config = 1;

    // The audio data to be recognized. Sequential chunks of audio data are sent
    // in sequential `StreamingRecognizeRequest` messages. The first
    // `StreamingRecognizeRequest` message must not contain `audio_content` data
    // and all subsequent `StreamingRecognizeRequest` messages must contain
    // `audio_content` data. The audio bytes must be encoded as specified in
    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    // pure binary representation (not base64). See
    // [content limits](https://cloud.google.com/speech-to-text/quotas#content).
    bytes audio_content = 2;
  }
}

// Provides information to the recognizer that specifies how to process the
// request.
message StreamingRecognitionConfig {
  // Events that a timeout can be set on for voice activity.
  message VoiceActivityTimeout {
    // Duration to timeout the stream if no speech begins.
    google.protobuf.Duration speech_start_timeout = 1;

    // Duration to timeout the stream after speech ends.
    google.protobuf.Duration speech_end_timeout = 2;
  }

  // Required. Provides information to the recognizer that specifies how to
  // process the request.
  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // If `false` or omitted, the recognizer will perform continuous
  // recognition (continuing to wait for and process audio even if the user
  // pauses speaking) until the client closes the input stream (gRPC API) or
  // until the maximum time limit has been reached. May return multiple
  // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
  //
  // If `true`, the recognizer will detect a single spoken utterance. When it
  // detects that the user has paused or stopped speaking, it will return an
  // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
  // more than one `StreamingRecognitionResult` with the `is_final` flag set to
  // `true`.
  //
  // The `single_utterance` field can only be used with specified models,
  // otherwise an error is thrown. The `model` field in [`RecognitionConfig`][]
  // must be set to:
  //
  // * `command_and_search`
  // * `phone_call` AND additional field `useEnhanced`=`true`
  // * The `model` field is left undefined. In this case the API auto-selects
  //   a model based on any other parameters that you set in
  //   `RecognitionConfig`.
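  //
  // For example, a sketch in proto text format (the model choice is
  // illustrative):
  //
  //     config {
  //       model: "command_and_search"
  //       language_code: "en-US"
  //     }
  //     single_utterance: true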
  bool single_utterance = 2;

  // If `true`, interim results (tentative hypotheses) may be
  // returned as they become available (these interim results are indicated with
  // the `is_final=false` flag).
  // If `false` or omitted, only `is_final=true` result(s) are returned.
  bool interim_results = 3;

  // If `true`, responses with voice activity speech events will be returned as
  // they are detected.
  bool enable_voice_activity_events = 5;

  // If set, the server will automatically close the stream after the specified
  // duration has elapsed after the last VOICE_ACTIVITY speech event has been
  // sent. The field `voice_activity_events` must also be set to true.
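  //
  // For example, a sketch in proto text format (the durations are
  // illustrative):
  //
  //     enable_voice_activity_events: true
  //     voice_activity_timeout {
  //       speech_start_timeout { seconds: 5 }
  //       speech_end_timeout { seconds: 2 }
  //     }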
  VoiceActivityTimeout voice_activity_timeout = 6;
}

// Provides information to the recognizer that specifies how to process the
// request.
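//
// A representative configuration, sketched in proto text format (the values
// are illustrative, not defaults):
//
//     encoding: FLAC
//     sample_rate_hertz: 16000
//     language_code: "en-US"
//     max_alternatives: 2
//     enable_word_time_offsets: true
//     model: "latest_long"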
message RecognitionConfig {
  // The encoding of the audio data sent in the request.
  //
  // All encodings support only 1 channel (mono) audio, unless the
  // `audio_channel_count` and `enable_separate_recognition_per_channel` fields
  // are set.
  //
  // For best results, the audio source should be captured and transmitted using
  // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
  // recognition can be reduced if lossy codecs are used to capture or transmit
  // audio, particularly if background noise is present. Lossy codecs include
  // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`,
  // and `WEBM_OPUS`.
  //
  // The `FLAC` and `WAV` audio file formats include a header that describes the
  // included audio content. You can request recognition for `WAV` files that
  // contain either `LINEAR16` or `MULAW` encoded audio.
  // If you send `FLAC` or `WAV` audio file format in
  // your request, you do not need to specify an `AudioEncoding`; the audio
  // encoding format is determined from the file header. If you specify
  // an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
  // encoding configuration must match the encoding described in the audio
  // header; otherwise the request returns an
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
  // code.
  enum AudioEncoding {
    // Not specified.
    ENCODING_UNSPECIFIED = 0;

    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1;

    // `FLAC` (Free Lossless Audio
    // Codec) is the recommended encoding because it is
    // lossless (therefore recognition is not compromised) and
    // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
    // encoding supports 16-bit and 24-bit samples, however, not all fields in
    // `STREAMINFO` are supported.
    FLAC = 2;

    // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3;

    // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
    AMR = 4;

    // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
    AMR_WB = 5;

    // Opus encoded audio frames in Ogg container
    // ([OggOpus](https://wiki.xiph.org/OggOpus)).
    // `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
    OGG_OPUS = 6;

    // Although the use of lossy encodings is not recommended, if a very low
    // bitrate encoding is required, `OGG_OPUS` is highly preferred over
    // Speex encoding. The [Speex](https://speex.org/) encoding supported by
    // Cloud Speech API has a header byte in each block, as in MIME type
    // `audio/x-speex-with-header-byte`.
    // It is a variant of the RTP Speex encoding defined in
    // [RFC 5574](https://tools.ietf.org/html/rfc5574).
    // The stream is a sequence of blocks, one block per RTP packet. Each block
    // starts with a byte containing the length of the block, in bytes, followed
    // by one or more frames of Speex data, padded to an integral number of
    // bytes (octets) as specified in RFC 5574. In other words, each RTP header
    // is replaced with a single byte containing the block length. Only Speex
    // wideband is supported. `sample_rate_hertz` must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7;

    // Opus encoded audio frames in WebM container
    // ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
    // one of 8000, 12000, 16000, 24000, or 48000.
    WEBM_OPUS = 9;
  }

  // Encoding of audio data sent in all `RecognitionAudio` messages.
  // This field is optional for `FLAC` and `WAV` audio files and required
  // for all other audio formats. For details, see
  // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  AudioEncoding encoding = 1;

  // Sample rate in Hertz of the audio data sent in all
  // `RecognitionAudio` messages. Valid values are: 8000-48000.
  // 16000 is optimal. For best results, set the sampling rate of the audio
  // source to 16000 Hz. If that's not possible, use the native sample rate of
  // the audio source (instead of re-sampling).
  // This field is optional for FLAC and WAV audio files, but is
  // required for all other audio formats. For details, see
  // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  int32 sample_rate_hertz = 2;

  // The number of channels in the input audio data.
  // ONLY set this for MULTI-CHANNEL recognition.
  // Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
  // Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
  // If `0` or omitted, defaults to one channel (mono).
  // Note: We only recognize the first channel by default.
  // To perform independent recognition on each channel set
  // `enable_separate_recognition_per_channel` to `true`.
  int32 audio_channel_count = 7;

  // This needs to be set to `true` explicitly and `audio_channel_count` > 1
  // to get each channel recognized separately. The recognition result will
  // contain a `channel_tag` field to state which channel that result belongs
  // to. If this is not true, we will only recognize the first channel. The
  // request is billed cumulatively for all channels recognized:
  // `audio_channel_count` multiplied by the length of the audio.
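  //
  // For example, to recognize each channel of two-channel audio separately,
  // a sketch in proto text format:
  //
  //     audio_channel_count: 2
  //     enable_separate_recognition_per_channel: true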
  bool enable_separate_recognition_per_channel = 12;

  // Required. The language of the supplied audio as a
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  // Example: "en-US".
  // See [Language
  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
  // of the currently supported language codes.
  string language_code = 3 [(google.api.field_behavior) = REQUIRED];

  // A list of up to 3 additional
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
  // listing possible alternative languages of the supplied audio.
  // See [Language
  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
  // of the currently supported language codes. If alternative languages are
  // listed, the recognition result will contain recognition in the most likely
  // language detected, including the main language_code. The recognition result
  // will include the language tag of the language detected in the audio. Note:
  // This feature is only supported for Voice Command and Voice Search use cases
  // and performance may vary for other use cases (e.g., phone call
  // transcription).
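  //
  // For example, a sketch in proto text format (the language choices are
  // illustrative):
  //
  //     language_code: "en-US"
  //     alternative_language_codes: "es-US"
  //     alternative_language_codes: "fr-FR"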
  repeated string alternative_language_codes = 18;

  // Maximum number of recognition hypotheses to be returned.
  // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
  // within each `SpeechRecognitionResult`.
  // The server may return fewer than `max_alternatives`.
  // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
  // one. If omitted, it will return a maximum of one.
  int32 max_alternatives = 4;

  // If set to `true`, the server will attempt to filter out
  // profanities, replacing all but the initial character in each filtered word
  // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
  // won't be filtered out.
  bool profanity_filter = 5;

  // Speech adaptation configuration improves the accuracy of speech
  // recognition. For more information, see the [speech
  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
  // documentation.
  // When speech adaptation is set, it supersedes the `speech_contexts` field.
  SpeechAdaptation adaptation = 20;

  // Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
  // A means to provide context to assist the speech recognition. For more
  // information, see
  // [speech
  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
  repeated SpeechContext speech_contexts = 6;

  // If `true`, the top result includes a list of words and
  // the start and end time offsets (timestamps) for those words. If
  // `false`, no word-level time offset information is returned. The default is
  // `false`.
  bool enable_word_time_offsets = 8;

  // If `true`, the top result includes a list of words and the
  // confidence for those words. If `false`, no word-level confidence
  // information is returned. The default is `false`.
  bool enable_word_confidence = 15;
  // If `true`, adds punctuation to recognition result hypotheses.
  // This feature is only available in select languages. Setting this for
  // requests in other languages has no effect at all.
  // The default `false` value does not add punctuation to result hypotheses.
  bool enable_automatic_punctuation = 11;

  // The spoken punctuation behavior for the call.
  // If not set, uses default behavior based on the model of choice,
  // e.g. command_and_search will enable spoken punctuation by default.
  // If `true`, replaces spoken punctuation with the corresponding symbols in
  // the request. For example, "how are you question mark" becomes "how are
  // you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
  // for support. If `false`, spoken punctuation is not replaced.
  google.protobuf.BoolValue enable_spoken_punctuation = 22;

  // The spoken emoji behavior for the call.
  // If not set, uses default behavior based on the model of choice.
  // If `true`, adds spoken emoji formatting for the request. This will replace
  // spoken emojis with the corresponding Unicode symbols in the final
  // transcript. If `false`, spoken emojis are not replaced.
  google.protobuf.BoolValue enable_spoken_emojis = 23;

  // Config to enable speaker diarization and set additional
  // parameters to make diarization better suited for your application.
  // Note: When this is enabled, we send all the words from the beginning of the
  // audio for the top alternative in every consecutive STREAMING response.
  // This is done in order to improve our speaker tags as our models learn to
  // identify the speakers in the conversation over time.
  // For non-streaming requests, the diarization results will be provided only
  // in the top alternative of the FINAL SpeechRecognitionResult.
  SpeakerDiarizationConfig diarization_config = 19;

  // Metadata regarding this request.
  RecognitionMetadata metadata = 9;

  // Which model to select for the given request. Select the model
  // best suited to your domain to get best results. If a model is not
  // explicitly specified, then we auto-select a model based on the parameters
  // in the RecognitionConfig.
  // <table>
  //   <tr>
  //     <td><b>Model</b></td>
  //     <td><b>Description</b></td>
  //   </tr>
  //   <tr>
  //     <td><code>latest_long</code></td>
  //     <td>Best for long form content like media or conversation.</td>
  //   </tr>
  //   <tr>
  //     <td><code>latest_short</code></td>
  //     <td>Best for short form content like commands or single shot directed
  //     speech.</td>
  //   </tr>
  //   <tr>
  //     <td><code>command_and_search</code></td>
  //     <td>Best for short queries such as voice commands or voice search.</td>
  //   </tr>
  //   <tr>
  //     <td><code>phone_call</code></td>
  //     <td>Best for audio that originated from a phone call (typically
  //     recorded at an 8 kHz sampling rate).</td>
  //   </tr>
  //   <tr>
  //     <td><code>video</code></td>
  //     <td>Best for audio that originated from video or includes multiple
  //         speakers. Ideally the audio is recorded at a 16 kHz or greater
  //         sampling rate. This is a premium model that costs more than the
  //         standard rate.</td>
  //   </tr>
  //   <tr>
  //     <td><code>default</code></td>
  //     <td>Best for audio that is not one of the specific audio models.
  //         For example, long-form audio. Ideally the audio is high-fidelity,
  //         recorded at a 16 kHz or greater sampling rate.</td>
  //   </tr>
  //   <tr>
  //     <td><code>medical_conversation</code></td>
  //     <td>Best for audio that originated from a conversation between a
  //         medical provider and patient.</td>
  //   </tr>
  //   <tr>
  //     <td><code>medical_dictation</code></td>
  //     <td>Best for audio that originated from dictation notes by a medical
  //         provider.</td>
  //   </tr>
  // </table>
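  //
  // For example, a sketch in proto text format combining `model` with
  // `use_enhanced` (the model choice is illustrative):
  //
  //     model: "phone_call"
  //     use_enhanced: true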
  string model = 13;

  // Set to true to use an enhanced model for speech recognition.
  // If `use_enhanced` is set to true and the `model` field is not set, then
  // an appropriate enhanced model is chosen if an enhanced model exists for
  // the audio.
  //
  // If `use_enhanced` is true and an enhanced version of the specified model
  // does not exist, then the speech is recognized using the standard version
  // of the specified model.
  bool use_enhanced = 14;
}

// Config to enable speaker diarization.
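//
// For example, a sketch in proto text format (the speaker counts are
// illustrative):
//
//     enable_speaker_diarization: true
//     min_speaker_count: 2
//     max_speaker_count: 4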
message SpeakerDiarizationConfig {
  // If `true`, enables speaker detection for each recognized word in
  // the top alternative of the recognition result using a speaker_tag provided
  // in the WordInfo.
  bool enable_speaker_diarization = 1;

  // Minimum number of speakers in the conversation. This range gives you more
  // flexibility by allowing the system to automatically determine the correct
  // number of speakers. If not set, the default value is 2.
  int32 min_speaker_count = 2;

  // Maximum number of speakers in the conversation. This range gives you more
  // flexibility by allowing the system to automatically determine the correct
  // number of speakers. If not set, the default value is 6.
  int32 max_speaker_count = 3;

  // Output only. Unused.
  int32 speaker_tag = 5
      [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
}

// Description of audio data to be recognized.
message RecognitionMetadata {
  option deprecated = true;

  // Use case categories that the audio recognition request can be described
  // by.
  enum InteractionType {
    // Use case is either unknown or is something other than one of the other
    // values below.
    INTERACTION_TYPE_UNSPECIFIED = 0;

    // Multiple people in a conversation or discussion. For example in a
    // meeting with two or more people actively participating. Typically
    // all the primary people speaking would be in the same room (if not,
    // see PHONE_CALL).
    DISCUSSION = 1;

    // One or more persons lecturing or presenting to others, mostly
    // uninterrupted.
    PRESENTATION = 2;

    // A phone-call or video-conference in which two or more people, who are
    // not in the same room, are actively participating.
    PHONE_CALL = 3;

    // A recorded message intended for another person to listen to.
    VOICEMAIL = 4;
    // Professionally produced audio (e.g. TV show, podcast).
    PROFESSIONALLY_PRODUCED = 5;

    // Transcribe spoken questions and queries into text.
    VOICE_SEARCH = 6;

    // Transcribe voice commands, such as for controlling a device.
    VOICE_COMMAND = 7;

    // Transcribe speech to text to create a written document, such as a
    // text-message, email or report.
    DICTATION = 8;
  }

  // Enumerates the types of capture settings describing an audio file.
  enum MicrophoneDistance {
    // Audio type is not known.
    MICROPHONE_DISTANCE_UNSPECIFIED = 0;

    // The audio was captured from a closely placed microphone. E.g. phone,
    // dictaphone, or handheld microphone. Generally, if the speaker is within
    // 1 meter of the microphone.
    NEARFIELD = 1;

    // The speaker is within 3 meters of the microphone.
    MIDFIELD = 2;

    // The speaker is more than 3 meters away from the microphone.
    FARFIELD = 3;
  }

  // The original media the speech was recorded on.
  enum OriginalMediaType {
    // Unknown original media type.
    ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0;

    // The speech data is an audio recording.
    AUDIO = 1;

    // The speech data was originally recorded on a video.
    VIDEO = 2;
  }

  // The type of device the speech was recorded with.
  enum RecordingDeviceType {
    // The recording device is unknown.
    RECORDING_DEVICE_TYPE_UNSPECIFIED = 0;

    // Speech was recorded on a smartphone.
    SMARTPHONE = 1;

    // Speech was recorded using a personal computer or tablet.
    PC = 2;

    // Speech was recorded over a phone line.
    PHONE_LINE = 3;

    // Speech was recorded in a vehicle.
    VEHICLE = 4;

    // Speech was recorded outdoors.
    OTHER_OUTDOOR_DEVICE = 5;

    // Speech was recorded indoors.
    OTHER_INDOOR_DEVICE = 6;
  }

  // The use case most closely describing the audio content to be recognized.
  InteractionType interaction_type = 1;

  // The industry vertical to which this speech recognition request most
  // closely applies. This is most indicative of the topics contained
  // in the audio. Use the 6-digit NAICS code to identify the industry
  // vertical; see https://www.naics.com/search/.
  uint32 industry_naics_code_of_audio = 3;

  // The audio type that most closely describes the audio being recognized.
  MicrophoneDistance microphone_distance = 4;

  // The original media the speech was recorded on.
  OriginalMediaType original_media_type = 5;

  // The type of device the speech was recorded with.
  RecordingDeviceType recording_device_type = 6;

  // The device used to make the recording. Examples: 'Nexus 5X',
  // 'Polycom SoundStation IP 6000', 'POTS', 'VoIP', or
  // 'Cardioid Microphone'.
  string recording_device_name = 7;

  // MIME type of the original audio file. For example `audio/m4a`,
  // `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
  // A list of possible audio MIME types is maintained at
  // http://www.iana.org/assignments/media-types/media-types.xhtml#audio
  string original_mime_type = 8;

  // Description of the content. E.g. "Recordings of federal supreme court
  // hearings from 2012".
  string audio_topic = 10;
}

// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
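//
// For example, a sketch in proto text format (the phrases and boost value
// are illustrative):
//
//     phrases: "weather"
//     phrases: "$MONTH"
//     boost: 10.0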
message SpeechContext {
  // A list of strings containing word and phrase "hints" so that
  // the speech recognition is more likely to recognize them. This can be used
  // to improve the accuracy for specific words and phrases, for example, if
  // specific commands are typically spoken by the user. This can also be used
  // to add additional words to the vocabulary of the recognizer. See
  // [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
  //
  // List items can also be set to classes for groups of words that represent
  // common concepts that occur in natural language. For example, rather than
  // providing phrase hints for every month of the year, using the $MONTH class
  // improves the likelihood of correctly transcribing audio that includes
  // months.
  repeated string phrases = 1;

  // Hint Boost. Positive value will increase the probability that a specific
  // phrase will be recognized over other similar sounding phrases. The higher
  // the boost, the higher the chance of false positive recognition as well.
  // Negative boost values would correspond to anti-biasing. Anti-biasing is not
  // enabled, so negative boost will simply be ignored. Though `boost` can
  // accept a wide range of positive values, most use cases are best served with
  // values between 0 and 20. We recommend using a binary search approach to
  // finding the optimal value for your use case.
  float boost = 4;
}

// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
// See [content limits](https://cloud.google.com/speech-to-text/quotas#content).
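//
// For example, a sketch in proto text format using the Cloud Storage form of
// the audio source:
//
//     uri: "gs://bucket_name/object_name"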
message RecognitionAudio {
  // The audio source, which is either inline content or a Google Cloud
  // Storage URI.
  oneof audio_source {
    // The audio data bytes encoded as specified in
    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    // pure binary representation, whereas JSON representations use base64.
    bytes content = 1;

    // URI that points to a file that contains audio data bytes as specified in
    // `RecognitionConfig`. The file must not be compressed (for example, gzip).
    // Currently, only Google Cloud Storage URIs are
    // supported, which must be specified in the following format:
    // `gs://bucket_name/object_name` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
    // For more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/reference-uris).
    string uri = 2;
  }
}

// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
message RecognizeResponse {
  // Sequential list of transcription results corresponding to
  // sequential portions of audio.
  repeated SpeechRecognitionResult results = 2;

  // When available, billed audio seconds for the corresponding request.
  google.protobuf.Duration total_billed_time = 3;

  // Provides information on adaptation behavior in the response.
  SpeechAdaptationInfo speech_adaptation_info = 7;

  // The ID associated with the request. This is a unique ID specific only to
  // the given request.
  int64 request_id = 8;
}

// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
message LongRunningRecognizeResponse {
  // Sequential list of transcription results corresponding to
  // sequential portions of audio.
  repeated SpeechRecognitionResult results = 2;

  // When available, billed audio seconds for the corresponding request.
  google.protobuf.Duration total_billed_time = 3;

  // Original output config if present in the request.
  TranscriptOutputConfig output_config = 6;

  // If the transcript output fails, this field contains the relevant error.
  google.rpc.Status output_error = 7;

  // Provides information on speech adaptation behavior in the response.
  SpeechAdaptationInfo speech_adaptation_info = 8;

  // The ID associated with the request. This is a unique ID specific only to
  // the given request.
  int64 request_id = 9;
}

// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
message LongRunningRecognizeMetadata {
  // Approximate percentage of audio processed thus far. Guaranteed to be 100
  // when the audio is fully processed and the results are available.
  int32 progress_percent = 1;

  // Time when the request was received.
  google.protobuf.Timestamp start_time = 2;

  // Time of the most recent processing update.
  google.protobuf.Timestamp last_update_time = 3;

  // Output only. The URI of the audio file being transcribed. Empty if the
  // audio was sent as byte content.
  string uri = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
// messages are streamed back to the client. If there is no recognizable
// audio, and `single_utterance` is set to false, then no messages are streamed
// back to the client.
//
// Here's an example of a series of `StreamingRecognizeResponse`s that might be
// returned while processing audio:
//
// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
//
// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
//
// 4. results { alternatives { transcript: "to be or not to be"
//                             confidence: 0.92 }
//              alternatives { transcript: "to bee or not to bee" }
//              is_final: true }
//
// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
//
// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
//    results { alternatives { transcript: " the question" } stability: 0.01 }
//
// 7. results { alternatives { transcript: " that is the question"
//                             confidence: 0.98 }
//              alternatives { transcript: " that was the question" }
//              is_final: true }
//
// Notes:
//
// - Only two of the above responses, #4 and #7, contain final results; they are
//   indicated by `is_final: true`. Concatenating these together generates the
//   full transcript: "to be or not to be that is the question".
//
// - The others contain interim `results`. #3 and #6 contain two interim
//   `results`: the first portion has a high stability and is less likely to
//   change; the second portion has a low stability and is very likely to
//   change. A UI designer might choose to show only high stability `results`.
//
// - The specific `stability` and `confidence` values shown above are only for
//   illustrative purposes. Actual values may vary.
//
// - In each response, only one of these fields will be set:
//     `error`,
//     `speech_event_type`, or
//     one or more (repeated) `results`.
message StreamingRecognizeResponse {
  // Indicates the type of speech event.
  enum SpeechEventType {
    // No speech event specified.
    SPEECH_EVENT_UNSPECIFIED = 0;

    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection. This event is only sent if
    // `single_utterance` was set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 1;

    // This event indicates that the server has detected the beginning of human
    // voice activity in the stream. This event can be returned multiple times
    // if speech starts and stops repeatedly throughout the stream. This event
    // is only sent if `voice_activity_events` is set to true.
    SPEECH_ACTIVITY_BEGIN = 2;

    // This event indicates that the server has detected the end of human voice
    // activity in the stream. This event can be returned multiple times if
    // speech starts and stops repeatedly throughout the stream. This event is
    // only sent if `voice_activity_events` is set to true.
    SPEECH_ACTIVITY_END = 3;

    // This event indicates that the user-set timeout for speech activity begin
    // or end has been exceeded. Upon receiving this event, the client is
    // expected to send a half close. Further audio will not be processed.
    SPEECH_ACTIVITY_TIMEOUT = 4;
  }

  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
  // specifies the error for the operation.
  google.rpc.Status error = 1;

  // This repeated list contains zero or more results that
  // correspond to consecutive portions of the audio currently being processed.
  // It contains zero or one `is_final=true` result (the newly settled portion),
  // followed by zero or more `is_final=false` results (the interim results).
  repeated StreamingRecognitionResult results = 2;

  // Indicates the type of speech event.
  SpeechEventType speech_event_type = 4;

  // Time offset between the beginning of the audio and event emission.
  google.protobuf.Duration speech_event_time = 8;

  // When available, billed audio seconds for the stream.
  // Set only if this is the last response in the stream.
  google.protobuf.Duration total_billed_time = 5;

  // Provides information on adaptation behavior in the response.
  SpeechAdaptationInfo speech_adaptation_info = 9;

  // The ID associated with the request. This is a unique ID specific only to
  // the given request.
  int64 request_id = 10;
}

// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
message StreamingRecognitionResult {
  // May contain one or more recognition hypotheses (up to the
  // maximum specified in `max_alternatives`).
  // These alternatives are ordered in terms of accuracy, with the top (first)
  // alternative being the most probable, as ranked by the recognizer.
  repeated SpeechRecognitionAlternative alternatives = 1;

  // If `false`, this `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, this is the final time the
  // speech service will return this particular `StreamingRecognitionResult`;
  // the recognizer will not return any further hypotheses for this portion of
  // the transcript and corresponding audio.
  bool is_final = 2;

  // An estimate of the likelihood that the recognizer will not
  // change its guess about this interim result. Values range from 0.0
  // (completely unstable) to 1.0 (completely stable).
  // This field is only provided for interim results (`is_final=false`).
  // The default of 0.0 is a sentinel value indicating `stability` was not set.
  float stability = 3;

  // Time offset of the end of this result relative to the
  // beginning of the audio.
  google.protobuf.Duration result_end_time = 4;

  // For multi-channel audio, this is the channel number corresponding to the
  // recognized result for the audio from that channel.
  // For audio_channel_count = N, its output values can range from '1' to 'N'.
  int32 channel_tag = 5;

  // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag of the language in this result. This language code was
  // detected to have the most likelihood of being spoken in the audio.
  string language_code = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// A speech recognition result corresponding to a portion of the audio.
message SpeechRecognitionResult {
  // May contain one or more recognition hypotheses (up to the
  // maximum specified in `max_alternatives`).
  // These alternatives are ordered in terms of accuracy, with the top (first)
  // alternative being the most probable, as ranked by the recognizer.
  repeated SpeechRecognitionAlternative alternatives = 1;

  // For multi-channel audio, this is the channel number corresponding to the
  // recognized result for the audio from that channel.
  // For audio_channel_count = N, its output values can range from '1' to 'N'.
  int32 channel_tag = 2;

  // Time offset of the end of this result relative to the
  // beginning of the audio.
  google.protobuf.Duration result_end_time = 4;

  // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag of the language in this result. This language code was
  // detected to have the most likelihood of being spoken in the audio.
  string language_code = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Alternative hypotheses (a.k.a. n-best list).
message SpeechRecognitionAlternative {
  // Transcript text representing the words that the user spoke.
  // In languages that use spaces to separate words, the transcript might have a
  // leading space if it isn't the first result. You can concatenate each result
  // to obtain the full transcript without using a separator.
  string transcript = 1;

  // The confidence estimate between 0.0 and 1.0. A higher number
  // indicates an estimated greater likelihood that the recognized words are
  // correct. This field is set only for the top alternative of a non-streaming
  // result or of a streaming result where `is_final=true`.
  // This field is not guaranteed to be accurate and users should not rely on it
  // to be always provided.
  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
  float confidence = 2;

  // A list of word-specific information for each recognized word.
  // Note: When `enable_speaker_diarization` is true, you will see all the words
  // from the beginning of the audio.
  repeated WordInfo words = 3;
}

// Word-specific information for recognized words.
message WordInfo {
  // Time offset relative to the beginning of the audio,
  // and corresponding to the start of the spoken word.
  // This field is only set if `enable_word_time_offsets=true` and only
  // in the top hypothesis.
  // This is an experimental feature and the accuracy of the time offset can
  // vary.
  google.protobuf.Duration start_time = 1;

  // Time offset relative to the beginning of the audio,
  // and corresponding to the end of the spoken word.
  // This field is only set if `enable_word_time_offsets=true` and only
  // in the top hypothesis.
  // This is an experimental feature and the accuracy of the time offset can
  // vary.
  google.protobuf.Duration end_time = 2;

  // The word corresponding to this set of information.
  string word = 3;

  // The confidence estimate between 0.0 and 1.0. A higher number
  // indicates an estimated greater likelihood that the recognized words are
  // correct. This field is set only for the top alternative of a non-streaming
  // result or of a streaming result where `is_final=true`.
  // This field is not guaranteed to be accurate and users should not rely on it
  // to be always provided.
  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
  float confidence = 4;

  // Output only. A distinct integer value is assigned for every speaker within
  // the audio. This field specifies which one of those speakers was detected to
  // have spoken this word. Value ranges from '1' to diarization_speaker_count.
  // speaker_tag is set if enable_speaker_diarization = 'true' and only in the
  // top alternative.
  int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Information on speech adaptation use in results.
message SpeechAdaptationInfo {
  // Whether there was a timeout when applying speech adaptation. If true,
  // adaptation had no effect in the response transcript.
  bool adaptation_timeout = 1;

  // If set, returns a message specifying which part of the speech adaptation
  // request timed out.
  string timeout_message = 4;
}
