// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/duration.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "cloud.google.com/go/dialogflow/cx/apiv3/cxpb;cxpb";
option java_multiple_files = true;
option java_outer_classname = "AudioConfigProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";
option (google.api.resource_definition) = {
  type: "automl.googleapis.com/Model"
  pattern: "projects/{project}/locations/{location}/models/{model}"
};

// Audio encoding of the audio content sent in the conversational query
// request. Refer to the
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics) for more
// details.
enum AudioEncoding {
  // Not specified.
  AUDIO_ENCODING_UNSPECIFIED = 0;

  // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  AUDIO_ENCODING_LINEAR_16 = 1;

  // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
  // Codec) is the recommended encoding because it is lossless (therefore
  // recognition is not compromised) and requires only about half the
  // bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and
  // 24-bit samples; however, not all fields in `STREAMINFO` are supported.
  AUDIO_ENCODING_FLAC = 2;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  AUDIO_ENCODING_MULAW = 3;

  // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
  AUDIO_ENCODING_AMR = 4;

  // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_AMR_WB = 5;

  // Opus encoded audio frames in an Ogg container
  // ([OggOpus](https://wiki.xiph.org/OggOpus)).
  // `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_OGG_OPUS = 6;

  // Although the use of lossy encodings is not recommended, if a very low
  // bitrate encoding is required, `OGG_OPUS` is highly preferred over
  // Speex encoding. The [Speex](https://speex.org/) encoding supported by
  // the Dialogflow API has a header byte in each block, as in the MIME type
  // `audio/x-speex-with-header-byte`.
  // It is a variant of the RTP Speex encoding defined in
  // [RFC 5574](https://tools.ietf.org/html/rfc5574).
  // The stream is a sequence of blocks, one block per RTP packet. Each block
  // starts with a byte containing the length of the block, in bytes, followed
  // by one or more frames of Speex data, padded to an integral number of
  // bytes (octets) as specified in RFC 5574. In other words, each RTP header
  // is replaced with a single byte containing the block length. Only Speex
  // wideband is supported. `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}

// Variant of the specified [Speech
// model][google.cloud.dialogflow.cx.v3.InputAudioConfig.model] to use.
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
// for which models have different variants. For example, the "phone_call"
// model has both a standard and an enhanced variant. When you use an enhanced
// model, you will generally receive higher quality results than with a
// standard model.
enum SpeechModelVariant {
  // No model variant specified. In this case, Dialogflow defaults to
  // USE_BEST_AVAILABLE.
  SPEECH_MODEL_VARIANT_UNSPECIFIED = 0;

  // Use the best available variant of the [Speech
  // model][InputAudioConfig.model] that the caller is eligible for.
  USE_BEST_AVAILABLE = 1;

  // Use the standard model variant even if an enhanced model is available.
  // See the [Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
  // for details about enhanced models.
  USE_STANDARD = 2;

  // Use an enhanced model variant:
  //
  // * If an enhanced variant does not exist for the given
  //   [model][google.cloud.dialogflow.cx.v3.InputAudioConfig.model] and
  //   request language, Dialogflow falls back to the standard variant.
  //
  //   The [Cloud Speech
  //   documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
  //   describes which models have enhanced variants.
  USE_ENHANCED = 3;
}

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
  // The word this info is for.
  string word = 3;

  // Time offset relative to the beginning of the audio that corresponds to
  // the start of the spoken word. This is an experimental feature and the
  // accuracy of the time offset can vary.
  google.protobuf.Duration start_offset = 1;

  // Time offset relative to the beginning of the audio that corresponds to
  // the end of the spoken word. This is an experimental feature and the
  // accuracy of the time offset can vary.
  google.protobuf.Duration end_offset = 2;

  // The Speech confidence between 0.0 and 1.0 for this word. A higher number
  // indicates an estimated greater likelihood that the recognized word is
  // correct. The default of 0.0 is a sentinel value indicating that
  // confidence was not set.
  //
  // This field is not guaranteed to be fully stable over time for the same
  // audio input. Users should also not rely on it to always be provided.
  float confidence = 4;
}
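
// For illustration, a recognized word in a
// [SpeechWordInfo][google.cloud.dialogflow.cx.v3.SpeechWordInfo] might carry
// values like the following text-format sketch (hypothetical values, not
// actual recognizer output):
//
//   word: "hello"
//   start_offset { nanos: 100000000 }  // 0.1 seconds into the audio
//   end_offset { nanos: 500000000 }    // 0.5 seconds into the audio
//   confidence: 0.87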

// Configuration of the barge-in behavior. Barge-in instructs the API to
// return a detected utterance at a proper time while the client is playing
// back the response audio from a previous request. When the client sees the
// utterance, it should stop the playback and immediately prepare to receive
// the responses for the current request.
//
// Barge-in handling requires the client to start streaming audio input
// as soon as it starts playing back the audio from the previous response. The
// playback is modeled as two phases:
//
// * No barge-in phase: comes first, and speech detection should not be
//   carried out during it.
//
// * Barge-in phase: follows the no barge-in phase; the API starts speech
//   detection during it and may inform the client that an utterance has been
//   detected. Note that a no-speech event is not expected in this phase.
//
// The client provides this configuration in terms of the durations of those
// two phases. The durations are measured as audio length from the start of
// the input audio. A worked example follows this message.
//
// A no-speech event is a response with END_OF_UTTERANCE that is not followed
// by any transcript.
message BargeInConfig {
  // Duration that is not eligible for barge-in at the beginning of the input
  // audio.
  google.protobuf.Duration no_barge_in_duration = 1;

  // Total duration for the playback at the beginning of the input audio.
  google.protobuf.Duration total_duration = 2;
}
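
// As a sketch of the two phases above: a client that plays back 5 seconds of
// response audio and wants the first 2 seconds protected from barge-in could
// send the following BargeInConfig (text format, hypothetical durations):
//
//   no_barge_in_duration { seconds: 2 }  // no speech detection in [0s, 2s)
//   total_duration { seconds: 5 }        // barge-in allowed in [2s, 5s)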

// Instructs the speech recognizer on how to process the audio content.
message InputAudioConfig {
  // Required. Audio encoding of the audio content to process.
  AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED];

  // Sample rate (in Hertz) of the audio content sent in the query.
  // Refer to the
  // [Cloud Speech API
  // documentation](https://cloud.google.com/speech-to-text/docs/basics) for
  // more details.
  int32 sample_rate_hertz = 2;

  // Optional. If `true`, Dialogflow returns
  // [SpeechWordInfo][google.cloud.dialogflow.cx.v3.SpeechWordInfo] in
  // [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult]
  // with information about the recognized speech words, e.g. start and end
  // time offsets. If `false` or unspecified, Speech doesn't return any
  // word-level information.
  bool enable_word_info = 13;

  // Optional. A list of strings containing words and phrases that the speech
  // recognizer should recognize with higher likelihood.
  //
  // See [the Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
  // for more details.
  repeated string phrase_hints = 4;

  // Optional. Which Speech model to select for the given request.
  // For more information, see
  // [Speech
  // models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
  string model = 7;

  // Optional. Which variant of the [Speech
  // model][google.cloud.dialogflow.cx.v3.InputAudioConfig.model] to use.
  SpeechModelVariant model_variant = 10;

  // Optional. If `false` (default), recognition does not cease until the
  // client closes the stream. If `true`, the recognizer detects a single
  // spoken utterance in the input audio; recognition ceases when it detects
  // that the voice in the audio has stopped or paused. In this case, once a
  // detected intent is received, the client should close the stream and
  // start a new request with a new stream as needed.
  // Note: This setting is relevant only for streaming methods.
  bool single_utterance = 8;

  // Configuration of barge-in behavior during the streaming of input audio.
  BargeInConfig barge_in_config = 15;

  // If `true`, the request opts out of the STT conformer model migration.
  // This field will be deprecated once the forced migration takes place in
  // June 2024. Refer to [Dialogflow CX Speech model
  // migration](https://cloud.google.com/dialogflow/cx/docs/concept/speech-model-migration).
  bool opt_out_conformer_model_migration = 26;
}
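
// A minimal InputAudioConfig sketch in text format (the model name and other
// values are illustrative assumptions, not defaults):
//
//   audio_encoding: AUDIO_ENCODING_LINEAR_16
//   sample_rate_hertz: 16000
//   enable_word_info: true
//   model: "telephony"
//   model_variant: USE_ENHANCED
//   single_utterance: true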

// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
enum SsmlVoiceGender {
  // An unspecified gender, which means that the client doesn't care which
  // gender the selected voice will have.
  SSML_VOICE_GENDER_UNSPECIFIED = 0;

  // A male voice.
  SSML_VOICE_GENDER_MALE = 1;

  // A female voice.
  SSML_VOICE_GENDER_FEMALE = 2;

  // A gender-neutral voice.
  SSML_VOICE_GENDER_NEUTRAL = 3;
}

// Description of which voice to use for speech synthesis.
message VoiceSelectionParams {
  // Optional. The name of the voice. If not set, the service will choose a
  // voice based on the other parameters, such as language_code and
  // [ssml_gender][google.cloud.dialogflow.cx.v3.VoiceSelectionParams.ssml_gender].
  //
  // For the list of available voices, please refer to [Supported voices and
  // languages](https://cloud.google.com/text-to-speech/docs/voices).
  string name = 1;

  // Optional. The preferred gender of the voice. If not set, the service
  // will choose a voice based on the other parameters, such as language_code
  // and [name][google.cloud.dialogflow.cx.v3.VoiceSelectionParams.name].
  // Note that this is only a preference, not a requirement. If a voice of
  // the appropriate gender is not available, the synthesizer substitutes a
  // voice with a different gender rather than failing the request.
  SsmlVoiceGender ssml_gender = 2;
}
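
// For example (text format; the voice name and its gender pairing are an
// illustrative assumption):
//
//   name: "en-US-Standard-C"
//   ssml_gender: SSML_VOICE_GENDER_FEMALE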

// Configuration of how speech should be synthesized.
message SynthesizeSpeechConfig {
  // Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the
  // normal native speed supported by the specific voice. 2.0 is twice as
  // fast, and 0.5 is half as fast. If unset (0.0), defaults to the native
  // speed of 1.0. Any other value outside [0.25, 4.0] returns an error.
  double speaking_rate = 1;

  // Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means an
  // increase of 20 semitones from the original pitch; -20 means a decrease
  // of 20 semitones from the original pitch.
  double pitch = 2;

  // Optional. Volume gain (in dB) of the normal native volume supported by
  // the specific voice, in the range [-96.0, 16.0]. If unset, or set to a
  // value of 0.0 (dB), the audio plays at the normal native signal
  // amplitude. A value of -6.0 (dB) plays at approximately half the
  // amplitude of the normal native signal, and a value of +6.0 (dB) plays at
  // approximately twice that amplitude. We strongly recommend not exceeding
  // +10 (dB), as there is usually no effective increase in loudness for any
  // value greater than that.
  double volume_gain_db = 3;

  // Optional. An identifier that selects 'audio effects' profiles applied to
  // the synthesized speech (post-synthesis). Effects are applied on top of
  // each other in the order they are given.
  repeated string effects_profile_id = 5;

  // Optional. The desired voice of the synthesized audio.
  VoiceSelectionParams voice = 4;
}
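
// As a hedged sketch: a configuration that speaks 25% faster, two semitones
// lower, and at about half the native amplitude (each -6 dB roughly halves
// the signal amplitude) might look like this in text format (the effects
// profile ID is an illustrative assumption):
//
//   speaking_rate: 1.25
//   pitch: -2.0
//   volume_gain_db: -6.0
//   effects_profile_id: "telephony-class-application"
//   voice { ssml_gender: SSML_VOICE_GENDER_FEMALE }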

// Audio encoding of the output audio format in Text-To-Speech.
enum OutputAudioEncoding {
  // Not specified.
  OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0;

  // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  // Audio content returned as LINEAR16 also contains a WAV header.
  OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1;

  // MP3 audio at 32 kbps.
  OUTPUT_AUDIO_ENCODING_MP3 = 2;

  // MP3 audio at 64 kbps.
  OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4;

  // Opus encoded audio wrapped in an Ogg container. The result will be a
  // file which can be played natively on Android, and in browsers (at least
  // Chrome and Firefox). The quality of the encoding is considerably higher
  // than MP3 while using approximately the same bitrate.
  OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  OUTPUT_AUDIO_ENCODING_MULAW = 5;
}

// Instructs the speech synthesizer how to generate the output audio content.
message OutputAudioConfig {
  // Required. Audio encoding of the synthesized audio content.
  OutputAudioEncoding audio_encoding = 1
      [(google.api.field_behavior) = REQUIRED];

  // Optional. The synthesis sample rate (in hertz) for this audio. If not
  // provided, then the synthesizer will use the default sample rate based on
  // the audio encoding. If this is different from the voice's natural sample
  // rate, then the synthesizer will honor this request by converting to the
  // desired sample rate (which might result in worse audio quality).
  int32 sample_rate_hertz = 2;

  // Optional. Configuration of how speech should be synthesized.
  // If not specified,
  // [Agent.text_to_speech_settings][google.cloud.dialogflow.cx.v3.Agent.text_to_speech_settings]
  // is applied.
  SynthesizeSpeechConfig synthesize_speech_config = 3;
}
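
// A minimal OutputAudioConfig sketch in text format (the sample rate is an
// illustrative assumption, not a default):
//
//   audio_encoding: OUTPUT_AUDIO_ENCODING_OGG_OPUS
//   sample_rate_hertz: 24000
//   synthesize_speech_config { speaking_rate: 1.0 }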

// Settings related to speech synthesizing.
message TextToSpeechSettings {
  // Configuration of how speech should be synthesized, mapping from
  // [language](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // to SynthesizeSpeechConfig.
  //
  // These settings affect:
  //
  //  - The [phone
  //    gateway](https://cloud.google.com/dialogflow/cx/docs/concept/integration/phone-gateway)
  //    synthesis configuration set via
  //    [Agent.text_to_speech_settings][google.cloud.dialogflow.cx.v3.Agent.text_to_speech_settings].
  //
  //  - How speech is synthesized when invoking
  //    [session][google.cloud.dialogflow.cx.v3.Sessions] APIs.
  //    [Agent.text_to_speech_settings][google.cloud.dialogflow.cx.v3.Agent.text_to_speech_settings]
  //    only applies if
  //    [OutputAudioConfig.synthesize_speech_config][google.cloud.dialogflow.cx.v3.OutputAudioConfig.synthesize_speech_config]
  //    is not specified.
  map<string, SynthesizeSpeechConfig> synthesize_speech_configs = 1;
}
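
// For illustration, a per-language mapping might look like this in text
// format (the language tags and values are illustrative assumptions):
//
//   synthesize_speech_configs {
//     key: "en"
//     value { speaking_rate: 1.1 }
//   }
//   synthesize_speech_configs {
//     key: "fr"
//     value { voice { ssml_gender: SSML_VOICE_GENDER_MALE } }
//   }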
370