// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.texttospeech.v1beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1";
option go_package = "cloud.google.com/go/texttospeech/apiv1beta1/texttospeechpb;texttospeechpb";
option java_multiple_files = true;
option java_outer_classname = "TextToSpeechProto";
option java_package = "com.google.cloud.texttospeech.v1beta1";
option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1";
option ruby_package = "Google::Cloud::TextToSpeech::V1beta1";
option (google.api.resource_definition) = {
  type: "automl.googleapis.com/Model"
  pattern: "projects/{project}/locations/{location}/models/{model}"
};

// Service that implements Google Cloud Text-to-Speech API.
service TextToSpeech {
  option (google.api.default_host) = "texttospeech.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Returns a list of Voice supported for synthesis.
  rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) {
    option (google.api.http) = {
      get: "/v1beta1/voices"
    };
    option (google.api.method_signature) = "language_code";
  }

  // Synthesizes speech synchronously: receive results after all text input
  // has been processed.
  rpc SynthesizeSpeech(SynthesizeSpeechRequest)
      returns (SynthesizeSpeechResponse) {
    option (google.api.http) = {
      post: "/v1beta1/text:synthesize"
      body: "*"
    };
    option (google.api.method_signature) = "input,voice,audio_config";
  }
}

// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
enum SsmlVoiceGender {
  // An unspecified gender.
  // In VoiceSelectionParams, this means that the client doesn't care which
  // gender the selected voice will have. In the Voice field of
  // ListVoicesResponse, this may mean that the voice doesn't fit any of the
  // other categories in this enum, or that the gender of the voice isn't known.
  SSML_VOICE_GENDER_UNSPECIFIED = 0;

  // A male voice.
  MALE = 1;

  // A female voice.
  FEMALE = 2;

  // A gender-neutral voice. This voice is not yet supported.
  NEUTRAL = 3;
}

// Configuration to set up audio encoder. The encoding determines the output
// audio format that we'd like.
enum AudioEncoding {
  // Not specified. Will return result
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
  AUDIO_ENCODING_UNSPECIFIED = 0;

  // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  // Audio content returned as LINEAR16 also contains a WAV header.
  LINEAR16 = 1;

  // MP3 audio at 32kbps.
  MP3 = 2;

  // MP3 at 64kbps.
  MP3_64_KBPS = 4;

  // Opus encoded audio wrapped in an ogg container. The result will be a
  // file which can be played natively on Android, and in browsers (at least
  // Chrome and Firefox). The quality of the encoding is considerably higher
  // than MP3 while using approximately the same bitrate.
  OGG_OPUS = 3;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  // Audio content returned as MULAW also contains a WAV header.
  MULAW = 5;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law.
  // Audio content returned as ALAW also contains a WAV header.
  ALAW = 6;
}

// The top-level message sent by the client for the `ListVoices` method.
message ListVoicesRequest {
  // Optional. Recommended.
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  // If not specified, the API will return all supported voices.
  // If specified, the ListVoices call will only return voices that can be used
  // to synthesize this language_code. For example, if you specify `"en-NZ"`,
  // all `"en-NZ"` voices will be returned. If you specify `"no"`, both
  // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices will be
  // returned.
  string language_code = 1 [(google.api.field_behavior) = OPTIONAL];
}

// The message returned to the client by the `ListVoices` method.
message ListVoicesResponse {
  // The list of voices.
  repeated Voice voices = 1;
}

// Description of a voice supported by the TTS service.
message Voice {
  // The languages that this voice supports, expressed as
  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
  // "en-US", "es-419", "cmn-tw").
  repeated string language_codes = 1;

  // The name of this voice. Each distinct voice has a unique name.
  string name = 2;

  // The gender of this voice.
  SsmlVoiceGender ssml_gender = 3;

  // The natural sample rate (in hertz) for this voice.
  int32 natural_sample_rate_hertz = 4;
}

// The top-level message sent by the client for the `SynthesizeSpeech` method.
message SynthesizeSpeechRequest {
  // The type of timepoint information that is returned in the response.
  enum TimepointType {
    // Not specified. No timepoint information will be returned.
    TIMEPOINT_TYPE_UNSPECIFIED = 0;

    // Timepoint information of `<mark>` tags in SSML input will be returned.
    SSML_MARK = 1;
  }

  // Required. The Synthesizer requires either plain text or SSML as input.
  SynthesisInput input = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The desired voice of the synthesized audio.
  VoiceSelectionParams voice = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The configuration of the synthesized audio.
  AudioConfig audio_config = 3 [(google.api.field_behavior) = REQUIRED];

  // Whether and what timepoints are returned in the response.
  repeated TimepointType enable_time_pointing = 4;
}

// Contains text input to be synthesized. Either `text` or `ssml` must be
// supplied. Supplying both or neither returns
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. The
// input size is limited to 5000 bytes.
message SynthesisInput {
  // The input source, which is either plain text or SSML.
  oneof input_source {
    // The raw text to be synthesized.
    string text = 1;

    // The SSML document to be synthesized. The SSML document must be valid
    // and well-formed. Otherwise the RPC will fail and return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. For
    // more information, see
    // [SSML](https://cloud.google.com/text-to-speech/docs/ssml).
    string ssml = 2;
  }
}

// Description of which voice to use for a synthesis request.
message VoiceSelectionParams {
  // Required. The language (and potentially also the region) of the voice
  // expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag, e.g. "en-US". This should not include a script tag (e.g. use
  // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
  // from the input provided in the SynthesisInput. The TTS service
  // will use this parameter to help choose an appropriate voice. Note that
  // the TTS service may choose a voice with a slightly different language code
  // than the one selected; it may substitute a different region
  // (e.g. using en-US rather than en-CA if there isn't a Canadian voice
  // available), or even a different language, e.g. using "nb" (Norwegian
  // Bokmal) instead of "no" (Norwegian)".
  string language_code = 1 [(google.api.field_behavior) = REQUIRED];

  // The name of the voice. If not set, the service will choose a
  // voice based on the other parameters such as language_code and gender.
  string name = 2;

  // The preferred gender of the voice. If not set, the service will
  // choose a voice based on the other parameters such as language_code and
  // name. Note that this is only a preference, not requirement; if a
  // voice of the appropriate gender is not available, the synthesizer should
  // substitute a voice with a different gender rather than failing the request.
  SsmlVoiceGender ssml_gender = 3;

  // The configuration for a custom voice. If [CustomVoiceParams.model] is set,
  // the service will choose the custom voice matching the specified
  // configuration.
  CustomVoiceParams custom_voice = 4;
}

// Description of audio data to be synthesized.
message AudioConfig {
  // Required. The format of the audio byte stream.
  AudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED];

  // Optional. Input only. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is
  // the normal native speed supported by the specific voice. 2.0 is twice as
  // fast, and 0.5 is half as fast. If unset(0.0), defaults to the native 1.0
  // speed. Any other values < 0.25 or > 4.0 will return an error.
  double speaking_rate = 2 [
    (google.api.field_behavior) = INPUT_ONLY,
    (google.api.field_behavior) = OPTIONAL
  ];

  // Optional. Input only. Speaking pitch, in the range [-20.0, 20.0]. 20 means
  // increase 20 semitones from the original pitch. -20 means decrease 20
  // semitones from the original pitch.
  double pitch = 3 [
    (google.api.field_behavior) = INPUT_ONLY,
    (google.api.field_behavior) = OPTIONAL
  ];

  // Optional. Input only. Volume gain (in dB) of the normal native volume
  // supported by the specific voice, in the range [-96.0, 16.0]. If unset, or
  // set to a value of 0.0 (dB), will play at normal native signal amplitude. A
  // value of -6.0 (dB) will play at approximately half the amplitude of the
  // normal native signal amplitude. A value of +6.0 (dB) will play at
  // approximately twice the amplitude of the normal native signal amplitude.
  // Strongly recommend not to exceed +10 (dB) as there's usually no effective
  // increase in loudness for any value greater than that.
  double volume_gain_db = 4 [
    (google.api.field_behavior) = INPUT_ONLY,
    (google.api.field_behavior) = OPTIONAL
  ];

  // Optional. The synthesis sample rate (in hertz) for this audio. When this is
  // specified in SynthesizeSpeechRequest, if this is different from the voice's
  // natural sample rate, then the synthesizer will honor this request by
  // converting to the desired sample rate (which might result in worse audio
  // quality), unless the specified sample rate is not supported for the
  // encoding chosen, in which case it will fail the request and return
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
  int32 sample_rate_hertz = 5 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Input only. An identifier which selects 'audio effects' profiles
  // that are applied on (post synthesized) text to speech. Effects are applied
  // on top of each other in the order they are given. See
  // [audio
  // profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for
  // current supported profile ids.
  repeated string effects_profile_id = 6 [
    (google.api.field_behavior) = INPUT_ONLY,
    (google.api.field_behavior) = OPTIONAL
  ];
}

// Description of the custom voice to be synthesized.
message CustomVoiceParams {
  // Deprecated. The usage of the synthesized audio. Usage does not affect
  // billing.
  enum ReportedUsage {
    // Request with reported usage unspecified will be rejected.
    REPORTED_USAGE_UNSPECIFIED = 0;

    // For scenarios where the synthesized audio is not downloadable and can
    // only be used once. For example, real-time request in IVR system.
    REALTIME = 1;

    // For scenarios where the synthesized audio is downloadable and can be
    // reused. For example, the synthesized audio is downloaded, stored in
    // customer service system and played repeatedly.
    OFFLINE = 2;
  }

  // Required. The name of the AutoML model that synthesizes the custom voice.
  string model = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "automl.googleapis.com/Model" }
  ];

  // Optional. Deprecated. The usage of the synthesized audio to be reported.
  ReportedUsage reported_usage = 3
      [deprecated = true, (google.api.field_behavior) = OPTIONAL];
}

// The message returned to the client by the `SynthesizeSpeech` method.
message SynthesizeSpeechResponse {
  // The audio data bytes encoded as specified in the request, including the
  // header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
  // For LINEAR16 audio, we include the WAV header. Note: as
  // with all bytes fields, protobuffers use a pure binary representation,
  // whereas JSON representations use base64.
  bytes audio_content = 1;

  // A link between a position in the original request input and a corresponding
  // time in the output audio. It's only supported via `<mark>` of SSML input.
  repeated Timepoint timepoints = 2;

  // The audio metadata of `audio_content`.
  AudioConfig audio_config = 4;
}

// This contains a mapping between a certain point in the input text and a
// corresponding time in the output audio.
message Timepoint {
  // Timepoint name as received from the client within `<mark>` tag.
  string mark_name = 4;

  // Time offset in seconds from the start of the synthesized audio.
  double time_seconds = 3;
}