// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3/advanced_settings.proto";
import "google/cloud/dialogflow/cx/v3/audio_config.proto";
import "google/cloud/dialogflow/cx/v3/flow.proto";
import "google/cloud/dialogflow/cx/v3/intent.proto";
import "google/cloud/dialogflow/cx/v3/page.proto";
import "google/cloud/dialogflow/cx/v3/response_message.proto";
import "google/cloud/dialogflow/cx/v3/session_entity_type.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "cloud.google.com/go/dialogflow/cx/apiv3/cxpb;cxpb";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Session"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
option (google.api.resource_definition) = {
  type: "discoveryengine.googleapis.com/DataStore"
  pattern: "projects/{project}/locations/{location}/dataStores/{data_store}"
  pattern: "projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}"
};

// A session represents an interaction with a user. You retrieve user input
// and pass it to the
// [DetectIntent][google.cloud.dialogflow.cx.v3.Sessions.DetectIntent] method to
// determine user intent and respond.
service Sessions {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Processes a natural language query and returns structured, actionable data
  // as a result. This method is not idempotent, because it may cause session
  // entity types to be updated, which in turn might affect results of future
  // queries.
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
      body: "*"
      additional_bindings {
        post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query and returns structured, actionable data
  // as a result through server-side streaming. Server-side streaming allows
  // Dialogflow to send [partial
  // responses](https://cloud.google.com/dialogflow/cx/docs/concept/fulfillment#partial-response)
  // earlier in a single request.
  rpc ServerStreamingDetectIntent(DetectIntentRequest)
      returns (stream DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:serverStreamingDetectIntent"
      body: "*"
      additional_bindings {
        post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:serverStreamingDetectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query in audio format in a streaming fashion
  // and returns structured, actionable data as a result. This method is only
  // available via the gRPC API (not REST).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
      returns (stream StreamingDetectIntentResponse) {}

  // Returns preliminary intent match results without changing the session
  // state.
  rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
    option (google.api.http) = {
      post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
      body: "*"
      additional_bindings {
        post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
        body: "*"
      }
    };
  }

  // Fulfills a matched intent returned by
  // [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent]. Must be
  // called after
  // [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent], with
  // input from
  // [MatchIntentResponse][google.cloud.dialogflow.cx.v3.MatchIntentResponse].
  // Otherwise, the behavior is undefined.
  rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
    option (google.api.http) = {
      post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
      body: "*"
      additional_bindings {
        post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
        body: "*"
      }
    };
  }

  // Updates the feedback received from the user for a single turn of the bot
  // response.
  rpc SubmitAnswerFeedback(SubmitAnswerFeedbackRequest)
      returns (AnswerFeedback) {
    option (google.api.http) = {
      post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:submitAnswerFeedback"
      body: "*"
    };
  }
}

// Stores information about feedback provided by users about a response.
message AnswerFeedback {
  // Represents a thumbs up/down rating provided by the user about a response.
  enum Rating {
    // Rating not specified.
    RATING_UNSPECIFIED = 0;

    // Thumbs up feedback from the user.
    THUMBS_UP = 1;

    // Thumbs down feedback from the user.
    THUMBS_DOWN = 2;
  }

  // Stores extra information about why users provided a thumbs down rating.
  message RatingReason {
    // Optional. Custom reason labels for a thumbs down rating provided by the
    // user. The maximum number of labels allowed is 10 and the maximum length
    // of a single label is 128 characters.
    repeated string reason_labels = 3 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Additional feedback about the rating.
    // This field can be populated without choosing a predefined `reason`.
    string feedback = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Rating from the user for the specific Dialogflow response.
  Rating rating = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If a thumbs down rating is provided, users can optionally
  // provide context about the rating.
  RatingReason rating_reason = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Custom rating from the user about the provided answer, with a
  // maximum length of 1024 characters. For example, the client could use a
  // customized JSON object to indicate the rating.
  string custom_rating = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The request to set the feedback for a bot answer.
message SubmitAnswerFeedbackRequest {
  // Required. The name of the session the feedback was sent to.
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // Required. ID of the response whose feedback is being updated. This is the
  // same as `DetectIntentResponse.response_id`.
  string response_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. Feedback provided for a bot answer.
  AnswerFeedback answer_feedback = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. The mask to control which fields to update. If the mask is not
  // present, all fields will be updated.
  google.protobuf.FieldMask update_mask = 4
      [(google.api.field_behavior) = OPTIONAL];
}
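
// A hedged sketch of `SubmitAnswerFeedbackRequest` in its proto3 JSON form;
// all names below (project, agent, session, response ID, labels, feedback
// text) are hypothetical placeholders. Note that with the REST binding above,
// `session` travels in the URL path rather than in the request body.
//
// ```json
// {
//   "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//   "responseId": "response-id-from-detect-intent",
//   "answerFeedback": {
//     "rating": "THUMBS_DOWN",
//     "ratingReason": {
//       "reasonLabels": ["inaccurate"],
//       "feedback": "The answer cited the wrong document."
//     }
//   }
// }
// ```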

// The request to detect the user's intent.
message DetectIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}
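
// A hedged sketch of a minimal `DetectIntentRequest` in proto3 JSON form; the
// session name and query text are hypothetical. With the REST binding above,
// `session` is supplied in the URL path and the remaining fields form the
// request body.
//
// ```json
// {
//   "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//   "queryInput": {
//     "text": {"text": "I want to book a flight"},
//     "languageCode": "en"
//   },
//   "queryParams": {"timeZone": "America/New_York"}
// }
// ```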

// The message returned from the DetectIntent method.
message DetectIntentResponse {
  // Represents different DetectIntentResponse types.
  enum ResponseType {
    // Not specified. This should never happen.
    RESPONSE_TYPE_UNSPECIFIED = 0;

    // Partial response. For example, aggregated responses in a fulfillment
    // that enables `return_partial_response` can be returned as a partial
    // response.
    // WARNING: partial response is not eligible for barge-in.
    PARTIAL = 1;

    // Final response.
    FINAL = 2;
  }

  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 4;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 5;

  // Response type.
  ResponseType response_type = 6;

  // Indicates whether the partial response can be cancelled when a later
  // response arrives. For example, if the agent specified some music as a
  // partial response, it can be cancelled.
  bool allow_cancellation = 7;
}

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1.  The first message must contain
//     [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
//     [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input]
//     plus optionally
//     [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params].
//     If the client wants to receive an audio response, it should also contain
//     [output_audio_config][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.output_audio_config].
//
// 2.  If
//     [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input]
//     was set to
//     [query_input.audio.config][google.cloud.dialogflow.cx.v3.AudioInput.config],
//     all subsequent messages must contain
//     [query_input.audio.audio][google.cloud.dialogflow.cx.v3.AudioInput.audio]
//     to continue with Speech recognition. If you decide to detect an intent
//     from text input instead, after you have already started Speech
//     recognition, send a message with
//     [query_input.text][google.cloud.dialogflow.cx.v3.QueryInput.text].
//
//     However, note that:
//
//     * Dialogflow will bill you for the audio duration so far.
//     * Dialogflow discards all Speech recognition results in favor of the
//       input text.
//     * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request
// stream.
message StreamingDetectIntentRequest {
  // The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  // Note: session must be set in the first request.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Session"
  }];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;

  // Enable partial detect intent response. If this flag is not enabled, the
  // response stream still contains only one final `DetectIntentResponse` even
  // if some `Fulfillment`s in the agent have been configured to return partial
  // responses.
  bool enable_partial_response = 5;

  // If true, `StreamingDetectIntentResponse.debugging_info` will get populated.
  bool enable_debugging_info = 8;
}
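
// A hedged sketch of the streaming message sequence described above, shown in
// proto3 JSON form (in JSON, audio bytes are base64-encoded); the session
// name, audio settings, and audio chunk below are hypothetical placeholders.
//
// First message: session, audio config, and options, but no audio yet:
//
// ```json
// {
//   "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//   "queryInput": {
//     "audio": {
//       "config": {
//         "audioEncoding": "AUDIO_ENCODING_LINEAR_16",
//         "sampleRateHertz": 16000
//       }
//     },
//     "languageCode": "en"
//   },
//   "enablePartialResponse": true
// }
// ```
//
// Each subsequent message: one audio chunk, until the client half-closes:
//
// ```json
// {
//   "queryInput": {
//     "audio": {
//       "audio": "UklGRiQAAABXQVZF"
//     },
//     "languageCode": "en"
//   }
// }
// ```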

// Cloud conversation info for easier debugging.
// It will get populated in `StreamingDetectIntentResponse` or
// `StreamingAnalyzeContentResponse` when the flag `enable_debugging_info` is
// set to true in corresponding requests.
message CloudConversationDebuggingInfo {
  // Number of input audio data chunks in streaming requests.
  int32 audio_data_chunks = 1;

  // Time offset of the end of speech utterance relative to the
  // beginning of the first audio chunk.
  google.protobuf.Duration result_end_time_offset = 2;

  // Duration of first audio chunk.
  google.protobuf.Duration first_audio_duration = 3;

  // Whether client used single utterance mode.
  bool single_utterance = 5;

  // Time offsets of the speech partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration speech_partial_results_end_times = 6;

  // Time offsets of the speech final results (is_final=true) relative to the
  // beginning of the stream.
  repeated google.protobuf.Duration speech_final_results_end_times = 7;

  // Total number of partial responses.
  int32 partial_responses = 8;

  // Time offset of Speaker ID stream close time relative to the Speech stream
  // close time in milliseconds. Only meaningful for conversations involving
  // passive verification.
  int32 speaker_id_passive_latency_ms_offset = 9;

  // Whether a barge-in event is triggered in this request.
  bool bargein_event_triggered = 10;

  // Whether speech uses single utterance mode.
  bool speech_single_utterance = 11;

  // Time offsets of the DTMF partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_partial_results_times = 12;

  // Time offsets of the DTMF final results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_final_results_times = 13;

  // Time offset of the end-of-single-utterance signal relative to the
  // beginning of the stream.
  google.protobuf.Duration single_utterance_end_time_offset = 14;

  // No speech timeout settings for the stream.
  google.protobuf.Duration no_speech_timeout = 15;

  // Speech endpointing timeout settings for the stream.
  google.protobuf.Duration endpointing_timeout = 19;

  // Whether the streaming terminates with an injected text query.
  bool is_input_text = 16;

  // Client half close time in terms of input audio duration.
  google.protobuf.Duration client_half_close_time_offset = 17;

  // Client half close time in terms of API streaming duration.
  google.protobuf.Duration client_half_close_streaming_time_offset = 18;
}

// The top-level message returned from the
// [StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent]
// method.
//
// Multiple response messages (N) can be returned in order.
//
// The first (N-1) responses set either the `recognition_result` or
// `detect_intent_response` field, depending on the request:
//
// *   If the `StreamingDetectIntentRequest.query_input.audio` field was
//     set, and the `StreamingDetectIntentRequest.enable_partial_response`
//     field was false, the `recognition_result` field is populated for each
//     of the (N-1) responses.
//     See the
//     [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult]
//     message for details about the result message sequence.
//
// *   If the `StreamingDetectIntentRequest.enable_partial_response` field was
//     true, the `detect_intent_response` field is populated for each
//     of the (N-1) responses, where 1 <= N <= 4.
//     These responses set the
//     [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type]
//     field to `PARTIAL`.
//
// For the final Nth response message, the `detect_intent_response` is fully
// populated, and
// [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type]
// is set to `FINAL`.
message StreamingDetectIntentResponse {
  // The output response.
  oneof response {
    // The result of speech recognition.
    StreamingRecognitionResult recognition_result = 1;

    // The response from detect intent.
    DetectIntentResponse detect_intent_response = 2;
  }

  // Debugging info that would get populated when
  // `StreamingDetectIntentRequest.enable_debugging_info` is set to true.
  CloudConversationDebuggingInfo debugging_info = 4;
}

// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// While end-user audio is being processed, Dialogflow sends a series of
// results. Each result may contain a `transcript` value. A transcript
// represents a portion of the utterance. While the recognizer is processing
// audio, transcript values may be interim values or finalized values.
// Once a transcript is finalized, the `is_final` value is set to true and
// processing continues for the next transcript.
//
// If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
// was true, and the recognizer has completed processing audio,
// the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
// following (last) result contains the last finalized transcript.
//
// The complete end-user utterance is determined by concatenating the
// finalized transcript values received for the series of results.
//
// In the following example, single utterance is enabled. In the case where
// single utterance is not enabled, result 7 would not occur.
//
// ```
// Num | transcript              | message_type            | is_final
// --- | ----------------------- | ----------------------- | --------
// 1   | "tube"                  | TRANSCRIPT              | false
// 2   | "to be a"               | TRANSCRIPT              | false
// 3   | "to be"                 | TRANSCRIPT              | false
// 4   | "to be or not to be"    | TRANSCRIPT              | true
// 5   | "that's"                | TRANSCRIPT              | false
// 6   | "that is"               | TRANSCRIPT              | false
// 7   | unset                   | END_OF_SINGLE_UTTERANCE | unset
// 8   | " that is the question" | TRANSCRIPT              | true
// ```
//
// Concatenating the finalized transcripts with `is_final` set to true,
// the complete utterance becomes "to be or not to be that is the question".
message StreamingRecognitionResult {
  // Type of the response message.
  enum MessageType {
    // Not specified. Should never be used.
    MESSAGE_TYPE_UNSPECIFIED = 0;

    // Message contains a (possibly partial) transcript.
    TRANSCRIPT = 1;

    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection. This message is only sent if
    // [`single_utterance`][google.cloud.dialogflow.cx.v3.InputAudioConfig.single_utterance]
    // was set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 2;
  }

  // Type of the result message.
  MessageType message_type = 1;

  // Transcript text representing the words that the user spoke.
  // Populated if and only if `message_type` = `TRANSCRIPT`.
  string transcript = 2;

  // If `false`, the `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, the recognizer will not return
  // any further hypotheses about this piece of the audio. May only be populated
  // for `message_type` = `TRANSCRIPT`.
  bool is_final = 3;

  // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
  // A higher number indicates an estimated greater likelihood that the
  // recognized words are correct. The default of 0.0 is a sentinel value
  // indicating that confidence was not set.
  //
  // This field is typically only provided if `is_final` is true and you should
  // not rely on it being accurate or even set.
  float confidence = 4;

  // An estimate of the likelihood that the speech recognizer will
  // not change its guess about this interim recognition result:
  // * If the value is unspecified or 0.0, Dialogflow didn't compute the
  //   stability. In particular, Dialogflow will only provide stability for
  //   `TRANSCRIPT` results with `is_final = false`.
  // * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
  //   unstable and 1.0 means completely stable.
  float stability = 6;

  // Word-specific information for the words recognized by Speech in
  // [transcript][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult.transcript].
  // Populated if and only if `message_type` = `TRANSCRIPT` and
  // [InputAudioConfig.enable_word_info] is set.
  repeated SpeechWordInfo speech_word_info = 7;

  // Time offset of the end of this Speech recognition result relative to the
  // beginning of the audio. Only populated for `message_type` =
  // `TRANSCRIPT`.
  google.protobuf.Duration speech_end_offset = 8;

  // Detected language code for the transcript.
  string language_code = 10;
}

// Represents the parameters of a conversational query.
message QueryParameters {
  // The time zone of this conversational query from the [time zone
  // database](https://www.iana.org/time-zones), e.g., America/New_York,
  // Europe/Paris. If not provided, the time zone specified in the agent is
  // used.
  string time_zone = 1;

  // The geo location of this conversational query.
  google.type.LatLng geo_location = 2;

  // Additional session entity types to replace or extend developer entity types
  // with. The entity synonyms apply to all languages and persist for the
  // session of this query.
  repeated SessionEntityType session_entity_types = 3;

  // This field can be used to pass custom data into the webhook associated with
  // the agent. Arbitrary JSON objects are supported.
  // Some integrations that query a Dialogflow agent may provide additional
  // information in the payload.
  // In particular, for the Dialogflow Phone Gateway integration, this field has
  // the form:
  // ```
  // {
  //  "telephony": {
  //    "caller_id": "+18558363987"
  //  }
  // }
  // ```
  google.protobuf.Struct payload = 4;

  // Additional parameters to be put into [session
  // parameters][SessionInfo.parameters]. To remove a
  // parameter from the session, clients should explicitly set the parameter
  // value to null.
  //
  // You can reference the session parameters in the agent with the following
  // format: $session.params.parameter-id.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
  google.protobuf.Struct parameters = 5;

  // The unique identifier of the [page][google.cloud.dialogflow.cx.v3.Page] to
  // override the [current page][QueryResult.current_page] in the session.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/pages/<Page ID>`.
  //
  // If `current_page` is specified, the previous state of the session will be
  // ignored by Dialogflow, including the [previous
  // page][QueryResult.current_page] and the [previous session
  // parameters][QueryResult.parameters].
  // In most cases,
  // [current_page][google.cloud.dialogflow.cx.v3.QueryParameters.current_page]
  // and [parameters][google.cloud.dialogflow.cx.v3.QueryParameters.parameters]
  // should be configured together to direct a session to a specific state.
  string current_page = 6 [
    (google.api.resource_reference) = { type: "dialogflow.googleapis.com/Page" }
  ];

  // Whether to disable webhook calls for this request.
  bool disable_webhook = 7;

  // Configures whether sentiment analysis should be performed. If not
  // provided, sentiment analysis is not performed.
  bool analyze_query_text_sentiment = 8;

  // This field can be used to pass HTTP headers for a webhook
  // call. These headers will be sent to the webhook along with the headers
  // that have been configured through the Dialogflow web console. The headers
  // defined within this field will overwrite the headers configured through
  // the Dialogflow console if there is a conflict. Header names are
  // case-insensitive. Google-specified headers are not allowed, including
  // "Host", "Content-Length", "Connection", "From", "User-Agent",
  // "Accept-Encoding", "If-Modified-Since", "If-None-Match",
  // "X-Forwarded-For", etc.
  map<string, string> webhook_headers = 10;

  // A list of flow versions to override for the request.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/versions/<Version ID>`.
  //
  // If version 1 of flow X is included in this list, the traffic of
  // flow X will go through version 1 regardless of the version configuration in
  // the environment. Each flow can have at most one version specified in this
  // list.
  repeated string flow_versions = 14 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Version"
  }];

  // The channel which this query is for.
  //
  // If specified, only the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] associated
  // with the channel will be returned. If no
  // [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] is
  // associated with the channel, it falls back to the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] with
  // unspecified channel.
  //
  // If unspecified, the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] with
  // unspecified channel will be returned.
  string channel = 15;

  // Optional. Configure lifetime of the Dialogflow session.
  // By default, a Dialogflow session remains active and its data is stored for
  // 30 minutes after the last request is sent for the session.
  // This value should be no longer than 1 day.
  google.protobuf.Duration session_ttl = 16
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Information about the end-user to improve the relevance and
  // accuracy of generative answers.
  //
  // This will be interpreted and used by a language model, so, for good
  // results, the data should be self-descriptive, and in a simple structure.
  //
  // Example:
  //
  // ```json
  // {
  //   "subscription plan": "Business Premium Plus",
  //   "devices owned": [
  //     {"model": "Google Pixel 7"},
  //     {"model": "Google Pixel Tablet"}
  //   ]
  // }
  // ```
  google.protobuf.Struct end_user_metadata = 18
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Search configuration for UCS search queries.
  SearchConfig search_config = 20 [(google.api.field_behavior) = OPTIONAL];
}
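
// A hedged example of the `QueryParameters.parameters` field above,
// illustrating the MapKey/MapValue rules; the parameter names and the
// composite entity are hypothetical:
//
// ```json
// {
//   "age": 42,
//   "is-member": true,
//   "address": {
//     "street": "1st Avenue",
//     "zip-code": "12345"
//   }
// }
// ```
//
// Here `address` would be backed by a composite entity, so its value is a map
// from composite entity property names to property values; the other values
// are plain scalars.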

// Search configuration for UCS search queries.
message SearchConfig {
  // Optional. Boosting configuration for the datastores.
  repeated BoostSpecs boost_specs = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Filter configuration for the datastores.
  repeated FilterSpecs filter_specs = 2
      [(google.api.field_behavior) = OPTIONAL];
}

// Boost specification to boost certain documents.
// A copy of google.cloud.discoveryengine.v1main.BoostSpec; field documentation
// is available at
// https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1alpha/BoostSpec
message BoostSpec {
  // Boost applies to documents which match a condition.
  message ConditionBoostSpec {
    // Optional. An expression which specifies a boost condition. The syntax
    // and supported fields are the same as a filter expression.
    // Example:
    //
    // * To boost documents with document ID "doc_1" or "doc_2", and
    //   color "Red" or "Blue":
    //     * (id: ANY("doc_1", "doc_2")) AND (color: ANY("Red","Blue"))
    string condition = 1 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Strength of the condition boost, which should be in [-1, 1].
    // Negative boost means demotion. Default is 0.0.
    //
    // Setting to 1.0 gives the document a big promotion. However, it does not
    // necessarily mean that the boosted document will be the top result at
    // all times, nor that other documents will be excluded. Results could
    // still be shown even when none of them matches the condition. And
    // results that are significantly more relevant to the search query can
    // still trump your heavily favored but irrelevant documents.
    //
    // Setting to -1.0 gives the document a big demotion. However, results
    // that are deeply relevant might still be shown. The document will have
    // an upstream battle to get a fairly high ranking, but it is not blocked
    // out completely.
    //
    // Setting to 0.0 means no boost applied. The boosting condition is
    // ignored.
    float boost = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Condition boost specifications. If a document matches multiple
  // conditions in the specifications, boost scores from these specifications
  // are all applied and combined in a non-linear way. The maximum number of
  // specifications is 20.
  repeated ConditionBoostSpec condition_boost_specs = 1
      [(google.api.field_behavior) = OPTIONAL];
}
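
// A hedged JSON sketch of a `BoostSpec`, reusing the condition syntax from the
// field documentation above; the document IDs, colors, and boost strength are
// illustrative:
//
// ```json
// {
//   "conditionBoostSpecs": [
//     {
//       "condition": "(id: ANY(\"doc_1\", \"doc_2\")) AND (color: ANY(\"Red\", \"Blue\"))",
//       "boost": 0.5
//     }
//   ]
// }
// ```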

// Boost specifications for data stores.
message BoostSpecs {
  // Optional. Data Stores where the boosting configuration is applied. The full
  // names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. A list of boosting specifications.
  repeated BoostSpec spec = 2 [(google.api.field_behavior) = OPTIONAL];
}

// Filter specifications for data stores.
message FilterSpecs {
  // Optional. Data Stores where the filter configuration is applied. The full
  // names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. The filter expression to be applied.
  // Expression syntax is documented at
  // https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax
  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
}
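
// A hedged JSON sketch of a `FilterSpecs` entry; the data store name and the
// filter expression are hypothetical (per the `BoostSpec` documentation above,
// filter expressions use the same syntax as boost conditions):
//
// ```json
// {
//   "dataStores": [
//     "projects/my-project/locations/global/collections/default_collection/dataStores/my-data-store"
//   ],
//   "filter": "category: ANY(\"faq\")"
// }
// ```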

// Represents the query input. It can contain one of:
//
// 1. A conversational query in the form of text.
//
// 2. An intent query that specifies which intent to trigger.
//
// 3. Natural language speech audio to be processed.
//
// 4. An event to be triggered.
//
// 5. DTMF digits to invoke an intent and fill in parameter value.
//
// 6. The results of a tool executed by the client.
message QueryInput {
  // Required. The input specification.
  oneof input {
    // The natural language text to be processed.
    TextInput text = 2;

    // The intent to be triggered.
    IntentInput intent = 3;

    // The natural language speech audio to be processed.
    AudioInput audio = 5;

    // The event to be triggered.
    EventInput event = 6;

    // The DTMF event to be handled.
    DtmfInput dtmf = 7;
  }

  // Required. The language of the input. See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries in
  // the same session do not necessarily need to specify the same language.
  string language_code = 4 [(google.api.field_behavior) = REQUIRED];
}
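
// Hedged JSON sketches of two `QueryInput` variants; only one `input` field
// may be set at a time, and the text and event name are hypothetical.
//
// Text input:
//
// ```json
// {"text": {"text": "I want to book a flight"}, "languageCode": "en"}
// ```
//
// Event input:
//
// ```json
// {"event": {"event": "welcome"}, "languageCode": "en"}
// ```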

// Represents the result of a conversational query.
message QueryResult {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was
    // provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as
    // input, this field will contain a copy of the intent identifier. Format:
    // `projects/<Project ID>/locations/<Location ID>/agents/<Agent
    // ID>/intents/<Intent ID>`.
    string trigger_intent = 11 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
    // this field will contain the transcript for the audio.
    string transcript = 12;

    // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as
    // input, this field will contain the name of the event.
    string trigger_event = 14;

    // If a [DTMF][google.cloud.dialogflow.cx.v3.DtmfInput] was provided as
    // input, this field will contain a copy of the
    // [DtmfInput][google.cloud.dialogflow.cx.v3.DtmfInput].
    DtmfInput dtmf = 23;
  }

  // The language that was triggered during intent detection.
  // See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 2;

  // The collected [session
  // parameters][google.cloud.dialogflow.cx.v3.SessionInfo.parameters].
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
  google.protobuf.Struct parameters = 3;

  // The list of rich messages returned to the client. Responses vary from
  // simple text messages to more sophisticated, structured payloads used
  // to drive complex logic.
  repeated ResponseMessage response_messages = 4;

  // The list of webhook ids in the order of call sequence.
  repeated string webhook_ids = 25;

  // The list of webhook display names in the order of call sequence.
  repeated string webhook_display_names = 26;

  // The list of webhook latencies in the order of call sequence.
  repeated google.protobuf.Duration webhook_latencies = 27;

  // The list of webhook tags in the order of call sequence.
  repeated string webhook_tags = 29;

  // The list of webhook call statuses in the order of call sequence.
  repeated google.rpc.Status webhook_statuses = 13;

  // The list of webhook payloads in
  // [WebhookResponse.payload][google.cloud.dialogflow.cx.v3.WebhookResponse.payload],
  // in the order of call sequence. If a webhook call fails or doesn't return
  // any payload, an empty `Struct` is used instead.
  repeated google.protobuf.Struct webhook_payloads = 6;

  // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, but not all,
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Page current_page = 7;

  // The current [Flow][google.cloud.dialogflow.cx.v3.Flow]. Some, but not all,
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Flow current_flow = 31;

  // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the
  // conversational query. Some, but not all, fields are filled in this
  // message, including but not limited to `name` and `display_name`. This
  // field is deprecated; use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match]
  // instead.
  Intent intent = 8 [deprecated = true];

  // The intent detection confidence. Values range from 0.0 (completely
  // uncertain) to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to
  // help match the best intent within the classification threshold.
  // This value may change for the same end-user expression at any time due to a
  // model retraining or change in implementation.
  // This field is deprecated; use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match]
  // instead.
  float intent_detection_confidence = 9 [deprecated = true];

  // Intent match result, which could be an intent or an event.
  Match match = 15;

  // The free-form diagnostic info. For example, this field could contain
  // webhook call latency. The fields of this data can change without notice,
  // so you should not write code that depends on its structure.
  //
  // One of the fields is called "Alternative Matched Intents", which may
  // aid with debugging. The following describes these intent results:
  //
  // - The list is empty if no intent was matched to end-user input.
  // - Only intents that are referenced in the currently active flow are
  //   included.
  // - The matched intent is included.
  // - Other intents that could have matched end-user input, but did not match
  //   because they are referenced by intent routes that are out of
  //   [scope](https://cloud.google.com/dialogflow/cx/docs/concept/handler#scope),
  //   are included.
  // - Other intents referenced by intent routes in scope that matched end-user
  //   input, but had a lower confidence score, are included.
  google.protobuf.Struct diagnostic_info = 10;

  // The sentiment analysis result, which depends on
  // [`analyze_query_text_sentiment`]
  // [google.cloud.dialogflow.cx.v3.QueryParameters.analyze_query_text_sentiment],
  // specified in the request.
  SentimentAnalysisResult sentiment_analysis_result = 17;

  // Returns the current advanced settings, including IVR settings. Even though
  // the operations configured by these settings are performed by Dialogflow,
  // the client may need to perform special logic at the moment. For example, if
  // Dialogflow exports audio to Google Cloud Storage, then the client may need
  // to wait for the resulting object to appear in the bucket before proceeding.
  AdvancedSettings advanced_settings = 21;

  // Indicates whether the Thumbs up/Thumbs down rating controls need to be
  // shown for the response in the Dialogflow Messenger widget.
  bool allow_answer_feedback = 32;
}

// Represents the natural language text to be processed.
message TextInput {
  // Required. The UTF-8 encoded natural language text to be processed.
  string text = 1 [(google.api.field_behavior) = REQUIRED];
}

// Represents the intent to trigger programmatically rather than as a result of
// natural language processing.
message IntentInput {
  // Required. The unique identifier of the intent.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/intents/<Intent ID>`.
  string intent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }
  ];
}

// Represents the natural speech audio to be processed.
message AudioInput {
  // Required. Instructs the speech recognizer how to process the speech audio.
  InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // The natural language speech audio to be processed.
  // A single request can contain up to 2 minutes of speech audio data.
  // The [transcribed
  // text][google.cloud.dialogflow.cx.v3.QueryResult.transcript] cannot contain
  // more than 256 bytes.
  //
  // For non-streaming audio detect intent, both `config` and `audio` must be
  // provided.
  // For streaming audio detect intent, `config` must be provided in
  // the first request and `audio` must be provided in all following requests.
  bytes audio = 2;
}

// Represents the event to trigger.
message EventInput {
  // Name of the event.
  string event = 1;
}

// Represents the input for a DTMF event.
message DtmfInput {
  // The DTMF digits.
  string digits = 1;

  // The finish digit (if any).
  string finish_digit = 2;
}
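
// A hedged example: if the caller keys `1`, `2`, `3` and then the configured
// finish digit `#`, the resulting `DtmfInput` in JSON form would be:
//
// ```json
// {"digits": "123", "finishDigit": "#"}
// ```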

// Represents one match result of [MatchIntent][].
message Match {
  // Type of a Match.
  enum MatchType {
    // Not specified. Should never be used.
    MATCH_TYPE_UNSPECIFIED = 0;

    // The query was matched to an intent.
    INTENT = 1;

    // The query directly triggered an intent.
    DIRECT_INTENT = 2;

    // The query was used for parameter filling.
    PARAMETER_FILLING = 3;

    // No match was found for the query.
    NO_MATCH = 4;

    // Indicates an empty query.
    NO_INPUT = 5;

    // The query directly triggered an event.
    EVENT = 6;
  }

  // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the query.
  // Some, but not all, fields are filled in this message, including but not
  // limited to `name` and `display_name`. Only filled for the
  // [`INTENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
  Intent intent = 1;

  // The event that matched the query. Filled for the
  // [`EVENT`][google.cloud.dialogflow.cx.v3.Match.MatchType],
  // [`NO_MATCH`][google.cloud.dialogflow.cx.v3.Match.MatchType] and
  // [`NO_INPUT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match types.
  string event = 6;

  // The collection of parameters extracted from the query.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
  google.protobuf.Struct parameters = 2;

  // Final text input which was matched during MatchIntent. This value can be
  // different from the original input sent in the request because of spelling
  // correction or other processing.
  string resolved_input = 3;

  // Type of this [Match][google.cloud.dialogflow.cx.v3.Match].
  MatchType match_type = 4;

  // The confidence of this match. Values range from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to help
  // match the best intent within the classification threshold. This value may
  // change for the same end-user expression at any time due to a model
  // retraining or change in implementation.
  float confidence = 5;
}

// Request of [MatchIntent][].
message MatchIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Persist session parameter changes from `query_params`.
  bool persist_parameter_changes = 5;
}

// Response of [MatchIntent][].
message MatchIntentResponse {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was
    // provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as
    // input, this field will contain a copy of the intent identifier. Format:
    // `projects/<Project ID>/locations/<Location ID>/agents/<Agent
    // ID>/intents/<Intent ID>`.
    string trigger_intent = 2 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
    // this field will contain the transcript for the audio.
    string transcript = 3;

    // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as
    // input, this field will contain a copy of the event name.
    string trigger_event = 6;
  }

  // Match results. If there is more than one, they are ordered by descending
  // confidence that the particular intent matches the query.
  repeated Match matches = 4;

  // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, but not all,
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Page current_page = 5;
}

// Request of [FulfillIntent][].
message FulfillIntentRequest {
  // Must be the same as the corresponding MatchIntent request; otherwise the
  // behavior is undefined.
  MatchIntentRequest match_intent_request = 1;

  // The matched intent/event to fulfill.
  Match match = 2;

  // Instructs the speech synthesizer how to generate output audio.
  OutputAudioConfig output_audio_config = 3;
}
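
// A hedged sketch of the MatchIntent/FulfillIntent handshake described above:
// take one `Match` from `MatchIntentResponse.matches` and send it back along
// with the original request. The session, query text, and intent name are
// hypothetical; with the REST binding, `match_intent_request.session` is
// supplied in the URL path.
//
// ```json
// {
//   "matchIntentRequest": {
//     "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//     "queryInput": {"text": {"text": "check my order"}, "languageCode": "en"}
//   },
//   "match": {
//     "intent": {"name": "projects/my-project/locations/global/agents/my-agent/intents/my-intent"},
//     "matchType": "INTENT",
//     "confidence": 0.9
//   }
// }
// ```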

// Response of [FulfillIntent][].
message FulfillIntentResponse {
  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 3;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}

// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
message SentimentAnalysisResult {
  // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
  // sentiment).
  float score = 1;

  // A non-negative number in the [0, +inf) range, which represents the absolute
  // magnitude of sentiment, regardless of score (positive or negative).
  float magnitude = 2;
}