// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3beta1/advanced_settings.proto";
import "google/cloud/dialogflow/cx/v3beta1/audio_config.proto";
import "google/cloud/dialogflow/cx/v3beta1/data_store_connection.proto";
import "google/cloud/dialogflow/cx/v3beta1/example.proto";
import "google/cloud/dialogflow/cx/v3beta1/flow.proto";
import "google/cloud/dialogflow/cx/v3beta1/generative_settings.proto";
import "google/cloud/dialogflow/cx/v3beta1/intent.proto";
import "google/cloud/dialogflow/cx/v3beta1/page.proto";
import "google/cloud/dialogflow/cx/v3beta1/response_message.proto";
import "google/cloud/dialogflow/cx/v3beta1/session_entity_type.proto";
import "google/cloud/dialogflow/cx/v3beta1/tool_call.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3Beta1";
option go_package = "cloud.google.com/go/dialogflow/cx/apiv3beta1/cxpb;cxpb";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3beta1";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3beta1";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Session"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
option (google.api.resource_definition) = {
  type: "discoveryengine.googleapis.com/DataStore"
  pattern: "projects/{project}/locations/{location}/dataStores/{data_store}"
  pattern: "projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}"
};

// A session represents an interaction with a user. You retrieve user input
// and pass it to the
// [DetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.DetectIntent]
// method to determine user intent and respond.
service Sessions {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Processes a natural language query and returns structured, actionable data
  // as a result. This method is not idempotent, because it may cause session
  // entity types to be updated, which in turn might affect results of future
  // queries.
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
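  //
  // For illustration, a REST request to this method posts a JSON body to the
  // session-scoped URL from the HTTP binding below. A minimal sketch (the
  // project, agent, and session IDs are assumed placeholders; JSON field
  // names follow the standard proto3 JSON mapping):
  //
  // ```
  // POST https://dialogflow.googleapis.com/v3beta1/projects/my-project/locations/global/agents/my-agent/sessions/session-123:detectIntent
  //
  // { "queryInput": { "text": { "text": "hello" }, "languageCode": "en" } }
  // ```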
  rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query and returns structured, actionable data
  // as a result through server-side streaming. Server-side streaming allows
  // Dialogflow to send [partial
  // responses](https://cloud.google.com/dialogflow/cx/docs/concept/fulfillment#partial-response)
  // earlier in a single request.
  rpc ServerStreamingDetectIntent(DetectIntentRequest)
      returns (stream DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:serverStreamingDetectIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:serverStreamingDetectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query in audio format in a streaming fashion
  // and returns structured, actionable data as a result. This method is only
  // available via the gRPC API (not REST).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
      returns (stream StreamingDetectIntentResponse) {}

  // Returns preliminary intent match results without changing the session
  // status.
  rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
        body: "*"
      }
    };
  }

  // Fulfills a matched intent returned by
  // [MatchIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.MatchIntent].
  // Must be called after
  // [MatchIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.MatchIntent],
  // with input from
  // [MatchIntentResponse][google.cloud.dialogflow.cx.v3beta1.MatchIntentResponse].
  // Otherwise, the behavior is undefined.
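  //
  // As a sketch of the intended two-step flow: call MatchIntent first, then
  // echo its inputs and one of its matches back in a FulfillIntentRequest
  // (the elided contents are whatever the earlier calls carried):
  //
  // ```
  // {
  //   "matchIntentRequest": { ...the original MatchIntentRequest... },
  //   "match": { ...one entry from MatchIntentResponse.matches... }
  // }
  // ```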
  rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
        body: "*"
      }
    };
  }

  // Updates the feedback received from the user for a single turn of the bot
  // response.
  rpc SubmitAnswerFeedback(SubmitAnswerFeedbackRequest)
      returns (AnswerFeedback) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:submitAnswerFeedback"
      body: "*"
    };
  }
}

// Stores information about feedback provided by users about a response.
message AnswerFeedback {
  // Represents the thumbs up/down rating provided by the user about a
  // response.
  enum Rating {
    // Rating not specified.
    RATING_UNSPECIFIED = 0;

    // Thumbs up feedback from user.
    THUMBS_UP = 1;

    // Thumbs down feedback from user.
    THUMBS_DOWN = 2;
  }

  // Stores extra information about why users provided a thumbs down rating.
  message RatingReason {
    // Optional. Custom reason labels for a thumbs down rating provided by the
    // user. The maximum number of labels allowed is 10 and the maximum length
    // of a single label is 128 characters.
    repeated string reason_labels = 3 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Additional feedback about the rating.
    // This field can be populated without choosing a predefined `reason`.
    string feedback = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Rating from user for the specific Dialogflow response.
  Rating rating = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If a thumbs down rating is provided, users can optionally
  // provide context about the rating.
  RatingReason rating_reason = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Custom rating from the user about the provided answer, with a
  // maximum length of 1024 characters. For example, a client could use a
  // customized JSON object to indicate the rating.
  string custom_rating = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The request to set the feedback for a bot answer.
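//
// A minimal sketch of a request body in JSON (the response ID is an assumed
// placeholder taken from a prior DetectIntentResponse.response_id; the
// session name goes in the request URL):
//
// ```json
// {
//   "responseId": "a1b2c3d4-0000-0000-0000-000000000000",
//   "answerFeedback": { "rating": "THUMBS_UP" }
// }
// ```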
message SubmitAnswerFeedbackRequest {
  // Required. The name of the session the feedback was sent to.
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // Required. ID of the response whose feedback is being updated. This is the
  // same as DetectIntentResponse.response_id.
  string response_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. Feedback provided for a bot answer.
  AnswerFeedback answer_feedback = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. The mask to control which fields to update. If the mask is not
  // present, all fields will be updated.
  google.protobuf.FieldMask update_mask = 4
      [(google.api.field_behavior) = OPTIONAL];
}

// The request to detect the user's intent.
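//
// A minimal sketch of a request body in JSON (field names follow the standard
// proto3 JSON mapping; the session name goes in the request URL):
//
// ```json
// {
//   "queryInput": {
//     "text": { "text": "I want to book a flight" },
//     "languageCode": "en"
//   },
//   "queryParams": { "timeZone": "America/New_York" }
// }
// ```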
message DetectIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}

// The message returned from the DetectIntent method.
message DetectIntentResponse {
  // Represents different DetectIntentResponse types.
  enum ResponseType {
    // Not specified. This should never happen.
    RESPONSE_TYPE_UNSPECIFIED = 0;

    // Partial response. For example, aggregated responses in a Fulfillment
    // that enables `return_partial_response` can be returned as partial
    // responses.
    // WARNING: partial response is not eligible for barge-in.
    PARTIAL = 1;

    // Final response.
    FINAL = 2;
  }

  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3beta1.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 4;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 5;

  // Response type.
  ResponseType response_type = 6;

  // Indicates whether the partial response can be cancelled when a later
  // response arrives. For example, if the agent specified some music as a
  // partial response, it can be cancelled.
  bool allow_cancellation = 7;
}

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.StreamingDetectIntent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1.  The first message must contain
//     [session][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.session],
//     [query_input][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_input]
//     plus optionally
//     [query_params][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_params].
//     If the client wants to receive an audio response, it should also contain
//     [output_audio_config][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.output_audio_config].
//
// 2.  If
//     [query_input][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_input]
//     was set to
//     [query_input.audio.config][google.cloud.dialogflow.cx.v3beta1.AudioInput.config],
//     all subsequent messages must contain
//     [query_input.audio.audio][google.cloud.dialogflow.cx.v3beta1.AudioInput.audio]
//     to continue with speech recognition. If you decide to detect an intent
//     from text input instead, after you have already started speech
//     recognition, send a message with
//     [query_input.text][google.cloud.dialogflow.cx.v3beta1.QueryInput.text].
//
//     However, note that:
//
//     * Dialogflow will bill you for the audio duration so far.
//     * Dialogflow discards all speech recognition results in favor of the
//       input text.
//     * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request
// stream.
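//
// As an illustration, an audio session might carry a sequence like the
// following sketch (the encoding and sample rate are assumed placeholders;
// audio bytes are base64-encoded in the JSON representation):
//
// ```
// message 1: { "session": "projects/.../sessions/session-123",
//              "queryInput": {
//                "audio": { "config": {
//                  "audioEncoding": "AUDIO_ENCODING_LINEAR_16",
//                  "sampleRateHertz": 16000 } },
//                "languageCode": "en" } }
// message 2: { "queryInput": { "audio": { "audio": "<base64 chunk>" } } }
// message 3: { "queryInput": { "audio": { "audio": "<base64 chunk>" } } }
// (half-close the stream)
// ```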
message StreamingDetectIntentRequest {
  // The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  // Note: session must be set in the first request.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Session"
  }];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;

  // Enables partial detect intent responses. If this flag is not enabled, the
  // response stream contains only one final `DetectIntentResponse`, even if
  // some `Fulfillment`s in the agent have been configured to return partial
  // responses.
  bool enable_partial_response = 5;

  // If true, `StreamingDetectIntentResponse.debugging_info` will get populated.
  bool enable_debugging_info = 8;
}

// Cloud conversation info for easier debugging.
// It will get populated in `StreamingDetectIntentResponse` or
// `StreamingAnalyzeContentResponse` when the flag `enable_debugging_info` is
// set to true in corresponding requests.
message CloudConversationDebuggingInfo {
  // Number of input audio data chunks in streaming requests.
  int32 audio_data_chunks = 1;

  // Time offset of the end of speech utterance relative to the
  // beginning of the first audio chunk.
  google.protobuf.Duration result_end_time_offset = 2;

  // Duration of first audio chunk.
  google.protobuf.Duration first_audio_duration = 3;

  // Whether client used single utterance mode.
  bool single_utterance = 5;

  // Time offsets of the speech partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration speech_partial_results_end_times = 6;

  // Time offsets of the speech final results (is_final=true) relative to the
  // beginning of the stream.
  repeated google.protobuf.Duration speech_final_results_end_times = 7;

  // Total number of partial responses.
  int32 partial_responses = 8;

  // Time offset of Speaker ID stream close time relative to the Speech stream
  // close time in milliseconds. Only meaningful for conversations involving
  // passive verification.
  int32 speaker_id_passive_latency_ms_offset = 9;

  // Whether a barge-in event is triggered in this request.
  bool bargein_event_triggered = 10;

  // Whether speech uses single utterance mode.
  bool speech_single_utterance = 11;

  // Time offsets of the DTMF partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_partial_results_times = 12;

  // Time offsets of the DTMF final results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_final_results_times = 13;

  // Time offset of the end-of-single-utterance signal relative to the
  // beginning of the stream.
  google.protobuf.Duration single_utterance_end_time_offset = 14;

  // No speech timeout settings for the stream.
  google.protobuf.Duration no_speech_timeout = 15;

  // Speech endpointing timeout settings for the stream.
  google.protobuf.Duration endpointing_timeout = 19;

  // Whether the streaming terminates with an injected text query.
  bool is_input_text = 16;

  // Client half close time in terms of input audio duration.
  google.protobuf.Duration client_half_close_time_offset = 17;

  // Client half close time in terms of API streaming duration.
  google.protobuf.Duration client_half_close_streaming_time_offset = 18;
}

// The top-level message returned from the
// [StreamingDetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.StreamingDetectIntent]
// method.
//
// Multiple response messages (N) can be returned in order.
//
// The first (N-1) responses set either the `recognition_result` or
// `detect_intent_response` field, depending on the request:
//
// *   If the `StreamingDetectIntentRequest.query_input.audio` field was
//     set, and the `StreamingDetectIntentRequest.enable_partial_response`
//     field was false, the `recognition_result` field is populated for each
//     of the (N-1) responses.
//     See the
//     [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
//     message for details about the result message sequence.
//
// *   If the `StreamingDetectIntentRequest.enable_partial_response` field was
//     true, the `detect_intent_response` field is populated for each
//     of the (N-1) responses, where 1 <= N <= 4.
//     These responses set the
//     [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3beta1.DetectIntentResponse.response_type]
//     field to `PARTIAL`.
//
// For the final Nth response message, the `detect_intent_response` is fully
// populated, and
// [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3beta1.DetectIntentResponse.response_type]
// is set to `FINAL`.
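//
// As an illustration, an audio request with `enable_partial_response` set to
// false might produce a sequence like this sketch:
//
// ```
// responses 1..N-1 : recognition_result     (interim transcripts, then a
//                                            transcript with is_final = true)
// response  N      : detect_intent_response (response_type: FINAL)
// ```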
message StreamingDetectIntentResponse {
  // The output response.
  oneof response {
    // The result of speech recognition.
    StreamingRecognitionResult recognition_result = 1;

    // The response from detect intent.
    DetectIntentResponse detect_intent_response = 2;
  }

  // Debugging info that would get populated when
  // `StreamingDetectIntentRequest.enable_debugging_info` is set to true.
  CloudConversationDebuggingInfo debugging_info = 4;
}

// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// While end-user audio is being processed, Dialogflow sends a series of
// results. Each result may contain a `transcript` value. A transcript
// represents a portion of the utterance. While the recognizer is processing
// audio, transcript values may be interim values or finalized values.
// Once a transcript is finalized, the `is_final` value is set to true and
// processing continues for the next transcript.
//
// If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
// was true, and the recognizer has completed processing audio,
// the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
// following (last) result contains the last finalized transcript.
//
// The complete end-user utterance is determined by concatenating the
// finalized transcript values received for the series of results.
//
// In the following example, single utterance is enabled. If single utterance
// were not enabled, result 7 would not occur.
//
// ```
// Num | transcript              | message_type            | is_final
// --- | ----------------------- | ----------------------- | --------
// 1   | "tube"                  | TRANSCRIPT              | false
// 2   | "to be a"               | TRANSCRIPT              | false
// 3   | "to be"                 | TRANSCRIPT              | false
// 4   | "to be or not to be"    | TRANSCRIPT              | true
// 5   | "that's"                | TRANSCRIPT              | false
// 6   | "that is"               | TRANSCRIPT              | false
// 7   | unset                   | END_OF_SINGLE_UTTERANCE | unset
// 8   | " that is the question" | TRANSCRIPT              | true
// ```
//
// Concatenating the finalized transcripts with `is_final` set to true,
// the complete utterance becomes "to be or not to be that is the question".
message StreamingRecognitionResult {
  // Type of the response message.
  enum MessageType {
    // Not specified. Should never be used.
    MESSAGE_TYPE_UNSPECIFIED = 0;

    // Message contains a (possibly partial) transcript.
    TRANSCRIPT = 1;

    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection. This message is only sent if
    // [`single_utterance`][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.single_utterance]
    // was set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 2;
  }

  // Type of the result message.
  MessageType message_type = 1;

  // Transcript text representing the words that the user spoke.
  // Populated if and only if `message_type` = `TRANSCRIPT`.
  string transcript = 2;

  // If `false`, the `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, the recognizer will not return
  // any further hypotheses about this piece of the audio. May only be populated
  // for `message_type` = `TRANSCRIPT`.
  bool is_final = 3;

  // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
  // A higher number indicates an estimated greater likelihood that the
  // recognized words are correct. The default of 0.0 is a sentinel value
  // indicating that confidence was not set.
  //
  // This field is typically only provided if `is_final` is true and you should
  // not rely on it being accurate or even set.
  float confidence = 4;

  // An estimate of the likelihood that the speech recognizer will
  // not change its guess about this interim recognition result:
  // * If the value is unspecified or 0.0, Dialogflow didn't compute the
  //   stability. In particular, Dialogflow will only provide stability for
  //   `TRANSCRIPT` results with `is_final = false`.
  // * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
  //   unstable and 1.0 means completely stable.
  float stability = 6;

  // Word-specific information for the words recognized by Speech in
  // [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
  // Populated if and only if `message_type` = `TRANSCRIPT` and
  // [InputAudioConfig.enable_word_info] is set.
  repeated SpeechWordInfo speech_word_info = 7;

  // Time offset of the end of this Speech recognition result relative to the
  // beginning of the audio. Only populated for `message_type` =
  // `TRANSCRIPT`.
  google.protobuf.Duration speech_end_offset = 8;

  // Detected language code for the transcript.
  string language_code = 10;
}

// Represents the parameters of a conversational query.
message QueryParameters {
  // The time zone of this conversational query from the [time zone
  // database](https://www.iana.org/time-zones), e.g., America/New_York,
  // Europe/Paris. If not provided, the time zone specified in the agent is
  // used.
  string time_zone = 1;

  // The geo location of this conversational query.
  google.type.LatLng geo_location = 2;

  // Additional session entity types to replace or extend developer entity types
  // with. The entity synonyms apply to all languages and persist for the
  // session of this query.
  repeated SessionEntityType session_entity_types = 3;

  // This field can be used to pass custom data into the webhook associated with
  // the agent. Arbitrary JSON objects are supported.
  // Some integrations that query a Dialogflow agent may provide additional
  // information in the payload.
  // In particular, for the Dialogflow Phone Gateway integration, this field has
  // the form:
  // ```
  // {
  //  "telephony": {
  //    "caller_id": "+18558363987"
  //  }
  // }
  // ```
  google.protobuf.Struct payload = 4;

  // Additional parameters to be put into [session
  // parameters][SessionInfo.parameters]. To remove a
  // parameter from the session, clients should explicitly set the parameter
  // value to null.
  //
  // You can reference the session parameters in the agent with the following
  // format: $session.params.parameter-id.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If the parameter's entity type is a composite entity,
  //   use map; otherwise, depending on the parameter value type, it could be
  //   one of string, number, boolean, null, list or map.
  // * MapValue value: If the parameter's entity type is a composite entity,
  //   use a map from composite entity property names to property values;
  //   otherwise, use the parameter value.
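  //
  // For example (parameter names here are assumed for illustration), the
  // following JSON could set one parameter and remove another from the
  // session:
  //
  // ```json
  // {
  //   "destination-city": "Paris",
  //   "departure-date": null
  // }
  // ```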
  google.protobuf.Struct parameters = 5;

  // The unique identifier of the
  // [page][google.cloud.dialogflow.cx.v3beta1.Page] to override the [current
  // page][QueryResult.current_page] in the session.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/pages/<Page ID>`.
  //
  // If `current_page` is specified, the previous state of the session will be
  // ignored by Dialogflow, including the [previous
  // page][QueryResult.current_page] and the [previous session
  // parameters][QueryResult.parameters].
  // In most cases,
  // [current_page][google.cloud.dialogflow.cx.v3beta1.QueryParameters.current_page]
  // and
  // [parameters][google.cloud.dialogflow.cx.v3beta1.QueryParameters.parameters]
  // should be configured together to direct a session to a specific state.
  string current_page = 6 [
    (google.api.resource_reference) = { type: "dialogflow.googleapis.com/Page" }
  ];

  // Whether to disable webhook calls for this request.
  bool disable_webhook = 7;

  // Configures whether sentiment analysis should be performed. If not
  // provided, sentiment analysis is not performed.
  bool analyze_query_text_sentiment = 8;

  // This field can be used to pass HTTP headers for a webhook
  // call. These headers will be sent to the webhook along with the headers
  // that have been configured through the Dialogflow web console. The headers
  // defined within this field will overwrite the headers configured through
  // the Dialogflow console if there is a conflict. Header names are
  // case-insensitive. Google's specified headers are not allowed, including
  // "Host", "Content-Length", "Connection", "From", "User-Agent",
  // "Accept-Encoding", "If-Modified-Since", "If-None-Match",
  // "X-Forwarded-For", etc.
  map<string, string> webhook_headers = 10;

  // A list of flow versions to override for the request.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/versions/<Version ID>`.
  //
  // If version 1 of flow X is included in this list, the traffic of
  // flow X will go through version 1 regardless of the version configuration in
  // the environment. Each flow can have at most one version specified in this
  // list.
  repeated string flow_versions = 14 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Version"
  }];

  // Optional. Start the session with the specified
  // [playbook][google.cloud.dialogflow.cx.v3beta1.Playbook]. You can only
  // specify the playbook at the beginning of the session. Otherwise, an error
  // will be thrown.
  //
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/playbooks/<Playbook ID>`.
  string current_playbook = 19 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Playbook"
    }
  ];

  // Optional. Use the specified LLM model settings for processing the request.
  LlmModelSettings llm_model_settings = 21
      [(google.api.field_behavior) = OPTIONAL];

  // The channel which this query is for.
  //
  // If specified, only the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage]
  // associated with the channel will be returned. If no
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] is
  // associated with the channel, it falls back to the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] with
  // unspecified channel.
  //
  // If unspecified, the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] with
  // unspecified channel will be returned.
  string channel = 15;

  // Optional. Configures the lifetime of the Dialogflow session.
  // By default, a Dialogflow session remains active and its data is stored for
  // 30 minutes after the last request is sent for the session.
  // This value should be no longer than 1 day.
  google.protobuf.Duration session_ttl = 16
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Information about the end-user to improve the relevance and
  // accuracy of generative answers.
  //
  // This will be interpreted and used by a language model, so, for good
  // results, the data should be self-descriptive and simply structured.
  //
  // Example:
  //
  // ```json
  // {
  //   "subscription plan": "Business Premium Plus",
  //   "devices owned": [
  //     {"model": "Google Pixel 7"},
  //     {"model": "Google Pixel Tablet"}
  //   ]
  // }
  // ```
  google.protobuf.Struct end_user_metadata = 18
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Search configuration for UCS search queries.
  SearchConfig search_config = 20 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If set to true and data stores are involved in serving the
  // request, then
  // DetectIntentResponse.query_result.data_store_connection_signals
  // will be filled with data that can help evaluations.
  bool populate_data_store_connection_signals = 25
      [(google.api.field_behavior) = OPTIONAL];
}

// Search configuration for UCS search queries.
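//
// A sketch of a possible configuration in JSON (the data store name, the
// condition, and the filter are assumed placeholders; field names follow the
// standard proto3 JSON mapping):
//
// ```json
// {
//   "boostSpecs": [{
//     "dataStores": [
//       "projects/my-project/locations/global/dataStores/my-store"
//     ],
//     "spec": [{
//       "conditionBoostSpecs": [{ "condition": "...", "boost": 0.5 }]
//     }]
//   }],
//   "filterSpecs": [{
//     "dataStores": [
//       "projects/my-project/locations/global/dataStores/my-store"
//     ],
//     "filter": "..."
//   }]
// }
// ```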
message SearchConfig {
  // Optional. Boosting configuration for the datastores.
  repeated BoostSpecs boost_specs = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Filter configuration for the datastores.
  repeated FilterSpecs filter_specs = 2
      [(google.api.field_behavior) = OPTIONAL];
}

// Boost specification to boost certain documents.
// A copy of google.cloud.discoveryengine.v1main.BoostSpec; field documentation
// is available at
// https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1alpha/BoostSpec
message BoostSpec {
  // Boost applies to documents which match a condition.
  message ConditionBoostSpec {
    // Optional. An expression which specifies a boost condition. The syntax
    // and supported fields are the same as a filter expression.
    // Examples:
    //
    // * To boost documents with document ID "doc_1" or "doc_2", and
    //   color "Red" or "Blue":
    //     * (id: ANY("doc_1", "doc_2")) AND (color: ANY("Red","Blue"))
    string condition = 1 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Strength of the condition boost, which should be in [-1, 1].
    // Negative boost means demotion. Default is 0.0.
    //
    // Setting to 1.0 gives the document a big promotion. However, it does not
    // necessarily mean that the boosted document will be the top result at
    // all times, nor that other documents will be excluded. Results could
    // still be shown even when none of them matches the condition. Results
    // that are significantly more relevant to the search query can still
    // trump your heavily favored but irrelevant documents.
    //
    // Setting to -1.0 gives the document a big demotion. However, results
    // that are deeply relevant might still be shown. The document will have
    // an uphill battle to get a fairly high ranking, but it is not blocked
    // out completely.
    //
    // Setting to 0.0 means no boost applied. The boosting condition is
    // ignored.
    float boost = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Condition boost specifications. If a document matches multiple
  // conditions in the specifications, boost scores from these specifications
  // are all applied and combined in a non-linear way. The maximum number of
  // specifications is 20.
  repeated ConditionBoostSpec condition_boost_specs = 1
      [(google.api.field_behavior) = OPTIONAL];
}

// Boost specifications for data stores.
message BoostSpecs {
  // Optional. Data Stores where the boosting configuration is applied. The
  // full names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. A list of boosting specifications.
  repeated BoostSpec spec = 2 [(google.api.field_behavior) = OPTIONAL];
}

// Filter specifications for data stores.
message FilterSpecs {
  // Optional. Data Stores where the filter configuration is applied. The
  // full names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. The filter expression to be applied.
  // Expression syntax is documented at
  // https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax
  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
}

// Represents the query input. It can contain one of:
//
// 1. A conversational query in the form of text.
//
// 2. An intent query that specifies which intent to trigger.
//
// 3. Natural language speech audio to be processed.
//
// 4. An event to be triggered.
//
// 5. DTMF digits to invoke an intent and fill in parameter values.
//
// 6. The results of a tool executed by the client.
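//
// For instance, a plain text query (case 1) could be expressed in JSON as the
// following sketch (field names follow the standard proto3 JSON mapping):
//
// ```json
// {
//   "text": { "text": "I want to book a flight" },
//   "languageCode": "en"
// }
// ```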
message QueryInput {
  // Required. The input specification.
  oneof input {
    // The natural language text to be processed.
    TextInput text = 2;

    // The intent to be triggered.
    IntentInput intent = 3;

    // The natural language speech audio to be processed.
    AudioInput audio = 5;

    // The event to be triggered.
    EventInput event = 6;

    // The DTMF event to be handled.
    DtmfInput dtmf = 7;

    // The results of a tool executed by the client.
    google.cloud.dialogflow.cx.v3beta1.ToolCallResult tool_call_result = 11;
  }

  // Required. The language of the input. See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries in
  // the same session do not necessarily need to specify the same language.
  string language_code = 4 [(google.api.field_behavior) = REQUIRED];
}

// Represents the information of a query if handled by generative agent
// resources.
message GenerativeInfo {
  // The stack of [playbooks][google.cloud.dialogflow.cx.v3beta1.Playbook] that
  // the conversation has currently entered, with the most recent one on the
  // top.
  repeated string current_playbooks = 1;

  // The actions performed by the generative playbook for the current agent
  // response.
  Example action_tracing_info = 2;
}

// Represents the result of a conversational query.
message QueryResult {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3beta1.TextInput]
    // was provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3beta1.IntentInput] was
    // provided as input, this field will contain a copy of the intent
    // identifier. Format: `projects/<Project ID>/locations/<Location
    // ID>/agents/<Agent ID>/intents/<Intent ID>`.
    string trigger_intent = 11 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3beta1.AudioInput] was provided as
    // input, this field will contain the transcript for the audio.
    string transcript = 12;

    // If an [event][google.cloud.dialogflow.cx.v3beta1.EventInput] was provided
    // as input, this field will contain the name of the event.
    string trigger_event = 14;

    // If a [DTMF][google.cloud.dialogflow.cx.v3beta1.DtmfInput] was provided as
    // input, this field will contain a copy of the
    // [DtmfInput][google.cloud.dialogflow.cx.v3beta1.DtmfInput].
    DtmfInput dtmf = 23;
  }

  // The language that was triggered during intent detection.
  // See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 2;

  // The collected [session
  // parameters][google.cloud.dialogflow.cx.v3beta1.SessionInfo.parameters].
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If the parameter's entity type is a composite entity,
  //   use map; otherwise, depending on the parameter value type, it could be
  //   one of string, number, boolean, null, list or map.
  // * MapValue value: If the parameter's entity type is a composite entity,
  //   use a map from composite entity property names to property values;
  //   otherwise, use the parameter value.
  google.protobuf.Struct parameters = 3;

  // The list of rich messages returned to the client. Responses vary from
  // simple text messages to more sophisticated, structured payloads used
  // to drive complex logic.
  repeated ResponseMessage response_messages = 4;

  // The list of webhook IDs in the order of call sequence.
  repeated string webhook_ids = 25;

  // The list of webhook display names in the order of call sequence.
  repeated string webhook_display_names = 26;

  // The list of webhook latencies in the order of call sequence.
  repeated google.protobuf.Duration webhook_latencies = 27;

  // The list of webhook tags in the order of call sequence.
  repeated string webhook_tags = 29;

  // The list of webhook call statuses in the order of call sequence.
  repeated google.rpc.Status webhook_statuses = 13;

  // The list of webhook payloads in
  // [WebhookResponse.payload][google.cloud.dialogflow.cx.v3beta1.WebhookResponse.payload],
  // in the order of call sequence. If a webhook call fails or doesn't return
  // any payload, an empty `Struct` is used instead.
  repeated google.protobuf.Struct webhook_payloads = 6;

  // The current [Page][google.cloud.dialogflow.cx.v3beta1.Page]. Some, but not
  // all, fields are filled in this message, including but not limited to
  // `name` and `display_name`.
  Page current_page = 7;

  // The current [Flow][google.cloud.dialogflow.cx.v3beta1.Flow]. Some, but not
  // all, fields are filled in this message, including but not limited to
  // `name` and `display_name`.
  Flow current_flow = 31;

  // The [Intent][google.cloud.dialogflow.cx.v3beta1.Intent] that matched the
  // conversational query. Some, but not all, fields are filled in this
  // message, including but not limited to `name` and `display_name`. This
  // field is deprecated; use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3beta1.QueryResult.match]
  // instead.
  Intent intent = 8 [deprecated = true];

  // The intent detection confidence. Values range from 0.0 (completely
  // uncertain) to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to
  // help match the best intent within the classification threshold.
  // This value may change for the same end-user expression at any time due to
  // a model retraining or change in implementation.
  // This field is deprecated; use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3beta1.QueryResult.match]
  // instead.
  float intent_detection_confidence = 9 [deprecated = true];

  // Intent match result; could be an intent or an event.
  Match match = 15;

  // The free-form diagnostic info. For example, this field could contain
  // webhook call latency. The fields of this data can change without notice,
  // so you should not write code that depends on its structure.
  //
  // One of the fields is called "Alternative Matched Intents", which may
  // aid with debugging. The following describes these intent results:
  //
  // - The list is empty if no intent was matched to end-user input.
  // - Only intents that are referenced in the currently active flow are
  //   included.
  // - The matched intent is included.
  // - Other intents that could have matched end-user input, but did not match
  //   because they are referenced by intent routes that are out of
  //   [scope](https://cloud.google.com/dialogflow/cx/docs/concept/handler#scope),
  //   are included.
  // - Other intents referenced by intent routes in scope that matched
  //   end-user input, but had a lower confidence score, are also included.
  google.protobuf.Struct diagnostic_info = 10;

  // The information of a query if handled by generative agent resources.
  GenerativeInfo generative_info = 33;

  // The sentiment analysis result, which depends on
  // [`analyze_query_text_sentiment`]
  // [google.cloud.dialogflow.cx.v3beta1.QueryParameters.analyze_query_text_sentiment],
  // specified in the request.
  SentimentAnalysisResult sentiment_analysis_result = 17;

  // Returns the current advanced settings, including IVR settings. Even though
  // the operations configured by these settings are performed by Dialogflow,
  // the client may need to perform special logic at the moment. For example, if
  // Dialogflow exports audio to Google Cloud Storage, then the client may need
  // to wait for the resulting object to appear in the bucket before proceeding.
  AdvancedSettings advanced_settings = 21;

  // Indicates whether the thumbs up/thumbs down rating controls need to be
  // shown for the response in the Dialogflow Messenger widget.
  bool allow_answer_feedback = 32;

  // Optional. Data store connection feature output signals.
  // Filled only when data stores are involved in serving the query and
  // `populate_data_store_connection_signals` is set to true in the request.
  DataStoreConnectionSignals data_store_connection_signals = 35
      [(google.api.field_behavior) = OPTIONAL];
}

// Represents the natural language text to be processed.
message TextInput {
  // Required. The UTF-8 encoded natural language text to be processed.
  string text = 1 [(google.api.field_behavior) = REQUIRED];
}

// Represents the intent to trigger programmatically rather than as a result of
// natural language processing.
message IntentInput {
  // Required. The unique identifier of the intent.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/intents/<Intent ID>`.
  string intent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }
  ];
}

// Represents the natural speech audio to be processed.
message AudioInput {
  // Required. Instructs the speech recognizer how to process the speech audio.
  InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // The natural language speech audio to be processed.
  // A single request can contain up to 2 minutes of speech audio data.
  // The [transcribed
  // text][google.cloud.dialogflow.cx.v3beta1.QueryResult.transcript] cannot
  // contain more than 256 bytes.
  //
  // For non-streaming audio detect intent, both `config` and `audio` must be
  // provided.
  // For streaming audio detect intent, `config` must be provided in
  // the first request and `audio` must be provided in all following requests.
  bytes audio = 2;
}

// Represents the event to trigger.
message EventInput {
  // Name of the event.
  string event = 1;
}

// Represents the input for a DTMF event.
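// For example, a caller keying `1` `2` `3` followed by `#` (with `#`
// configured as the finish digit, an assumed setup) could arrive as
// `{"digits": "123", "finishDigit": "#"}` in the JSON representation.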
message DtmfInput {
  // The DTMF digits.
  string digits = 1;

  // The finish digit (if any).
  string finish_digit = 2;
}

// Represents one match result of [MatchIntent][].
message Match {
  // Type of a Match.
  enum MatchType {
    // Not specified. Should never be used.
    MATCH_TYPE_UNSPECIFIED = 0;

    // The query was matched to an intent.
    INTENT = 1;

    // The query directly triggered an intent.
    DIRECT_INTENT = 2;

    // The query was used for parameter filling.
    PARAMETER_FILLING = 3;

    // No match was found for the query.
    NO_MATCH = 4;

    // Indicates an empty query.
    NO_INPUT = 5;

    // The query directly triggered an event.
    EVENT = 6;
  }

  // The [Intent][google.cloud.dialogflow.cx.v3beta1.Intent] that matched the
  // query. Some, but not all, fields are filled in this message, including but
  // not limited to `name` and `display_name`. Only filled for the
  // [`INTENT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] match type.
  Intent intent = 1;

  // The event that matched the query. Filled for the
  // [`EVENT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType],
  // [`NO_MATCH`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] and
  // [`NO_INPUT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] match
  // types.
  string event = 6;

  // The collection of parameters extracted from the query.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If the parameter's entity type is a composite entity,
  //   use map; otherwise, depending on the parameter value type, it could be
  //   one of string, number, boolean, null, list or map.
  // * MapValue value: If the parameter's entity type is a composite entity,
  //   use a map from composite entity property names to property values;
  //   otherwise, use the parameter value.
  google.protobuf.Struct parameters = 2;

  // Final text input which was matched during MatchIntent. This value can be
  // different from the original input sent in the request because of spelling
  // correction or other processing.
  string resolved_input = 3;

  // Type of this [Match][google.cloud.dialogflow.cx.v3beta1.Match].
  MatchType match_type = 4;

  // The confidence of this match. Values range from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to help
  // match the best intent within the classification threshold. This value may
  // change for the same end-user expression at any time due to a model
  // retraining or change in implementation.
  float confidence = 5;
}

// Request of [MatchIntent][].
message MatchIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Persist session parameter changes from `query_params`.
  bool persist_parameter_changes = 5;
}

// Response of [MatchIntent][].
message MatchIntentResponse {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3beta1.TextInput]
    // was provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3beta1.IntentInput] was
    // provided as input, this field will contain a copy of the intent
    // identifier. Format: `projects/<Project ID>/locations/<Location
    // ID>/agents/<Agent ID>/intents/<Intent ID>`.
    string trigger_intent = 2 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3beta1.AudioInput] was provided as
    // input, this field will contain the transcript for the audio.
    string transcript = 3;

    // If an [event][google.cloud.dialogflow.cx.v3beta1.EventInput] was provided
    // as input, this field will contain a copy of the event name.
    string trigger_event = 6;
  }

  // Match results. If there is more than one, they are ordered in descending
  // order of the confidence we have that the particular intent matches the
  // query.
  repeated Match matches = 4;

  // The current [Page][google.cloud.dialogflow.cx.v3beta1.Page]. Some, but not
  // all, fields are filled in this message, including but not limited to
  // `name` and `display_name`.
  Page current_page = 5;
}

// Request of [FulfillIntent][].
message FulfillIntentRequest {
  // Must be the same as the corresponding MatchIntent request; otherwise the
  // behavior is undefined.
  MatchIntentRequest match_intent_request = 1;

  // The matched intent/event to fulfill.
  Match match = 2;

  // Instructs the speech synthesizer how to generate output audio.
  OutputAudioConfig output_audio_config = 3;
}

// Response of [FulfillIntent][].
message FulfillIntentResponse {
  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3beta1.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 3;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}

// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
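//
// For example (an illustrative sketch, not output from the service), mixed
// input such as "The agent was helpful, but the wait was terrible" could
// plausibly yield a `score` near 0.0 together with a large `magnitude`,
// because the positive and negative signals offset each other in `score` but
// accumulate in `magnitude`.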
message SentimentAnalysisResult {
  // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
  // sentiment).
  float score = 1;

  // A non-negative number in the [0, +inf) range, which represents the absolute
  // magnitude of sentiment, regardless of score (positive or negative).
  float magnitude = 2;
}