xref: /aosp_15_r20/external/googleapis/google/cloud/dialogflow/v2/participant.proto (revision d5c09012810ac0c9f33fe448fb6da8260d444cc9)
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.v2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2/audio_config.proto";
import "google/cloud/dialogflow/v2/session.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "cloud.google.com/go/dialogflow/apiv2/dialogflowpb;dialogflowpb";
option java_multiple_files = true;
option java_outer_classname = "ParticipantProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

// Service for managing [Participants][google.cloud.dialogflow.v2.Participant].
service Participants {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Creates a new participant in a conversation.
  rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*}/participants"
      body: "participant"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "parent,participant";
  }

  // Retrieves a conversation participant.
  rpc GetParticipant(GetParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/conversations/*/participants/*}"
      additional_bindings {
        get: "/v2/{name=projects/*/locations/*/conversations/*/participants/*}"
      }
    };
    option (google.api.method_signature) = "name";
  }

  // Returns the list of all participants in the specified conversation.
  rpc ListParticipants(ListParticipantsRequest)
      returns (ListParticipantsResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*/conversations/*}/participants"
      additional_bindings {
        get: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Updates the specified participant.
  rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      patch: "/v2/{participant.name=projects/*/conversations/*/participants/*}"
      body: "participant"
      additional_bindings {
        patch: "/v2/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "participant,update_mask";
  }

  // Adds a text (chat, for example), or audio (phone recording, for example)
  // message from a participant into the conversation.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
    option (google.api.http) = {
      post: "/v2/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
      body: "*"
      additional_bindings {
        post: "/v2/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
        body: "*"
      }
    };
    option (google.api.method_signature) = "participant,text_input";
    option (google.api.method_signature) = "participant,event_input";
  }

  // Adds a text (chat, for example), or audio (phone recording, for example)
  // message from a participant into the conversation.
  // Note: This method is only available through the gRPC API (not REST).
  //
  // The top-level message sent to the client by the server is
  // `StreamingAnalyzeContentResponse`. Multiple response messages can be
  // returned in order. The first one or more messages contain the
  // `recognition_result` field. Each result represents a more complete
  // transcript of what the user said. The next message contains the
  // `reply_text` field and potentially the `reply_audio` field. The message can
  // also contain the `automated_agent_reply` field.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest)
      returns (stream StreamingAnalyzeContentResponse) {}

  // Gets suggested articles for a participant based on specific historical
  // messages.
  rpc SuggestArticles(SuggestArticlesRequest)
      returns (SuggestArticlesResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets suggested faq answers for a participant based on specific historical
  // messages.
  rpc SuggestFaqAnswers(SuggestFaqAnswersRequest)
      returns (SuggestFaqAnswersResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets smart replies for a participant based on specific historical
  // messages.
  rpc SuggestSmartReplies(SuggestSmartRepliesRequest)
      returns (SuggestSmartRepliesResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }
}

// Represents a conversation participant (human agent, virtual agent, end-user).
message Participant {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Participant"
    pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
  };

  // Enumeration of the roles a participant can play in a conversation.
  enum Role {
    // Participant role not set.
    ROLE_UNSPECIFIED = 0;

    // Participant is a human agent.
    HUMAN_AGENT = 1;

    // Participant is an automated agent, such as a Dialogflow agent.
    AUTOMATED_AGENT = 2;

    // Participant is an end user that has called or chatted with
    // Dialogflow services.
    END_USER = 3;
  }

  // Optional. The unique identifier of this participant.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string name = 1 [(google.api.field_behavior) = OPTIONAL];

  // Immutable. The role this participant plays in the conversation. This field
  // must be set during participant creation and is then immutable.
  Role role = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Optional. Label applied to streams representing this participant in SIPREC
  // XML metadata and SDP. This is used to assign transcriptions from that
  // media stream to this participant. This field can be updated.
  string sip_recording_media_label = 6 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Obfuscated user id that should be associated with the created
  // participant.
  //
  // You can specify a user id as follows:
  //
  // 1. If you set this field in
  //    [CreateParticipantRequest][google.cloud.dialogflow.v2.CreateParticipantRequest.participant]
  //    or
  //    [UpdateParticipantRequest][google.cloud.dialogflow.v2.UpdateParticipantRequest.participant],
  //    Dialogflow adds the obfuscated user id with the participant.
  //
  // 2. If you set this field in
  //    [AnalyzeContent][google.cloud.dialogflow.v2.AnalyzeContentRequest.obfuscated_external_user_id]
  //    or
  //    [StreamingAnalyzeContent][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.obfuscated_external_user_id],
  //    Dialogflow will update
  //    [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2.Participant.obfuscated_external_user_id].
  //
  // Dialogflow returns an error if you try to add a user id for a
  // non-[END_USER][google.cloud.dialogflow.v2.Participant.Role.END_USER]
  // participant.
  //
  // Dialogflow uses this user id for billing and measurement purposes. For
  // example, Dialogflow determines whether a user in one conversation returned
  // in a later conversation.
  //
  // Note:
  //
  // * Please never pass raw user ids to Dialogflow. Always obfuscate your user
  //   id first.
  // * Dialogflow only accepts a UTF-8 encoded string, e.g., a hex digest of a
  //   hash function like SHA-512.
  // * The length of the user id must be <= 256 characters.
  string obfuscated_external_user_id = 7
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Key-value filters on the metadata of documents returned by
  // article suggestion. If specified, article suggestion only returns suggested
  // documents that match all filters in their
  // [Document.metadata][google.cloud.dialogflow.v2.Document.metadata]. Multiple
  // values for a metadata key should be concatenated by comma. For example,
  // filters to match all documents that have 'US' or 'CA' in their market
  // metadata values and 'agent' in their user metadata values will be
  // ```
  // documents_metadata_filters {
  //   key: "market"
  //   value: "US,CA"
  // }
  // documents_metadata_filters {
  //   key: "user"
  //   value: "agent"
  // }
  // ```
  map<string, string> documents_metadata_filters = 8
      [(google.api.field_behavior) = OPTIONAL];
}

// Represents a message posted into a conversation.
message Message {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Message"
    pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
  };

  // Optional. The unique identifier of the message.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string name = 1 [(google.api.field_behavior) = OPTIONAL];

  // Required. The message content.
  string content = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. The message language.
  // This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag. Example: "en-US".
  string language_code = 3 [(google.api.field_behavior) = OPTIONAL];

  // Output only. The participant that sends this message.
  string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The role of the participant.
  Participant.Role participant_role = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time when the message was created in Contact Center AI.
  google.protobuf.Timestamp create_time = 6
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Optional. The time when the message was sent.
  google.protobuf.Timestamp send_time = 9
      [(google.api.field_behavior) = OPTIONAL];

  // Output only. The annotation for the message.
  MessageAnnotation message_annotation = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The sentiment analysis result for the message.
  SentimentAnalysisResult sentiment_analysis = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];
}

// The request message for
// [Participants.CreateParticipant][google.cloud.dialogflow.v2.Participants.CreateParticipant].
message CreateParticipantRequest {
  // Required. Resource identifier of the conversation adding the participant.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The participant to create.
  Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message for
// [Participants.GetParticipant][google.cloud.dialogflow.v2.Participants.GetParticipant].
message GetParticipantRequest {
  // Required. The name of the participant. Format:
  // `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
  // ID>/participants/<Participant ID>`.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];
}

// The request message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsRequest {
  // Required. The conversation to list all participants from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The maximum number of items to return in a single page. By
  // default 100 and at most 1000.
  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The next_page_token value returned from a previous list request.
  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The response message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsResponse {
  // The list of participants. There is a maximum number of items
  // returned based on the page_size field in the request.
  repeated Participant participants = 1;

  // Token to retrieve the next page of results or empty if there are no
  // more results in the list.
  string next_page_token = 2;
}

// The request message for
// [Participants.UpdateParticipant][google.cloud.dialogflow.v2.Participants.UpdateParticipant].
message UpdateParticipantRequest {
  // Required. The participant to update.
  Participant participant = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The mask to specify which fields to update.
  google.protobuf.FieldMask update_mask = 2
      [(google.api.field_behavior) = REQUIRED];
}

// The request message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The input content.
  oneof input {
    // The natural language text to be processed.
    TextInput text_input = 6;

    // An input event to send to Dialogflow.
    EventInput event_input = 8;

    // An input representing the selection of a suggestion.
    SuggestionInput suggestion_input = 12;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // AnalyzeContent. If this configuration is not supplied, speech synthesis
  // is disabled.
  OutputAudioConfig reply_audio_config = 5;

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 9;

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 14;

  // Additional parameters to be put into Dialogflow CX session parameters. To
  // remove a parameter from the session, clients should explicitly set the
  // parameter value to null.
  //
  // Note: this field should only be used if you are connecting to a Dialogflow
  // CX agent.
  google.protobuf.Struct cx_parameters = 18;

  // A unique identifier for this request. Restricted to 36 ASCII characters.
  // A random UUID is recommended.
  // This request is only idempotent if a `request_id` is provided.
  string request_id = 11;
}

// The message in the response that indicates the parameters of DTMF.
message DtmfParameters {
  // Indicates whether DTMF input can be handled in the next request.
  bool accepts_dtmf_input = 1;
}

// The response message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentResponse {
  // The output text content.
  // This field is set if the automated agent responded with text to show to
  // the user.
  string reply_text = 1;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  //  - `reply_audio_config` was specified in the request, or
  //  - The automated agent responded with audio to play to the user. In such
  //    case, `reply_audio.config` contains settings used to synthesize the
  //    speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 2;

  // Only set if a Dialogflow automated agent has responded.
  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty, use
  // [reply_audio][google.cloud.dialogflow.v2.AnalyzeContentResponse.reply_audio]
  // instead.
  AutomatedAgentReply automated_agent_reply = 3;

  // Message analyzed by CCAI.
  Message message = 5;

  // The suggestions for most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
  // of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  //
  // Note that any failure of Agent Assist features will not lead to the overall
  // failure of an AnalyzeContent API call. Instead, the features will
  // fail silently with the error field set in the corresponding
  // SuggestionResult.
  repeated SuggestionResult human_agent_suggestion_results = 6;

  // The suggestions for end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
  // of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  //
  // Same as human_agent_suggestion_results, any failure of Agent Assist
  // features will not lead to the overall failure of an AnalyzeContent API
  // call. Instead, the features will fail silently with the error field set in
  // the corresponding SuggestionResult.
  repeated SuggestionResult end_user_suggestion_results = 7;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 9;
}

// The top-level message sent by the client to the
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1.  The first message must contain
//     [participant][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.participant],
//     [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config]
//     and optionally
//     [query_params][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.query_params].
//     If you want to receive an audio response, it should also contain
//     [reply_audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.reply_audio_config].
//     The message must not contain
//     [input][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input].
//
// 2.  If
// [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in
// the first message
//     was set to
//     [audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.audio_config],
//     all subsequent messages must contain
//     [input_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_audio]
//     to continue with Speech recognition. However, note that:
//
//     * Dialogflow will bill you for the audio so far.
//     * Dialogflow discards all Speech recognition results in favor of the
//       text input.
//
//  3. If
//  [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config]
//  in the first message was set
//    to
//    [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.text_config],
//    then the second message must contain only
//    [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
//    Moreover, you must not send more than two messages.
//
//  After you sent all input, you must half-close or abort the request stream.
message StreamingAnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The input config.
  oneof config {
    // Instructs the speech recognizer how to process the speech audio.
    InputAudioConfig audio_config = 2;

    // The natural language text to be processed.
    InputTextConfig text_config = 3;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // StreamingAnalyzeContent. If this configuration is not supplied, speech
  // synthesis is disabled.
  OutputAudioConfig reply_audio_config = 4;

  // The input.
  oneof input {
    // The input audio content to be recognized. Must be sent if `audio_config`
    // is set in the first message. The complete audio over all streaming
    // messages must not exceed 1 minute.
    bytes input_audio = 5;

    // The UTF-8 encoded natural language text to be processed. Must be sent if
    // `text_config` is set in the first message. Text length must not exceed
    // 256 bytes for virtual agent interactions. The `input_text` field can be
    // only sent once, and would cancel the speech recognition if any ongoing.
    string input_text = 6;

    // The DTMF digits used to invoke intent and fill in parameter value.
    //
    // This input is ignored if the previous response indicated that DTMF input
    // is not accepted.
    TelephonyDtmfEvents input_dtmf = 9;
  }

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 7;

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 8;

  // Additional parameters to be put into Dialogflow CX session parameters. To
  // remove a parameter from the session, clients should explicitly set the
  // parameter value to null.
  //
  // Note: this field should only be used if you are connecting to a Dialogflow
  // CX agent.
  google.protobuf.Struct cx_parameters = 13;

  // Optional. Enable full bidirectional streaming. You can keep streaming the
  // audio until timeout, and there's no need to half close the stream to get
  // the response.
  //
  // Restrictions:
  //
  // - Timeout: 3 mins.
  // - Audio Encoding: only supports
  // [AudioEncoding.AUDIO_ENCODING_LINEAR_16][google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16]
  // and
  // [AudioEncoding.AUDIO_ENCODING_MULAW][google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_MULAW]
  // - Lifecycle: conversation should be in `Assist Stage`, go to
  //   [Conversation.CreateConversation][] for more information.
  //
  // InvalidArgument Error will be returned if the one of restriction checks
  // failed.
  //
  // You can find more details in
  // https://cloud.google.com/agent-assist/docs/extended-streaming
  bool enable_extended_streaming = 11 [(google.api.field_behavior) = OPTIONAL];

  // Enable partial virtual agent responses. If this flag is not enabled,
  // response stream still contains only one final response even if some
  // `Fulfillment`s in Dialogflow virtual agent have been configured to return
  // partial responses.
  bool enable_partial_automated_agent_reply = 12;

  // If true, `StreamingAnalyzeContentResponse.debugging_info` will get
  // populated.
  bool enable_debugging_info = 19;
}

// The top-level message returned from the `StreamingAnalyzeContent` method.
//
// Multiple response messages can be returned in order:
//
// 1.  If the input was set to streaming audio, the first one or more messages
//     contain `recognition_result`. Each `recognition_result` represents a more
//     complete transcript of what the user said. The last `recognition_result`
//     has `is_final` set to `true`.
//
// 2.  In virtual agent stage: if `enable_partial_automated_agent_reply` is
//     true, the following N (currently 1 <= N <= 4) messages
//     contain `automated_agent_reply` and optionally `reply_audio`
//     returned by the virtual agent. The first (N-1)
//     `automated_agent_reply`s will have `automated_agent_reply_type` set to
//     `PARTIAL`. The last `automated_agent_reply` has
//     `automated_agent_reply_type` set to `FINAL`.
//     If `enable_partial_automated_agent_reply` is not enabled, response stream
//     only contains the final reply.
//
//     In human assist stage: the following N (N >= 1) messages contain
//     `human_agent_suggestion_results`, `end_user_suggestion_results` or
//     `message`.
message StreamingAnalyzeContentResponse {
  // The result of speech recognition.
  StreamingRecognitionResult recognition_result = 1;

  // The output text content.
  // This field is set if an automated agent responded with a text for the user.
  string reply_text = 2;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  //  - The `reply_audio_config` field is specified in the request.
  //  - The automated agent, which this output comes from, responded with audio.
  //    In such case, the `reply_audio.config` field contains settings used to
  //    synthesize the speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 3;

  // Only set if a Dialogflow automated agent has responded.
  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty, use
  // [reply_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentResponse.reply_audio]
  // instead.
  AutomatedAgentReply automated_agent_reply = 4;

  // Message analyzed by CCAI.
  Message message = 6;

  // The suggestions for most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
  // of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  repeated SuggestionResult human_agent_suggestion_results = 7;

  // The suggestions for end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
  // of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  repeated SuggestionResult end_user_suggestion_results = 8;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 10;

  // Debugging info that would get populated when
  // `StreamingAnalyzeContentRequest.enable_debugging_info` is set to true.
  CloudConversationDebuggingInfo debugging_info = 11;
}

// The request message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesRequest {
  // Required. The name of the participant to fetch suggestion for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The name of the latest conversation message to compile suggestion
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Message"
    }
  ];

  // Optional. Max number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesRequest.latest_message]
  // to use as context when compiling the suggestion. By default 20 and at
  // most 50.
  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 4;
}

// The response message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesResponse {
  // Articles ordered by score in descending order (best match first).
  repeated ArticleAnswer article_answers = 1;

  // The name of the latest conversation message used to compile
  // suggestion for.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesResponse.latest_message]
  // used to compile the suggestion. It may be smaller than the
  // [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2.SuggestArticlesRequest.context_size]
  // field in the request if there aren't that many messages in the
  // conversation.
  int32 context_size = 3;
}
770
// The request message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersRequest {
  // Required. The name of the participant to fetch suggestion for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The name of the latest conversation message to compile suggestion
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Message"
    }
  ];

  // Optional. Max number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestFaqAnswersRequest.latest_message]
  // to use as context when compiling the
  // suggestion. By default 20 and at most 50.
  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];

  // Parameters for a human assist query, e.g. metadata filters restricting
  // which documents FAQ answers may be extracted from. See
  // [AssistQueryParameters][google.cloud.dialogflow.v2.AssistQueryParameters].
  AssistQueryParameters assist_query_params = 4;
}
804
// The response message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersResponse {
  // Answers extracted from FAQ documents.
  repeated FaqAnswer faq_answers = 1;

  // The name of the latest conversation message used to compile
  // suggestion for.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestFaqAnswersResponse.latest_message]
  // used to compile the suggestion. It may be smaller than the
  // [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2.SuggestFaqAnswersRequest.context_size]
  // field in the request if there aren't that many messages in the
  // conversation.
  int32 context_size = 3;
}
826
// The request message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesRequest {
  // Required. The name of the participant to fetch suggestion for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The current natural language text segment to compile suggestion
  // for. This provides a way for user to get follow up smart reply suggestion
  // after a smart reply selection, without sending a text message.
  TextInput current_text_input = 4;

  // The name of the latest conversation message to compile suggestion
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Max number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestSmartRepliesRequest.latest_message]
  // to use as context when compiling the
  // suggestion. By default 20 and at most 50.
  int32 context_size = 3;
}
859
// The response message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesResponse {
  // Output only. Multiple reply options provided by the smart reply service.
  // The order is based on the rank of the model prediction (best match first).
  // The maximum number of the returned replies is set in SmartReplyConfig.
  repeated SmartReplyAnswer smart_reply_answers = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The name of the latest conversation message used to compile
  // suggestion for.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestSmartRepliesResponse.latest_message]
  // used to compile the suggestion. It may be smaller than the
  // [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2.SuggestSmartRepliesRequest.context_size]
  // field in the request if there aren't that many messages in the
  // conversation.
  int32 context_size = 3;
}
886
// Represents the natural language speech audio to be played to the end user.
message OutputAudio {
  // Instructs the speech synthesizer how to generate the speech
  // audio (encoding, sample rate, voice, etc.).
  OutputAudioConfig config = 1;

  // The natural language speech audio bytes, produced according to `config`.
  bytes audio = 2;
}
896
// Represents a response from an automated agent.
message AutomatedAgentReply {
  // Represents different automated agent reply types.
  enum AutomatedAgentReplyType {
    // Not specified. This should never happen.
    AUTOMATED_AGENT_REPLY_TYPE_UNSPECIFIED = 0;

    // Partial reply. e.g. Aggregated responses in a `Fulfillment` that enables
    // `return_partial_response` can be returned as partial reply.
    // WARNING: partial reply is not eligible for barge-in.
    PARTIAL = 1;

    // Final reply.
    FINAL = 2;
  }

  // Response of the Dialogflow
  // [Sessions.DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent]
  // call.
  DetectIntentResponse detect_intent_response = 1;

  // Whether this reply is PARTIAL or FINAL; see
  // [AutomatedAgentReplyType][google.cloud.dialogflow.v2.AutomatedAgentReply.AutomatedAgentReplyType].
  AutomatedAgentReplyType automated_agent_reply_type = 7;

  // Indicates whether the partial automated agent reply is interruptible when
  // a later reply message arrives. e.g. if the agent specified some music as a
  // partial response, it can be cancelled.
  bool allow_cancellation = 8;

  // The unique identifier of the current Dialogflow CX conversation page.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/pages/<Page ID>`.
  string cx_current_page = 11;
}
931
// Represents article answer.
message ArticleAnswer {
  // The article title.
  string title = 1;

  // The article URI.
  string uri = 2;

  // Article snippets.
  repeated string snippets = 3;

  // Article match confidence.
  // The system's confidence score that this article is a good match for this
  // conversation, as a value from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of answer record, in the format of
  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
  // Record ID>`.
  string answer_record = 6;
}
958
// Represents answer from "frequently asked questions".
message FaqAnswer {
  // The piece of text from the `source` knowledge base document.
  string answer = 1;

  // The system's confidence score that this Knowledge answer is a good match
  // for this conversational query, range from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  float confidence = 2;

  // The corresponding FAQ question.
  string question = 3;

  // Indicates which Knowledge Document this answer was extracted
  // from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
  string source = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of answer record, in the format of
  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
  // Record ID>`.
  string answer_record = 6;
}
987
// Represents a smart reply answer.
message SmartReplyAnswer {
  // The content of the reply.
  string reply = 1;

  // Smart reply confidence.
  // The system's confidence score that this reply is a good match for
  // this conversation, as a value from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 2;

  // The name of answer record, in the format of
  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
  // Record ID>`.
  string answer_record = 3 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/AnswerRecord"
  }];
}
1006
// Represents an intent suggestion.
message IntentSuggestion {
  // The display name of the intent.
  string display_name = 1;

  // The name of the intent. Currently only a Dialogflow v2 intent
  // reference is supported.
  oneof intent {
    // The unique identifier of this
    // [intent][google.cloud.dialogflow.v2.Intent]. Format: `projects/<Project
    // ID>/locations/<Location ID>/agent/intents/<Intent ID>`.
    string intent_v2 = 2;
  }

  // Human readable description for better understanding an intent like its
  // scope, content, result etc. Maximum character limit: 140 characters.
  string description = 5;
}
1024
// Represents a Dialogflow assist answer.
message DialogflowAssistAnswer {
  // Result from DetectIntent for one matched intent.
  oneof result {
    // Result from v2 agent.
    QueryResult query_result = 1;

    // An intent suggestion generated from conversation.
    IntentSuggestion intent_suggestion = 5;
  }

  // The name of answer record, in the format of
  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
  // Record ID>`.
  string answer_record = 2;
}
1041
// One response of different type of suggestion response which is used in
// the response of
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
// and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
// as well as
// [HumanAgentAssistantEvent][google.cloud.dialogflow.v2.HumanAgentAssistantEvent].
message SuggestionResult {
  // Different type of suggestion response.
  oneof suggestion_response {
    // Error status if the request failed.
    google.rpc.Status error = 1;

    // SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
    SuggestArticlesResponse suggest_articles_response = 2;

    // SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
    SuggestFaqAnswersResponse suggest_faq_answers_response = 3;

    // SuggestSmartRepliesResponse if request is for SMART_REPLY.
    SuggestSmartRepliesResponse suggest_smart_replies_response = 4;
  }
}
1065
// Defines the language used in the input text.
message InputTextConfig {
  // Required. The language of this conversational query. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 1 [(google.api.field_behavior) = REQUIRED];
}
1073
// Represents a part of a message possibly annotated with an entity. The part
// can be an entity or purely a part of the message between two entities or
// message start/end.
message AnnotatedMessagePart {
  // A part of a message possibly annotated with an entity.
  string text = 1;

  // The [Dialogflow system entity
  // type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
  // of this message part. If this is empty, Dialogflow could not annotate the
  // phrase part with a system entity.
  string entity_type = 2;

  // The [Dialogflow system entity formatted value
  // ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
  // this message part. For example for a system entity of type
  // `@sys.unit-currency`, this may contain:
  // <pre>
  // {
  //   "amount": 5,
  //   "currency": "USD"
  // }
  // </pre>
  google.protobuf.Value formatted_value = 3;
}
1099
// Represents the result of annotation for the message.
message MessageAnnotation {
  // The collection of annotated message parts ordered by their
  // position in the message. You can recover the annotated message by
  // concatenating
  // [AnnotatedMessagePart.text][google.cloud.dialogflow.v2.AnnotatedMessagePart.text].
  repeated AnnotatedMessagePart parts = 1;

  // Indicates whether the text message contains entities.
  bool contain_entities = 2;
}
1110
// Represents the selection of a suggestion.
message SuggestionInput {
  // Required. The ID of a suggestion selected by the human agent.
  // The suggestion(s) were generated in a previous call to
  // request Dialogflow assist.
  // The format is:
  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
  // Record ID>` where `<Answer Record ID>` is an alphanumeric string.
  string answer_record = 1 [(google.api.field_behavior) = REQUIRED];
}
1121
// Represents the parameters of human assist query.
message AssistQueryParameters {
  // Key-value filters on the metadata of documents returned by article
  // suggestion. If specified, article suggestion only returns suggested
  // documents that match all filters in their
  // [Document.metadata][google.cloud.dialogflow.v2.Document.metadata].
  // Multiple values for a metadata key should be concatenated by a comma. For
  // example, filters to match all documents that have 'US' or 'CA' in their
  // market metadata values and 'agent' in their user metadata values will be
  // ```
  // documents_metadata_filters {
  //   key: "market"
  //   value: "US,CA"
  // }
  // documents_metadata_filters {
  //   key: "user"
  //   value: "agent"
  // }
  // ```
  map<string, string> documents_metadata_filters = 1;
}
1143