// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.contactcenterinsights.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.ContactCenterInsights.V1";
option go_package = "cloud.google.com/go/contactcenterinsights/apiv1/contactcenterinsightspb;contactcenterinsightspb";
option java_multiple_files = true;
option java_outer_classname = "ResourcesProto";
option java_package = "com.google.cloud.contactcenterinsights.v1";
option php_namespace = "Google\\Cloud\\ContactCenterInsights\\V1";
option ruby_package = "Google::Cloud::ContactCenterInsights::V1";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/ConversationProfile"
  pattern: "projects/{project}/locations/{location}/conversationProfiles/{conversation_profile}"
};
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Participant"
  pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
  pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
};
option (google.api.resource_definition) = {
  type: "speech.googleapis.com/Recognizer"
  pattern: "projects/{project}/locations/{location}/recognizers/{recognizer}"
};

// The conversation resource.
message Conversation {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Conversation"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}"
  };

  // Call-specific metadata.
  message CallMetadata {
    // The audio channel that contains the customer.
    int32 customer_channel = 1;

    // The audio channel that contains the agent.
    int32 agent_channel = 2;
  }

  // Conversation metadata related to quality management.
  message QualityMetadata {
    // Information about an agent involved in the conversation.
    message AgentInfo {
      // A user-specified string representing the agent.
      string agent_id = 1;

      // The agent's name.
      string display_name = 2;

      // A user-specified string representing the agent's team.
      string team = 3;

      // A user-provided string indicating the outcome of the agent's segment of
      // the call.
      string disposition_code = 4;
    }

    // An arbitrary integer value indicating the customer's satisfaction rating.
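    // For example (illustrative), a value of 4 could represent a 4-out-of-5
    // CSAT response; the scale itself is user-defined.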
    int32 customer_satisfaction_rating = 1;

    // The amount of time the customer waited to connect with an agent.
    google.protobuf.Duration wait_duration = 2;

    // An arbitrary string value specifying the menu path the customer took.
    string menu_path = 3;

    // Information about agents involved in the call.
    repeated AgentInfo agent_info = 4;
  }

  // A message representing the transcript of a conversation.
  message Transcript {
    // A segment of a full transcript.
    message TranscriptSegment {
      // Word-level info for words in a transcript.
      message WordInfo {
        // Time offset of the start of this word relative to the beginning of
        // the total conversation.
        google.protobuf.Duration start_offset = 1;

        // Time offset of the end of this word relative to the beginning of the
        // total conversation.
        google.protobuf.Duration end_offset = 2;

        // The word itself. Includes punctuation marks that surround the word.
        string word = 3;

        // A confidence estimate between 0.0 and 1.0 of the fidelity of this
        // word. A default value of 0.0 indicates that the value is unset.
        float confidence = 4;
      }

      // Metadata from Dialogflow relating to the current transcript segment.
      message DialogflowSegmentMetadata {
        // Whether the transcript segment was covered under the configured smart
        // reply allowlist in Agent Assist.
        bool smart_reply_allowlist_covered = 1;
      }

      // The time that the message occurred, if provided.
      google.protobuf.Timestamp message_time = 6;

      // The text of this segment.
      string text = 1;

      // A confidence estimate between 0.0 and 1.0 of the fidelity of this
      // segment. A default value of 0.0 indicates that the value is unset.
      float confidence = 2;

      // A list of the word-specific information for each word in the segment.
      repeated WordInfo words = 3;

      // The language code of this segment as a
      // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      // Example: "en-US".
      string language_code = 4;

      // For conversations derived from multi-channel audio, this is the channel
      // number corresponding to the audio from that channel. For
      // audioChannelCount = N, its output values can range from '1' to 'N'. A
      // channel tag of 0 indicates that the audio is mono.
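      // For example (illustrative), in a two-channel call where
      // `CallMetadata.customer_channel` is 1, segments spoken by the customer
      // carry a channel_tag of 1.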
      int32 channel_tag = 5;

      // The participant of this segment.
      ConversationParticipant segment_participant = 9;

      // CCAI metadata relating to the current transcript segment.
      DialogflowSegmentMetadata dialogflow_segment_metadata = 10;

      // The sentiment for this transcript segment.
      SentimentData sentiment = 11;
    }

    // A list of sequential transcript segments that comprise the conversation.
    repeated TranscriptSegment transcript_segments = 1;
  }

  // Possible media for the conversation.
  enum Medium {
    // Default value; if unspecified, defaults to PHONE_CALL.
    MEDIUM_UNSPECIFIED = 0;

    // The format for conversations that took place over the phone.
    PHONE_CALL = 1;

    // The format for conversations that took place over chat.
    CHAT = 2;
  }

  // Metadata that applies to the conversation.
  oneof metadata {
    // Call-specific metadata.
    CallMetadata call_metadata = 7;
  }

  // A time-to-live expiration setting; it can be either a specified timestamp or a
  // duration from the time that the conversation creation request was received.
  // Conversations with an expiration set will be removed up to 24 hours after
  // the specified time.
  oneof expiration {
    // The time at which this conversation should expire. After this time, the
    // conversation data and any associated analyses will be deleted.
    google.protobuf.Timestamp expire_time = 15;

    // Input only. The TTL for this resource. If specified, then this TTL will
    // be used to calculate the expire time.
    google.protobuf.Duration ttl = 16
        [(google.api.field_behavior) = INPUT_ONLY];
  }

  // Immutable. The resource name of the conversation.
  // Format:
  // projects/{project}/locations/{location}/conversations/{conversation}
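  // For example (illustrative):
  // projects/my-project/locations/us-central1/conversations/1234567890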
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The source of the audio and transcription for the conversation.
  ConversationDataSource data_source = 2;

  // Output only. The time at which the conversation was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time at which the conversation was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The time at which the conversation started.
  google.protobuf.Timestamp start_time = 17;

  // A user-specified language code for the conversation.
  string language_code = 14;

  // An opaque, user-specified string representing the human agent who handled
  // the conversation.
  string agent_id = 5;

  // A map for the user to specify any custom fields. A maximum of 20 labels per
  // conversation is allowed, with a maximum of 256 characters per entry.
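  // For example (illustrative), a label entry could be
  // `"business_unit": "billing"`.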
  map<string, string> labels = 6;

  // Conversation metadata related to quality management.
  QualityMetadata quality_metadata = 24;

  // Output only. The conversation transcript.
  Transcript transcript = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Immutable. The conversation medium; if unspecified, defaults to
  // PHONE_CALL.
  Medium medium = 9 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The duration of the conversation.
  google.protobuf.Duration duration = 10
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The number of turns in the conversation.
  int32 turn_count = 11 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The conversation's latest analysis, if one exists.
  Analysis latest_analysis = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Latest summary of the conversation.
  ConversationSummarizationSuggestionData latest_summary = 20
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The annotations that were generated during the customer and
  // agent interaction.
  repeated RuntimeAnnotation runtime_annotations = 13
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. All the matched Dialogflow intents in the call. The key
  // corresponds to a Dialogflow intent, format:
  // projects/{project}/agent/{agent}/intents/{intent}
  map<string, DialogflowIntent> dialogflow_intents = 18
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Obfuscated user ID that the customer sent to us.
  string obfuscated_user_id = 21;
}

// The analysis resource.
message Analysis {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Analysis"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}"
  };

  // Immutable. The resource name of the analysis.
  // Format:
  // projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The time at which the analysis was requested.
  google.protobuf.Timestamp request_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time at which the analysis was created, which occurs when
  // the long-running operation completes.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The result of the analysis, which is populated when the
  // analysis finishes.
  AnalysisResult analysis_result = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Selects the annotators to run and the phrase matchers to use
  // (if any). If not specified, all annotators will be run.
  AnnotatorSelector annotator_selector = 8;
}

// The conversation source, which is a combination of transcript and audio.
message ConversationDataSource {
  // The source of the conversation.
  oneof source {
    // A Cloud Storage location specification for the audio and transcript.
    GcsSource gcs_source = 1;

    // The source when the conversation comes from Dialogflow.
    DialogflowSource dialogflow_source = 3;
  }
}

// A Cloud Storage source of conversation data.
message GcsSource {
  // Cloud Storage URI that points to a file that contains the conversation
  // audio.
  string audio_uri = 1;

  // Immutable. Cloud Storage URI that points to a file that contains the
  // conversation transcript.
  string transcript_uri = 2 [(google.api.field_behavior) = IMMUTABLE];
}

// A Dialogflow source of conversation data.
message DialogflowSource {
  // Output only. The name of the Dialogflow conversation that this conversation
  // resource is derived from. Format:
  // projects/{project}/locations/{location}/conversations/{conversation}
  string dialogflow_conversation = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Cloud Storage URI that points to a file that contains the conversation
  // audio.
  string audio_uri = 3;
}

// The result of an analysis.
message AnalysisResult {
  // Call-specific metadata created during analysis.
  message CallAnalysisMetadata {
    // A list of call annotations that apply to this call.
    repeated CallAnnotation annotations = 2;

    // All the entities in the call.
    map<string, Entity> entities = 3;

    // Overall conversation-level sentiment for each channel of the call.
    repeated ConversationLevelSentiment sentiments = 4;

    // All the matched intents in the call.
    map<string, Intent> intents = 6;

    // All the matched phrase matchers in the call.
    map<string, PhraseMatchData> phrase_matchers = 7;

    // Overall conversation-level issue modeling result.
    IssueModelResult issue_model_result = 8;
  }

  // Metadata discovered during analysis.
  oneof metadata {
    // Call-specific metadata created by the analysis.
    CallAnalysisMetadata call_analysis_metadata = 2;
  }

  // The time at which the analysis ended.
  google.protobuf.Timestamp end_time = 1;
}

// Issue Modeling result on a conversation.
message IssueModelResult {
  // Issue model that generates the result.
  // Format: projects/{project}/locations/{location}/issueModels/{issue_model}
  string issue_model = 1 [(google.api.resource_reference) = {
    type: "contactcenterinsights.googleapis.com/IssueModel"
  }];

  // All the matched issues.
  repeated IssueAssignment issues = 2;
}

// One channel of conversation-level sentiment data.
message ConversationLevelSentiment {
  // The channel of the audio that the data applies to.
  int32 channel_tag = 1;

  // Data specifying sentiment.
  SentimentData sentiment_data = 2;
}

// Information about the issue.
message IssueAssignment {
  // Resource name of the assigned issue.
  string issue = 1;

  // Score indicating the likelihood of the issue assignment,
  // currently bounded on [0, 1].
  double score = 2;

  // Immutable. Display name of the assigned issue. This field is set at the
  // time of analysis and is immutable thereafter.
  string display_name = 3 [(google.api.field_behavior) = IMMUTABLE];
}

// A piece of metadata that applies to a window of a call.
message CallAnnotation {
  // The data in the annotation.
  oneof data {
    // Data specifying an interruption.
    InterruptionData interruption_data = 10;

    // Data specifying sentiment.
    SentimentData sentiment_data = 11;

    // Data specifying silence.
    SilenceData silence_data = 12;

    // Data specifying a hold.
    HoldData hold_data = 13;

    // Data specifying an entity mention.
    EntityMentionData entity_mention_data = 15;

    // Data specifying an intent match.
    IntentMatchData intent_match_data = 16;

    // Data specifying a phrase match.
    PhraseMatchData phrase_match_data = 17;

    // Data specifying an issue match.
    IssueMatchData issue_match_data = 18;
  }

  // The channel of the audio where the annotation occurs. For single-channel
  // audio, this field is not populated.
  int32 channel_tag = 1;

  // The boundary in the conversation where the annotation starts, inclusive.
  AnnotationBoundary annotation_start_boundary = 4;

  // The boundary in the conversation where the annotation ends, inclusive.
  AnnotationBoundary annotation_end_boundary = 5;
}

// A point in a conversation that marks the start or the end of an annotation.
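// For example (illustrative), a boundary with `transcript_index` 2 and
// `word_index` 5 points at the sixth word of the third transcript segment.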
message AnnotationBoundary {
  // A detailed boundary, which describes a more specific point.
  oneof detailed_boundary {
    // The word index of this boundary with respect to the first word in the
    // transcript piece. This index starts at zero.
    int32 word_index = 3;
  }

  // The index in the sequence of transcribed pieces of the conversation where
  // the boundary is located. This index starts at zero.
  int32 transcript_index = 1;
}

// The data for an entity annotation.
// Represents a phrase in the conversation that is a known entity, such
// as a person, an organization, or location.
message Entity {
  // The type of the entity. For most entity types, the associated metadata is a
  // Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`). The table
  // below lists the associated fields for entities that have different
  // metadata.
  enum Type {
    // Unspecified.
    TYPE_UNSPECIFIED = 0;

    // Person.
    PERSON = 1;

    // Location.
    LOCATION = 2;

    // Organization.
    ORGANIZATION = 3;

    // Event.
    EVENT = 4;

    // Artwork.
    WORK_OF_ART = 5;

    // Consumer product.
    CONSUMER_GOOD = 6;

    // Other types of entities.
    OTHER = 7;

    // Phone number.
    //
    // The metadata lists the phone number (formatted according to local
    // convention), plus whichever additional elements appear in the text:
    //
    // * `number` - The actual number, broken down into sections according to
    // local convention.
    // * `national_prefix` - Country code, if detected.
    // * `area_code` - Region or area code, if detected.
    // * `extension` - Phone extension (to be dialed after connection), if
    // detected.
    PHONE_NUMBER = 9;

    // Address.
    //
    // The metadata identifies the street number and locality plus whichever
    // additional elements appear in the text:
    //
    // * `street_number` - Street number.
    // * `locality` - City or town.
    // * `street_name` - Street/route name, if detected.
    // * `postal_code` - Postal code, if detected.
    // * `country` - Country, if detected.
    // * `broad_region` - Administrative area, such as the state, if detected.
    // * `narrow_region` - Smaller administrative area, such as county, if
    // detected.
    // * `sublocality` - Used in Asian addresses to demark a district within a
    // city, if detected.
    ADDRESS = 10;

    // Date.
    //
    // The metadata identifies the components of the date:
    //
    // * `year` - Four digit year, if detected.
    // * `month` - Two digit month number, if detected.
    // * `day` - Two digit day number, if detected.
    DATE = 11;

    // Number.
    //
    // The metadata is the number itself.
    NUMBER = 12;

    // Price.
    //
    // The metadata identifies the `value` and `currency`.
    PRICE = 13;
  }

  // The representative name for the entity.
  string display_name = 1;

  // The entity type.
  Type type = 2;

  // Metadata associated with the entity.
  //
  // For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`)
  // and Knowledge Graph MID (`mid`), if they are available. For the metadata
  // associated with other entity types, see the Type table below.
  map<string, string> metadata = 3;

  // The salience score associated with the entity in the [0, 1.0] range.
  //
  // The salience score for an entity provides information about the
  // importance or centrality of that entity to the entire document text.
  // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
  // salient.
  float salience = 4;

  // The aggregate sentiment expressed for this entity in the conversation.
  SentimentData sentiment = 5;
}

// The data for an intent.
// Represents a detected intent in the conversation, for example MAKES_PROMISE.
message Intent {
  // The unique identifier of the intent.
  string id = 1;

  // The human-readable name of the intent.
  string display_name = 2;
}

// The data for a matched phrase matcher.
// Represents information identifying a phrase matcher for a given match.
message PhraseMatchData {
  // The unique identifier (the resource name) of the phrase matcher.
  string phrase_matcher = 1;

  // The human-readable name of the phrase matcher.
  string display_name = 2;
}

// The data for a Dialogflow intent.
// Represents a detected intent in the conversation, e.g. MAKES_PROMISE.
message DialogflowIntent {
  // The human-readable name of the intent.
  string display_name = 1;
}

// The data for an interruption annotation.
message InterruptionData {}

// The data for a silence annotation.
message SilenceData {}

// The data for a hold annotation.
message HoldData {}

// The data for an entity mention annotation.
// This represents a mention of an `Entity` in the conversation.
message EntityMentionData {
  // The supported types of mentions.
  enum MentionType {
    // Unspecified.
    MENTION_TYPE_UNSPECIFIED = 0;

    // Proper noun.
    PROPER = 1;

    // Common noun (or noun compound).
    COMMON = 2;
  }

  // The key of this entity in conversation entities.
  // Can be used to retrieve the exact `Entity` this mention is attached to.
  string entity_unique_id = 1;

  // The type of the entity mention.
  MentionType type = 2;

  // Sentiment expressed for this mention of the entity.
  SentimentData sentiment = 3;
}

// The data for an intent match.
// Represents an intent match for a text segment in the conversation. A text
// segment can be part of a sentence, a complete sentence, or an utterance
// with multiple sentences.
message IntentMatchData {
  // The id of the matched intent.
  // Can be used to retrieve the corresponding intent information.
  string intent_unique_id = 1;
}

// The data for a sentiment annotation.
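// For example (illustrative), a score of -0.8 with a magnitude of 1.9
// indicates strongly negative sentiment expressed with high intensity.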
message SentimentData {
  // A non-negative number from 0 to infinity which represents the absolute
  // magnitude of sentiment regardless of score.
  float magnitude = 1;

  // The sentiment score between -1.0 (negative) and 1.0 (positive).
  float score = 2;
}

// The data for an issue match annotation.
message IssueMatchData {
  // Information about the issue's assignment.
  IssueAssignment issue_assignment = 1;
}

// The issue model resource.
message IssueModel {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/IssueModel"
    pattern: "projects/{project}/locations/{location}/issueModels/{issue_model}"
  };

  // Configs for the input data used to create the issue model.
  message InputDataConfig {
    // Medium of conversations used in training data. This field is being
    // deprecated. To specify the medium to be used in training a new issue
    // model, set the `medium` field on `filter`.
    Conversation.Medium medium = 1 [deprecated = true];

    // Output only. Number of conversations used in training.
    int64 training_conversations_count = 2
        [(google.api.field_behavior) = OUTPUT_ONLY];

    // A filter to reduce the conversations used for training the model to a
    // specific subset.
    string filter = 3;
  }

  // State of the model.
  enum State {
    // Unspecified.
    STATE_UNSPECIFIED = 0;

    // Model is not deployed but is ready to deploy.
    UNDEPLOYED = 1;

    // Model is being deployed.
    DEPLOYING = 2;

    // Model is deployed and is ready to be used. A model can only be used in
    // analysis if it's in this state.
    DEPLOYED = 3;

    // Model is being undeployed.
    UNDEPLOYING = 4;

    // Model is being deleted.
    DELETING = 5;
  }

  // Type of the model.
  enum ModelType {
    // Unspecified model type.
    MODEL_TYPE_UNSPECIFIED = 0;

    // Type V1.
    TYPE_V1 = 1;

    // Type V2.
    TYPE_V2 = 2;
  }

  // Immutable. The resource name of the issue model.
  // Format:
  // projects/{project}/locations/{location}/issueModels/{issue_model}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The representative name for the issue model.
  string display_name = 2;

  // Output only. The time at which this issue model was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time at which the issue model was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of issues in this issue model.
  int64 issue_count = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. State of the model.
  State state = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Configs for the input data that was used to create the issue model.
  InputDataConfig input_data_config = 6;

  // Output only. Immutable. The issue model's label statistics on its training
  // data.
  IssueModelLabelStats training_stats = 7 [
    (google.api.field_behavior) = OUTPUT_ONLY,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Type of the model.
  ModelType model_type = 9;

  // Language of the model.
  string language_code = 10;
}

// The issue resource.
message Issue {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Issue"
    pattern: "projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}"
  };

  // Immutable. The resource name of the issue.
  // Format:
  // projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The representative name for the issue.
  string display_name = 2;

  // Output only. The time at which this issue was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time that this issue was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Resource names of the sample representative utterances that
  // match to this issue.
  repeated string sample_utterances = 6
      [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Aggregated statistics about an issue model.
message IssueModelLabelStats {
  // Aggregated statistics about an issue.
  message IssueStats {
    // Issue resource.
    // Format:
    // projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
    string issue = 1;

    // Number of conversations attached to the issue at this point in time.
    int64 labeled_conversations_count = 2;

    // Display name of the issue.
    string display_name = 3;
  }

  // Number of conversations the issue model has analyzed at this point in time.
  int64 analyzed_conversations_count = 1;

  // Number of analyzed conversations for which no issue was applicable at this
  // point in time.
  int64 unclassified_conversations_count = 2;

  // Statistics on each issue. Key is the issue's resource name.
  map<string, IssueStats> issue_stats = 3;
}

// The phrase matcher resource.
message PhraseMatcher {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/PhraseMatcher"
    pattern: "projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}"
  };

  // Specifies how to combine each phrase match rule group to determine whether
  // there is a match.
  enum PhraseMatcherType {
    // Unspecified.
    PHRASE_MATCHER_TYPE_UNSPECIFIED = 0;

    // Must meet all phrase match rule groups or there is no match.
    ALL_OF = 1;

    // If any of the phrase match rule groups are met, there is a match.
    ANY_OF = 2;
  }

  // The resource name of the phrase matcher.
  // Format:
  // projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
  string name = 1;

  // Output only. Immutable. The revision ID of the phrase matcher.
  // A new revision is committed whenever the matcher is changed, except when it
  // is activated or deactivated. A server-generated random ID will be used.
  // Example: locations/global/phraseMatchers/my-first-matcher@1234567
  string revision_id = 2 [
    (google.api.field_behavior) = IMMUTABLE,
    (google.api.field_behavior) = OUTPUT_ONLY
  ];

  // The customized version tag to use for the phrase matcher. If not specified,
  // it will default to `revision_id`.
  string version_tag = 3;

  // Output only. The timestamp of when the revision was created. It is also the
  // create time when a new matcher is added.
  google.protobuf.Timestamp revision_create_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The human-readable name of the phrase matcher.
  string display_name = 5;

  // Required. The type of this phrase matcher.
  PhraseMatcherType type = 6 [(google.api.field_behavior) = REQUIRED];

  // Applies the phrase matcher only when it is active.
  bool active = 7;

  // A list of phrase match rule groups that are included in this matcher.
  repeated PhraseMatchRuleGroup phrase_match_rule_groups = 8;

  // Output only. The most recent time at which the activation status was
  // updated.
  google.protobuf.Timestamp activation_update_time = 9
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The role whose utterances the phrase matcher should be matched
  // against. If the role is ROLE_UNSPECIFIED it will be matched against any
  // utterances in the transcript.
  ConversationParticipant.Role role_match = 10;

  // Output only. The most recent time at which the phrase matcher was updated.
  google.protobuf.Timestamp update_time = 11
      [(google.api.field_behavior) = OUTPUT_ONLY];
}

// A message representing a rule in the phrase matcher.
message PhraseMatchRuleGroup {
  // Specifies how to combine each phrase match rule to determine whether there
  // is a match.
  enum PhraseMatchRuleGroupType {
    // Unspecified.
    PHRASE_MATCH_RULE_GROUP_TYPE_UNSPECIFIED = 0;

    // Must meet all phrase match rules or there is no match.
    ALL_OF = 1;

    // If any of the phrase match rules are met, there is a match.
    ANY_OF = 2;
  }

  // Required. The type of this phrase match rule group.
  PhraseMatchRuleGroupType type = 1 [(google.api.field_behavior) = REQUIRED];

  // A list of phrase match rules that are included in this group.
  repeated PhraseMatchRule phrase_match_rules = 2;
}

// The data for a phrase match rule.
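// For example (illustrative), a rule whose `query` is "cancel my subscription"
// with `negated` set to false matches transcript segments that contain that
// phrase.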
message PhraseMatchRule {
  // Required. The phrase to be matched.
  string query = 1 [(google.api.field_behavior) = REQUIRED];

  // Specifies whether the phrase must be missing from the transcript segment or
  // present in the transcript segment.
  bool negated = 2;

  // Provides additional information about the rule that specifies how to apply
  // the rule.
  PhraseMatchRuleConfig config = 3;
}

// Configuration information of a phrase match rule.
message PhraseMatchRuleConfig {
  // The configuration of the phrase match rule.
  oneof config {
    // The configuration for the exact match rule.
    ExactMatchConfig exact_match_config = 1;
  }
}

// Exact match configuration.
message ExactMatchConfig {
  // Whether to consider case sensitivity when performing an exact match.
  bool case_sensitive = 1;
}

// The settings resource.
message Settings {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Settings"
    pattern: "projects/{project}/locations/{location}/settings"
  };

  // Default configuration when creating Analyses in Insights.
  message AnalysisConfig {
    // Percentage of conversations created using Dialogflow runtime integration
    // to analyze automatically, between [0, 100].
    double runtime_integration_analysis_percentage = 1;

    // Percentage of conversations created using the UploadConversation endpoint
    // to analyze automatically, between [0, 100].
    double upload_conversation_analysis_percentage = 6;

    // Selects the annotators to run and the phrase matchers to use
    // (if any). If not specified, all annotators will be run.
    AnnotatorSelector annotator_selector = 5;
  }

  // Immutable. The resource name of the settings resource.
  // Format:
  // projects/{project}/locations/{location}/settings
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The time at which the settings were created.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time at which the settings were last updated.
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // A language code to be applied to each transcript segment unless the segment
  // already specifies a language code. Language code defaults to "en-US" if it
  // is neither specified on the segment nor here.
  string language_code = 4;

  // The default TTL for newly-created conversations. If a conversation has a
  // specified expiration, that value will be used instead. Changing this
  // value will not change the expiration of existing conversations.
  // Conversations with no expire time persist until they are deleted.
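  // For example (illustrative), a TTL of `86400s` causes newly-created
  // conversations to expire one day after they are created.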
  google.protobuf.Duration conversation_ttl = 5;

  // A map that maps a notification trigger to a Pub/Sub topic. Each time a
  // specified trigger occurs, Insights will notify the corresponding Pub/Sub
  // topic.
  //
  // Keys are notification triggers. Supported keys are:
  //
  // * "all-triggers": Notify each time any of the supported triggers occurs.
  // * "create-analysis": Notify each time an analysis is created.
  // * "create-conversation": Notify each time a conversation is created.
  // * "export-insights-data": Notify each time an export is complete.
  // * "update-conversation": Notify each time a conversation is updated via
  // UpdateConversation.
  //
  // Values are Pub/Sub topics. The format of each Pub/Sub topic is:
  // projects/{project}/topics/{topic}
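  // For example (illustrative), the entry
  // `"create-analysis": "projects/my-project/topics/insights-analyses"`
  // publishes to that topic each time an analysis is created.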
  map<string, string> pubsub_notification_settings = 6;

  // Default analysis settings.
  AnalysisConfig analysis_config = 7;

  // Default DLP redaction resources to be applied while ingesting
  // conversations.
  RedactionConfig redaction_config = 10;

  // Optional. Default Speech-to-Text resources to be used while ingesting audio
  // files. If not provided, CCAI Insights will create a default.
  SpeechConfig speech_config = 11 [(google.api.field_behavior) = OPTIONAL];
}

// DLP resources used for redaction while ingesting conversations.
message RedactionConfig {
  // The fully-qualified DLP deidentify template resource name.
  // Format:
  // `projects/{project}/deidentifyTemplates/{template}`
  string deidentify_template = 1;

  // The fully-qualified DLP inspect template resource name.
  // Format:
  // `projects/{project}/locations/{location}/inspectTemplates/{template}`
  string inspect_template = 2;
}

// Speech-to-Text configuration.
message SpeechConfig {
  // The fully-qualified Speech Recognizer resource name.
  // Format:
  // `projects/{project_id}/locations/{location}/recognizers/{recognizer}`
  string speech_recognizer = 1 [(google.api.resource_reference) = {
    type: "speech.googleapis.com/Recognizer"
  }];
}

// An annotation that was generated during the customer and agent interaction.
message RuntimeAnnotation {
  // The data in the annotation.
  oneof data {
    // Agent Assist Article Suggestion data.
    ArticleSuggestionData article_suggestion = 6;

    // Agent Assist FAQ answer data.
    FaqAnswerData faq_answer = 7;

    // Agent Assist Smart Reply data.
    SmartReplyData smart_reply = 8;

    // Agent Assist Smart Compose suggestion data.
    SmartComposeSuggestionData smart_compose_suggestion = 9;

    // Dialogflow interaction data.
    DialogflowInteractionData dialogflow_interaction = 10;

    // Conversation summarization suggestion data.
    ConversationSummarizationSuggestionData
        conversation_summarization_suggestion = 12;
  }

  // The unique identifier of the annotation.
  // Format:
  // projects/{project}/locations/{location}/conversationDatasets/{dataset}/conversationDataItems/{data_item}/conversationAnnotations/{annotation}
  string annotation_id = 1;

  // The time at which this annotation was created.
  google.protobuf.Timestamp create_time = 2;

  // The boundary in the conversation where the annotation starts, inclusive.
  AnnotationBoundary start_boundary = 3;

  // The boundary in the conversation where the annotation ends, inclusive.
  AnnotationBoundary end_boundary = 4;

  // The feedback that the customer has about the answer in `data`.
  AnswerFeedback answer_feedback = 5;
}

// The feedback that the customer has about a certain answer in the
// conversation.
message AnswerFeedback {
  // The correctness level of an answer.
  enum CorrectnessLevel {
    // Correctness level unspecified.
    CORRECTNESS_LEVEL_UNSPECIFIED = 0;

    // Answer is totally wrong.
    NOT_CORRECT = 1;

    // Answer is partially correct.
    PARTIALLY_CORRECT = 2;

    // Answer is fully correct.
    FULLY_CORRECT = 3;
  }

  // The correctness level of an answer.
  CorrectnessLevel correctness_level = 1;

  // Indicates whether an answer or item was clicked by the human agent.
  bool clicked = 2;

  // Indicates whether an answer or item was displayed to the human agent in the
  // agent desktop UI.
  bool displayed = 3;
}

// Agent Assist Article Suggestion data.
message ArticleSuggestionData {
  // Article title.
  string title = 1;

  // Article URI.
  string uri = 2;

  // The system's confidence score that this article is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  float confidence_score = 3;

  // Map that contains metadata about the Article Suggestion and the document
  // that it originates from.
  map<string, string> metadata = 4;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 5;

  // The knowledge document that this answer was extracted from.
  // Format:
  // projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}
  string source = 6;
}

// Agent Assist frequently-asked-question answer data.
message FaqAnswerData {
  // The piece of text from the `source` knowledge base document.
  string answer = 1;

  // The system's confidence score that this answer is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  float confidence_score = 2;

  // The corresponding FAQ question.
  string question = 3;

  // Map that contains metadata about the FAQ answer and the document that
  // it originates from.
  map<string, string> metadata = 4;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 5;

  // The knowledge document that this answer was extracted from.
  // Format:
  // projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}.
  string source = 6;
}

// Agent Assist Smart Reply data.
message SmartReplyData {
  // The content of the reply.
  string reply = 1;

  // The system's confidence score that this reply is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  double confidence_score = 2;

  // Map that contains metadata about the Smart Reply and the document from
  // which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 4;
}

// Agent Assist Smart Compose suggestion data.
message SmartComposeSuggestionData {
  // The content of the suggestion.
  string suggestion = 1;

  // The system's confidence score that this suggestion is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  double confidence_score = 2;

  // Map that contains metadata about the Smart Compose suggestion and the
  // document from which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 4;
}

// Dialogflow interaction data.
message DialogflowInteractionData {
  // The Dialogflow intent resource path. Format:
  // projects/{project}/agent/{agent}/intents/{intent}
  string dialogflow_intent_id = 1;

  // The confidence of the match ranging from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 2;
}

// Conversation summarization suggestion data.
message ConversationSummarizationSuggestionData {
  // The summarization content that is concatenated into one string.
  string text = 1;

  // The summarization content that is divided into sections. The key is the
  // section's name and the value is the section's content. There is no
  // specific format for the key or value.
  map<string, string> text_sections = 5;

  // The confidence score of the summarization.
  float confidence = 2;

  // A map that contains metadata about the summarization and the document
  // from which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string answer_record = 4;

  // The name of the model that generates this summary.
  // Format:
  // projects/{project}/locations/{location}/conversationModels/{conversation_model}
  string conversation_model = 6;
}

// The call participant speaking for a given utterance.
message ConversationParticipant {
  // The role of the participant.
  enum Role {
    // Participant's role is not set.
    ROLE_UNSPECIFIED = 0;

    // Participant is a human agent.
    HUMAN_AGENT = 1;

    // Participant is an automated agent.
    AUTOMATED_AGENT = 2;

    // Participant is an end user who conversed with the contact center.
    END_USER = 3;

    // Participant is either a human or automated agent.
    ANY_AGENT = 4;
  }

  oneof participant {
    // The name of the participant provided by Dialogflow. Format:
    // projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}
    string dialogflow_participant_name = 5 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }];

    // A user-specified ID representing the participant.
    string user_id = 6;
  }

  // Deprecated. Use `dialogflow_participant_name` instead.
  // The name of the Dialogflow participant. Format:
  // projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}
  string dialogflow_participant = 1 [deprecated = true];

  // Obfuscated user ID from Dialogflow.
  string obfuscated_external_user_id = 3;

  // The role of the participant.
  Role role = 2;
}

// The View resource.
message View {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/View"
    pattern: "projects/{project}/locations/{location}/views/{view}"
  };

  // Immutable. The resource name of the view.
  // Format:
  // projects/{project}/locations/{location}/views/{view}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The human-readable display name of the view.
  string display_name = 2;

  // Output only. The time at which this view was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time at which the view was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // String with specific view properties; must be non-empty.
  string value = 5;
}

// Selector of all available annotators and phrase matchers to run.
message AnnotatorSelector {
  // Configuration for summarization.
  message SummarizationConfig {
    // Summarization model to use, if `conversation_profile` is not used.
    enum SummarizationModel {
      // Unspecified summarization model.
      SUMMARIZATION_MODEL_UNSPECIFIED = 0;

      // The CCAI baseline model.
      BASELINE_MODEL = 1;
    }

    // Summarization must use either a preexisting conversation profile or one
    // of the supported default models.
    oneof model_source {
      // Resource name of the Dialogflow conversation profile.
      // Format:
      // projects/{project}/locations/{location}/conversationProfiles/{conversation_profile}
      string conversation_profile = 1 [(google.api.resource_reference) = {
        type: "dialogflow.googleapis.com/ConversationProfile"
      }];

      // Default summarization model to be used.
      SummarizationModel summarization_model = 2;
    }
  }

  // Whether to run the interruption annotator.
  bool run_interruption_annotator = 1;

  // Whether to run the silence annotator.
  bool run_silence_annotator = 2;

  // Whether to run the active phrase matcher annotator(s).
  bool run_phrase_matcher_annotator = 3;

  // The list of phrase matchers to run. If not provided, all active phrase
  // matchers will be used. If inactive phrase matchers are provided, they will
  // not be used. Phrase matchers will be run only if
  // run_phrase_matcher_annotator is set to true. Format:
  // projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
  repeated string phrase_matchers = 4 [(google.api.resource_reference) = {
    type: "contactcenterinsights.googleapis.com/PhraseMatcher"
  }];

  // Whether to run the sentiment annotator.
  bool run_sentiment_annotator = 5;

  // Whether to run the entity annotator.
  bool run_entity_annotator = 6;

  // Whether to run the intent annotator.
  bool run_intent_annotator = 7;

  // Whether to run the issue model annotator. A model should have already been
  // deployed for this to take effect.
  bool run_issue_model_annotator = 8;

  // The issue model to run. If not provided, the most recently deployed topic
  // model will be used. The provided issue model will only be used for
  // inference if the issue model is deployed and if run_issue_model_annotator
  // is set to true. If more than one issue model is provided, only the first
  // provided issue model will be used for inference.
  repeated string issue_models = 10 [(google.api.resource_reference) = {
    type: "contactcenterinsights.googleapis.com/IssueModel"
  }];

  // Whether to run the summarization annotator.
  bool run_summarization_annotator = 9;

  // Configuration for the summarization annotator.
  SummarizationConfig summarization_config = 11;
}