xref: /aosp_15_r20/external/googleapis/google/cloud/aiplatform/v1/explanation.proto (revision d5c09012810ac0c9f33fe448fb6da8260d444cc9)
1// Copyright 2023 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15syntax = "proto3";
16
17package google.cloud.aiplatform.v1;
18
19import "google/api/field_behavior.proto";
20import "google/cloud/aiplatform/v1/explanation_metadata.proto";
21import "google/cloud/aiplatform/v1/io.proto";
22import "google/protobuf/struct.proto";
23
24option csharp_namespace = "Google.Cloud.AIPlatform.V1";
25option go_package = "cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb";
26option java_multiple_files = true;
27option java_outer_classname = "ExplanationProto";
28option java_package = "com.google.cloud.aiplatform.v1";
29option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
30option ruby_package = "Google::Cloud::AIPlatform::V1";
31
32// Explanation of a prediction (provided in
33// [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions])
34// produced by the Model on a given
35// [instance][google.cloud.aiplatform.v1.ExplainRequest.instances].
36message Explanation {
37  // Output only. Feature attributions grouped by predicted outputs.
38  //
39  // For Models that predict only one output, such as regression Models that
40  // predict only one score, there is only one attribution that explains the
41  // predicted output. For Models that predict multiple outputs, such as
42  // multiclass Models that predict multiple classes, each element explains one
43  // specific item.
44  // [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index]
45  // can be used to identify which output this attribution is explaining.
46  //
47  // By default, we provide Shapley values for the predicted class. However,
48  // you can configure the explanation request to generate Shapley values for
49  // any other classes too. For example, if a model predicts a probability of
50  // `0.4` for approving a loan application, the model's decision is to reject
51  // the application since `p(reject) = 0.6 > p(approve) = 0.4`, and the default
52  // Shapley values would be computed for rejection decision and not approval,
53  // even though the latter might be the positive class.
54  //
55  // If users set
56  // [ExplanationParameters.top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k],
57  // the attributions are sorted by
58  // [instance_output_value][google.cloud.aiplatform.v1.Attribution.instance_output_value]
59  // in descending order. If
60  // [ExplanationParameters.output_indices][google.cloud.aiplatform.v1.ExplanationParameters.output_indices]
61  // is specified, the attributions are stored by
62  // [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index]
63  // in the same order as they appear in the output_indices.
64  repeated Attribution attributions = 1
65      [(google.api.field_behavior) = OUTPUT_ONLY];
66
67  // Output only. List of the nearest neighbors for example-based explanations.
68  //
69  // For models deployed with the examples explanations feature enabled, the
70  // attributions field is empty and instead the neighbors field is populated.
71  repeated Neighbor neighbors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
72}
73
74// Aggregated explanation metrics for a Model over a set of instances.
75message ModelExplanation {
76  // Output only. Aggregated attributions explaining the Model's prediction
77  // outputs over the set of instances. The attributions are grouped by outputs.
78  //
79  // For Models that predict only one output, such as regression Models that
80  // predict only one score, there is only one attribution that explains the
81  // predicted output. For Models that predict multiple outputs, such as
82  // multiclass Models that predict multiple classes, each element explains one
83  // specific item.
84  // [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index]
85  // can be used to identify which output this attribution is explaining.
86  //
87  // The
88  // [baselineOutputValue][google.cloud.aiplatform.v1.Attribution.baseline_output_value],
89  // [instanceOutputValue][google.cloud.aiplatform.v1.Attribution.instance_output_value]
90  // and
91  // [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
92  // fields are averaged over the test data.
93  //
94  // NOTE: Currently AutoML tabular classification Models produce only one
95  // attribution, which averages attributions over all the classes it predicts.
96  // [Attribution.approximation_error][google.cloud.aiplatform.v1.Attribution.approximation_error]
97  // is not populated.
98  repeated Attribution mean_attributions = 1
99      [(google.api.field_behavior) = OUTPUT_ONLY];
100}
101
102// Attribution that explains a particular prediction output.
103message Attribution {
104  // Output only. Model predicted output if the input instance is constructed
105  // from the baselines of all the features defined in
106  // [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
107  // The field name of the output is determined by the key in
108  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
109  //
110  // If the Model's predicted output has multiple dimensions (rank > 1), this is
111  // the value in the output located by
112  // [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
113  //
114  // If there are multiple baselines, their output values are averaged.
115  double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
116
117  // Output only. Model predicted output on the corresponding [explanation
118  // instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. The field
119  // name of the output is determined by the key in
120  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
121  //
122  // If the Model predicted output has multiple dimensions, this is the value in
123  // the output located by
124  // [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
125  double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
126
127  // Output only. Attributions of each explained feature. Features are extracted
128  // from the [prediction
129  // instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
130  // to [explanation metadata for
131  // inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
132  //
133  // The value is a struct, whose keys are the name of the feature. The values
134  // are how much the feature in the
135  // [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
136  // to the predicted result.
137  //
138  // The format of the value is determined by the feature's input format:
139  //
140  //   * If the feature is a scalar value, the attribution value is a
141  //     [floating number][google.protobuf.Value.number_value].
142  //
143  //   * If the feature is an array of scalar values, the attribution value is
144  //     an [array][google.protobuf.Value.list_value].
145  //
146  //   * If the feature is a struct, the attribution value is a
147  //     [struct][google.protobuf.Value.struct_value]. The keys in the
148  //     attribution value struct are the same as the keys in the feature
149  //     struct. The formats of the values in the attribution struct are
150  //     determined by the formats of the values in the feature struct.
151  //
152  // The
153  // [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
154  // field, pointed to by the
155  // [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
156  // [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
157  // object, points to the schema file that describes the features and their
158  // attribution values (if it is populated).
159  google.protobuf.Value feature_attributions = 3
160      [(google.api.field_behavior) = OUTPUT_ONLY];
161
162  // Output only. The index that locates the explained prediction output.
163  //
164  // If the prediction output is a scalar value, output_index is not populated.
165  // If the prediction output has multiple dimensions, the length of the
166  // output_index list is the same as the number of dimensions of the output.
167  // The i-th element in output_index is the element index of the i-th dimension
168  // of the output vector. Indices start from 0.
169  repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
170
171  // Output only. The display name of the output identified by
172  // [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
173  // example, the predicted class name by a multi-classification Model.
174  //
175  // This field is populated if and only if the Model predicts display names as
176  // a separate field along with the explained output. The predicted display
177  // name must have the same shape as the explained output, and can be located
178  // using output_index.
179  string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
180
181  // Output only. Error of
182  // [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
183  // caused by approximation used in the explanation method. Lower value means
184  // more precise attributions.
185  //
186  // * For Sampled Shapley
187  // [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
188  // increasing
189  // [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
190  // might reduce the error.
191  // * For Integrated Gradients
192  // [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
193  // increasing
194  // [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
195  // might reduce the error.
196  // * For [XRAI
197  // attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
198  // increasing
199  // [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
200  // reduce the error.
201  //
202  // See [this introduction](/vertex-ai/docs/explainable-ai/overview)
203  // for more information.
204  double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
205
206  // Output only. Name of the explain output. Specified as the key in
207  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
208  string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
209}
210
211// Neighbors for example-based explanations.
//
// Returned in
// [Explanation.neighbors][google.cloud.aiplatform.v1.Explanation.neighbors]
// for models deployed with the examples explanations feature enabled.
212message Neighbor {
213  // Output only. The neighbor id.
214  string neighbor_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

216  // Output only. The neighbor distance.
217  double neighbor_distance = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
218}
219
220// Specification of Model explanation.
//
// The subset of these entries that can be overridden at online explanation
// time is described by
// [ExplanationSpecOverride][google.cloud.aiplatform.v1.ExplanationSpecOverride].
221message ExplanationSpec {
222  // Required. Parameters that configure explaining of the Model's predictions.
223  ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];

225  // Optional. Metadata describing the Model's input and output for explanation.
226  ExplanationMetadata metadata = 2 [(google.api.field_behavior) = OPTIONAL];
227}
228
229// Parameters to configure explaining for Model's predictions.
230message ExplanationParameters {
231  oneof method {
232    // An attribution method that approximates Shapley values for features that
233    // contribute to the label being predicted. A sampling strategy is used to
234    // approximate the value rather than considering all subsets of features.
235    // Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
236    SampledShapleyAttribution sampled_shapley_attribution = 1;

238    // An attribution method that computes Aumann-Shapley values taking
239    // advantage of the model's fully differentiable structure. Refer to this
240    // paper for more details: https://arxiv.org/abs/1703.01365
241    IntegratedGradientsAttribution integrated_gradients_attribution = 2;

243    // An attribution method that redistributes Integrated Gradients
244    // attribution to segmented regions, taking advantage of the model's fully
245    // differentiable structure. Refer to this paper for
246    // more details: https://arxiv.org/abs/1906.02825
247    //
248    // XRAI currently performs better on natural images, like a picture of a
249    // house or an animal. If the images are taken in artificial environments,
250    // like a lab or manufacturing line, or from diagnostic equipment, like
251    // x-rays or quality-control cameras, use Integrated Gradients instead.
252    XraiAttribution xrai_attribution = 3;

254    // Example-based explanations that return the nearest neighbors from the
255    // provided dataset.
256    Examples examples = 7;
257  }

259  // If populated, returns attributions for top K indices of outputs
260  // (defaults to 1). Only applies to Models that predict more than one output
261  // (e.g., multi-class Models). When set to -1, returns explanations for all
262  // outputs.
263  int32 top_k = 4;

265  // If populated, only returns attributions that have
266  // [output_index][google.cloud.aiplatform.v1.Attribution.output_index]
267  // contained in output_indices. It must be an ndarray of integers, with the
268  // same shape as the output it's explaining.
269  //
270  // If not populated, returns attributions for
271  // [top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k] indices of
272  // outputs. If neither top_k nor output_indices is populated, returns the
273  // argmax index of the outputs.
274  //
275  // Only applicable to Models that predict multiple outputs (e.g., multi-class
276  // Models that predict multiple classes).
277  google.protobuf.ListValue output_indices = 5;
278}
279
280// An attribution method that approximates Shapley values for features that
281// contribute to the label being predicted. A sampling strategy is used to
282// approximate the value rather than considering all subsets of features.
// Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
//
// Selected via
// [ExplanationParameters.sampled_shapley_attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution].
283message SampledShapleyAttribution {
284  // Required. The number of feature permutations to consider when approximating
285  // the Shapley values.
286  //
287  // Valid range of its value is [1, 50], inclusively.
288  int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
289}
290
291// An attribution method that computes the Aumann-Shapley value taking advantage
292// of the model's fully differentiable structure. Refer to this paper for
293// more details: https://arxiv.org/abs/1703.01365
//
// Selected via
// [ExplanationParameters.integrated_gradients_attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution].
294message IntegratedGradientsAttribution {
295  // Required. The number of steps for approximating the path integral.
296  // A good value to start is 50 and gradually increase until the
297  // sum to diff property is within the desired error range.
298  //
299  // Valid range of its value is [1, 100], inclusively.
300  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];

302  // Config for SmoothGrad approximation of gradients.
303  //
304  // When enabled, the gradients are approximated by averaging the gradients
305  // from noisy samples in the vicinity of the inputs. Adding
306  // noise can help improve the computed gradients. Refer to this paper for more
307  // details: https://arxiv.org/pdf/1706.03825.pdf
308  SmoothGradConfig smooth_grad_config = 2;

310  // Config for IG with blur baseline.
311  //
312  // When enabled, a linear path from the maximally blurred image to the input
313  // image is created. Using a blurred baseline instead of zero (black image) is
314  // motivated by the BlurIG approach explained here:
315  // https://arxiv.org/abs/2004.03383
316  BlurBaselineConfig blur_baseline_config = 3;
317}
318
319// An explanation method that redistributes Integrated Gradients
320// attributions to segmented regions, taking advantage of the model's fully
321// differentiable structure. Refer to this paper for more details:
322// https://arxiv.org/abs/1906.02825
323//
324// Supported only by image Models.
325message XraiAttribution {
326  // Required. The number of steps for approximating the path integral.
327  // A good value to start is 50 and gradually increase until the
328  // sum to diff property is within the desired error range.
329  //
330  // Valid range of its value is [1, 100], inclusively.
331  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];

333  // Config for SmoothGrad approximation of gradients.
334  //
335  // When enabled, the gradients are approximated by averaging the gradients
336  // from noisy samples in the vicinity of the inputs. Adding
337  // noise can help improve the computed gradients. Refer to this paper for more
338  // details: https://arxiv.org/pdf/1706.03825.pdf
339  SmoothGradConfig smooth_grad_config = 2;

341  // Config for XRAI with blur baseline.
342  //
343  // When enabled, a linear path from the maximally blurred image to the input
344  // image is created. Using a blurred baseline instead of zero (black image) is
345  // motivated by the BlurIG approach explained here:
346  // https://arxiv.org/abs/2004.03383
347  BlurBaselineConfig blur_baseline_config = 3;
348}
349
350// Config for SmoothGrad approximation of gradients.
351//
352// When enabled, the gradients are approximated by averaging the gradients from
353// noisy samples in the vicinity of the inputs. Adding noise can help improve
354// the computed gradients. Refer to this paper for more details:
355// https://arxiv.org/pdf/1706.03825.pdf
356message SmoothGradConfig {
357  // Represents the standard deviation of the gaussian kernel
358  // that will be used to add noise to the interpolated inputs
359  // prior to computing gradients.
  //
  // NOTE(review): this oneof name is PascalCase rather than the conventional
  // lower_snake_case; it is kept as-is because renaming it would change the
  // generated code of a published API.
360  oneof GradientNoiseSigma {
361    // This is a single float value and will be used to add noise to all the
362    // features. Use this field when all features are normalized to have the
363    // same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
364    // features are normalized to have 0-mean and 1-variance. Learn more about
365    // [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization).
366    //
367    // For best results the recommended value is about 10% - 20% of the standard
368    // deviation of the input feature. Refer to section 3.2 of the SmoothGrad
369    // paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
370    //
371    // If the distribution is different per feature, set
372    // [feature_noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.feature_noise_sigma]
373    // instead for each feature.
374    float noise_sigma = 1;

376    // This is similar to
377    // [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma],
378    // but provides additional flexibility. A separate noise sigma can be
379    // provided for each feature, which is useful if their distributions are
380    // different. No noise is added to features that are not set. If this field
381    // is unset,
382    // [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma]
383    // will be used for all features.
384    FeatureNoiseSigma feature_noise_sigma = 2;
385  }

387  // The number of gradient samples to use for
388  // approximation. The higher this number, the more accurate the gradient
389  // is, but the runtime complexity increases by this factor as well.
390  // Valid range of its value is [1, 50]. Defaults to 3.
391  int32 noisy_sample_count = 3;
392}
393
394// Noise sigma by features. Noise sigma represents the standard deviation of the
395// gaussian kernel that will be used to add noise to interpolated inputs prior
396// to computing gradients.
//
// Set via
// [SmoothGradConfig.feature_noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.feature_noise_sigma].
397message FeatureNoiseSigma {
398  // Noise sigma for a single feature.
399  message NoiseSigmaForFeature {
400    // The name of the input feature for which noise sigma is provided. The
401    // features are defined in
402    // [explanation metadata
403    // inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
404    string name = 1;

406    // This represents the standard deviation of the Gaussian kernel that will
407    // be used to add noise to the feature prior to computing gradients. Similar
408    // to [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma]
409    // but represents the noise added to the current feature. Defaults to 0.1.
410    float sigma = 2;
411  }

413  // Noise sigma per feature. No noise is added to features that are not set.
414  repeated NoiseSigmaForFeature noise_sigma = 1;
415}
416
417// Config for blur baseline.
418//
419// When enabled, a linear path from the maximally blurred image to the input
420// image is created. Using a blurred baseline instead of zero (black image) is
421// motivated by the BlurIG approach explained here:
422// https://arxiv.org/abs/2004.03383
//
// Used by
// [IntegratedGradientsAttribution.blur_baseline_config][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.blur_baseline_config]
// and
// [XraiAttribution.blur_baseline_config][google.cloud.aiplatform.v1.XraiAttribution.blur_baseline_config].
423message BlurBaselineConfig {
424  // The standard deviation of the blur kernel for the blurred baseline. The
425  // same blurring parameter is used for both the height and the width
426  // dimension. If not set, the method defaults to the zero (i.e. black for
427  // images) baseline.
428  float max_blur_sigma = 1;
429}
430
431// Example-based explainability that returns the nearest neighbors from the
432// provided dataset.
//
// NOTE(review): field number 1 is unused (fields here use 2-5); if it was
// previously assigned, it should be marked `reserved` upstream — TODO confirm.
433message Examples {
434  // The Cloud Storage input instances.
435  message ExampleGcsSource {
436    // The format of the input example instances.
437    enum DataFormat {
438      // Format unspecified, used when unset.
439      DATA_FORMAT_UNSPECIFIED = 0;

441      // Examples are stored in JSONL files.
442      JSONL = 1;
443    }

445    // The format in which instances are given; if not specified, JSONL format
446    // is assumed. Currently only JSONL format is supported.
447    DataFormat data_format = 1;

449    // The Cloud Storage location for the input instances.
450    GcsSource gcs_source = 2;
451  }

453  oneof source {
454    // The Cloud Storage input instances.
455    ExampleGcsSource example_gcs_source = 5;
456  }

458  oneof config {
459    // The full configuration for the generated index, the semantics are the
460    // same as [metadata][google.cloud.aiplatform.v1.Index.metadata] and should
461    // match
462    // [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
463    google.protobuf.Value nearest_neighbor_search_config = 2;

465    // Simplified preset configuration, which automatically sets configuration
466    // values based on the desired query speed-precision trade-off and modality.
467    Presets presets = 4;
468  }

470  // The number of neighbors to return when querying for examples.
471  int32 neighbor_count = 3;
472}
473
474// Preset configuration for example-based explanations.
475message Presets {
476  // Preset option controlling parameters for query speed-precision trade-off.
  //
  // NOTE(review): this enum has no `_UNSPECIFIED` zero value — `PRECISE = 0`
  // doubles as the default. The `query` field below is declared `optional`, so
  // presence still distinguishes "unset" from an explicit `PRECISE`.
477  enum Query {
478    // More precise neighbors as a trade-off against slower response.
479    PRECISE = 0;

481    // Faster response as a trade-off against less precise neighbors.
482    FAST = 1;
483  }

485  // Preset option controlling parameters for different modalities.
486  enum Modality {
487    // Should not be set. Added as a recommended best practice for enums
488    MODALITY_UNSPECIFIED = 0;

490    // IMAGE modality
491    IMAGE = 1;

493    // TEXT modality
494    TEXT = 2;

496    // TABULAR modality
497    TABULAR = 3;
498  }

500  // Preset option controlling parameters for speed-precision trade-off when
501  // querying for examples. If omitted, defaults to `PRECISE`.
502  optional Query query = 1;

504  // The modality of the uploaded model, which automatically configures the
505  // distance measurement and feature normalization for the underlying example
506  // index and queries. If your model does not precisely fit one of these types,
507  // it is okay to choose the closest type.
508  Modality modality = 2;
509}
510
511// The [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] entries
512// that can be overridden at [online
513// explanation][google.cloud.aiplatform.v1.PredictionService.Explain] time.
514message ExplanationSpecOverride {
515  // The parameters to be overridden. Note that the
516  // attribution method cannot be changed. If not specified,
517  // no parameter is overridden.
518  ExplanationParameters parameters = 1;

520  // The metadata to be overridden. If not specified, no metadata is overridden.
521  ExplanationMetadataOverride metadata = 2;

523  // The example-based explanations parameter overrides.
  // NOTE(review): presumably, if not specified, no example-based parameter is
  // overridden (matching the sibling fields above) — TODO confirm.
524  ExamplesOverride examples_override = 3;
525}
526
527// The [ExplanationMetadata][google.cloud.aiplatform.v1.ExplanationMetadata]
528// entries that can be overridden at [online
529// explanation][google.cloud.aiplatform.v1.PredictionService.Explain] time.
530message ExplanationMetadataOverride {
531  // The [input
532  // metadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata]
533  // entries to be overridden.
534  message InputMetadataOverride {
535    // Baseline inputs for this feature.
536    //
537    // This overrides the `input_baseline` field of the
538    // [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata]
539    // object of the corresponding feature's input metadata. If it's not
540    // specified, the original baselines are not overridden.
541    repeated google.protobuf.Value input_baselines = 1;
542  }

544  // Required. Overrides the [input
545  // metadata][google.cloud.aiplatform.v1.ExplanationMetadata.inputs] of the
546  // features. The key is the name of the feature to be overridden. The keys
547  // specified here must exist in the input metadata to be overridden. If a
548  // feature is not specified here, the corresponding feature's input metadata
549  // is not overridden.
  //
  // (Only `input_baselines` can currently be overridden per feature; see
  // `InputMetadataOverride`, which contains no other fields.)
550  map<string, InputMetadataOverride> inputs = 1
551      [(google.api.field_behavior) = REQUIRED];
552}
553
554// Overrides for example-based explanations.
555message ExamplesOverride {
556  // Data format enum.
557  enum DataFormat {
558    // Unspecified format. Must not be used.
559    DATA_FORMAT_UNSPECIFIED = 0;

561    // Provided data is a set of model inputs.
562    INSTANCES = 1;

564    // Provided data is a set of embeddings.
565    EMBEDDINGS = 2;
566  }

568  // The number of neighbors to return.
569  int32 neighbor_count = 1;

571  // The number of neighbors to return that have the same crowding tag.
572  int32 crowding_count = 2;

574  // Restrict the resulting nearest neighbors to respect these constraints.
575  repeated ExamplesRestrictionsNamespace restrictions = 3;

577  // If true, return the embeddings instead of neighbors.
578  bool return_embeddings = 4;

580  // The format of the data being provided with each call.
  // Must be set to a value other than `DATA_FORMAT_UNSPECIFIED` (see the enum
  // comment above).
581  DataFormat data_format = 5;
582}
583
584// Restrictions namespace for example-based explanations overrides.
585message ExamplesRestrictionsNamespace {
586  // The namespace name.
587  string namespace_name = 1;

589  // The list of allowed tags.
590  repeated string allow = 2;

592  // The list of denied tags.
593  repeated string deny = 3;
}
595