// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1;

import "google/api/field_behavior.proto";
import "google/protobuf/struct.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "cloud.google.com/go/aiplatform/apiv1beta1/aiplatformpb;aiplatformpb";
option java_multiple_files = true;
option java_outer_classname = "ExplanationMetadataProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";

// Metadata describing the Model's input and output for explanation.
message ExplanationMetadata {
  // Metadata of the input of a feature.
  //
  // Fields other than
  // [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines]
  // are applicable only for Models that are using Vertex AI-provided images
  // for Tensorflow.
  message InputMetadata {
    // Domain details of the input feature value. Provides numeric information
    // about the feature, such as its range (min, max). If the feature has
    // been pre-processed, for example with z-scoring, then it provides
    // information about how to recover the original feature. For example, if
    // the input feature is an image and it has been pre-processed to obtain
    // 0-mean and stddev = 1 values, then original_mean and original_stddev
    // refer to the mean and stddev of the original feature (e.g. image
    // tensor) from which the input feature (with mean = 0 and stddev = 1)
    // was obtained.
    message FeatureValueDomain {
      // The minimum permissible value for this feature.
      float min_value = 1;

      // The maximum permissible value for this feature.
      float max_value = 2;

      // If this input feature has been normalized to a mean value of 0,
      // the original_mean specifies the mean value of the domain prior to
      // normalization.
      float original_mean = 3;

      // If this input feature has been normalized to a standard deviation of
      // 1.0, the original_stddev specifies the standard deviation of the
      // domain prior to normalization.
      float original_stddev = 4;
    }
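
    // Illustrative note (not part of the API surface): for a z-score
    // normalized feature, the original value can be recovered from the
    // normalized value using original_mean and original_stddev. With
    // hypothetical values:
    // ```
    // normalized_value = 1.5
    // original_mean = 170.0   // hypothetical domain mean
    // original_stddev = 10.0  // hypothetical domain stddev
    // original_value = normalized_value * original_stddev + original_mean
    //                = 1.5 * 10.0 + 170.0 = 185.0
    // ```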

    // Visualization configurations for image explanation.
    message Visualization {
      // Type of the image visualization. Only applicable to
      // [Integrated Gradients
      // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
      enum Type {
        // Should not be used.
        TYPE_UNSPECIFIED = 0;

        // Shows which pixel contributed to the image prediction.
        PIXELS = 1;

        // Shows which region contributed to the image prediction by outlining
        // the region.
        OUTLINES = 2;
      }

      // Whether to only highlight pixels with positive contributions,
      // negative ones, or both. Defaults to POSITIVE.
      enum Polarity {
        // Default value. This is the same as POSITIVE.
        POLARITY_UNSPECIFIED = 0;

        // Highlights the pixels/outlines that were most influential to the
        // model's prediction.
        POSITIVE = 1;

        // Setting polarity to negative highlights areas that do not lead to
        // the model's current prediction.
        NEGATIVE = 2;

        // Shows both positive and negative attributions.
        BOTH = 3;
      }

      // The color scheme used for highlighting areas.
      enum ColorMap {
        // Should not be used.
        COLOR_MAP_UNSPECIFIED = 0;

        // Positive: green. Negative: pink.
        PINK_GREEN = 1;

        // Viridis color map: A perceptually uniform color mapping which is
        // easier to see by those with colorblindness and progresses from
        // yellow to green to blue. Positive: yellow. Negative: blue.
        VIRIDIS = 2;

        // Positive: red. Negative: red.
        RED = 3;

        // Positive: green. Negative: green.
        GREEN = 4;

        // Positive: green. Negative: red.
        RED_GREEN = 6;

        // PiYG palette.
        PINK_WHITE_GREEN = 5;
      }

      // How the original image is displayed in the visualization.
      enum OverlayType {
        // Default value. This is the same as NONE.
        OVERLAY_TYPE_UNSPECIFIED = 0;

        // No overlay.
        NONE = 1;

        // The attributions are shown on top of the original image.
        ORIGINAL = 2;

        // The attributions are shown on top of a grayscale version of the
        // original image.
        GRAYSCALE = 3;

        // The attributions are used as a mask to reveal predictive parts of
        // the image and hide the un-predictive parts.
        MASK_BLACK = 4;
      }

      // Type of the image visualization. Only applicable to
      // [Integrated Gradients
      // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
      // OUTLINES shows regions of attribution, while PIXELS shows per-pixel
      // attribution. Defaults to OUTLINES.
      Type type = 1;

      // Whether to only highlight pixels with positive contributions,
      // negative ones, or both. Defaults to POSITIVE.
      Polarity polarity = 2;

      // The color scheme used for the highlighted areas.
      //
      // Defaults to PINK_GREEN for
      // [Integrated Gradients
      // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
      // which shows positive attributions in green and negative in pink.
      //
      // Defaults to VIRIDIS for
      // [XRAI
      // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
      // which highlights the most influential regions in yellow and the least
      // influential in blue.
      ColorMap color_map = 3;

      // Excludes attributions above the specified percentile from the
      // highlighted areas. Using clip_percent_upperbound and
      // clip_percent_lowerbound together can be useful for filtering out
      // noise and making it easier to see areas of strong attribution.
      // Defaults to 99.9.
      float clip_percent_upperbound = 4;

      // Excludes attributions below the specified percentile from the
      // highlighted areas. Defaults to 62.
      float clip_percent_lowerbound = 5;

      // How the original image is displayed in the visualization.
      // Adjusting the overlay can help increase visual clarity if the
      // original image makes it difficult to view the visualization.
      // Defaults to NONE.
      OverlayType overlay_type = 6;
    }
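
    // Illustrative example (hypothetical values, shown in the proto3 JSON
    // mapping of this message): a visualization that outlines positively
    // attributed regions on top of a grayscale copy of the input image:
    // ```
    // "visualization": {
    //   "type": "OUTLINES",
    //   "polarity": "POSITIVE",
    //   "colorMap": "PINK_GREEN",
    //   "clipPercentUpperbound": 99.9,
    //   "clipPercentLowerbound": 62,
    //   "overlayType": "GRAYSCALE"
    // }
    // ```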

    // Defines how a feature is encoded. Defaults to IDENTITY.
    enum Encoding {
      // Default value. This is the same as IDENTITY.
      ENCODING_UNSPECIFIED = 0;

      // The tensor represents one feature.
      IDENTITY = 1;

      // The tensor represents a bag of features where each index maps to
      // a feature.
      // [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
      // must be provided for this encoding. For example:
      // ```
      // input = [27, 6.0, 150]
      // index_feature_mapping = ["age", "height", "weight"]
      // ```
      BAG_OF_FEATURES = 2;

      // The tensor represents a bag of features where each index maps to a
      // feature. Zero values in the tensor indicate that the corresponding
      // feature is non-existent.
      // [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
      // must be provided for this encoding. For example:
      // ```
      // input = [2, 0, 5, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      BAG_OF_FEATURES_SPARSE = 3;

      // The tensor is a list of binary values representing whether a feature
      // exists or not (1 indicates existence).
      // [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
      // must be provided for this encoding. For example:
      // ```
      // input = [1, 0, 1, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      INDICATOR = 4;

      // The tensor is encoded into a 1-dimensional array represented by an
      // encoded tensor.
      // [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name]
      // must be provided for this encoding. For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
      // ```
      COMBINED_EMBEDDING = 5;

      // Select this encoding when the input tensor is encoded into a
      // 2-dimensional array represented by an encoded tensor.
      // [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name]
      // must be provided for this encoding. The first dimension of the
      // encoded tensor's shape is the same as the input tensor's shape.
      // For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
      //            [0.2, 0.1, 0.4, 0.3, 0.5],
      //            [0.5, 0.1, 0.3, 0.5, 0.4],
      //            [0.5, 0.3, 0.1, 0.2, 0.4],
      //            [0.4, 0.3, 0.2, 0.5, 0.1]]
      // ```
      CONCAT_EMBEDDING = 6;
    }

    // Baseline inputs for this feature.
    //
    // If no baseline is specified, Vertex AI chooses the baseline for this
    // feature. If multiple baselines are specified, Vertex AI returns the
    // average attributions across them in
    // [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
    //
    // For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
    // of each baseline must match the shape of the input tensor. If a scalar
    // is provided, we broadcast to the same shape as the input tensor.
    //
    // For custom images, the elements of the baselines must be in the same
    // format as the feature's input in the
    // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][].
    // The schema of any single instance may be specified via Endpoint's
    // DeployedModels'
    // [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
    // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
    // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
    repeated google.protobuf.Value input_baselines = 1;
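
    // Illustrative example (hypothetical values, shown in the proto3 JSON
    // mapping): two baselines for a scalar numeric feature, whose
    // attributions are averaged in Attribution.feature_attributions:
    // ```
    // "inputBaselines": [0.0, 42.5]
    // ```
    // For Vertex AI-provided Tensorflow images, a single scalar baseline is
    // broadcast to the input tensor's shape, as described above.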

    // Name of the input tensor for this feature. Required and is only
    // applicable to Vertex AI-provided images for Tensorflow.
    string input_tensor_name = 2;

    // Defines how the feature is encoded into the input tensor. Defaults to
    // IDENTITY.
    Encoding encoding = 3;

    // Modality of the feature. Valid values are: numeric, image. Defaults to
    // numeric.
    string modality = 4;

    // The domain details of the input feature value, such as its min/max
    // and, if the feature has been normalized, its original mean and
    // standard deviation.
    FeatureValueDomain feature_value_domain = 5;

    // Specifies the index of the values of the input tensor.
    // Required when the input tensor is a sparse representation. Refer to
    // Tensorflow documentation for more details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
    string indices_tensor_name = 6;

    // Specifies the shape of the values of the input if the input is a
    // sparse representation. Refer to Tensorflow documentation for more
    // details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
    string dense_shape_tensor_name = 7;

    // A list of feature names for each index in the input tensor.
    // Required when the input
    // [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding]
    // is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, or INDICATOR.
    repeated string index_feature_mapping = 8;

    // The encoded tensor is a transformation of the input tensor. Must be
    // provided if choosing
    // [Integrated Gradients
    // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]
    // or [XRAI
    // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
    // and the input tensor is not differentiable.
    //
    // An encoded tensor is generated if the input tensor is encoded by a
    // lookup table.
    string encoded_tensor_name = 9;

    // A list of baselines for the encoded tensor.
    //
    // The shape of each baseline should match the shape of the encoded
    // tensor. If a scalar is provided, Vertex AI broadcasts to the same
    // shape as the encoded tensor.
    repeated google.protobuf.Value encoded_baselines = 10;

    // Visualization configurations for image explanation.
    Visualization visualization = 11;

    // Name of the group that the input belongs to. Features with the same
    // group name are treated as one feature when computing attributions.
    // Features grouped together can have values of different shapes. If
    // provided, a single attribution is generated in
    // [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions],
    // keyed by the group name.
    string group_name = 12;
  }
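
  // Illustrative example (hypothetical names and values, shown in the proto3
  // JSON mapping): an InputMetadata entry for an image feature of a
  // Vertex AI-provided Tensorflow image, using a scalar baseline and a
  // visualization:
  // ```
  // "image": {
  //   "inputTensorName": "image_tensor",
  //   "modality": "image",
  //   "inputBaselines": [0.0],
  //   "visualization": {
  //     "type": "OUTLINES",
  //     "polarity": "POSITIVE",
  //     "overlayType": "GRAYSCALE"
  //   }
  // }
  // ```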

  // Metadata of the prediction output to be explained.
  message OutputMetadata {
    // Defines how to map
    // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
    // to
    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name].
    //
    // If neither of the fields is specified,
    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
    // will not be populated.
    oneof display_name_mapping {
      // Static mapping between the index and display name.
      //
      // Use this if the outputs are a deterministic n-dimensional array,
      // e.g. a list of scores of all the classes in a pre-defined order for
      // a multi-classification Model. It's not feasible if the outputs are
      // non-deterministic, e.g. the Model produces top-k classes or sorts
      // the outputs by their values.
      //
      // The shape of the value must be an n-dimensional array of strings.
      // The number of dimensions must match that of the outputs to be
      // explained. The
      // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
      // is populated by looking up the mapping with
      // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
      google.protobuf.Value index_display_name_mapping = 1;

      // Specify a field name in the prediction to look for the display name.
      //
      // Use this if the prediction contains the display names for the
      // outputs.
      //
      // The display names in the prediction must have the same shape as the
      // outputs, so that they can be located by
      // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
      // for a specific output.
      string display_name_mapping_key = 2;
    }

    // Name of the output tensor. Required and is only applicable to
    // Vertex AI-provided images for Tensorflow.
    string output_tensor_name = 3;
  }
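
  // Illustrative example (hypothetical class names, shown in the proto3 JSON
  // mapping): for a classifier whose output is a list of three class scores
  // in a fixed order, a static mapping lets Attribution.output_index 1 be
  // reported with the display name "cat":
  // ```
  // "scores": {
  //   "outputTensorName": "scores",
  //   "indexDisplayNameMapping": ["dog", "cat", "bird"]
  // }
  // ```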

  // Required. Map from feature names to feature input metadata. Keys are the
  // names of the features. Values are the specifications of the features.
  //
  // An empty InputMetadata is valid. It describes a text feature which has
  // the name specified as the key in
  // [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  // The baseline of the empty feature is chosen by Vertex AI.
  //
  // For Vertex AI-provided Tensorflow images, the key can be any friendly
  // name of the feature. Once specified,
  // [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
  // are keyed by this key (if not grouped with another feature).
  //
  // For custom images, the key must match with the key in
  // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
  map<string, InputMetadata> inputs = 1
      [(google.api.field_behavior) = REQUIRED];

  // Required. Map from output names to output metadata.
  //
  // For Vertex AI-provided Tensorflow images, keys can be any user-defined
  // string that consists of any UTF-8 characters.
  //
  // For custom images, the key is the name of the output field in the
  // prediction to be explained.
  //
  // Currently only one key is allowed.
  map<string, OutputMetadata> outputs = 2
      [(google.api.field_behavior) = REQUIRED];

  // Points to a YAML file stored on Google Cloud Storage describing the
  // format of the [feature
  // attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML tabular Models always have this field populated by Vertex AI.
  // Note: The URI given on output may differ, including in the URI scheme,
  // from the one given on input. The output URI will point to a location
  // where the user only has read access.
  string feature_attributions_schema_uri = 3;

  // Name of the source to generate embeddings for example-based
  // explanations.
  string latent_space_source = 5;
}