// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.asset.v1p7beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Asset.V1P7Beta1";
option go_package = "cloud.google.com/go/asset/apiv1p7beta1/assetpb;assetpb";
option java_multiple_files = true;
option java_outer_classname = "AssetServiceProto";
option java_package = "com.google.cloud.asset.v1p7beta1";
option php_namespace = "Google\\Cloud\\Asset\\V1p7beta1";

// Asset service definition.
service AssetService {
  option (google.api.default_host) = "cloudasset.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Exports assets with time and resource types to a given Cloud Storage
  // location/BigQuery table. For Cloud Storage location destinations, the
  // output format is newline-delimited JSON. Each line represents a
  // [google.cloud.asset.v1p7beta1.Asset][google.cloud.asset.v1p7beta1.Asset] in
  // the JSON format; for BigQuery table destinations, the output table stores
  // the fields in asset proto as columns. This API implements the
  // [google.longrunning.Operation][google.longrunning.Operation] API, which
  // allows you to keep track of the export. We recommend intervals of at least
  // 2 seconds with exponential retry to poll the export operation result. For
  // regular-size resource parent, the export operation usually finishes within
  // 5 minutes.
  rpc ExportAssets(ExportAssetsRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1p7beta1/{parent=*/*}:exportAssets"
      body: "*"
    };
    option (google.longrunning.operation_info) = {
      response_type: "google.cloud.asset.v1p7beta1.ExportAssetsResponse"
      metadata_type: "google.cloud.asset.v1p7beta1.ExportAssetsRequest"
    };
  }
}

// Export asset request.
message ExportAssetsRequest {
  // Required. The relative name of the root asset. This can only be an
  // organization number (such as "organizations/123"), a project ID (such as
  // "projects/my-project-id"), or a project number (such as "projects/12345"),
  // or a folder number (such as "folders/123").
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "cloudasset.googleapis.com/Asset"
    }
  ];

  // Timestamp to take an asset snapshot. This can only be set to a timestamp
  // between the current time and the current time minus 35 days (inclusive).
  // If not specified, the current time will be used. Due to delays in resource
  // data collection and indexing, there is a volatile window during which
  // running the same query may get different results.
  google.protobuf.Timestamp read_time = 2;

  // A list of asset types to take a snapshot for. For example:
  // "compute.googleapis.com/Disk".
  //
  // Regular expressions are also supported. For example:
  //
  // * "compute.googleapis.com.*" snapshots resources whose asset type starts
  //   with "compute.googleapis.com".
  // * ".*Instance" snapshots resources whose asset type ends with "Instance".
  // * ".*Instance.*" snapshots resources whose asset type contains "Instance".
  //
  // See [RE2](https://github.com/google/re2/wiki/Syntax) for all supported
  // regular expression syntax. If the regular expression does not match any
  // supported asset type, an INVALID_ARGUMENT error will be returned.
  //
  // If specified, only matching assets will be returned, otherwise, it will
  // snapshot all asset types. See [Introduction to Cloud Asset
  // Inventory](https://cloud.google.com/asset-inventory/docs/overview)
  // for all supported asset types.
  repeated string asset_types = 3;

  // Asset content type. If not specified, no content but the asset name will
  // be returned.
  ContentType content_type = 4;

  // Required. Output configuration indicating where the results will be output
  // to.
  OutputConfig output_config = 5 [(google.api.field_behavior) = REQUIRED];

  // A list of relationship types to export, for example:
  // `INSTANCE_TO_INSTANCEGROUP`. This field should only be specified if
  // content_type=RELATIONSHIP. If specified, it will snapshot [asset_types]'
  // specified relationships, or give errors if any relationship_types'
  // supported types are not in [asset_types]. If not specified, it will
  // snapshot all [asset_types]' supported relationships. An unspecified
  // [asset_types] field means all supported asset_types. See [Introduction to
  // Cloud Asset
  // Inventory](https://cloud.google.com/asset-inventory/docs/overview) for all
  // supported asset types and relationship types.
  repeated string relationship_types = 6;
}

// The export asset response. This message is returned by the
// [google.longrunning.Operations.GetOperation][google.longrunning.Operations.GetOperation]
// method in the returned
// [google.longrunning.Operation.response][google.longrunning.Operation.response]
// field.
message ExportAssetsResponse {
  // Time the snapshot was taken.
  google.protobuf.Timestamp read_time = 1;

  // Output configuration indicating where the results were output to.
  OutputConfig output_config = 2;

  // Output result indicating where the assets were exported to. For example, a
  // set of actual Cloud Storage object URIs where the assets are
  // exported to. The URIs can be different from what [output_config] has
  // specified, as the service will split the output object into multiple ones
  // once it exceeds a single Cloud Storage object limit.
  OutputResult output_result = 3;
}

// Output configuration for export assets destination.
message OutputConfig {
  // Asset export destination.
  oneof destination {
    // Destination on Cloud Storage.
    GcsDestination gcs_destination = 1;

    // Destination on BigQuery. The output table stores the fields in asset
    // proto as columns in BigQuery.
    BigQueryDestination bigquery_destination = 2;
  }
}

// Output result of export assets.
message OutputResult {
  // Asset export result.
  oneof result {
    // Export result on Cloud Storage.
    GcsOutputResult gcs_result = 1;
  }
}

// A Cloud Storage output result.
message GcsOutputResult {
  // List of URIs of the Cloud Storage objects. Example:
  // "gs://bucket_name/object_name".
  repeated string uris = 1;
}

// A Cloud Storage location.
message GcsDestination {
  // Required.
  oneof object_uri {
    // The URI of the Cloud Storage object. It's the same URI that is used by
    // gsutil. Example: "gs://bucket_name/object_name". See [Viewing and
    // Editing Object
    // Metadata](https://cloud.google.com/storage/docs/viewing-editing-metadata)
    // for more information.
    string uri = 1;

    // The URI prefix of all generated Cloud Storage objects. Example:
    // "gs://bucket_name/object_name_prefix". Each object URI is in format:
    // "gs://bucket_name/object_name_prefix/{ASSET_TYPE}/{SHARD_NUMBER}" and
    // only contains assets for that type. {SHARD_NUMBER} starts from 0.
    // Example:
    // "gs://bucket_name/object_name_prefix/compute.googleapis.com/Disk/0" is
    // the first shard of output objects containing all
    // compute.googleapis.com/Disk assets. An INVALID_ARGUMENT error will be
    // returned if file with the same name "gs://bucket_name/object_name_prefix"
    // already exists.
    string uri_prefix = 2;
  }
}

// A BigQuery destination for exporting assets to.
message BigQueryDestination {
  // Required. The BigQuery dataset in format
  // "projects/projectId/datasets/datasetId", to which the snapshot result
  // should be exported. If this dataset does not exist, the export call returns
  // an INVALID_ARGUMENT error.
  string dataset = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The BigQuery table to which the snapshot result should be
  // written. If this table does not exist, a new table with the given name
  // will be created.
  string table = 2 [(google.api.field_behavior) = REQUIRED];

  // If the destination table already exists and this flag is `TRUE`, the
  // table will be overwritten by the contents of assets snapshot. If the flag
  // is `FALSE` or unset and the destination table already exists, the export
  // call returns an INVALID_ARGUMENT error.
  bool force = 3;

  // [partition_spec] determines whether to export to partitioned table(s) and
  // how to partition the data.
  //
  // If [partition_spec] is unset or [partition_spec.partition_key] is unset or
  // `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
  // non-partitioned table(s). [force] will decide whether to overwrite existing
  // table(s).
  //
  // If [partition_spec] is specified: first, the snapshot results will be
  // written to partitioned table(s) with two additional timestamp columns,
  // readTime and requestTime, one of which will be the partition key. Secondly,
  // in the case when any destination table already exists, it will first try to
  // update existing table's schema as necessary by appending additional
  // columns. Then, if [force] is `TRUE`, the corresponding partition will be
  // overwritten by the snapshot results (data in different partitions will
  // remain intact); if [force] is unset or `FALSE`, it will append the data. An
  // error will be returned if the schema update or data append operation fails.
  PartitionSpec partition_spec = 4;

  // If this flag is `TRUE`, the snapshot results will be written to one or
  // multiple tables, each of which contains results of one asset type. The
  // [force] and [partition_spec] fields will apply to each of them.
  //
  // Field [table] will be concatenated with "_" and the asset type names (see
  // https://cloud.google.com/asset-inventory/docs/supported-asset-types for
  // supported asset types) to construct per-asset-type table names, in which
  // all non-alphanumeric characters like "." and "/" will be substituted by
  // "_". Example: if field [table] is "mytable" and snapshot results
  // contain "storage.googleapis.com/Bucket" assets, the corresponding table
  // name will be "mytable_storage_googleapis_com_Bucket". If any of these
  // tables does not exist, a new table with the concatenated name will be
  // created.
  //
  // When [content_type] in the ExportAssetsRequest is `RESOURCE`, the schema of
  // each table will include RECORD-type columns mapped to the nested fields in
  // the Asset.resource.data field of that asset type (up to the 15 nested level
  // BigQuery supports
  // (https://cloud.google.com/bigquery/docs/nested-repeated#limitations)). The
  // fields in >15 nested levels will be stored in JSON format string as a child
  // column of its parent RECORD column.
  //
  // If error occurs when exporting to any table, the whole export call will
  // return an error but the export results that already succeed will persist.
  // Example: if exporting to table_type_A succeeds when exporting to
  // table_type_B fails during one export call, the results in table_type_A will
  // persist and there will not be partial results persisting in a table.
  bool separate_tables_per_asset_type = 5;
}

// Specifications of BigQuery partitioned table as export destination.
message PartitionSpec {
  // This enum is used to determine the partition key column when exporting
  // assets to BigQuery partitioned table(s). Note that, if the partition key is
  // a timestamp column, the actual partition is based on its date value
  // (expressed in UTC. see details in
  // https://cloud.google.com/bigquery/docs/partitioned-tables#date_timestamp_partitioned_tables).
  enum PartitionKey {
    // Unspecified partition key. If used, it means using non-partitioned table.
    PARTITION_KEY_UNSPECIFIED = 0;

    // The time when the snapshot is taken. If specified as partition key, the
    // result table(s) is partitioned by the additional timestamp column,
    // readTime. If [read_time] in ExportAssetsRequest is specified, the
    // readTime column's value will be the same as it. Otherwise, its value will
    // be the current time that is used to take the snapshot.
    READ_TIME = 1;

    // The time when the request is received and started to be processed. If
    // specified as partition key, the result table(s) is partitioned by the
    // requestTime column, an additional timestamp column representing when the
    // request was received.
    REQUEST_TIME = 2;
  }

  // The partition key for BigQuery partitioned table.
  PartitionKey partition_key = 1;
}

// Asset content type.
enum ContentType {
  // Unspecified content type.
  CONTENT_TYPE_UNSPECIFIED = 0;

  // Resource metadata.
  RESOURCE = 1;

  // The actual IAM policy set on a resource.
  IAM_POLICY = 2;

  // The organization policy set on an asset.
  ORG_POLICY = 4;

  // The Access Context Manager policy set on an asset.
  ACCESS_POLICY = 5;

  // The related resources.
  RELATIONSHIP = 7;
}