1// Copyright 2022 Google LLC 2// 3// Licensed under the Apache License, Version 2.0 (the "License"); 4// you may not use this file except in compliance with the License. 5// You may obtain a copy of the License at 6// 7// http://www.apache.org/licenses/LICENSE-2.0 8// 9// Unless required by applicable law or agreed to in writing, software 10// distributed under the License is distributed on an "AS IS" BASIS, 11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12// See the License for the specific language governing permissions and 13// limitations under the License. 14 15syntax = "proto3"; 16 17package google.cloud.visionai.v1alpha1; 18 19import "google/api/annotations.proto"; 20import "google/api/client.proto"; 21import "google/api/field_behavior.proto"; 22import "google/api/resource.proto"; 23import "google/cloud/visionai/v1alpha1/annotations.proto"; 24import "google/cloud/visionai/v1alpha1/common.proto"; 25import "google/longrunning/operations.proto"; 26import "google/protobuf/duration.proto"; 27import "google/protobuf/field_mask.proto"; 28import "google/protobuf/timestamp.proto"; 29 30option csharp_namespace = "Google.Cloud.VisionAI.V1Alpha1"; 31option go_package = "cloud.google.com/go/visionai/apiv1alpha1/visionaipb;visionaipb"; 32option java_multiple_files = true; 33option java_outer_classname = "PlatformProto"; 34option java_package = "com.google.cloud.visionai.v1alpha1"; 35option php_namespace = "Google\\Cloud\\VisionAI\\V1alpha1"; 36option ruby_package = "Google::Cloud::VisionAI::V1alpha1"; 37 38// Service describing handlers for resources 39service AppPlatform { 40 option (google.api.default_host) = "visionai.googleapis.com"; 41 option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; 42 43 // Lists Applications in a given project and location. 44 rpc ListApplications(ListApplicationsRequest) returns (ListApplicationsResponse) { 45 option (google.api.http) = { 46 get: "/v1alpha1/{parent=projects/*/locations/*}/applications" 47 }; 48 option (google.api.method_signature) = "parent"; 49 } 50 51 // Gets details of a single Application. 52 rpc GetApplication(GetApplicationRequest) returns (Application) { 53 option (google.api.http) = { 54 get: "/v1alpha1/{name=projects/*/locations/*/applications/*}" 55 }; 56 option (google.api.method_signature) = "name"; 57 } 58 59 // Creates a new Application in a given project and location. 60 rpc CreateApplication(CreateApplicationRequest) returns (google.longrunning.Operation) { 61 option (google.api.http) = { 62 post: "/v1alpha1/{parent=projects/*/locations/*}/applications" 63 body: "application" 64 }; 65 option (google.api.method_signature) = "parent,application"; 66 option (google.longrunning.operation_info) = { 67 response_type: "Application" 68 metadata_type: "OperationMetadata" 69 }; 70 } 71 72 // Updates the parameters of a single Application. 73 rpc UpdateApplication(UpdateApplicationRequest) returns (google.longrunning.Operation) { 74 option (google.api.http) = { 75 patch: "/v1alpha1/{application.name=projects/*/locations/*/applications/*}" 76 body: "application" 77 }; 78 option (google.api.method_signature) = "application,update_mask"; 79 option (google.longrunning.operation_info) = { 80 response_type: "Application" 81 metadata_type: "OperationMetadata" 82 }; 83 } 84 85 // Deletes a single Application. 86 rpc DeleteApplication(DeleteApplicationRequest) returns (google.longrunning.Operation) { 87 option (google.api.http) = { 88 delete: "/v1alpha1/{name=projects/*/locations/*/applications/*}" 89 }; 90 option (google.api.method_signature) = "name"; 91 option (google.longrunning.operation_info) = { 92 response_type: "google.protobuf.Empty" 93 metadata_type: "OperationMetadata" 94 }; 95 } 96 97 // Deploys a single Application. 98 rpc DeployApplication(DeployApplicationRequest) returns (google.longrunning.Operation) { 99 option (google.api.http) = { 100 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:deploy" 101 body: "*" 102 }; 103 option (google.api.method_signature) = "name"; 104 option (google.longrunning.operation_info) = { 105 response_type: "DeployApplicationResponse" 106 metadata_type: "OperationMetadata" 107 }; 108 } 109 110 // Undeploys a single Application. 111 rpc UndeployApplication(UndeployApplicationRequest) returns (google.longrunning.Operation) { 112 option (google.api.http) = { 113 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:undeploy" 114 body: "*" 115 }; 116 option (google.api.method_signature) = "name"; 117 option (google.longrunning.operation_info) = { 118 response_type: "UndeployApplicationResponse" 119 metadata_type: "OperationMetadata" 120 }; 121 } 122 123 // Adds target stream input to the Application. 124 // If the Application is deployed, the corresponding new Application instance 125 // will be created. If the stream has already been in the Application, the RPC 126 // will fail. 127 rpc AddApplicationStreamInput(AddApplicationStreamInputRequest) returns (google.longrunning.Operation) { 128 option (google.api.http) = { 129 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:addStreamInput" 130 body: "*" 131 }; 132 option (google.api.method_signature) = "name"; 133 option (google.longrunning.operation_info) = { 134 response_type: "AddApplicationStreamInputResponse" 135 metadata_type: "OperationMetadata" 136 }; 137 } 138 139 // Remove target stream input to the Application, if the Application is 140 // deployed, the corresponding instance based will be deleted. If the stream 141 // is not in the Application, the RPC will fail. 142 rpc RemoveApplicationStreamInput(RemoveApplicationStreamInputRequest) returns (google.longrunning.Operation) { 143 option (google.api.http) = { 144 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:removeStreamInput" 145 body: "*" 146 }; 147 option (google.api.method_signature) = "name"; 148 option (google.longrunning.operation_info) = { 149 response_type: "RemoveApplicationStreamInputResponse" 150 metadata_type: "OperationMetadata" 151 }; 152 } 153 154 // Update target stream input to the Application, if the Application is 155 // deployed, the corresponding instance based will be deployed. For 156 // CreateOrUpdate behavior, set allow_missing to true. 157 rpc UpdateApplicationStreamInput(UpdateApplicationStreamInputRequest) returns (google.longrunning.Operation) { 158 option (google.api.http) = { 159 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:updateStreamInput" 160 body: "*" 161 }; 162 option (google.api.method_signature) = "name"; 163 option (google.longrunning.operation_info) = { 164 response_type: "UpdateApplicationStreamInputResponse" 165 metadata_type: "OperationMetadata" 166 }; 167 } 168 169 // Lists Instances in a given project and location. 170 rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { 171 option (google.api.http) = { 172 get: "/v1alpha1/{parent=projects/*/locations/*/applications/*}/instances" 173 }; 174 option (google.api.method_signature) = "parent"; 175 } 176 177 // Gets details of a single Instance. 178 rpc GetInstance(GetInstanceRequest) returns (Instance) { 179 option (google.api.http) = { 180 get: "/v1alpha1/{name=projects/*/locations/*/applications/*/instances/*}" 181 }; 182 option (google.api.method_signature) = "name"; 183 } 184 185 // Adds target stream input to the Application. 186 // If the Application is deployed, the corresponding new Application instance 187 // will be created. If the stream has already been in the Application, the RPC 188 // will fail. 189 rpc CreateApplicationInstances(CreateApplicationInstancesRequest) returns (google.longrunning.Operation) { 190 option (google.api.http) = { 191 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:createApplicationInstances" 192 body: "*" 193 }; 194 option (google.api.method_signature) = "name"; 195 option (google.longrunning.operation_info) = { 196 response_type: "CreateApplicationInstancesResponse" 197 metadata_type: "OperationMetadata" 198 }; 199 } 200 201 // Remove target stream input to the Application, if the Application is 202 // deployed, the corresponding instance based will be deleted. If the stream 203 // is not in the Application, the RPC will fail. 204 rpc DeleteApplicationInstances(DeleteApplicationInstancesRequest) returns (google.longrunning.Operation) { 205 option (google.api.http) = { 206 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:deleteApplicationInstances" 207 body: "*" 208 }; 209 option (google.api.method_signature) = "name"; 210 option (google.longrunning.operation_info) = { 211 response_type: "Instance" 212 metadata_type: "OperationMetadata" 213 }; 214 } 215 216 // Adds target stream input to the Application. 217 // If the Application is deployed, the corresponding new Application instance 218 // will be created. If the stream has already been in the Application, the RPC 219 // will fail. 220 rpc UpdateApplicationInstances(UpdateApplicationInstancesRequest) returns (google.longrunning.Operation) { 221 option (google.api.http) = { 222 post: "/v1alpha1/{name=projects/*/locations/*/applications/*}:updateApplicationInstances" 223 body: "*" 224 }; 225 option (google.api.method_signature) = "name, application_instances"; 226 option (google.longrunning.operation_info) = { 227 response_type: "UpdateApplicationInstancesResponse" 228 metadata_type: "OperationMetadata" 229 }; 230 } 231 232 // Lists Drafts in a given project and location. 233 rpc ListDrafts(ListDraftsRequest) returns (ListDraftsResponse) { 234 option (google.api.http) = { 235 get: "/v1alpha1/{parent=projects/*/locations/*/applications/*}/drafts" 236 }; 237 option (google.api.method_signature) = "parent"; 238 } 239 240 // Gets details of a single Draft. 241 rpc GetDraft(GetDraftRequest) returns (Draft) { 242 option (google.api.http) = { 243 get: "/v1alpha1/{name=projects/*/locations/*/applications/*/drafts/*}" 244 }; 245 option (google.api.method_signature) = "name"; 246 } 247 248 // Creates a new Draft in a given project and location. 249 rpc CreateDraft(CreateDraftRequest) returns (google.longrunning.Operation) { 250 option (google.api.http) = { 251 post: "/v1alpha1/{parent=projects/*/locations/*/applications/*}/drafts" 252 body: "draft" 253 }; 254 option (google.api.method_signature) = "parent,draft,draft_id"; 255 option (google.longrunning.operation_info) = { 256 response_type: "Draft" 257 metadata_type: "OperationMetadata" 258 }; 259 } 260 261 // Updates the parameters of a single Draft. 262 rpc UpdateDraft(UpdateDraftRequest) returns (google.longrunning.Operation) { 263 option (google.api.http) = { 264 patch: "/v1alpha1/{draft.name=projects/*/locations/*/applications/*/drafts/*}" 265 body: "draft" 266 }; 267 option (google.api.method_signature) = "draft,update_mask"; 268 option (google.longrunning.operation_info) = { 269 response_type: "Draft" 270 metadata_type: "OperationMetadata" 271 }; 272 } 273 274 // Deletes a single Draft. 275 rpc DeleteDraft(DeleteDraftRequest) returns (google.longrunning.Operation) { 276 option (google.api.http) = { 277 delete: "/v1alpha1/{name=projects/*/locations/*/applications/*/drafts/*}" 278 }; 279 option (google.api.method_signature) = "name"; 280 option (google.longrunning.operation_info) = { 281 response_type: "google.protobuf.Empty" 282 metadata_type: "OperationMetadata" 283 }; 284 } 285 286 // Lists Processors in a given project and location. 287 rpc ListProcessors(ListProcessorsRequest) returns (ListProcessorsResponse) { 288 option (google.api.http) = { 289 get: "/v1alpha1/{parent=projects/*/locations/*}/processors" 290 }; 291 option (google.api.method_signature) = "parent"; 292 } 293 294 // ListPrebuiltProcessors is a custom pass-through verb that Lists Prebuilt 295 // Processors. 296 rpc ListPrebuiltProcessors(ListPrebuiltProcessorsRequest) returns (ListPrebuiltProcessorsResponse) { 297 option (google.api.http) = { 298 post: "/v1alpha1/{parent=projects/*/locations/*}/processors:prebuilt" 299 body: "*" 300 }; 301 option (google.api.method_signature) = "parent"; 302 } 303 304 // Gets details of a single Processor. 305 rpc GetProcessor(GetProcessorRequest) returns (Processor) { 306 option (google.api.http) = { 307 get: "/v1alpha1/{name=projects/*/locations/*/processors/*}" 308 }; 309 option (google.api.method_signature) = "name"; 310 } 311 312 // Creates a new Processor in a given project and location. 313 rpc CreateProcessor(CreateProcessorRequest) returns (google.longrunning.Operation) { 314 option (google.api.http) = { 315 post: "/v1alpha1/{parent=projects/*/locations/*}/processors" 316 body: "processor" 317 }; 318 option (google.api.method_signature) = "parent,processor,processor_id"; 319 option (google.longrunning.operation_info) = { 320 response_type: "Processor" 321 metadata_type: "OperationMetadata" 322 }; 323 } 324 325 // Updates the parameters of a single Processor. 326 rpc UpdateProcessor(UpdateProcessorRequest) returns (google.longrunning.Operation) { 327 option (google.api.http) = { 328 patch: "/v1alpha1/{processor.name=projects/*/locations/*/processors/*}" 329 body: "processor" 330 }; 331 option (google.api.method_signature) = "processor,update_mask"; 332 option (google.longrunning.operation_info) = { 333 response_type: "Processor" 334 metadata_type: "OperationMetadata" 335 }; 336 } 337 338 // Deletes a single Processor. 339 rpc DeleteProcessor(DeleteProcessorRequest) returns (google.longrunning.Operation) { 340 option (google.api.http) = { 341 delete: "/v1alpha1/{name=projects/*/locations/*/processors/*}" 342 }; 343 option (google.api.method_signature) = "name"; 344 option (google.longrunning.operation_info) = { 345 response_type: "google.protobuf.Empty" 346 metadata_type: "OperationMetadata" 347 }; 348 } 349} 350 351// All the supported model types in Vision AI App Platform. 352enum ModelType { 353 // Processor Type UNSPECIFIED. 354 MODEL_TYPE_UNSPECIFIED = 0; 355 356 // Model Type Image Classification. 357 IMAGE_CLASSIFICATION = 1; 358 359 // Model Type Object Detection. 360 OBJECT_DETECTION = 2; 361 362 // Model Type Video Classification. 363 VIDEO_CLASSIFICATION = 3; 364 365 // Model Type Object Tracking. 366 VIDEO_OBJECT_TRACKING = 4; 367 368 // Model Type Action Recognition. 369 VIDEO_ACTION_RECOGNITION = 5; 370 371 // Model Type Occupancy Counting. 372 OCCUPANCY_COUNTING = 6; 373 374 // Model Type Person Blur. 375 PERSON_BLUR = 7; 376 377 // Model Type Vertex Custom. 378 VERTEX_CUSTOM = 8; 379} 380 381// Represents a hardware accelerator type. 382enum AcceleratorType { 383 // Unspecified accelerator type, which means no accelerator. 384 ACCELERATOR_TYPE_UNSPECIFIED = 0; 385 386 // Nvidia Tesla K80 GPU. 387 NVIDIA_TESLA_K80 = 1; 388 389 // Nvidia Tesla P100 GPU. 390 NVIDIA_TESLA_P100 = 2; 391 392 // Nvidia Tesla V100 GPU. 393 NVIDIA_TESLA_V100 = 3; 394 395 // Nvidia Tesla P4 GPU. 396 NVIDIA_TESLA_P4 = 4; 397 398 // Nvidia Tesla T4 GPU. 399 NVIDIA_TESLA_T4 = 5; 400 401 // Nvidia Tesla A100 GPU. 402 NVIDIA_TESLA_A100 = 8; 403 404 // TPU v2. 405 TPU_V2 = 6; 406 407 // TPU v3. 408 TPU_V3 = 7; 409} 410 411// Message for DeleteApplicationInstance Response. 412message DeleteApplicationInstancesResponse { 413 414} 415 416// Message for CreateApplicationInstance Response. 417message CreateApplicationInstancesResponse { 418 419} 420 421// Message for UpdateApplicationInstances Response. 422message UpdateApplicationInstancesResponse { 423 424} 425 426// Message for adding stream input to an Application. 427message CreateApplicationInstancesRequest { 428 // Required. the name of the application to retrieve. 429 // Format: 430 // "projects/{project}/locations/{location}/applications/{application}" 431 string name = 1 [ 432 (google.api.field_behavior) = REQUIRED, 433 (google.api.resource_reference) = { 434 type: "visionai.googleapis.com/Application" 435 } 436 ]; 437 438 // Required. The resources being created. 439 repeated ApplicationInstance application_instances = 2 [(google.api.field_behavior) = REQUIRED]; 440 441 // Optional. An optional request ID to identify requests. Specify a unique request ID 442 // so that if you must retry your request, the server will know to ignore 443 // the request if it has already been completed. The server will guarantee 444 // that for at least 60 minutes since the first request. 445 // 446 // For example, consider a situation where you make an initial request and t 447 // he request times out. If you make the request again with the same request 448 // ID, the server can check if original operation with the same request ID 449 // was received, and if so, will ignore the second request. This prevents 450 // clients from accidentally creating duplicate commitments. 451 // 452 // The request ID must be a valid UUID with the exception that zero UUID is 453 // not supported (00000000-0000-0000-0000-000000000000). 454 string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; 455} 456 457// Message for removing stream input from an Application. 458message DeleteApplicationInstancesRequest { 459 // Required. the name of the application to retrieve. 460 // Format: 461 // "projects/{project}/locations/{location}/applications/{application}" 462 string name = 1 [ 463 (google.api.field_behavior) = REQUIRED, 464 (google.api.resource_reference) = { 465 type: "visionai.googleapis.com/Application" 466 } 467 ]; 468 469 // Required. Id of the requesting object. 470 repeated string instance_ids = 2 [ 471 (google.api.field_behavior) = REQUIRED, 472 (google.api.resource_reference) = { 473 type: "visionai.googleapis.com/Instance" 474 } 475 ]; 476 477 // Optional. An optional request ID to identify requests. Specify a unique request ID 478 // so that if you must retry your request, the server will know to ignore 479 // the request if it has already been completed. The server will guarantee 480 // that for at least 60 minutes since the first request. 481 // 482 // For example, consider a situation where you make an initial request and t 483 // he request times out. If you make the request again with the same request 484 // ID, the server can check if original operation with the same request ID 485 // was received, and if so, will ignore the second request. This prevents 486 // clients from accidentally creating duplicate commitments. 487 // 488 // The request ID must be a valid UUID with the exception that zero UUID is 489 // not supported (00000000-0000-0000-0000-000000000000). 490 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 491} 492 493// RPC Request Messages. 494// Message for DeployApplication Response. 495message DeployApplicationResponse { 496 497} 498 499// Message for UndeployApplication Response. 500message UndeployApplicationResponse { 501 502} 503 504// Message for RemoveApplicationStreamInput Response. 505message RemoveApplicationStreamInputResponse { 506 507} 508 509// Message for AddApplicationStreamInput Response. 510message AddApplicationStreamInputResponse { 511 512} 513 514// Message for AddApplicationStreamInput Response. 515message UpdateApplicationStreamInputResponse { 516 517} 518 519// Message for requesting list of Applications. 520message ListApplicationsRequest { 521 // Required. Parent value for ListApplicationsRequest. 522 string parent = 1 [ 523 (google.api.field_behavior) = REQUIRED, 524 (google.api.resource_reference) = { 525 child_type: "visionai.googleapis.com/Application" 526 } 527 ]; 528 529 // Requested page size. Server may return fewer items than requested. 530 // If unspecified, server will pick an appropriate default. 531 int32 page_size = 2; 532 533 // A token identifying a page of results the server should return. 534 string page_token = 3; 535 536 // Filtering results. 537 string filter = 4; 538 539 // Hint for how to order the results. 540 string order_by = 5; 541} 542 543// Message for response to listing Applications. 544message ListApplicationsResponse { 545 // The list of Application. 546 repeated Application applications = 1; 547 548 // A token identifying a page of results the server should return. 549 string next_page_token = 2; 550 551 // Locations that could not be reached. 552 repeated string unreachable = 3; 553} 554 555// Message for getting a Application. 556message GetApplicationRequest { 557 // Required. Name of the resource. 558 string name = 1 [ 559 (google.api.field_behavior) = REQUIRED, 560 (google.api.resource_reference) = { 561 type: "visionai.googleapis.com/Application" 562 } 563 ]; 564} 565 566// Message for creating a Application. 567message CreateApplicationRequest { 568 // Required. Value for parent. 569 string parent = 1 [ 570 (google.api.field_behavior) = REQUIRED, 571 (google.api.resource_reference) = { 572 child_type: "visionai.googleapis.com/Application" 573 } 574 ]; 575 576 // Required. Id of the requesting object. 577 string application_id = 2 [(google.api.field_behavior) = REQUIRED]; 578 579 // Required. The resource being created. 580 Application application = 3 [(google.api.field_behavior) = REQUIRED]; 581 582 // Optional. An optional request ID to identify requests. Specify a unique request ID 583 // so that if you must retry your request, the server will know to ignore 584 // the request if it has already been completed. The server will guarantee 585 // that for at least 60 minutes since the first request. 586 // 587 // For example, consider a situation where you make an initial request and t 588 // he request times out. If you make the request again with the same request 589 // ID, the server can check if original operation with the same request ID 590 // was received, and if so, will ignore the second request. This prevents 591 // clients from accidentally creating duplicate commitments. 592 // 593 // The request ID must be a valid UUID with the exception that zero UUID is 594 // not supported (00000000-0000-0000-0000-000000000000). 595 string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; 596} 597 598// Message for updating an Application. 599message UpdateApplicationRequest { 600 // Optional. Field mask is used to specify the fields to be overwritten in the 601 // Application resource by the update. 602 // The fields specified in the update_mask are relative to the resource, not 603 // the full request. A field will be overwritten if it is in the mask. If the 604 // user does not provide a mask then all fields will be overwritten. 605 google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = OPTIONAL]; 606 607 // Required. The resource being updated. 608 Application application = 2 [(google.api.field_behavior) = REQUIRED]; 609 610 // Optional. An optional request ID to identify requests. Specify a unique request ID 611 // so that if you must retry your request, the server will know to ignore 612 // the request if it has already been completed. The server will guarantee 613 // that for at least 60 minutes since the first request. 614 // 615 // For example, consider a situation where you make an initial request and t 616 // he request times out. If you make the request again with the same request 617 // ID, the server can check if original operation with the same request ID 618 // was received, and if so, will ignore the second request. This prevents 619 // clients from accidentally creating duplicate commitments. 620 // 621 // The request ID must be a valid UUID with the exception that zero UUID is 622 // not supported (00000000-0000-0000-0000-000000000000). 623 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 624} 625 626// Message for deleting an Application. 627message DeleteApplicationRequest { 628 // Required. Name of the resource. 629 string name = 1 [ 630 (google.api.field_behavior) = REQUIRED, 631 (google.api.resource_reference) = { 632 type: "visionai.googleapis.com/Application" 633 } 634 ]; 635 636 // Optional. An optional request ID to identify requests. Specify a unique request ID 637 // so that if you must retry your request, the server will know to ignore 638 // the request if it has already been completed. The server will guarantee 639 // that for at least 60 minutes after the first request. 640 // 641 // For example, consider a situation where you make an initial request and t 642 // he request times out. If you make the request again with the same request 643 // ID, the server can check if original operation with the same request ID 644 // was received, and if so, will ignore the second request. This prevents 645 // clients from accidentally creating duplicate commitments. 646 // 647 // The request ID must be a valid UUID with the exception that zero UUID is 648 // not supported (00000000-0000-0000-0000-000000000000). 649 string request_id = 2 [(google.api.field_behavior) = OPTIONAL]; 650 651 // Optional. If set to true, any instances and drafts from this application will also be 652 // deleted. (Otherwise, the request will only work if the application has no 653 // instances and drafts.) 654 bool force = 3 [(google.api.field_behavior) = OPTIONAL]; 655} 656 657// Message for deploying an Application. 658message DeployApplicationRequest { 659 // Required. the name of the application to retrieve. 660 // Format: 661 // "projects/{project}/locations/{location}/applications/{application}" 662 string name = 1 [ 663 (google.api.field_behavior) = REQUIRED, 664 (google.api.resource_reference) = { 665 type: "visionai.googleapis.com/Application" 666 } 667 ]; 668 669 // If set, validate the request and preview the application graph, but do not 670 // actually deploy it. 671 bool validate_only = 2; 672 673 // Optional. An optional request ID to identify requests. Specify a unique request ID 674 // so that if you must retry your request, the server will know to ignore 675 // the request if it has already been completed. The server will guarantee 676 // that for at least 60 minutes since the first request. 677 // 678 // For example, consider a situation where you make an initial request and t 679 // he request times out. If you make the request again with the same request 680 // ID, the server can check if original operation with the same request ID 681 // was received, and if so, will ignore the second request. This prevents 682 // clients from accidentally creating duplicate commitments. 683 // 684 // The request ID must be a valid UUID with the exception that zero UUID is 685 // not supported (00000000-0000-0000-0000-000000000000). 686 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 687 688 // Optional. Whether or not to enable monitoring for the application on deployment. 689 bool enable_monitoring = 4 [(google.api.field_behavior) = OPTIONAL]; 690} 691 692// Message for undeploying an Application. 693message UndeployApplicationRequest { 694 // Required. the name of the application to retrieve. 695 // Format: 696 // "projects/{project}/locations/{location}/applications/{application}" 697 string name = 1 [ 698 (google.api.field_behavior) = REQUIRED, 699 (google.api.resource_reference) = { 700 type: "visionai.googleapis.com/Application" 701 } 702 ]; 703 704 // Optional. An optional request ID to identify requests. Specify a unique request ID 705 // so that if you must retry your request, the server will know to ignore 706 // the request if it has already been completed. The server will guarantee 707 // that for at least 60 minutes since the first request. 708 // 709 // For example, consider a situation where you make an initial request and t 710 // he request times out. If you make the request again with the same request 711 // ID, the server can check if original operation with the same request ID 712 // was received, and if so, will ignore the second request. This prevents 713 // clients from accidentally creating duplicate commitments. 714 // 715 // The request ID must be a valid UUID with the exception that zero UUID is 716 // not supported (00000000-0000-0000-0000-000000000000). 717 string request_id = 2 [(google.api.field_behavior) = OPTIONAL]; 718} 719 720// Message about a single stream input config. 721message ApplicationStreamInput { 722 StreamWithAnnotation stream_with_annotation = 1; 723} 724 725// Message for adding stream input to an Application. 726message AddApplicationStreamInputRequest { 727 // Required. the name of the application to retrieve. 728 // Format: 729 // "projects/{project}/locations/{location}/applications/{application}" 730 string name = 1 [ 731 (google.api.field_behavior) = REQUIRED, 732 (google.api.resource_reference) = { 733 type: "visionai.googleapis.com/Application" 734 } 735 ]; 736 737 // The stream inputs to add, the stream resource name is the key of each 738 // StreamInput, and it must be unique within each application. 739 repeated ApplicationStreamInput application_stream_inputs = 2; 740 741 // Optional. An optional request ID to identify requests. Specify a unique request ID 742 // so that if you must retry your request, the server will know to ignore 743 // the request if it has already been completed. The server will guarantee 744 // that for at least 60 minutes since the first request. 745 // 746 // For example, consider a situation where you make an initial request and t 747 // he request times out. If you make the request again with the same request 748 // ID, the server can check if original operation with the same request ID 749 // was received, and if so, will ignore the second request. This prevents 750 // clients from accidentally creating duplicate commitments. 751 // 752 // The request ID must be a valid UUID with the exception that zero UUID is 753 // not supported (00000000-0000-0000-0000-000000000000). 754 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 755} 756 757// Message for updating stream input to an Application. 758message UpdateApplicationStreamInputRequest { 759 // Required. the name of the application to retrieve. 760 // Format: 761 // "projects/{project}/locations/{location}/applications/{application}" 762 string name = 1 [ 763 (google.api.field_behavior) = REQUIRED, 764 (google.api.resource_reference) = { 765 type: "visionai.googleapis.com/Application" 766 } 767 ]; 768 769 // The stream inputs to update, the stream resource name is the key of each 770 // StreamInput, and it must be unique within each application. 771 repeated ApplicationStreamInput application_stream_inputs = 2; 772 773 // Optional. An optional request ID to identify requests. Specify a unique request ID 774 // so that if you must retry your request, the server will know to ignore 775 // the request if it has already been completed. The server will guarantee 776 // that for at least 60 minutes since the first request. 777 // 778 // For example, consider a situation where you make an initial request and t 779 // he request times out. If you make the request again with the same request 780 // ID, the server can check if original operation with the same request ID 781 // was received, and if so, will ignore the second request. This prevents 782 // clients from accidentally creating duplicate commitments. 783 // 784 // The request ID must be a valid UUID with the exception that zero UUID is 785 // not supported (00000000-0000-0000-0000-000000000000). 786 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 787 788 // If true, UpdateApplicationStreamInput will insert stream input to 789 // application even if the target stream is not included in the application. 790 bool allow_missing = 4; 791} 792 793// Message for removing stream input from an Application. 794message RemoveApplicationStreamInputRequest { 795 // Message about target streamInput to remove. 796 message TargetStreamInput { 797 string stream = 1 [(google.api.resource_reference) = { 798 type: "visionai.googleapis.com/Stream" 799 }]; 800 } 801 802 // Required. the name of the application to retrieve. 803 // Format: 804 // "projects/{project}/locations/{location}/applications/{application}" 805 string name = 1 [ 806 (google.api.field_behavior) = REQUIRED, 807 (google.api.resource_reference) = { 808 type: "visionai.googleapis.com/Application" 809 } 810 ]; 811 812 // The target stream to remove. 813 repeated TargetStreamInput target_stream_inputs = 2; 814 815 // Optional. An optional request ID to identify requests. Specify a unique request ID 816 // so that if you must retry your request, the server will know to ignore 817 // the request if it has already been completed. The server will guarantee 818 // that for at least 60 minutes since the first request. 819 // 820 // For example, consider a situation where you make an initial request and t 821 // he request times out. If you make the request again with the same request 822 // ID, the server can check if original operation with the same request ID 823 // was received, and if so, will ignore the second request. This prevents 824 // clients from accidentally creating duplicate commitments. 825 // 826 // The request ID must be a valid UUID with the exception that zero UUID is 827 // not supported (00000000-0000-0000-0000-000000000000). 828 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 829} 830 831// Message for requesting list of Instances. 832message ListInstancesRequest { 833 // Required. Parent value for ListInstancesRequest. 834 string parent = 1 [ 835 (google.api.field_behavior) = REQUIRED, 836 (google.api.resource_reference) = { 837 child_type: "visionai.googleapis.com/Instance" 838 } 839 ]; 840 841 // Requested page size. Server may return fewer items than requested. 842 // If unspecified, server will pick an appropriate default. 843 int32 page_size = 2; 844 845 // A token identifying a page of results the server should return. 846 string page_token = 3; 847 848 // Filtering results. 849 string filter = 4; 850 851 // Hint for how to order the results. 852 string order_by = 5; 853} 854 855// Message for response to listing Instances. 856message ListInstancesResponse { 857 // The list of Instance. 858 repeated Instance instances = 1; 859 860 // A token identifying a page of results the server should return. 861 string next_page_token = 2; 862 863 // Locations that could not be reached. 864 repeated string unreachable = 3; 865} 866 867// Message for getting a Instance. 868message GetInstanceRequest { 869 // Required. Name of the resource. 870 string name = 1 [ 871 (google.api.field_behavior) = REQUIRED, 872 (google.api.resource_reference) = { 873 type: "visionai.googleapis.com/Instance" 874 } 875 ]; 876} 877 878// Message for requesting list of Drafts. 879message ListDraftsRequest { 880 // Required. Parent value for ListDraftsRequest. 881 string parent = 1 [ 882 (google.api.field_behavior) = REQUIRED, 883 (google.api.resource_reference) = { 884 child_type: "visionai.googleapis.com/Draft" 885 } 886 ]; 887 888 // Requested page size. Server may return fewer items than requested. 889 // If unspecified, server will pick an appropriate default. 890 int32 page_size = 2; 891 892 // A token identifying a page of results the server should return. 893 string page_token = 3; 894 895 // Filtering results. 896 string filter = 4; 897 898 // Hint for how to order the results. 899 string order_by = 5; 900} 901 902// Message for response to listing Drafts. 903message ListDraftsResponse { 904 // The list of Draft. 905 repeated Draft drafts = 1; 906 907 // A token identifying a page of results the server should return. 908 string next_page_token = 2; 909 910 // Locations that could not be reached. 911 repeated string unreachable = 3; 912} 913 914// Message for getting a Draft. 915message GetDraftRequest { 916 // Required. Name of the resource. 917 string name = 1 [ 918 (google.api.field_behavior) = REQUIRED, 919 (google.api.resource_reference) = { 920 type: "visionai.googleapis.com/Draft" 921 } 922 ]; 923} 924 925// Message for creating a Draft. 926message CreateDraftRequest { 927 // Required. Value for parent. 928 string parent = 1 [ 929 (google.api.field_behavior) = REQUIRED, 930 (google.api.resource_reference) = { 931 child_type: "visionai.googleapis.com/Draft" 932 } 933 ]; 934 935 // Required. Id of the requesting object. 936 string draft_id = 2 [(google.api.field_behavior) = REQUIRED]; 937 938 // Required. The resource being created. 939 Draft draft = 3 [(google.api.field_behavior) = REQUIRED]; 940 941 // Optional. An optional request ID to identify requests. Specify a unique request ID 942 // so that if you must retry your request, the server will know to ignore 943 // the request if it has already been completed. The server will guarantee 944 // that for at least 60 minutes since the first request. 945 // 946 // For example, consider a situation where you make an initial request and t 947 // he request times out. If you make the request again with the same request 948 // ID, the server can check if original operation with the same request ID 949 // was received, and if so, will ignore the second request. This prevents 950 // clients from accidentally creating duplicate commitments. 951 // 952 // The request ID must be a valid UUID with the exception that zero UUID is 953 // not supported (00000000-0000-0000-0000-000000000000). 954 string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; 955} 956 957// Message for updating an Draft. 958message UpdateDraftRequest { 959 // Optional. Field mask is used to specify the fields to be overwritten in the 960 // Draft resource by the update. 961 // The fields specified in the update_mask are relative to the resource, not 962 // the full request. A field will be overwritten if it is in the mask. If the 963 // user does not provide a mask then all fields will be overwritten. 964 google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = OPTIONAL]; 965 966 // Required. The resource being updated. 967 Draft draft = 2 [(google.api.field_behavior) = REQUIRED]; 968 969 // Optional. An optional request ID to identify requests. Specify a unique request ID 970 // so that if you must retry your request, the server will know to ignore 971 // the request if it has already been completed. The server will guarantee 972 // that for at least 60 minutes since the first request. 973 // 974 // For example, consider a situation where you make an initial request and t 975 // he request times out. If you make the request again with the same request 976 // ID, the server can check if original operation with the same request ID 977 // was received, and if so, will ignore the second request. This prevents 978 // clients from accidentally creating duplicate commitments. 979 // 980 // The request ID must be a valid UUID with the exception that zero UUID is 981 // not supported (00000000-0000-0000-0000-000000000000). 982 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 983 984 // If true, UpdateDraftRequest will create one resource if the target resource 985 // doesn't exist, this time, the field_mask will be ignored. 986 bool allow_missing = 4; 987} 988 989// Message for updating an ApplicationInstance. 990message UpdateApplicationInstancesRequest { 991 message UpdateApplicationInstance { 992 // Optional. Field mask is used to specify the fields to be overwritten in the 993 // Draft resource by the update. 994 // The fields specified in the update_mask are relative to the resource, not 995 // the full request. A field will be overwritten if it is in the mask. If 996 // the user does not provide a mask then all fields will be overwritten. 997 google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = OPTIONAL]; 998 999 // Required. The resource being updated. 1000 Instance instance = 2 [(google.api.field_behavior) = REQUIRED]; 1001 1002 // Required. The id of the instance. 1003 string instance_id = 3 [(google.api.field_behavior) = REQUIRED]; 1004 } 1005 1006 // Required. the name of the application to retrieve. 1007 // Format: 1008 // "projects/{project}/locations/{location}/applications/{application}" 1009 string name = 1 [ 1010 (google.api.field_behavior) = REQUIRED, 1011 (google.api.resource_reference) = { 1012 type: "visionai.googleapis.com/Application" 1013 } 1014 ]; 1015 1016 repeated UpdateApplicationInstance application_instances = 2; 1017 1018 // Optional. An optional request ID to identify requests. Specify a unique request ID 1019 // so that if you must retry your request, the server will know to ignore 1020 // the request if it has already been completed. The server will guarantee 1021 // that for at least 60 minutes since the first request. 1022 // 1023 // For example, consider a situation where you make an initial request and t 1024 // he request times out. If you make the request again with the same request 1025 // ID, the server can check if original operation with the same request ID 1026 // was received, and if so, will ignore the second request. This prevents 1027 // clients from accidentally creating duplicate commitments. 1028 // 1029 // The request ID must be a valid UUID with the exception that zero UUID is 1030 // not supported (00000000-0000-0000-0000-000000000000). 1031 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 1032 1033 // If true, Update Request will create one resource if the target resource 1034 // doesn't exist, this time, the field_mask will be ignored. 1035 bool allow_missing = 4; 1036} 1037 1038// Message for deleting an Draft. 1039message DeleteDraftRequest { 1040 // Required. Name of the resource. 1041 string name = 1 [ 1042 (google.api.field_behavior) = REQUIRED, 1043 (google.api.resource_reference) = { 1044 type: "visionai.googleapis.com/Draft" 1045 } 1046 ]; 1047 1048 // Optional. An optional request ID to identify requests. Specify a unique request ID 1049 // so that if you must retry your request, the server will know to ignore 1050 // the request if it has already been completed. The server will guarantee 1051 // that for at least 60 minutes after the first request. 1052 // 1053 // For example, consider a situation where you make an initial request and t 1054 // he request times out. If you make the request again with the same request 1055 // ID, the server can check if original operation with the same request ID 1056 // was received, and if so, will ignore the second request. This prevents 1057 // clients from accidentally creating duplicate commitments. 1058 // 1059 // The request ID must be a valid UUID with the exception that zero UUID is 1060 // not supported (00000000-0000-0000-0000-000000000000). 1061 string request_id = 2 [(google.api.field_behavior) = OPTIONAL]; 1062} 1063 1064// Message for requesting list of Processors. 1065message ListProcessorsRequest { 1066 // Required. Parent value for ListProcessorsRequest. 1067 string parent = 1 [ 1068 (google.api.field_behavior) = REQUIRED, 1069 (google.api.resource_reference) = { 1070 child_type: "visionai.googleapis.com/Processor" 1071 } 1072 ]; 1073 1074 // Requested page size. Server may return fewer items than requested. 1075 // If unspecified, server will pick an appropriate default. 1076 int32 page_size = 2; 1077 1078 // A token identifying a page of results the server should return. 1079 string page_token = 3; 1080 1081 // Filtering results. 1082 string filter = 4; 1083 1084 // Hint for how to order the results. 1085 string order_by = 5; 1086} 1087 1088// Message for response to listing Processors. 1089message ListProcessorsResponse { 1090 // The list of Processor. 1091 repeated Processor processors = 1; 1092 1093 // A token identifying a page of results the server should return. 1094 string next_page_token = 2; 1095 1096 // Locations that could not be reached. 1097 repeated string unreachable = 3; 1098} 1099 1100// Request Message for listing Prebuilt Processors. 1101message ListPrebuiltProcessorsRequest { 1102 // Required. Parent path. 1103 string parent = 1 [ 1104 (google.api.field_behavior) = REQUIRED, 1105 (google.api.resource_reference) = { 1106 child_type: "visionai.googleapis.com/Processor" 1107 } 1108 ]; 1109} 1110 1111// Response Message for listing Prebuilt Processors. 1112message ListPrebuiltProcessorsResponse { 1113 // The list of Processor. 1114 repeated Processor processors = 1; 1115} 1116 1117// Message for getting a Processor. 1118message GetProcessorRequest { 1119 // Required. Name of the resource. 1120 string name = 1 [ 1121 (google.api.field_behavior) = REQUIRED, 1122 (google.api.resource_reference) = { 1123 type: "visionai.googleapis.com/Processor" 1124 } 1125 ]; 1126} 1127 1128// Message for creating a Processor. 1129message CreateProcessorRequest { 1130 // Required. Value for parent. 1131 string parent = 1 [ 1132 (google.api.field_behavior) = REQUIRED, 1133 (google.api.resource_reference) = { 1134 child_type: "visionai.googleapis.com/Processor" 1135 } 1136 ]; 1137 1138 // Required. Id of the requesting object. 1139 string processor_id = 2 [(google.api.field_behavior) = REQUIRED]; 1140 1141 // Required. The resource being created. 1142 Processor processor = 3 [(google.api.field_behavior) = REQUIRED]; 1143 1144 // Optional. An optional request ID to identify requests. Specify a unique request ID 1145 // so that if you must retry your request, the server will know to ignore 1146 // the request if it has already been completed. The server will guarantee 1147 // that for at least 60 minutes since the first request. 1148 // 1149 // For example, consider a situation where you make an initial request and t 1150 // he request times out. If you make the request again with the same request 1151 // ID, the server can check if original operation with the same request ID 1152 // was received, and if so, will ignore the second request. This prevents 1153 // clients from accidentally creating duplicate commitments. 1154 // 1155 // The request ID must be a valid UUID with the exception that zero UUID is 1156 // not supported (00000000-0000-0000-0000-000000000000). 1157 string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; 1158} 1159 1160// Message for updating a Processor. 1161message UpdateProcessorRequest { 1162 // Optional. Field mask is used to specify the fields to be overwritten in the 1163 // Processor resource by the update. 1164 // The fields specified in the update_mask are relative to the resource, not 1165 // the full request. A field will be overwritten if it is in the mask. If the 1166 // user does not provide a mask then all fields will be overwritten. 1167 google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = OPTIONAL]; 1168 1169 // Required. The resource being updated. 1170 Processor processor = 2 [(google.api.field_behavior) = REQUIRED]; 1171 1172 // Optional. An optional request ID to identify requests. Specify a unique request ID 1173 // so that if you must retry your request, the server will know to ignore 1174 // the request if it has already been completed. The server will guarantee 1175 // that for at least 60 minutes since the first request. 1176 // 1177 // For example, consider a situation where you make an initial request and t 1178 // he request times out. If you make the request again with the same request 1179 // ID, the server can check if original operation with the same request ID 1180 // was received, and if so, will ignore the second request. This prevents 1181 // clients from accidentally creating duplicate commitments. 1182 // 1183 // The request ID must be a valid UUID with the exception that zero UUID is 1184 // not supported (00000000-0000-0000-0000-000000000000). 1185 string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; 1186} 1187 1188// Message for deleting a Processor. 1189message DeleteProcessorRequest { 1190 // Required. Name of the resource 1191 string name = 1 [ 1192 (google.api.field_behavior) = REQUIRED, 1193 (google.api.resource_reference) = { 1194 type: "visionai.googleapis.com/Processor" 1195 } 1196 ]; 1197 1198 // Optional. An optional request ID to identify requests. Specify a unique request ID 1199 // so that if you must retry your request, the server will know to ignore 1200 // the request if it has already been completed. The server will guarantee 1201 // that for at least 60 minutes after the first request. 1202 // 1203 // For example, consider a situation where you make an initial request and t 1204 // he request times out. If you make the request again with the same request 1205 // ID, the server can check if original operation with the same request ID 1206 // was received, and if so, will ignore the second request. This prevents 1207 // clients from accidentally creating duplicate commitments. 1208 // 1209 // The request ID must be a valid UUID with the exception that zero UUID is 1210 // not supported (00000000-0000-0000-0000-000000000000). 1211 string request_id = 2 [(google.api.field_behavior) = OPTIONAL]; 1212} 1213 1214// Message describing Application object 1215message Application { 1216 option (google.api.resource) = { 1217 type: "visionai.googleapis.com/Application" 1218 pattern: "projects/{project}/locations/{location}/applications/{application}" 1219 style: DECLARATIVE_FRIENDLY 1220 }; 1221 1222 // Message storing the runtime information of the application. 1223 message ApplicationRuntimeInfo { 1224 // Message about output resources from application. 1225 message GlobalOutputResource { 1226 // The full resource name of the outputted resources. 1227 string output_resource = 1; 1228 1229 // The name of graph node who produces the output resource name. 1230 // For example: 1231 // output_resource: 1232 // /projects/123/locations/us-central1/corpora/my-corpus/dataSchemas/my-schema 1233 // producer_node: occupancy-count 1234 string producer_node = 2; 1235 1236 // The key of the output resource, it has to be unique within the same 1237 // producer node. One producer node can output several output resources, 1238 // the key can be used to match corresponding output resources. 1239 string key = 3; 1240 } 1241 1242 // Monitoring-related configuration for an application. 1243 message MonitoringConfig { 1244 // Whether this application has monitoring enabled. 1245 bool enabled = 1; 1246 } 1247 1248 // Timestamp when the engine be deployed 1249 google.protobuf.Timestamp deploy_time = 1; 1250 1251 // Globally created resources like warehouse dataschemas. 1252 repeated GlobalOutputResource global_output_resources = 3; 1253 1254 // Monitoring-related configuration for this application. 1255 MonitoringConfig monitoring_config = 4; 1256 } 1257 1258 // State of the Application 1259 enum State { 1260 // The default value. This value is used if the state is omitted. 1261 STATE_UNSPECIFIED = 0; 1262 1263 // State CREATED. 1264 CREATED = 1; 1265 1266 // State DEPLOYING. 1267 DEPLOYING = 2; 1268 1269 // State DEPLOYED. 1270 DEPLOYED = 3; 1271 1272 // State UNDEPLOYING. 1273 UNDEPLOYING = 4; 1274 1275 // State DELETED. 1276 DELETED = 5; 1277 1278 // State ERROR. 1279 ERROR = 6; 1280 1281 // State CREATING. 1282 CREATING = 7; 1283 1284 // State Updating. 1285 UPDATING = 8; 1286 1287 // State Deleting. 1288 DELETING = 9; 1289 1290 // State Fixing. 1291 FIXING = 10; 1292 } 1293 1294 // name of resource 1295 string name = 1; 1296 1297 // Output only. [Output only] Create timestamp 1298 google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; 1299 1300 // Output only. [Output only] Update timestamp 1301 google.protobuf.Timestamp update_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; 1302 1303 // Labels as key value pairs 1304 map<string, string> labels = 4; 1305 1306 // Required. A user friendly display name for the solution. 1307 string display_name = 5 [(google.api.field_behavior) = REQUIRED]; 1308 1309 // A description for this application. 1310 string description = 6; 1311 1312 // Application graph configuration. 1313 ApplicationConfigs application_configs = 7; 1314 1315 // Output only. Application graph runtime info. Only exists when application state equals 1316 // to DEPLOYED. 1317 ApplicationRuntimeInfo runtime_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; 1318 1319 // Output only. State of the application. 1320 State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; 1321} 1322 1323// Message storing the graph of the application. 1324message ApplicationConfigs { 1325 // message storing the config for event delivery 1326 message EventDeliveryConfig { 1327 // The delivery channel for the event notification, only pub/sub topic is 1328 // supported now. 1329 // Example channel: 1330 // [//pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic] 1331 string channel = 1; 1332 1333 // The expected delivery interval for the same event. The same event won't 1334 // be notified multiple times during this internal event that it is 1335 // happening multiple times during the period of time.The same event is 1336 // identified by <event_id, app_platform_metadata>. 1337 google.protobuf.Duration minimal_delivery_interval = 2; 1338 } 1339 1340 // A list of nodes in the application graph. 1341 repeated Node nodes = 1; 1342 1343 // Event-related configuration for this application. 1344 EventDeliveryConfig event_delivery_config = 3; 1345} 1346 1347// Message describing node object. 1348message Node { 1349 // Message describing one edge pointing into a node. 1350 message InputEdge { 1351 // The name of the parent node. 1352 string parent_node = 1; 1353 1354 // The connected output artifact of the parent node. 1355 // It can be omitted if target processor only has 1 output artifact. 1356 string parent_output_channel = 2; 1357 1358 // The connected input channel of the current node's processor. 1359 // It can be omitted if target processor only has 1 input channel. 1360 string connected_input_channel = 3; 1361 } 1362 1363 oneof stream_output_config { 1364 // By default, the output of the node will only be available to downstream 1365 // nodes. To consume the direct output from the application node, the output 1366 // must be sent to Vision AI Streams at first. 1367 // 1368 // By setting output_all_output_channels_to_stream to true, App Platform 1369 // will automatically send all the outputs of the current node to Vision AI 1370 // Stream resources (one stream per output channel). The output stream 1371 // resource will be created by App Platform automatically during deployment 1372 // and deleted after application un-deployment. 1373 // Note that this config applies to all the Application Instances. 1374 // 1375 // The output stream can be override at instance level by 1376 // configuring the `output_resources` section of Instance resource. 1377 // `producer_node` should be current node, `output_resource_binding` should 1378 // be the output channel name (or leave it blank if there is only 1 output 1379 // channel of the processor) and `output_resource` should be the target 1380 // output stream. 1381 bool output_all_output_channels_to_stream = 6; 1382 } 1383 1384 // Required. A unique name for the node. 1385 string name = 1 [(google.api.field_behavior) = REQUIRED]; 1386 1387 // A user friendly display name for the node. 1388 string display_name = 2; 1389 1390 // Node config. 1391 ProcessorConfig node_config = 3; 1392 1393 // Processor name refer to the chosen processor resource. 1394 string processor = 4; 1395 1396 // Parent node. Input node should not have parent node. For V1 Alpha1/Beta 1397 // only media warehouse node can have multiple parents, other types of nodes 1398 // will only have one parent. 1399 repeated InputEdge parents = 5; 1400} 1401 1402// Message describing Draft object 1403message Draft { 1404 option (google.api.resource) = { 1405 type: "visionai.googleapis.com/Draft" 1406 pattern: "projects/{project}/locations/{location}/applications/{application}/drafts/{draft}" 1407 style: DECLARATIVE_FRIENDLY 1408 }; 1409 1410 // name of resource 1411 string name = 1; 1412 1413 // Output only. [Output only] Create timestamp 1414 google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; 1415 1416 // Output only. [Output only] Create timestamp 1417 google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; 1418 1419 // Labels as key value pairs 1420 map<string, string> labels = 3; 1421 1422 // Required. A user friendly display name for the solution. 1423 string display_name = 4 [(google.api.field_behavior) = REQUIRED]; 1424 1425 // A description for this application. 1426 string description = 5; 1427 1428 // The draft application configs which haven't been updated to an application. 1429 ApplicationConfigs draft_application_configs = 6; 1430} 1431 1432// Message describing Instance object 1433message Instance { 1434 option (google.api.resource) = { 1435 type: "visionai.googleapis.com/Instance" 1436 pattern: "projects/{project}/locations/{location}/applications/{application}/instances/{instance}" 1437 style: DECLARATIVE_FRIENDLY 1438 }; 1439 1440 // Message of input resource used in one application instance. 1441 message InputResource { 1442 // Required. Specify the input to the application instance. 1443 oneof input_resource_information { 1444 // The direct input resource name. 1445 string input_resource = 1; 1446 1447 // If the input resource is VisionAI Stream, the associated annotations 1448 // can be specified using annotated_stream instead. 1449 StreamWithAnnotation annotated_stream = 4 [deprecated = true]; 1450 } 1451 1452 // The name of graph node who receives the input resource. 1453 // For example: 1454 // input_resource: 1455 // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/input-stream-a 1456 // consumer_node: stream-input 1457 string consumer_node = 2; 1458 1459 // The specific input resource binding which will consume the current Input 1460 // Resource, can be ignored is there is only 1 input binding. 1461 string input_resource_binding = 3; 1462 1463 // Contains resource annotations. 1464 ResourceAnnotations annotations = 5; 1465 } 1466 1467 // Message of output resource used in one application instance. 1468 message OutputResource { 1469 // The output resource name for the current application instance. 1470 string output_resource = 1; 1471 1472 // The name of graph node who produces the output resource name. 1473 // For example: 1474 // output_resource: 1475 // /projects/123/locations/us-central1/clusters/456/streams/output-application-789-stream-a-occupancy-counting 1476 // producer_node: occupancy-counting 1477 string producer_node = 2; 1478 1479 // The specific output resource binding which produces the current 1480 // OutputResource. 1481 string output_resource_binding = 4; 1482 1483 // Output only. Whether the output resource is temporary which means the resource is 1484 // generated during the deployment of the application. 1485 // Temporary resource will be deleted during the undeployment of the 1486 // application. 1487 bool is_temporary = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; 1488 1489 // Output only. Whether the output resource is created automatically by the Vision AI App 1490 // Platform. 1491 bool autogen = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; 1492 } 1493 1494 // State of the Instance 1495 enum State { 1496 // The default value. This value is used if the state is omitted. 1497 STATE_UNSPECIFIED = 0; 1498 1499 // State CREATING. 1500 CREATING = 1; 1501 1502 // State CREATED. 1503 CREATED = 2; 1504 1505 // State DEPLOYING. 1506 DEPLOYING = 3; 1507 1508 // State DEPLOYED. 1509 DEPLOYED = 4; 1510 1511 // State UNDEPLOYING. 1512 UNDEPLOYING = 5; 1513 1514 // State DELETED. 1515 DELETED = 6; 1516 1517 // State ERROR. 1518 ERROR = 7; 1519 1520 // State Updating 1521 UPDATING = 8; 1522 1523 // State Deleting. 1524 DELETING = 9; 1525 1526 // State Fixing. 1527 FIXING = 10; 1528 } 1529 1530 // Output only. name of resource 1531 string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; 1532 1533 // Output only. [Output only] Create timestamp 1534 google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; 1535 1536 // Output only. [Output only] Update timestamp 1537 google.protobuf.Timestamp update_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; 1538 1539 // Labels as key value pairs 1540 map<string, string> labels = 3; 1541 1542 // Required. A user friendly display name for the solution. 1543 string display_name = 4 [(google.api.field_behavior) = REQUIRED]; 1544 1545 // A description for this application. 1546 string description = 5; 1547 1548 // The input resources for the current application instance. 1549 // For example: 1550 // input_resources: 1551 // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/stream-a 1552 repeated InputResource input_resources = 6; 1553 1554 // All the output resources associated to one application instance. 1555 repeated OutputResource output_resources = 7; 1556 1557 // State of the instance. 1558 State state = 9; 1559} 1560 1561// Message for creating a Instance. 1562message ApplicationInstance { 1563 // Required. Id of the requesting object. 1564 string instance_id = 1 [(google.api.field_behavior) = REQUIRED]; 1565 1566 // Required. The resource being created. 1567 Instance instance = 2 [(google.api.field_behavior) = REQUIRED]; 1568} 1569 1570// Message describing Processor object. 1571// Next ID: 18 1572message Processor { 1573 option (google.api.resource) = { 1574 type: "visionai.googleapis.com/Processor" 1575 pattern: "projects/{project}/locations/{location}/processors/{processor}" 1576 style: DECLARATIVE_FRIENDLY 1577 }; 1578 1579 // Type 1580 enum ProcessorType { 1581 // Processor Type UNSPECIFIED. 1582 PROCESSOR_TYPE_UNSPECIFIED = 0; 1583 1584 // Processor Type PRETRAINED. 1585 // Pretrained processor is developed by Vision AI App Platform with 1586 // state-of-the-art vision data processing functionality, like occupancy 1587 // counting or person blur. Pretrained processor is usually publicly 1588 // available. 1589 PRETRAINED = 1; 1590 1591 // Processor Type CUSTOM. 1592 // Custom processors are specialized processors which are either uploaded by 1593 // customers or imported from other GCP platform (for example Vertex AI). 1594 // Custom processor is only visible to the creator. 1595 CUSTOM = 2; 1596 1597 // Processor Type CONNECTOR. 1598 // Connector processors are special processors which perform I/O for the 1599 // application, they do not processing the data but either deliver the data 1600 // to other processors or receive data from other processors. 1601 CONNECTOR = 3; 1602 } 1603 1604 enum ProcessorState { 1605 // Unspecified Processor state. 1606 PROCESSOR_STATE_UNSPECIFIED = 0; 1607 1608 // Processor is being created (not ready for use). 1609 CREATING = 1; 1610 1611 // Processor is and ready for use. 1612 ACTIVE = 2; 1613 1614 // Processor is being deleted (not ready for use). 1615 DELETING = 3; 1616 1617 // Processor deleted or creation failed . 1618 FAILED = 4; 1619 } 1620 1621 // name of resource. 1622 string name = 1; 1623 1624 // Output only. [Output only] Create timestamp. 1625 google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; 1626 1627 // Output only. [Output only] Update timestamp. 1628 google.protobuf.Timestamp update_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; 1629 1630 // Labels as key value pairs. 1631 map<string, string> labels = 4; 1632 1633 // Required. A user friendly display name for the processor. 1634 string display_name = 5 [(google.api.field_behavior) = REQUIRED]; 1635 1636 // Illustrative sentences for describing the functionality of the processor. 1637 string description = 10; 1638 1639 // Output only. Processor Type. 1640 ProcessorType processor_type = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; 1641 1642 // Model Type. 1643 ModelType model_type = 13; 1644 1645 // Source info for customer created processor. 1646 CustomProcessorSourceInfo custom_processor_source_info = 7; 1647 1648 // Output only. State of the Processor. 1649 ProcessorState state = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; 1650 1651 // Output only. [Output only] The input / output specifications of a processor, each type 1652 // of processor has fixed input / output specs which cannot be altered by 1653 // customer. 1654 ProcessorIOSpec processor_io_spec = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; 1655 1656 // Output only. The corresponding configuration can be used in the Application to customize 1657 // the behavior of the processor. 1658 string configuration_typeurl = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; 1659 1660 repeated StreamAnnotationType supported_annotation_types = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; 1661 1662 // Indicates if the processor supports post processing. 1663 bool supports_post_processing = 17; 1664} 1665 1666// Message describing the input / output specifications of a processor. 1667message ProcessorIOSpec { 1668 // Message for input channel specification. 1669 message GraphInputChannelSpec { 1670 // The name of the current input channel. 1671 string name = 1; 1672 1673 // The data types of the current input channel. 1674 // When this field has more than 1 value, it means this input channel can be 1675 // connected to either of these different data types. 1676 DataType data_type = 2; 1677 1678 // If specified, only those detailed data types can be connected to the 1679 // processor. For example, jpeg stream for MEDIA, or PredictionResult proto 1680 // for PROTO type. If unspecified, then any proto is accepted. 1681 repeated string accepted_data_type_uris = 5; 1682 1683 // Whether the current input channel is required by the processor. 1684 // For example, for a processor with required video input and optional audio 1685 // input, if video input is missing, the application will be rejected while 1686 // the audio input can be missing as long as the video input exists. 1687 bool required = 3; 1688 1689 // How many input edges can be connected to this input channel. 0 means 1690 // unlimited. 1691 int64 max_connection_allowed = 4; 1692 } 1693 1694 // Message for output channel specification. 1695 message GraphOutputChannelSpec { 1696 // The name of the current output channel. 1697 string name = 1; 1698 1699 // The data type of the current output channel. 1700 DataType data_type = 2; 1701 1702 string data_type_uri = 3; 1703 } 1704 1705 // Message for instance resource channel specification. 1706 // External resources are virtual nodes which are not expressed in the 1707 // application graph. Each processor expresses its out-graph spec, so customer 1708 // is able to override the external source or destinations to the 1709 message InstanceResourceInputBindingSpec { 1710 oneof resource_type { 1711 // The configuration proto that includes the Googleapis resources. I.e. 1712 // type.googleapis.com/google.cloud.vision.v1.StreamWithAnnotation 1713 string config_type_uri = 2; 1714 1715 // The direct type url of Googleapis resource. i.e. 1716 // type.googleapis.com/google.cloud.vision.v1.Asset 1717 string resource_type_uri = 3; 1718 } 1719 1720 // Name of the input binding, unique within the processor. 1721 string name = 1; 1722 } 1723 1724 message InstanceResourceOutputBindingSpec { 1725 // Name of the output binding, unique within the processor. 1726 string name = 1; 1727 1728 // The resource type uri of the acceptable output resource. 1729 string resource_type_uri = 2; 1730 1731 // Whether the output resource needs to be explicitly set in the instance. 1732 // If it is false, the processor will automatically generate it if required. 1733 bool explicit = 3; 1734 } 1735 1736 // High level data types supported by the processor. 1737 enum DataType { 1738 // The default value of DataType. 1739 DATA_TYPE_UNSPECIFIED = 0; 1740 1741 // Video data type like H264. 1742 VIDEO = 1; 1743 1744 // Protobuf data type, usually used for general data blob. 1745 PROTO = 2; 1746 } 1747 1748 // For processors with input_channel_specs, the processor must be explicitly 1749 // connected to another processor. 1750 repeated GraphInputChannelSpec graph_input_channel_specs = 3; 1751 1752 // The output artifact specifications for the current processor. 1753 repeated GraphOutputChannelSpec graph_output_channel_specs = 4; 1754 1755 // The input resource that needs to be fed from the application instance. 1756 repeated InstanceResourceInputBindingSpec instance_resource_input_binding_specs = 5; 1757 1758 // The output resource that the processor will generate per instance. 1759 // Other than the explicitly listed output bindings here, all the processors' 1760 // GraphOutputChannels can be binded to stream resource. The bind name then is 1761 // the same as the GraphOutputChannel's name. 1762 repeated InstanceResourceOutputBindingSpec instance_resource_output_binding_specs = 6; 1763} 1764 1765// Describes the source info for a custom processor. 1766message CustomProcessorSourceInfo { 1767 // The schema is defined as an OpenAPI 3.0.2 [Schema 1768 // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 1769 message ModelSchema { 1770 // Cloud Storage location to a YAML file that defines the format of a single 1771 // instance used in prediction and explanation requests. 1772 GcsSource instances_schema = 1; 1773 1774 // Cloud Storage location to a YAML file that defines the prediction and 1775 // explanation parameters. 1776 GcsSource parameters_schema = 2; 1777 1778 // Cloud Storage location to a YAML file that defines the format of a single 1779 // prediction or explanation. 1780 GcsSource predictions_schema = 3; 1781 } 1782 1783 // Source type of the imported custom processor. 1784 enum SourceType { 1785 // Source type unspecified. 1786 SOURCE_TYPE_UNSPECIFIED = 0; 1787 1788 // Custom processors coming from Vertex AutoML product. 1789 VERTEX_AUTOML = 1; 1790 1791 // Custom processors coming from general custom models from Vertex. 1792 VERTEX_CUSTOM = 2; 1793 } 1794 1795 // The path where App Platform loads the artifacts for the custom processor. 1796 oneof artifact_path { 1797 // The resource name original model hosted in the vertex AI platform. 1798 string vertex_model = 2; 1799 } 1800 1801 // The original product which holds the custom processor's functionality. 1802 SourceType source_type = 1; 1803 1804 // Output only. Additional info related to the imported custom processor. 1805 // Data is filled in by app platform during the processor creation. 1806 map<string, string> additional_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; 1807 1808 // Model schema files which specifies the signature of the model. 1809 // For VERTEX_CUSTOM models, instances schema is required. 1810 // If instances schema is not specified during the processor creation, 1811 // VisionAI Platform will try to get it from Vertex, if it doesn't exist, the 1812 // creation will fail. 1813 ModelSchema model_schema = 5; 1814} 1815 1816// Next ID: 24 1817message ProcessorConfig { 1818 oneof processor_config { 1819 // Configs of stream input processor. 1820 VideoStreamInputConfig video_stream_input_config = 9; 1821 1822 // Config of AI-enabled input devices. 1823 AIEnabledDevicesInputConfig ai_enabled_devices_input_config = 20; 1824 1825 // Configs of media warehouse processor. 1826 MediaWarehouseConfig media_warehouse_config = 10; 1827 1828 // Configs of person blur processor. 1829 PersonBlurConfig person_blur_config = 11; 1830 1831 // Configs of occupancy count processor. 1832 OccupancyCountConfig occupancy_count_config = 12; 1833 1834 // Configs of Person Vehicle Detection processor. 1835 PersonVehicleDetectionConfig person_vehicle_detection_config = 15; 1836 1837 // Configs of Vertex AutoML vision processor. 1838 VertexAutoMLVisionConfig vertex_automl_vision_config = 13; 1839 1840 // Configs of Vertex AutoML video processor. 1841 VertexAutoMLVideoConfig vertex_automl_video_config = 14; 1842 1843 // Configs of Vertex Custom processor. 1844 VertexCustomConfig vertex_custom_config = 17; 1845 1846 // Configs of General Object Detection processor. 1847 GeneralObjectDetectionConfig general_object_detection_config = 18; 1848 1849 // Configs of BigQuery processor. 1850 BigQueryConfig big_query_config = 19; 1851 1852 // Configs of personal_protective_equipment_detection_config 1853 PersonalProtectiveEquipmentDetectionConfig personal_protective_equipment_detection_config = 22; 1854 } 1855} 1856 1857// Message describing Vision AI stream with application specific annotations. 1858// All the StreamAnnotation object inside this message MUST have unique id. 1859message StreamWithAnnotation { 1860 // Message describing annotations specific to application node. 1861 message NodeAnnotation { 1862 // The node name of the application graph. 1863 string node = 1; 1864 1865 // The node specific stream annotations. 1866 repeated StreamAnnotation annotations = 2; 1867 } 1868 1869 // Vision AI Stream resource name. 1870 string stream = 1 [(google.api.resource_reference) = { 1871 type: "visionai.googleapis.com/Stream" 1872 }]; 1873 1874 // Annotations that will be applied to the whole application. 1875 repeated StreamAnnotation application_annotations = 2; 1876 1877 // Annotations that will be applied to the specific node of the application. 1878 // If the same type of the annotations is applied to both application and 1879 // node, the node annotation will be added in addition to the global 1880 // application one. 1881 // For example, if there is one active zone annotation for the whole 1882 // application and one active zone annotation for the Occupancy Analytic 1883 // processor, then the Occupancy Analytic processor will have two active zones 1884 // defined. 1885 repeated NodeAnnotation node_annotations = 3; 1886} 1887 1888// Message describing annotations specific to application node. 1889// This message is a duplication of StreamWithAnnotation.NodeAnnotation. 1890message ApplicationNodeAnnotation { 1891 // The node name of the application graph. 1892 string node = 1; 1893 1894 // The node specific stream annotations. 1895 repeated StreamAnnotation annotations = 2; 1896} 1897 1898// Message describing general annotation for resources. 1899message ResourceAnnotations { 1900 // Annotations that will be applied to the whole application. 1901 repeated StreamAnnotation application_annotations = 1; 1902 1903 // Annotations that will be applied to the specific node of the application. 1904 // If the same type of the annotations is applied to both application and 1905 // node, the node annotation will be added in addition to the global 1906 // application one. 1907 // For example, if there is one active zone annotation for the whole 1908 // application and one active zone annotation for the Occupancy Analytic 1909 // processor, then the Occupancy Analytic processor will have two active zones 1910 // defined. 1911 repeated ApplicationNodeAnnotation node_annotations = 2; 1912} 1913 1914// Message describing Video Stream Input Config. 1915// This message should only be used as a placeholder for builtin:stream-input 1916// processor, actual stream binding should be specified using corresponding 1917// API. 1918message VideoStreamInputConfig { 1919 repeated string streams = 1 [deprecated = true]; 1920 1921 repeated StreamWithAnnotation streams_with_annotation = 2 [deprecated = true]; 1922} 1923 1924// Message describing AI-enabled Devices Input Config. 1925message AIEnabledDevicesInputConfig { 1926 1927} 1928 1929// Message describing MediaWarehouseConfig. 1930message MediaWarehouseConfig { 1931 // Resource name of the Media Warehouse corpus. 1932 // Format: 1933 // projects/${project_id}/locations/${location_id}/corpora/${corpus_id} 1934 string corpus = 1; 1935 1936 // Deprecated. 1937 string region = 2 [deprecated = true]; 1938 1939 // The duration for which all media assets, associated metadata, and search 1940 // documents can exist. 1941 google.protobuf.Duration ttl = 3; 1942} 1943 1944// Message describing FaceBlurConfig. 1945message PersonBlurConfig { 1946 // Type of Person Blur 1947 enum PersonBlurType { 1948 // PersonBlur Type UNSPECIFIED. 1949 PERSON_BLUR_TYPE_UNSPECIFIED = 0; 1950 1951 // FaceBlur Type full occlusion. 1952 FULL_OCCULUSION = 1; 1953 1954 // FaceBlur Type blur filter. 1955 BLUR_FILTER = 2; 1956 } 1957 1958 // Person blur type. 1959 PersonBlurType person_blur_type = 1; 1960 1961 // Whether only blur faces other than the whole object in the processor. 1962 bool faces_only = 2; 1963} 1964 1965// Message describing OccupancyCountConfig. 1966message OccupancyCountConfig { 1967 // Whether to count the appearances of people, output counts have 'people' as 1968 // the key. 1969 bool enable_people_counting = 1; 1970 1971 // Whether to count the appearances of vehicles, output counts will have 1972 // 'vehicle' as the key. 1973 bool enable_vehicle_counting = 2; 1974 1975 // Whether to track each invidual object's loitering time inside the scene or 1976 // specific zone. 1977 bool enable_dwelling_time_tracking = 3; 1978} 1979 1980// Message describing PersonVehicleDetectionConfig. 1981message PersonVehicleDetectionConfig { 1982 // At least one of enable_people_counting and enable_vehicle_counting fields 1983 // must be set to true. 1984 // Whether to count the appearances of people, output counts have 'people' as 1985 // the key. 1986 bool enable_people_counting = 1; 1987 1988 // Whether to count the appearances of vehicles, output counts will have 1989 // 'vehicle' as the key. 1990 bool enable_vehicle_counting = 2; 1991} 1992 1993// Message describing PersonalProtectiveEquipmentDetectionConfig. 1994message PersonalProtectiveEquipmentDetectionConfig { 1995 // Whether to enable face coverage detection. 1996 bool enable_face_coverage_detection = 1; 1997 1998 // Whether to enable head coverage detection. 1999 bool enable_head_coverage_detection = 2; 2000 2001 // Whether to enable hands coverage detection. 2002 bool enable_hands_coverage_detection = 3; 2003} 2004 2005// Message of configurations for General Object Detection processor. 2006message GeneralObjectDetectionConfig { 2007 2008} 2009 2010// Message of configurations for BigQuery processor. 2011message BigQueryConfig { 2012 // BigQuery table resource for Vision AI Platform to ingest annotations to. 2013 string table = 1; 2014 2015 // Data Schema 2016 // By default, Vision AI Application will try to write annotations to the 2017 // target BigQuery table using the following schema: 2018 // 2019 // ingestion_time: TIMESTAMP, the ingestion time of the original data. 2020 // 2021 // application: STRING, name of the application which produces the annotation. 2022 // 2023 // instance: STRING, Id of the instance which produces the annotation. 2024 // 2025 // node: STRING, name of the application graph node which produces the 2026 // annotation. 2027 // 2028 // annotation: STRING or JSON, the actual annotation protobuf will be 2029 // converted to json string with bytes field as 64 encoded string. It can be 2030 // written to both String or Json type column. 2031 // 2032 // To forward annotation data to an existing BigQuery table, customer needs to 2033 // make sure the compatibility of the schema. 2034 // The map maps application node name to its corresponding cloud function 2035 // endpoint to transform the annotations directly to the 2036 // google.cloud.bigquery.storage.v1.AppendRowsRequest (only avro_rows or 2037 // proto_rows should be set). If configured, annotations produced by 2038 // corresponding application node will sent to the Cloud Function at first 2039 // before be forwarded to BigQuery. 2040 // 2041 // If the default table schema doesn't fit, customer is able to transform the 2042 // annotation output from Vision AI Application to arbitrary BigQuery table 2043 // schema with CloudFunction. 2044 // * The cloud function will receive AppPlatformCloudFunctionRequest where 2045 // the annotations field will be the json format of Vision AI annotation. 2046 // * The cloud function should return AppPlatformCloudFunctionResponse with 2047 // AppendRowsRequest stored in the annotations field. 2048 // * To drop the annotation, simply clear the annotations field in the 2049 // returned AppPlatformCloudFunctionResponse. 2050 map<string, string> cloud_function_mapping = 2; 2051 2052 // If true, App Platform will create the BigQuery DataSet and the 2053 // BigQuery Table with default schema if the specified table doesn't exist. 2054 // This doesn't work if any cloud function customized schema is specified 2055 // since the system doesn't know your desired schema. 2056 // JSON column will be used in the default table created by App Platform. 2057 bool create_default_table_if_not_exists = 3; 2058} 2059 2060// Message of configurations of Vertex AutoML Vision Processors. 2061message VertexAutoMLVisionConfig { 2062 // Only entities with higher score than the threshold will be returned. 2063 // Value 0.0 means to return all the detected entities. 2064 float confidence_threshold = 1; 2065 2066 // At most this many predictions will be returned per output frame. 2067 // Value 0 means to return all the detected entities. 2068 int32 max_predictions = 2; 2069} 2070 2071// Message describing VertexAutoMLVideoConfig. 2072message VertexAutoMLVideoConfig { 2073 // Only entities with higher score than the threshold will be returned. 2074 // Value 0.0 means returns all the detected entities. 2075 float confidence_threshold = 1; 2076 2077 // Labels specified in this field won't be returned. 2078 repeated string blocked_labels = 2; 2079 2080 // At most this many predictions will be returned per output frame. 2081 // Value 0 means to return all the detected entities. 2082 int32 max_predictions = 3; 2083 2084 // Only Bounding Box whose size is larger than this limit will be returned. 2085 // Object Tracking only. 2086 // Value 0.0 means to return all the detected entities. 2087 float bounding_box_size_limit = 4; 2088} 2089 2090// Message describing VertexCustomConfig. 2091message VertexCustomConfig { 2092 // The max prediction frame per second. This attribute sets how fast the 2093 // operator sends prediction requests to Vertex AI endpoint. Default value is 2094 // 0, which means there is no max prediction fps limit. The operator sends 2095 // prediction requests at input fps. 2096 int32 max_prediction_fps = 1; 2097 2098 // A description of resources that are dedicated to the DeployedModel, and 2099 // that need a higher degree of manual configuration. 2100 DedicatedResources dedicated_resources = 2; 2101 2102 // If not empty, the prediction result will be sent to the specified cloud 2103 // function for post processing. 2104 // * The cloud function will receive AppPlatformCloudFunctionRequest where 2105 // the annotations field will be the json format of proto PredictResponse. 2106 // * The cloud function should return AppPlatformCloudFunctionResponse with 2107 // PredictResponse stored in the annotations field. 2108 // * To drop the prediction output, simply clear the payload field in the 2109 // returned AppPlatformCloudFunctionResponse. 2110 string post_processing_cloud_function = 3; 2111 2112 // If true, the prediction request received by custom model will also contain 2113 // metadata with the following schema: 2114 // 'appPlatformMetadata': { 2115 // 'ingestionTime': DOUBLE; (UNIX timestamp) 2116 // 'application': STRING; 2117 // 'instanceId': STRING; 2118 // 'node': STRING; 2119 // 'processor': STRING; 2120 // } 2121 bool attach_application_metadata = 4; 2122} 2123 2124// Specification of a single machine. 2125message MachineSpec { 2126 // Immutable. The type of the machine. 2127 // 2128 // See the [list of machine types supported for 2129 // prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) 2130 // 2131 // See the [list of machine types supported for custom 2132 // training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). 2133 // 2134 // For [DeployedModel][] this field is optional, and the default 2135 // value is `n1-standard-2`. For [BatchPredictionJob][] or as part of 2136 // [WorkerPoolSpec][] this field is required. 2137 string machine_type = 1 [(google.api.field_behavior) = IMMUTABLE]; 2138 2139 // Immutable. The type of accelerator(s) that may be attached to the machine as per 2140 // [accelerator_count][google.cloud.visionai.v1alpha1.MachineSpec.accelerator_count]. 2141 AcceleratorType accelerator_type = 2 [(google.api.field_behavior) = IMMUTABLE]; 2142 2143 // The number of accelerators to attach to the machine. 2144 int32 accelerator_count = 3; 2145} 2146 2147// The metric specification that defines the target resource utilization 2148// (CPU utilization, accelerator's duty cycle, and so on) for calculating the 2149// desired replica count. 2150message AutoscalingMetricSpec { 2151 // Required. The resource metric name. 2152 // Supported metrics: 2153 // 2154 // * For Online Prediction: 2155 // * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` 2156 // * `aiplatform.googleapis.com/prediction/online/cpu/utilization` 2157 string metric_name = 1 [(google.api.field_behavior) = REQUIRED]; 2158 2159 // The target resource utilization in percentage (1% - 100%) for the given 2160 // metric; once the real usage deviates from the target by a certain 2161 // percentage, the machine replicas change. The default value is 60 2162 // (representing 60%) if not provided. 2163 int32 target = 2; 2164} 2165 2166// A description of resources that are dedicated to a DeployedModel, and 2167// that need a higher degree of manual configuration. 2168message DedicatedResources { 2169 // Required. Immutable. The specification of a single machine used by the prediction. 2170 MachineSpec machine_spec = 1 [ 2171 (google.api.field_behavior) = REQUIRED, 2172 (google.api.field_behavior) = IMMUTABLE 2173 ]; 2174 2175 // Required. Immutable. The minimum number of machine replicas this DeployedModel will be always 2176 // deployed on. This value must be greater than or equal to 1. 2177 // 2178 // If traffic against the DeployedModel increases, it may dynamically be 2179 // deployed onto more replicas, and as traffic decreases, some of these extra 2180 // replicas may be freed. 2181 int32 min_replica_count = 2 [ 2182 (google.api.field_behavior) = REQUIRED, 2183 (google.api.field_behavior) = IMMUTABLE 2184 ]; 2185 2186 // Immutable. The maximum number of replicas this DeployedModel may be deployed on when 2187 // the traffic against it increases. If the requested value is too large, 2188 // the deployment will error, but if deployment succeeds then the ability 2189 // to scale the model to that many replicas is guaranteed (barring service 2190 // outages). If traffic against the DeployedModel increases beyond what its 2191 // replicas at maximum may handle, a portion of the traffic will be dropped. 2192 // If this value is not provided, will use [min_replica_count][google.cloud.visionai.v1alpha1.DedicatedResources.min_replica_count] as the 2193 // default value. 2194 // 2195 // The value of this field impacts the charge against Vertex CPU and GPU 2196 // quotas. Specifically, you will be charged for max_replica_count * 2197 // number of cores in the selected machine type) and (max_replica_count * 2198 // number of GPUs per replica in the selected machine type). 2199 int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE]; 2200 2201 // Immutable. The metric specifications that overrides a resource 2202 // utilization metric (CPU utilization, accelerator's duty cycle, and so on) 2203 // target value (default to 60 if not set). At most one entry is allowed per 2204 // metric. 2205 // 2206 // If [machine_spec.accelerator_count][google.cloud.visionai.v1alpha1.MachineSpec.accelerator_count] is 2207 // above 0, the autoscaling will be based on both CPU utilization and 2208 // accelerator's duty cycle metrics and scale up when either metrics exceeds 2209 // its target value while scale down if both metrics are under their target 2210 // value. The default target value is 60 for both metrics. 2211 // 2212 // If [machine_spec.accelerator_count][google.cloud.visionai.v1alpha1.MachineSpec.accelerator_count] is 2213 // 0, the autoscaling will be based on CPU utilization metric only with 2214 // default target value 60 if not explicitly set. 2215 // 2216 // For example, in the case of Online Prediction, if you want to override 2217 // target CPU utilization to 80, you should set 2218 // [autoscaling_metric_specs.metric_name][google.cloud.visionai.v1alpha1.AutoscalingMetricSpec.metric_name] 2219 // to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and 2220 // [autoscaling_metric_specs.target][google.cloud.visionai.v1alpha1.AutoscalingMetricSpec.target] to `80`. 2221 repeated AutoscalingMetricSpec autoscaling_metric_specs = 4 [(google.api.field_behavior) = IMMUTABLE]; 2222} 2223