xref: /aosp_15_r20/external/googleapis/google/cloud/visionai/v1/platform.proto (revision d5c09012810ac0c9f33fe448fb6da8260d444cc9)
1// Copyright 2023 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15syntax = "proto3";
16
17package google.cloud.visionai.v1;
18
19import "google/api/annotations.proto";
20import "google/api/client.proto";
21import "google/api/field_behavior.proto";
22import "google/api/resource.proto";
23import "google/cloud/visionai/v1/annotations.proto";
24import "google/cloud/visionai/v1/common.proto";
25import "google/longrunning/operations.proto";
26import "google/protobuf/duration.proto";
27import "google/protobuf/empty.proto";
28import "google/protobuf/field_mask.proto";
29import "google/protobuf/struct.proto";
30import "google/protobuf/timestamp.proto";
31
32option csharp_namespace = "Google.Cloud.VisionAI.V1";
33option go_package = "cloud.google.com/go/visionai/apiv1/visionaipb;visionaipb";
34option java_multiple_files = true;
35option java_outer_classname = "PlatformProto";
36option java_package = "com.google.cloud.visionai.v1";
37option php_namespace = "Google\\Cloud\\VisionAI\\V1";
38option ruby_package = "Google::Cloud::VisionAI::V1";
39
// Service describing handlers for resources
service AppPlatform {
  option (google.api.default_host) = "visionai.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Lists Applications in a given project and location.
  rpc ListApplications(ListApplicationsRequest)
      returns (ListApplicationsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*}/applications"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Application.
  rpc GetApplication(GetApplicationRequest) returns (Application) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Application in a given project and location.
  rpc CreateApplication(CreateApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/applications"
      body: "application"
    };
    option (google.api.method_signature) = "parent,application";
    option (google.longrunning.operation_info) = {
      response_type: "Application"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Application.
  rpc UpdateApplication(UpdateApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{application.name=projects/*/locations/*/applications/*}"
      body: "application"
    };
    option (google.api.method_signature) = "application,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Application"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Application.
  rpc DeleteApplication(DeleteApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/applications/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }

  // Deploys a single Application.
  rpc DeployApplication(DeployApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:deploy"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "DeployApplicationResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Undeploys a single Application.
  rpc UndeployApplication(UndeployApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:undeploy"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "UndeployApplicationResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Adds target stream input to the Application.
  // If the Application is deployed, the corresponding new Application instance
  // will be created. If the stream has already been in the Application, the RPC
  // will fail.
  rpc AddApplicationStreamInput(AddApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:addStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "AddApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Removes target stream input from the Application. If the Application is
  // deployed, the corresponding instance will be deleted. If the stream is
  // not in the Application, the RPC will fail.
  rpc RemoveApplicationStreamInput(RemoveApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:removeStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "RemoveApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates target stream input of the Application. If the Application is
  // deployed, the corresponding instance will be deployed. For CreateOrUpdate
  // behavior, set allow_missing to true.
  rpc UpdateApplicationStreamInput(UpdateApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:updateStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "UpdateApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Instances in a given project and location.
  rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*/applications/*}/instances"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Instance.
  rpc GetInstance(GetInstanceRequest) returns (Instance) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*/instances/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Adds target stream input to the Application.
  // If the Application is deployed, the corresponding new Application instance
  // will be created. If the stream has already been in the Application, the RPC
  // will fail.
  // NOTE(review): this comment appears copy-pasted from
  // AddApplicationStreamInput; the RPC actually creates application Instances
  // (see CreateApplicationInstancesRequest) -- confirm intended semantics.
  rpc CreateApplicationInstances(CreateApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:createApplicationInstances"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "CreateApplicationInstancesResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Remove target stream input to the Application, if the Application is
  // deployed, the corresponding instance based will be deleted. If the stream
  // is not in the Application, the RPC will fail.
  // NOTE(review): this comment appears copy-pasted from
  // RemoveApplicationStreamInput; the RPC actually deletes application
  // Instances by id (see DeleteApplicationInstancesRequest).
  rpc DeleteApplicationInstances(DeleteApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:deleteApplicationInstances"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      // NOTE(review): a DeleteApplicationInstancesResponse message exists in
      // this file, yet the LRO response type is "Instance". Changing it would
      // break clients that unpack the operation result, so it is flagged only.
      response_type: "Instance"
      metadata_type: "OperationMetadata"
    };
  }

  // Adds target stream input to the Application.
  // If the Application is deployed, the corresponding new Application instance
  // will be created. If the stream has already been in the Application, the RPC
  // will fail.
  // NOTE(review): this comment appears copy-pasted from
  // AddApplicationStreamInput; the RPC actually updates application Instances
  // (see UpdateApplicationInstancesRequest).
  rpc UpdateApplicationInstances(UpdateApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:updateApplicationInstances"
      body: "*"
    };
    // Fixed: signature fields are comma-delimited without spaces, consistent
    // with every other method_signature in this file.
    option (google.api.method_signature) = "name,application_instances";
    option (google.longrunning.operation_info) = {
      response_type: "UpdateApplicationInstancesResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Drafts in a given project and location.
  rpc ListDrafts(ListDraftsRequest) returns (ListDraftsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*/applications/*}/drafts"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Draft.
  rpc GetDraft(GetDraftRequest) returns (Draft) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*/drafts/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Draft in a given project and location.
  rpc CreateDraft(CreateDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*/applications/*}/drafts"
      body: "draft"
    };
    option (google.api.method_signature) = "parent,draft,draft_id";
    option (google.longrunning.operation_info) = {
      response_type: "Draft"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Draft.
  rpc UpdateDraft(UpdateDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{draft.name=projects/*/locations/*/applications/*/drafts/*}"
      body: "draft"
    };
    option (google.api.method_signature) = "draft,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Draft"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Draft.
  rpc DeleteDraft(DeleteDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/applications/*/drafts/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Processors in a given project and location.
  rpc ListProcessors(ListProcessorsRequest) returns (ListProcessorsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*}/processors"
    };
    option (google.api.method_signature) = "parent";
  }

  // ListPrebuiltProcessors is a custom pass-through verb that Lists Prebuilt
  // Processors.
  rpc ListPrebuiltProcessors(ListPrebuiltProcessorsRequest)
      returns (ListPrebuiltProcessorsResponse) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/processors:prebuilt"
      body: "*"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Processor.
  rpc GetProcessor(GetProcessorRequest) returns (Processor) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/processors/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Processor in a given project and location.
  rpc CreateProcessor(CreateProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/processors"
      body: "processor"
    };
    option (google.api.method_signature) = "parent,processor,processor_id";
    option (google.longrunning.operation_info) = {
      response_type: "Processor"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Processor.
  rpc UpdateProcessor(UpdateProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{processor.name=projects/*/locations/*/processors/*}"
      body: "processor"
    };
    option (google.api.method_signature) = "processor,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Processor"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Processor.
  rpc DeleteProcessor(DeleteProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/processors/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }
}
369
// All the supported model types in Vision AI App Platform.
enum ModelType {
  // The default value; the model type is unspecified.
  MODEL_TYPE_UNSPECIFIED = 0;

  // Model Type Image Classification.
  IMAGE_CLASSIFICATION = 1;

  // Model Type Object Detection.
  OBJECT_DETECTION = 2;

  // Model Type Video Classification.
  VIDEO_CLASSIFICATION = 3;

  // Model Type Object Tracking.
  VIDEO_OBJECT_TRACKING = 4;

  // Model Type Action Recognition.
  VIDEO_ACTION_RECOGNITION = 5;

  // Model Type Occupancy Counting.
  OCCUPANCY_COUNTING = 6;

  // Model Type Person Blur.
  PERSON_BLUR = 7;

  // Model Type Vertex Custom.
  VERTEX_CUSTOM = 8;

  // Model Type Product Recognizer.
  PRODUCT_RECOGNIZER = 9;

  // Model Type Tag Recognizer.
  TAG_RECOGNIZER = 10;

  // Model Type SynthID.
  // NOTE(review): values 11-14 are skipped; presumably used or removed in
  // another branch -- do not reuse them without confirming (they should be
  // marked `reserved` if retired).
  SYNTH_ID = 15;
}
408
// Represents a hardware accelerator type.
enum AcceleratorType {
  // Unspecified accelerator type, which means no accelerator.
  ACCELERATOR_TYPE_UNSPECIFIED = 0;

  // Nvidia Tesla K80 GPU.
  NVIDIA_TESLA_K80 = 1;

  // Nvidia Tesla P100 GPU.
  NVIDIA_TESLA_P100 = 2;

  // Nvidia Tesla V100 GPU.
  NVIDIA_TESLA_V100 = 3;

  // Nvidia Tesla P4 GPU.
  NVIDIA_TESLA_P4 = 4;

  // Nvidia Tesla T4 GPU.
  NVIDIA_TESLA_T4 = 5;

  // Nvidia Tesla A100 GPU.
  // Declared out of numeric order (8 after 5): the A100 was added after the
  // TPU values below. Wire values, not declaration order, are the contract.
  NVIDIA_TESLA_A100 = 8;

  // TPU v2.
  TPU_V2 = 6;

  // TPU v3.
  TPU_V3 = 7;
}
438
// All supported data types.
enum DataType {
  // The default value of DataType.
  DATA_TYPE_UNSPECIFIED = 0;

  // Video data type like H264.
  VIDEO = 1;

  // Image data type.
  // Declared before PROTO but carries the later wire value (3); numbering,
  // not declaration order, is the contract.
  IMAGE = 3;

  // Protobuf data type, usually used for general data blob.
  PROTO = 2;

  // A placeholder data type, applicable for the universal input processor which
  // supports any data type. This will be instantiated and replaced by a
  // concrete underlying `DataType` during instance deployment.
  PLACEHOLDER = 4;
}
458
// Message for DeleteApplicationInstances Response.
// Intentionally empty; fields may be added later without breaking callers.
message DeleteApplicationInstancesResponse {}
461
// Message for CreateApplicationInstances Response.
// Intentionally empty; fields may be added later without breaking callers.
message CreateApplicationInstancesResponse {}
464
// Message for UpdateApplicationInstances Response.
// Intentionally empty; fields may be added later without breaking callers.
message UpdateApplicationInstancesResponse {}
467
// Request message for CreateApplicationInstances: adds one or more
// application instances to an existing Application.
message CreateApplicationInstancesRequest {
  // Required. The name of the application to add instances to.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. The resources being created.
  repeated ApplicationInstance application_instances = 2
      [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  //
  // NOTE(review): field number 3 is skipped; presumably removed -- confirm
  // before reusing, and prefer marking it `reserved`.
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
499
// Request message for DeleteApplicationInstances: removes instances, by id,
// from an existing Application.
message DeleteApplicationInstancesRequest {
  // Required. The name of the application to remove instances from.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. Ids of the Instances to delete within this application.
  repeated string instance_ids = 2 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Instance"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
535
// RPC Response Messages.
// Message for DeployApplication Response.
// Intentionally empty; fields may be added later without breaking callers.
message DeployApplicationResponse {}
539
// Message for UndeployApplication Response.
// Intentionally empty; fields may be added later without breaking callers.
message UndeployApplicationResponse {}
542
// Message for RemoveApplicationStreamInput Response.
// Intentionally empty; fields may be added later without breaking callers.
message RemoveApplicationStreamInputResponse {}
545
// Message for AddApplicationStreamInput Response.
// Intentionally empty; fields may be added later without breaking callers.
message AddApplicationStreamInputResponse {}
548
// Message for UpdateApplicationStreamInput Response.
// Intentionally empty; fields may be added later without breaking callers.
message UpdateApplicationStreamInputResponse {}
551
// Message for requesting a list of Applications.
message ListApplicationsRequest {
  // Required. Parent value for ListApplicationsRequest; a location that owns
  // the Applications to list.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Application"
    }
  ];

  // Requested page size. Server may return fewer items than requested.
  // If unspecified, server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return;
  // obtained from a previous ListApplicationsResponse.next_page_token.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
575
// Message for response to listing Applications.
message ListApplicationsResponse {
  // The list of Application.
  repeated Application applications = 1;

  // A token identifying a page of results the server should return.
  // Empty when there are no further pages.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
587
// Message for getting an Application.
message GetApplicationRequest {
  // Required. Name of the resource.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];
}
598
// Message for creating an Application.
message CreateApplicationRequest {
  // Required. Value for parent; the location under which the Application is
  // created.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. Id of the requesting object; becomes the final segment of the
  // new Application's resource name.
  string application_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Application application = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
630
// Message for updating an Application.
message UpdateApplicationRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Application resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  // Paths are snake_case field names of the Application message.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated. Its `name` identifies which
  // Application to update.
  Application application = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
659
// Message for deleting an Application.
message DeleteApplicationRequest {
  // Required. Name of the resource.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If set to true, any instances and drafts from this application
  // will also be deleted. (Otherwise, the request will only work if the
  // application has no instances and drafts.)
  bool force = 3 [(google.api.field_behavior) = OPTIONAL];
}
690
// Message for deploying an Application.
message DeployApplicationRequest {
  // Required. The name of the application to deploy.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // If set, validate the request and preview the application graph, but do not
  // actually deploy it.
  bool validate_only = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Whether or not to enable monitoring for the application on
  // deployment.
  bool enable_monitoring = 4 [(google.api.field_behavior) = OPTIONAL];
}
726
// Message for undeploying an Application.
message UndeployApplicationRequest {
  // Required. The name of the application to undeploy.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
754
// Message about a single stream input config.
message ApplicationStreamInput {
  // The stream together with its application-specific annotations
  // (see StreamWithAnnotation in annotations.proto).
  StreamWithAnnotation stream_with_annotation = 1;
}
759
// Message for adding stream input to an Application.
message AddApplicationStreamInputRequest {
  // Required. The name of the application to add stream inputs to.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The stream inputs to add, the stream resource name is the key of each
  // StreamInput, and it must be unique within each application.
  repeated ApplicationStreamInput application_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
791
// Message for updating stream input to an Application.
message UpdateApplicationStreamInputRequest {
  // Required. The name of the application whose stream inputs are updated.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The stream inputs to update, the stream resource name is the key of each
  // StreamInput, and it must be unique within each application.
  repeated ApplicationStreamInput application_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, UpdateApplicationStreamInput will insert stream input to
  // application even if the target stream is not included in the application
  // (i.e. create-or-update semantics).
  bool allow_missing = 4;
}
827
// Message for removing stream input from an Application.
message RemoveApplicationStreamInputRequest {
  // Message about target streamInput to remove.
  message TargetStreamInput {
    // The resource name of the stream to remove from the application.
    string stream = 1 [(google.api.resource_reference) = {
      type: "visionai.googleapis.com/Stream"
    }];
  }

  // Required. The name of the application to remove stream inputs from.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The target stream to remove.
  repeated TargetStreamInput target_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
865
// Message for requesting a list of Instances.
message ListInstancesRequest {
  // Required. Parent value for ListInstancesRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Instance"
    }
  ];

  // Requested page size. The server may return fewer items than requested.
  // If unspecified, the server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
889
// Message for response to listing Instances.
message ListInstancesResponse {
  // The list of Instances.
  repeated Instance instances = 1;

  // A token identifying a page of results the server should return.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
901
// Message for getting an Instance.
message GetInstanceRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Instance"
    }
  ];
}
912
// Message for requesting a list of Drafts.
message ListDraftsRequest {
  // Required. Parent value for ListDraftsRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Draft"
    }
  ];

  // Requested page size. The server may return fewer items than requested.
  // If unspecified, the server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
936
// Message for response to listing Drafts.
message ListDraftsResponse {
  // The list of Drafts.
  repeated Draft drafts = 1;

  // A token identifying a page of results the server should return.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
948
// Message for getting a Draft.
message GetDraftRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Draft" }
  ];
}
957
// Message for creating a Draft.
message CreateDraftRequest {
  // Required. Value for parent.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Draft"
    }
  ];

  // Required. Id of the requesting object.
  string draft_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Draft draft = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
989
// Message for updating a Draft.
message UpdateDraftRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Draft resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated.
  Draft draft = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, UpdateDraftRequest will create one resource if the target resource
  // doesn't exist; in that case, the field_mask will be ignored.
  bool allow_missing = 4;
}
1022
// Message for updating an ApplicationInstance.
message UpdateApplicationInstancesRequest {
  // A single instance update: the instance id, the new resource state, and an
  // optional mask selecting which fields to overwrite.
  message UpdateApplicationInstance {
    // Optional. Field mask is used to specify the fields to be overwritten in
    // the Draft resource by the update. The fields specified in the update_mask
    // are relative to the resource, not the full request. A field will be
    // overwritten if it is in the mask. If the user does not provide a mask
    // then all fields will be overwritten.
    google.protobuf.FieldMask update_mask = 1
        [(google.api.field_behavior) = OPTIONAL];

    // Required. The resource being updated.
    Instance instance = 2 [(google.api.field_behavior) = REQUIRED];

    // Required. The id of the instance.
    string instance_id = 3 [(google.api.field_behavior) = REQUIRED];
  }

  // Required. The name of the application whose instances are updated.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The instance updates to apply to this application.
  repeated UpdateApplicationInstance application_instances = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, Update Request will create one resource if the target resource
  // doesn't exist; in that case, the field_mask will be ignored.
  bool allow_missing = 4;
}
1072
// Message for deleting a Draft.
message DeleteDraftRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Draft" }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
1096
// Message for requesting a list of Processors.
message ListProcessorsRequest {
  // Required. Parent value for ListProcessorsRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];

  // Requested page size. The server may return fewer items than requested.
  // If unspecified, the server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
1120
// Message for response to listing Processors.
message ListProcessorsResponse {
  // The list of Processors.
  repeated Processor processors = 1;

  // A token identifying a page of results the server should return.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
1132
// Request Message for listing Prebuilt Processors.
message ListPrebuiltProcessorsRequest {
  // Required. Parent path.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];
}
1143
// Response Message for listing Prebuilt Processors.
message ListPrebuiltProcessorsResponse {
  // The list of Processors.
  repeated Processor processors = 1;
}
1149
// Message for getting a Processor.
message GetProcessorRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Processor"
    }
  ];
}
1160
// Message for creating a Processor.
message CreateProcessorRequest {
  // Required. Value for parent.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];

  // Required. Id of the requesting object.
  string processor_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Processor processor = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
1192
// Message for updating a Processor.
message UpdateProcessorRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Processor resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated.
  Processor processor = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
1221
// Message for deleting a Processor.
message DeleteProcessorRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Processor"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
1247
// Message describing Application object
message Application {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Application"
    pattern: "projects/{project}/locations/{location}/applications/{application}"
    style: DECLARATIVE_FRIENDLY
  };

  // Message storing the runtime information of the application.
  message ApplicationRuntimeInfo {
    // Message about output resources from application.
    message GlobalOutputResource {
      // The full resource name of the outputted resources.
      string output_resource = 1;

      // The name of the graph node that produces the output resource name.
      // For example:
      // output_resource:
      // /projects/123/locations/us-central1/corpora/my-corpus/dataSchemas/my-schema
      // producer_node: occupancy-count
      string producer_node = 2;

      // The key of the output resource; it has to be unique within the same
      // producer node. One producer node can output several output resources,
      // the key can be used to match corresponding output resources.
      string key = 3;
    }

    // Monitoring-related configuration for an application.
    message MonitoringConfig {
      // Whether this application has monitoring enabled.
      bool enabled = 1;
    }

    // Timestamp when the engine was deployed.
    google.protobuf.Timestamp deploy_time = 1;

    // Globally created resources like warehouse dataschemas.
    repeated GlobalOutputResource global_output_resources = 3;

    // Monitoring-related configuration for this application.
    MonitoringConfig monitoring_config = 4;
  }

  // State of the Application
  enum State {
    // The default value. This value is used if the state is omitted.
    STATE_UNSPECIFIED = 0;

    // State CREATED.
    CREATED = 1;

    // State DEPLOYING.
    DEPLOYING = 2;

    // State DEPLOYED.
    DEPLOYED = 3;

    // State UNDEPLOYING.
    UNDEPLOYING = 4;

    // State DELETED.
    DELETED = 5;

    // State ERROR.
    ERROR = 6;

    // State CREATING.
    CREATING = 7;

    // State UPDATING.
    UPDATING = 8;

    // State DELETING.
    DELETING = 9;

    // State FIXING.
    FIXING = 10;
  }

  // Billing mode of the Application
  enum BillingMode {
    // The default value.
    BILLING_MODE_UNSPECIFIED = 0;

    // Pay as you go billing mode.
    PAYG = 1;

    // Monthly billing mode.
    MONTHLY = 2;
  }

  // Name of the resource.
  string name = 1;

  // Output only. [Output only] Create timestamp
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Update timestamp
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs
  map<string, string> labels = 4;

  // Required. A user friendly display name for the solution.
  string display_name = 5 [(google.api.field_behavior) = REQUIRED];

  // A description for this application.
  string description = 6;

  // Application graph configuration.
  ApplicationConfigs application_configs = 7;

  // Output only. Application graph runtime info. Only exists when application
  // state equals to DEPLOYED.
  ApplicationRuntimeInfo runtime_info = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. State of the application.
  State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Billing mode of the application.
  BillingMode billing_mode = 12;
}
1374
// Message storing the graph of the application.
message ApplicationConfigs {
  // Message storing the config for event delivery.
  message EventDeliveryConfig {
    // The delivery channel for the event notification; only a Pub/Sub topic is
    // supported now.
    // Example channel:
    // [//pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic]
    string channel = 1;

    // The expected delivery interval for the same event. The same event won't
    // be notified multiple times during this interval, even if it happens
    // multiple times during that period of time. The same event is
    // identified by <event_id, app_platform_metadata>.
    google.protobuf.Duration minimal_delivery_interval = 2;
  }

  // A list of nodes in the application graph.
  repeated Node nodes = 1;

  // Event-related configuration for this application.
  EventDeliveryConfig event_delivery_config = 3;
}
1398
// Message describing node object.
message Node {
  // Message describing one edge pointing into a node.
  message InputEdge {
    // The name of the parent node.
    string parent_node = 1;

    // The connected output artifact of the parent node.
    // It can be omitted if the target processor only has 1 output artifact.
    string parent_output_channel = 2;

    // The connected input channel of the current node's processor.
    // It can be omitted if the target processor only has 1 input channel.
    string connected_input_channel = 3;
  }

  oneof stream_output_config {
    // By default, the output of the node will only be available to downstream
    // nodes. To consume the direct output from the application node, the output
    // must be sent to Vision AI Streams at first.
    //
    // By setting output_all_output_channels_to_stream to true, App Platform
    // will automatically send all the outputs of the current node to Vision AI
    // Stream resources (one stream per output channel). The output stream
    // resource will be created by App Platform automatically during deployment
    // and deleted after application un-deployment.
    // Note that this config applies to all the Application Instances.
    //
    // The output stream can be overridden at the instance level by
    // configuring the `output_resources` section of the Instance resource.
    // `producer_node` should be the current node, `output_resource_binding`
    // should be the output channel name (or leave it blank if there is only 1
    // output channel of the processor) and `output_resource` should be the
    // target output stream.
    bool output_all_output_channels_to_stream = 6;
  }

  // Required. A unique name for the node.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // A user friendly display name for the node.
  string display_name = 2;

  // Node config.
  ProcessorConfig node_config = 3;

  // Processor name refers to the chosen processor resource.
  string processor = 4;

  // Parent node. An input node should not have a parent node. For V1
  // Alpha1/Beta only the media warehouse node can have multiple parents; other
  // types of nodes will only have one parent.
  repeated InputEdge parents = 5;
}
1453
// Message describing Draft object
message Draft {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Draft"
    pattern: "projects/{project}/locations/{location}/applications/{application}/drafts/{draft}"
    style: DECLARATIVE_FRIENDLY
  };

  // Name of the resource.
  string name = 1;

  // Output only. [Output only] Create timestamp
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Update timestamp
  google.protobuf.Timestamp update_time = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs
  map<string, string> labels = 3;

  // Required. A user friendly display name for the solution.
  string display_name = 4 [(google.api.field_behavior) = REQUIRED];

  // A description for this application.
  string description = 5;

  // The draft application configs which haven't been updated to an application.
  ApplicationConfigs draft_application_configs = 6;
}
1485
1486// Message describing Instance object
1487// Next ID: 12
1488message Instance {
1489  option (google.api.resource) = {
1490    type: "visionai.googleapis.com/Instance"
1491    pattern: "projects/{project}/locations/{location}/applications/{application}/instances/{instance}"
1492    style: DECLARATIVE_FRIENDLY
1493  };
1494
1495  // Message of input resource used in one application instance.
1496  message InputResource {
1497    // Required. Specify the input to the application instance.
1498    oneof input_resource_information {
1499      // The direct input resource name.
1500      // If the instance type is STREAMING_PREDICTION, the input resource is in
1501      // format of
1502      // "projects/123/locations/us-central1/clusters/456/streams/stream-a".
1503      // If the instance type is BATCH_PREDICTION from Cloud Storage input
1504      // container, the input resource is in format of "gs://bucket-a".
1505      string input_resource = 1;
1506
1507      // If the input resource is VisionAI Stream, the associated annotations
1508      // can be specified using annotated_stream instead.
1509      StreamWithAnnotation annotated_stream = 4 [deprecated = true];
1510    }
1511
1512    // Data type for the current input resource.
1513    DataType data_type = 6;
1514
1515    // The name of graph node who receives the input resource.
1516    // For example:
1517    // input_resource:
1518    // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/input-stream-a
1519    // consumer_node: stream-input
1520    string consumer_node = 2;
1521
1522    // The specific input resource binding which will consume the current Input
1523    // Resource, can be ignored is there is only 1 input binding.
1524    string input_resource_binding = 3;
1525
1526    // Contains resource annotations.
1527    ResourceAnnotations annotations = 5;
1528  }
1529
1530  // Message of output resource used in one application instance.
1531  message OutputResource {
1532    // The output resource name for the current application instance.
1533    string output_resource = 1;
1534
1535    // The name of graph node who produces the output resource name.
1536    // For example:
1537    // output_resource:
1538    // /projects/123/locations/us-central1/clusters/456/streams/output-application-789-stream-a-occupancy-counting
1539    // producer_node: occupancy-counting
1540    string producer_node = 2;
1541
1542    // The specific output resource binding which produces the current
1543    // OutputResource.
1544    string output_resource_binding = 4;
1545
1546    // Output only. Whether the output resource is temporary which means the
1547    // resource is generated during the deployment of the application. Temporary
1548    // resource will be deleted during the undeployment of the application.
1549    bool is_temporary = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
1550
1551    // Output only. Whether the output resource is created automatically by the
1552    // Vision AI App Platform.
1553    bool autogen = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
1554  }
1555
1556  // All the supported instance types.
1557  enum InstanceType {
1558    // Unspecified instance type.
1559    // If the instance type is not specified, the default one is
1560    // STREAMING_PREDICTION.
1561    INSTANCE_TYPE_UNSPECIFIED = 0;
1562
1563    // Instance type for streaming prediction.
1564    STREAMING_PREDICTION = 1;
1565
1566    // Instance type for batch prediction.
1567    BATCH_PREDICTION = 2;
1568
1569    // Instance type for online prediction.
1570    ONLINE_PREDICTION = 3;
1571  }
1572
  // State of the Instance.
  enum State {
    // The default value. This value is used if the state is omitted.
    STATE_UNSPECIFIED = 0;

    // The instance is being created.
    CREATING = 1;

    // The instance has been created (but is not yet deployed).
    CREATED = 2;

    // The instance is being deployed.
    DEPLOYING = 3;

    // The instance is deployed and running.
    DEPLOYED = 4;

    // The instance is being undeployed.
    UNDEPLOYING = 5;

    // The instance has been deleted.
    DELETED = 6;

    // The instance is in an error state.
    ERROR = 7;

    // The instance is being updated.
    UPDATING = 8;

    // The instance is being deleted.
    DELETING = 9;

    // The instance is being repaired.
    FIXING = 10;

    // The instance has finished (batch instances only — TODO confirm).
    FINISHED = 11;
  }
1611
  // Output only. Name of the resource.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Create timestamp.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Update timestamp.
  google.protobuf.Timestamp update_time = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs.
  map<string, string> labels = 3;

  // Required. A user friendly display name for the solution.
  string display_name = 4 [(google.api.field_behavior) = REQUIRED];

  // A description for this instance.
  string description = 5;

  // The instance type for the current instance.
  // Defaults to STREAMING_PREDICTION when unspecified (see InstanceType).
  InstanceType instance_type = 10;

  // The input resources for the current application instance.
  // For example:
  // input_resources:
  // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/stream-a
  repeated InputResource input_resources = 6;

  // All the output resources associated to one application instance.
  repeated OutputResource output_resources = 7;

  // State of the instance.
  State state = 9;
1646}
1647
// Message for creating an Instance.
message ApplicationInstance {
  // Required. The ID to assign to the new instance.
  string instance_id = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Instance instance = 2 [(google.api.field_behavior) = REQUIRED];
}
1656
// Message describing Processor object.
// Next ID: 19
message Processor {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Processor"
    pattern: "projects/{project}/locations/{location}/processors/{processor}"
    style: DECLARATIVE_FRIENDLY
  };

  // Type of the processor.
  enum ProcessorType {
    // Processor Type UNSPECIFIED.
    PROCESSOR_TYPE_UNSPECIFIED = 0;

    // Processor Type PRETRAINED.
    // Pretrained processor is developed by Vision AI App Platform with
    // state-of-the-art vision data processing functionality, like occupancy
    // counting or person blur. Pretrained processor is usually publicly
    // available.
    PRETRAINED = 1;

    // Processor Type CUSTOM.
    // Custom processors are specialized processors which are either uploaded by
    // customers or imported from other GCP platform (for example Vertex AI).
    // Custom processor is only visible to the creator.
    CUSTOM = 2;

    // Processor Type CONNECTOR.
    // Connector processors are special processors which perform I/O for the
    // application; they do not process the data but either deliver the data
    // to other processors or receive data from other processors.
    CONNECTOR = 3;
  }

  // Lifecycle state of the processor.
  enum ProcessorState {
    // Unspecified Processor state.
    PROCESSOR_STATE_UNSPECIFIED = 0;

    // Processor is being created (not ready for use).
    CREATING = 1;

    // Processor is created and ready for use.
    ACTIVE = 2;

    // Processor is being deleted (not ready for use).
    DELETING = 3;

    // Processor was deleted or its creation failed.
    FAILED = 4;
  }

  // Name of the resource.
  string name = 1;

  // Output only. Create timestamp.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Update timestamp.
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs.
  map<string, string> labels = 4;

  // Required. A user friendly display name for the processor.
  string display_name = 5 [(google.api.field_behavior) = REQUIRED];

  // Illustrative sentences for describing the functionality of the processor.
  string description = 10;

  // Output only. Processor Type.
  ProcessorType processor_type = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Model Type.
  ModelType model_type = 13;

  // Source info for customer created processor.
  CustomProcessorSourceInfo custom_processor_source_info = 7;

  // Output only. State of the Processor.
  ProcessorState state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The input / output specifications of a
  // processor; each type of processor has fixed input / output specs which
  // cannot be altered by customer.
  ProcessorIOSpec processor_io_spec = 11
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The corresponding configuration can be used in the Application
  // to customize the behavior of the processor.
  string configuration_typeurl = 14 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The stream annotation types supported by this processor.
  repeated StreamAnnotationType supported_annotation_types = 15
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Indicates if the processor supports post processing.
  bool supports_post_processing = 17;

  // Which instance types this processor supports; if empty, this default to
  // STREAMING_PREDICTION.
  repeated Instance.InstanceType supported_instance_types = 18;
}
1760
// Message describing the input / output specifications of a processor.
message ProcessorIOSpec {
  // Message for input channel specification.
  message GraphInputChannelSpec {
    // The name of the current input channel.
    string name = 1;

    // The data types of the current input channel.
    // When this field has more than 1 value, it means this input channel can be
    // connected to either of these different data types.
    DataType data_type = 2;

    // If specified, only those detailed data types can be connected to the
    // processor. For example, jpeg stream for MEDIA, or PredictionResult proto
    // for PROTO type. If unspecified, then any proto is accepted.
    repeated string accepted_data_type_uris = 5;

    // Whether the current input channel is required by the processor.
    // For example, for a processor with required video input and optional audio
    // input, if video input is missing, the application will be rejected while
    // the audio input can be missing as long as the video input exists.
    bool required = 3;

    // How many input edges can be connected to this input channel. 0 means
    // unlimited.
    int64 max_connection_allowed = 4;
  }

  // Message for output channel specification.
  message GraphOutputChannelSpec {
    // The name of the current output channel.
    string name = 1;

    // The data type of the current output channel.
    DataType data_type = 2;

    // The detailed data type uri of the current output channel
    // (presumably a proto type url, mirroring
    // GraphInputChannelSpec.accepted_data_type_uris — TODO confirm).
    string data_type_uri = 3;
  }

  // Message for instance resource channel specification.
  // External resources are virtual nodes which are not expressed in the
  // application graph. Each processor expresses its out-graph spec, so customer
  // is able to override the external sources or destinations of the processor.
  message InstanceResourceInputBindingSpec {
    oneof resource_type {
      // The configuration proto that includes the Googleapis resources. I.e.
      // type.googleapis.com/google.cloud.vision.v1.StreamWithAnnotation
      string config_type_uri = 2;

      // The direct type url of Googleapis resource. i.e.
      // type.googleapis.com/google.cloud.vision.v1.Asset
      string resource_type_uri = 3;
    }

    // Name of the input binding, unique within the processor.
    string name = 1;
  }

  // Message for instance resource output binding specification.
  message InstanceResourceOutputBindingSpec {
    // Name of the output binding, unique within the processor.
    string name = 1;

    // The resource type uri of the acceptable output resource.
    string resource_type_uri = 2;

    // Whether the output resource needs to be explicitly set in the instance.
    // If it is false, the processor will automatically generate it if required.
    bool explicit = 3;
  }

  // For processors with input_channel_specs, the processor must be explicitly
  // connected to another processor.
  repeated GraphInputChannelSpec graph_input_channel_specs = 3;

  // The output artifact specifications for the current processor.
  repeated GraphOutputChannelSpec graph_output_channel_specs = 4;

  // The input resource that needs to be fed from the application instance.
  repeated InstanceResourceInputBindingSpec
      instance_resource_input_binding_specs = 5;

  // The output resource that the processor will generate per instance.
  // Other than the explicitly listed output bindings here, all the processors'
  // GraphOutputChannels can be bound to stream resource. The bind name then is
  // the same as the GraphOutputChannel's name.
  repeated InstanceResourceOutputBindingSpec
      instance_resource_output_binding_specs = 6;
}
1849
// Describes the source info for a custom processor.
message CustomProcessorSourceInfo {
  // Message describes product recognizer artifact.
  message ProductRecognizerArtifact {
    // Required. Resource name of RetailProductRecognitionIndex.
    // Format is
    // 'projects/*/locations/*/retailCatalogs/*/retailProductRecognitionIndexes/*'
    string retail_product_recognition_index = 1
        [(google.api.field_behavior) = REQUIRED];

    // Optional. The resource name of embedding model hosted in Vertex AI
    // Platform.
    string vertex_model = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  message ModelSchema {
    // Cloud Storage location to a YAML file that defines the format of a single
    // instance used in prediction and explanation requests.
    GcsSource instances_schema = 1;

    // Cloud Storage location to a YAML file that defines the prediction and
    // explanation parameters.
    GcsSource parameters_schema = 2;

    // Cloud Storage location to a YAML file that defines the format of a single
    // prediction or explanation.
    GcsSource predictions_schema = 3;
  }

  // Source type of the imported custom processor.
  enum SourceType {
    // Source type unspecified.
    SOURCE_TYPE_UNSPECIFIED = 0;

    // Custom processors coming from Vertex AutoML product.
    VERTEX_AUTOML = 1;

    // Custom processors coming from general custom models from Vertex.
    VERTEX_CUSTOM = 2;

    // Source for Product Recognizer.
    PRODUCT_RECOGNIZER = 3;
  }

  // The path where App Platform loads the artifacts for the custom processor.
  oneof artifact_path {
    // The resource name of the original model hosted in the Vertex AI platform.
    string vertex_model = 2;

    // Artifact for product recognizer.
    ProductRecognizerArtifact product_recognizer_artifact = 3;
  }

  // The original product which holds the custom processor's functionality.
  SourceType source_type = 1;

  // Output only. Additional info related to the imported custom processor.
  // Data is filled in by app platform during the processor creation.
  map<string, string> additional_info = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Model schema files which specifies the signature of the model.
  // For VERTEX_CUSTOM models, instances schema is required.
  // If instances schema is not specified during the processor creation,
  // VisionAI Platform will try to get it from Vertex, if it doesn't exist, the
  // creation will fail.
  ModelSchema model_schema = 5;
}
1920
// Per-processor-type configuration. Exactly one of the oneof members is set,
// matching the type of the processor being configured.
// Next ID: 35
message ProcessorConfig {
  oneof processor_config {
    // Configs of stream input processor.
    VideoStreamInputConfig video_stream_input_config = 9;

    // Config of AI-enabled input devices.
    AIEnabledDevicesInputConfig ai_enabled_devices_input_config = 20;

    // Configs of media warehouse processor.
    MediaWarehouseConfig media_warehouse_config = 10;

    // Configs of person blur processor.
    PersonBlurConfig person_blur_config = 11;

    // Configs of occupancy count processor.
    OccupancyCountConfig occupancy_count_config = 12;

    // Configs of Person Vehicle Detection processor.
    PersonVehicleDetectionConfig person_vehicle_detection_config = 15;

    // Configs of Vertex AutoML vision processor.
    VertexAutoMLVisionConfig vertex_automl_vision_config = 13;

    // Configs of Vertex AutoML video processor.
    VertexAutoMLVideoConfig vertex_automl_video_config = 14;

    // Configs of Vertex Custom processor.
    VertexCustomConfig vertex_custom_config = 17;

    // Configs of General Object Detection processor.
    GeneralObjectDetectionConfig general_object_detection_config = 18;

    // Configs of BigQuery processor.
    BigQueryConfig big_query_config = 19;

    // Configs of Cloud Storage output processor.
    GcsOutputConfig gcs_output_config = 27;

    // Runtime configs of Product Recognizer processor.
    ProductRecognizerConfig product_recognizer_config = 21;

    // Configs of Personal Protective Equipment Detection processor.
    PersonalProtectiveEquipmentDetectionConfig
        personal_protective_equipment_detection_config = 22;

    // Runtime configs of Tag Recognizer processor.
    TagRecognizerConfig tag_recognizer_config = 25;

    // Runtime configs of UniversalInput processor.
    UniversalInputConfig universal_input_config = 28;
  }

  // Experimental configurations. Structured object containing not-yet-stable
  // processor parameters.
  google.protobuf.Struct experimental_config = 26;
}
1978
// Message describing Vision AI stream with application specific annotations.
// All the StreamAnnotation objects inside this message MUST have unique ids.
message StreamWithAnnotation {
  // Message describing annotations specific to an application node.
  message NodeAnnotation {
    // The node name of the application graph.
    string node = 1;

    // The node specific stream annotations.
    repeated StreamAnnotation annotations = 2;
  }

  // Vision AI Stream resource name.
  string stream = 1 [
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Stream" }
  ];

  // Annotations that will be applied to the whole application.
  repeated StreamAnnotation application_annotations = 2;

  // Annotations that will be applied to the specific node of the application.
  // If the same type of the annotations is applied to both application and
  // node, the node annotation will be added in addition to the global
  // application one.
  // For example, if there is one active zone annotation for the whole
  // application and one active zone annotation for the Occupancy Analytic
  // processor, then the Occupancy Analytic processor will have two active zones
  // defined.
  repeated NodeAnnotation node_annotations = 3;
}
2009
// Message describing annotations specific to an application node.
// This message is a duplication of StreamWithAnnotation.NodeAnnotation.
message ApplicationNodeAnnotation {
  // The node name of the application graph.
  string node = 1;

  // The node specific stream annotations.
  repeated StreamAnnotation annotations = 2;
}
2019
// Message describing general annotation for resources.
message ResourceAnnotations {
  // Annotations that will be applied to the whole application.
  repeated StreamAnnotation application_annotations = 1;

  // Annotations that will be applied to the specific node of the application.
  // If the same type of the annotations is applied to both application and
  // node, the node annotation will be added in addition to the global
  // application one.
  // For example, if there is one active zone annotation for the whole
  // application and one active zone annotation for the Occupancy Analytic
  // processor, then the Occupancy Analytic processor will have two active zones
  // defined.
  repeated ApplicationNodeAnnotation node_annotations = 2;
}
2035
// Message describing Video Stream Input Config.
// This message should only be used as a placeholder for builtin:stream-input
// processor, actual stream binding should be specified using corresponding
// API.
message VideoStreamInputConfig {
  // Deprecated: stream binding should be specified via the corresponding API
  // (see message comment), not through this field.
  repeated string streams = 1 [deprecated = true];

  // Deprecated: stream binding should be specified via the corresponding API
  // (see message comment), not through this field.
  repeated StreamWithAnnotation streams_with_annotation = 2 [deprecated = true];
}
2045
// Message describing AI-enabled Devices Input Config.
// Currently carries no options.
message AIEnabledDevicesInputConfig {}
2048
// Message describing MediaWarehouseConfig.
message MediaWarehouseConfig {
  // Resource name of the Media Warehouse corpus.
  // Format:
  // projects/${project_id}/locations/${location_id}/corpora/${corpus_id}
  string corpus = 1;

  // Deprecated; no longer used.
  string region = 2 [deprecated = true];

  // The duration for which all media assets, associated metadata, and search
  // documents can exist.
  google.protobuf.Duration ttl = 3;
}
2063
// Message describing PersonBlurConfig.
message PersonBlurConfig {
  // Type of Person Blur.
  enum PersonBlurType {
    // PersonBlur Type UNSPECIFIED.
    PERSON_BLUR_TYPE_UNSPECIFIED = 0;

    // Blur by full occlusion of the region.
    // NOTE(review): value name misspells "OCCLUSION" but must be kept as-is
    // for wire/codegen compatibility.
    FULL_OCCULUSION = 1;

    // Blur by applying a blur filter to the region.
    BLUR_FILTER = 2;
  }

  // Person blur type.
  PersonBlurType person_blur_type = 1;

  // Whether to blur only faces rather than the whole object in the processor.
  bool faces_only = 2;
}
2084
// Message describing OccupancyCountConfig.
message OccupancyCountConfig {
  // Whether to count the appearances of people, output counts have 'people' as
  // the key.
  bool enable_people_counting = 1;

  // Whether to count the appearances of vehicles, output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;

  // Whether to track each individual object's loitering time inside the scene
  // or specific zone.
  bool enable_dwelling_time_tracking = 3;
}
2099
// Message describing PersonVehicleDetectionConfig.
message PersonVehicleDetectionConfig {
  // At least one of enable_people_counting and enable_vehicle_counting fields
  // must be set to true.
  // Whether to count the appearances of people, output counts have 'people' as
  // the key.
  bool enable_people_counting = 1;

  // Whether to count the appearances of vehicles, output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;
}
2112
// Message describing PersonalProtectiveEquipmentDetectionConfig.
message PersonalProtectiveEquipmentDetectionConfig {
  // Whether to enable face coverage detection.
  bool enable_face_coverage_detection = 1;

  // Whether to enable head coverage detection.
  bool enable_head_coverage_detection = 2;

  // Whether to enable hands coverage detection.
  bool enable_hands_coverage_detection = 3;
}
2124
// Message of configurations for General Object Detection processor.
// Currently carries no options.
message GeneralObjectDetectionConfig {}
2127
// Message of configurations for BigQuery processor.
message BigQueryConfig {
  // BigQuery table resource for Vision AI Platform to ingest annotations to.
  string table = 1;

  // Data Schema
  // By default, Vision AI Application will try to write annotations to the
  // target BigQuery table using the following schema:
  //
  // ingestion_time: TIMESTAMP, the ingestion time of the original data.
  //
  // application: STRING, name of the application which produces the annotation.
  //
  // instance: STRING, Id of the instance which produces the annotation.
  //
  // node: STRING, name of the application graph node which produces the
  // annotation.
  //
  // annotation: STRING or JSON, the actual annotation protobuf will be
  // converted to json string with bytes field as base64-encoded string. It can
  // be written to both String or Json type column.
  //
  // To forward annotation data to an existing BigQuery table, customer needs to
  // make sure the compatibility of the schema.
  // The map maps application node name to its corresponding cloud function
  // endpoint to transform the annotations directly to the
  // google.cloud.bigquery.storage.v1.AppendRowsRequest (only avro_rows or
  // proto_rows should be set). If configured, annotations produced by the
  // corresponding application node will be sent to the Cloud Function first
  // before being forwarded to BigQuery.
  //
  // If the default table schema doesn't fit, customer is able to transform the
  // annotation output from Vision AI Application to arbitrary BigQuery table
  // schema with CloudFunction.
  // * The cloud function will receive AppPlatformCloudFunctionRequest where
  // the annotations field will be the json format of Vision AI annotation.
  // * The cloud function should return AppPlatformCloudFunctionResponse with
  // AppendRowsRequest stored in the annotations field.
  // * To drop the annotation, simply clear the annotations field in the
  // returned AppPlatformCloudFunctionResponse.
  map<string, string> cloud_function_mapping = 2;

  // If true, App Platform will create the BigQuery DataSet and the
  // BigQuery Table with default schema if the specified table doesn't exist.
  // This doesn't work if any cloud function customized schema is specified
  // since the system doesn't know your desired schema.
  // JSON column will be used in the default table created by App Platform.
  bool create_default_table_if_not_exists = 3;
}
2177
// Message of configurations of Vertex AutoML Vision Processors.
message VertexAutoMLVisionConfig {
  // Only entities with higher score than the threshold will be returned.
  // Value 0.0 means to return all the detected entities.
  float confidence_threshold = 1;

  // At most this many predictions will be returned per output frame.
  // Value 0 means to return all the detected entities.
  int32 max_predictions = 2;
}
2188
// Message describing VertexAutoMLVideoConfig.
message VertexAutoMLVideoConfig {
  // Only entities with higher score than the threshold will be returned.
  // Value 0.0 means to return all the detected entities.
  float confidence_threshold = 1;

  // Labels specified in this field won't be returned.
  repeated string blocked_labels = 2;

  // At most this many predictions will be returned per output frame.
  // Value 0 means to return all the detected entities.
  int32 max_predictions = 3;

  // Only Bounding Boxes whose size is larger than this limit will be returned.
  // Object Tracking only.
  // Value 0.0 means to return all the detected entities.
  float bounding_box_size_limit = 4;
}
2207
// Message describing VertexCustomConfig.
message VertexCustomConfig {
  // The max prediction frame per second. This attribute sets how fast the
  // operator sends prediction requests to Vertex AI endpoint. Default value is
  // 0, which means there is no max prediction fps limit. The operator sends
  // prediction requests at input fps.
  int32 max_prediction_fps = 1;

  // A description of resources that are dedicated to the DeployedModel, and
  // that need a higher degree of manual configuration.
  DedicatedResources dedicated_resources = 2;

  // If not empty, the prediction result will be sent to the specified cloud
  // function for post processing.
  // * The cloud function will receive AppPlatformCloudFunctionRequest where
  // the annotations field will be the json format of proto PredictResponse.
  // * The cloud function should return AppPlatformCloudFunctionResponse with
  // PredictResponse stored in the annotations field.
  // * To drop the prediction output, simply clear the payload field in the
  // returned AppPlatformCloudFunctionResponse.
  string post_processing_cloud_function = 3;

  // If true, the prediction request received by custom model will also contain
  // metadata with the following schema:
  // 'appPlatformMetadata': {
  //       'ingestionTime': DOUBLE; (UNIX timestamp)
  //       'application': STRING;
  //       'instanceId': STRING;
  //       'node': STRING;
  //       'processor': STRING;
  //  }
  bool attach_application_metadata = 4;

  // Optional. By setting the configuration_input_topic, processor will
  // subscribe to the given topic; only pub/sub topic is supported now. Example
  // channel:
  // //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
  // message schema should be:
  // message Message {
  // // The ID of the stream that associates with the application instance.
  // string stream_id = 1;
  // // The target fps. By default, the custom processor will *not* send any
  // data to the Vertex Prediction container. Note that once the
  // dynamic_config_input_topic is set, max_prediction_fps will not work and be
  // preceded by the fps set inside the topic.
  // int32 fps = 2;
  // }
  optional string dynamic_config_input_topic = 6
      [(google.api.field_behavior) = OPTIONAL];
}
2258
// Message describing GcsOutputConfig.
message GcsOutputConfig {
  // The Cloud Storage path for Vision AI Platform to ingest annotations to.
  string gcs_path = 1;
}
2264
// Message describing UniversalInputConfig.
// Currently carries no options.
message UniversalInputConfig {}
2267
// Specification of a single machine.
message MachineSpec {
  // Immutable. The type of the machine.
  //
  // See the [list of machine types supported for
  // prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
  //
  // See the [list of machine types supported for custom
  // training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
  //
  // For [DeployedModel][] this field is optional, and the default
  // value is `n1-standard-2`. For [BatchPredictionJob][] or as part of
  // [WorkerPoolSpec][] this field is required.
  string machine_type = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. The type of accelerator(s) that may be attached to the machine
  // as per
  // [accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count].
  AcceleratorType accelerator_type = 2
      [(google.api.field_behavior) = IMMUTABLE];

  // The number of accelerators to attach to the machine.
  int32 accelerator_count = 3;
}
2292
// The metric specification that defines the target resource utilization
// (CPU utilization, accelerator's duty cycle, and so on) for calculating the
// desired replica count.
message AutoscalingMetricSpec {
  // Required. The resource metric name.
  // Supported metrics:
  //
  // * For Online Prediction:
  // * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
  // * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
  string metric_name = 1 [(google.api.field_behavior) = REQUIRED];

  // The target resource utilization in percentage (1% - 100%) for the given
  // metric; once the real usage deviates from the target by a certain
  // percentage, the machine replicas change. The default value is 60
  // (representing 60%) if not provided.
  int32 target = 2;
}
2311
// A description of resources that are dedicated to a DeployedModel, and
// that need a higher degree of manual configuration.
message DedicatedResources {
  // Required. Immutable. The specification of a single machine used by the
  // prediction.
  MachineSpec machine_spec = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Required. Immutable. The minimum number of machine replicas this
  // DeployedModel will be always deployed on. This value must be greater than
  // or equal to 1.
  //
  // If traffic against the DeployedModel increases, it may dynamically be
  // deployed onto more replicas, and as traffic decreases, some of these extra
  // replicas may be freed.
  int32 min_replica_count = 2 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Immutable. The maximum number of replicas this DeployedModel may be
  // deployed on when the traffic against it increases. If the requested value
  // is too large, the deployment will error, but if deployment succeeds then
  // the ability to scale the model to that many replicas is guaranteed (barring
  // service outages). If traffic against the DeployedModel increases beyond
  // what its replicas at maximum may handle, a portion of the traffic will be
  // dropped. If this value is not provided, will use
  // [min_replica_count][google.cloud.visionai.v1.DedicatedResources.min_replica_count]
  // as the default value.
  //
  // The value of this field impacts the charge against Vertex CPU and GPU
  // quotas. Specifically, you will be charged for (max_replica_count *
  // number of cores in the selected machine type) and (max_replica_count *
  // number of GPUs per replica in the selected machine type).
  int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. The metric specifications that overrides a resource
  // utilization metric (CPU utilization, accelerator's duty cycle, and so on)
  // target value (default to 60 if not set). At most one entry is allowed per
  // metric.
  //
  // If
  // [machine_spec.accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count]
  // is above 0, the autoscaling will be based on both CPU utilization and
  // accelerator's duty cycle metrics and scale up when either metrics exceeds
  // its target value while scale down if both metrics are under their target
  // value. The default target value is 60 for both metrics.
  //
  // If
  // [machine_spec.accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count]
  // is 0, the autoscaling will be based on CPU utilization metric only with
  // default target value 60 if not explicitly set.
  //
  // For example, in the case of Online Prediction, if you want to override
  // target CPU utilization to 80, you should set
  // [autoscaling_metric_specs.metric_name][google.cloud.visionai.v1.AutoscalingMetricSpec.metric_name]
  // to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
  // [autoscaling_metric_specs.target][google.cloud.visionai.v1.AutoscalingMetricSpec.target]
  // to `80`.
  repeated AutoscalingMetricSpec autoscaling_metric_specs = 4
      [(google.api.field_behavior) = IMMUTABLE];
}
2376
// Configuration for the product recognizer.
message ProductRecognizerConfig {
  // The resource name of the retail endpoint to use for product recognition.
  string retail_endpoint = 1;

  // Confidence threshold used to filter detection results. If not set, a
  // system default value will be used.
  float recognition_confidence_threshold = 2;
}
2386
// Configuration for the tag recognizer.
message TagRecognizerConfig {
  // Confidence threshold used to filter tag entity detection results. If not
  // set, a system default value will be used.
  float entity_detection_confidence_threshold = 1;

  // Configuration that customizes how detected tags are parsed.
  TagParsingConfig tag_parsing_config = 2;
}
2396
// Configuration for tag parsing.
message TagParsingConfig {
  // Configuration for parsing one tag entity class.
  message EntityParsingConfig {
    // Strategy for matching detected text against a tag entity.
    enum EntityMatchingStrategy {
      // If unspecified, multi-line matching will be used by default.
      ENTITY_MATCHING_STRATEGY_UNSPECIFIED = 0;

      // Matches multiple lines of text.
      MULTI_LINE_MATCHING = 1;

      // Matches the single line of text that has the maximum overlap area
      // with the entity bounding box.
      MAX_OVERLAP_AREA = 2;
    }

    // Required. The tag entity class name. This should match the class name
    // produced by the tag entity detection model.
    string entity_class = 1 [(google.api.field_behavior) = REQUIRED];

    // Optional. A regular expression hint.
    string regex = 2 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Entity matching strategy.
    EntityMatchingStrategy entity_matching_strategy = 3
        [(google.api.field_behavior) = OPTIONAL];
  }

  // Each tag entity class may have an optional EntityParsingConfig which is
  // used to help parse the entities of the class.
  repeated EntityParsingConfig entity_parsing_configs = 1;
}
2430