// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1;

import "google/api/field_behavior.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "SafetyProto";
option java_package = "com.google.ai.generativelanguage.v1";

// The category of a rating.
//
// These categories cover various kinds of harms that developers
// may wish to adjust.
enum HarmCategory {
  // Category is unspecified.
  HARM_CATEGORY_UNSPECIFIED = 0;

  // Negative or harmful comments targeting identity and/or protected
  // attributes.
  HARM_CATEGORY_DEROGATORY = 1;

  // Content that is rude, disrespectful, or profane.
  HARM_CATEGORY_TOXICITY = 2;

  // Describes scenarios depicting violence against an individual or group, or
  // general descriptions of gore.
  HARM_CATEGORY_VIOLENCE = 3;

  // Contains references to sexual acts or other lewd content.
  HARM_CATEGORY_SEXUAL = 4;

  // Promotes unchecked medical advice.
  HARM_CATEGORY_MEDICAL = 5;

  // Dangerous content that promotes, facilitates, or encourages harmful acts.
  HARM_CATEGORY_DANGEROUS = 6;

  // Harassment content.
  HARM_CATEGORY_HARASSMENT = 7;

  // Hate speech and content.
  HARM_CATEGORY_HATE_SPEECH = 8;

  // Sexually explicit content.
  HARM_CATEGORY_SEXUALLY_EXPLICIT = 9;

  // Dangerous content.
  HARM_CATEGORY_DANGEROUS_CONTENT = 10;
}

// Safety rating for a piece of content.
//
// The safety rating contains the category of harm and the
// harm probability level in that category for a piece of content.
// Content is classified for safety across a number of
// harm categories and the probability of the harm classification is included
// here.
message SafetyRating {
  // The probability that a piece of content is harmful.
  //
  // The classification system gives the probability of the content being
  // unsafe. This does not indicate the severity of harm for a piece of content.
  enum HarmProbability {
    // Probability is unspecified.
    HARM_PROBABILITY_UNSPECIFIED = 0;

    // Content has a negligible chance of being unsafe.
    NEGLIGIBLE = 1;

    // Content has a low chance of being unsafe.
    LOW = 2;

    // Content has a medium chance of being unsafe.
    MEDIUM = 3;

    // Content has a high chance of being unsafe.
    HIGH = 4;
  }

  // Required. The category for this rating.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. The probability of harm for this content.
  HarmProbability probability = 4 [(google.api.field_behavior) = REQUIRED];

  // Was this content blocked because of this rating?
  bool blocked = 5;
}
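
// Illustrative example (not part of the API surface): a SafetyRating in
// textproto form, as it might appear for content flagged under the harassment
// category. The specific values shown are hypothetical.
//
//   category: HARM_CATEGORY_HARASSMENT
//   probability: MEDIUM
//   blocked: false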

// Safety setting, affecting the safety-blocking behavior.
//
// Passing a safety setting for a category changes the allowed probability that
// content is blocked.
message SafetySetting {
  // Block at and beyond a specified harm probability.
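  //
  // For example, BLOCK_MEDIUM_AND_ABOVE allows content rated NEGLIGIBLE or
  // LOW and blocks content rated MEDIUM or HIGH.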
  enum HarmBlockThreshold {
    // Threshold is unspecified.
    HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0;

    // Content with NEGLIGIBLE will be allowed.
    BLOCK_LOW_AND_ABOVE = 1;

    // Content with NEGLIGIBLE and LOW will be allowed.
    BLOCK_MEDIUM_AND_ABOVE = 2;

    // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
    BLOCK_ONLY_HIGH = 3;

    // All content will be allowed.
    BLOCK_NONE = 4;
  }

  // Required. The category for this setting.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. Controls the probability threshold at which harm is blocked.
  HarmBlockThreshold threshold = 4 [(google.api.field_behavior) = REQUIRED];
}
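
// Illustrative example (not part of the API surface): a SafetySetting in
// textproto form that would block harassment content rated MEDIUM or above.
// The pairing of category and threshold shown here is hypothetical.
//
//   category: HARM_CATEGORY_HARASSMENT
//   threshold: BLOCK_MEDIUM_AND_ABOVE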