// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1beta;

import "google/api/field_behavior.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1beta/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "SafetyProto";
option java_package = "com.google.ai.generativelanguage.v1beta";

// The category of a rating.
//
// These categories cover various kinds of harms that developers
// may wish to adjust.
enum HarmCategory {
  // Category is unspecified.
  HARM_CATEGORY_UNSPECIFIED = 0;

  // Negative or harmful comments targeting identity and/or protected
  // attributes.
  HARM_CATEGORY_DEROGATORY = 1;

  // Content that is rude, disrespectful, or profane.
  HARM_CATEGORY_TOXICITY = 2;

  // Describes scenarios depicting violence against an individual or group, or
  // general descriptions of gore.
  HARM_CATEGORY_VIOLENCE = 3;

  // Contains references to sexual acts or other lewd content.
  HARM_CATEGORY_SEXUAL = 4;

  // Promotes unchecked medical advice.
  HARM_CATEGORY_MEDICAL = 5;

  // Dangerous content that promotes, facilitates, or encourages harmful acts.
  HARM_CATEGORY_DANGEROUS = 6;

  // Harassment content.
  HARM_CATEGORY_HARASSMENT = 7;

  // Hate speech and content.
  HARM_CATEGORY_HATE_SPEECH = 8;

  // Sexually explicit content.
  HARM_CATEGORY_SEXUALLY_EXPLICIT = 9;

  // Dangerous content.
  HARM_CATEGORY_DANGEROUS_CONTENT = 10;
}

// Content filtering metadata associated with processing a single request.
//
// ContentFilter contains a reason and an optional supporting string. The
// reason may be unspecified.
message ContentFilter {
  // A list of reasons why content may have been blocked.
  enum BlockedReason {
    // A blocked reason was not specified.
    BLOCKED_REASON_UNSPECIFIED = 0;

    // Content was blocked by safety settings.
    SAFETY = 1;

    // Content was blocked, but the reason is uncategorized.
    OTHER = 2;
  }

  // The reason content was blocked during request processing.
  BlockedReason reason = 1;

  // A string that describes the filtering behavior in more detail.
  optional string message = 2;
}
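
// Illustrative sketch (not part of the API definition): a ContentFilter for a
// safety block, written in proto text format; the message string below is a
// hypothetical example.
//
//   reason: SAFETY
//   message: "Blocked by safety settings."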

// Safety feedback for an entire request.
//
// This field is populated if content in the input and/or response is blocked
// due to safety settings. SafetyFeedback may not exist for every HarmCategory.
// Each SafetyFeedback will return the safety settings used by the request as
// well as the lowest HarmProbability that should be allowed in order to return
// a result.
message SafetyFeedback {
  // Safety rating evaluated from content.
  SafetyRating rating = 1;

  // Safety settings applied to the request.
  SafetySetting setting = 2;
}
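
// Illustrative sketch (not part of the API definition): a SafetyFeedback
// entry in proto text format for a request blocked on the harassment
// category; the values are hypothetical.
//
//   rating {
//     category: HARM_CATEGORY_HARASSMENT
//     probability: HIGH
//     blocked: true
//   }
//   setting {
//     category: HARM_CATEGORY_HARASSMENT
//     threshold: BLOCK_MEDIUM_AND_ABOVE
//   }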

// Safety rating for a piece of content.
//
// The safety rating contains the category of harm and the
// harm probability level in that category for a piece of content.
// Content is classified for safety across a number of
// harm categories and the probability of the harm classification is included
// here.
message SafetyRating {
  // The probability that a piece of content is harmful.
  //
  // The classification system gives the probability of the content being
  // unsafe. This does not indicate the severity of harm for a piece of
  // content.
  enum HarmProbability {
    // Probability is unspecified.
    HARM_PROBABILITY_UNSPECIFIED = 0;

    // Content has a negligible chance of being unsafe.
    NEGLIGIBLE = 1;

    // Content has a low chance of being unsafe.
    LOW = 2;

    // Content has a medium chance of being unsafe.
    MEDIUM = 3;

    // Content has a high chance of being unsafe.
    HIGH = 4;
  }

  // Required. The category for this rating.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. The probability of harm for this content.
  HarmProbability probability = 4 [(google.api.field_behavior) = REQUIRED];

  // Was this content blocked because of this rating?
  bool blocked = 5;
}
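
// Illustrative sketch (not part of the API definition): a SafetyRating in
// proto text format for content judged very unlikely to be harassing; the
// values are hypothetical.
//
//   category: HARM_CATEGORY_HARASSMENT
//   probability: NEGLIGIBLE
//   blocked: false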

// Safety setting, affecting the safety-blocking behavior.
//
// Passing a safety setting for a category changes the allowed probability that
// content is blocked.
message SafetySetting {
  // Block at and beyond a specified harm probability.
  enum HarmBlockThreshold {
    // Threshold is unspecified.
    HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0;

    // Content with NEGLIGIBLE will be allowed.
    BLOCK_LOW_AND_ABOVE = 1;

    // Content with NEGLIGIBLE and LOW will be allowed.
    BLOCK_MEDIUM_AND_ABOVE = 2;

    // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
    BLOCK_ONLY_HIGH = 3;

    // All content will be allowed.
    BLOCK_NONE = 4;
  }

  // Required. The category for this setting.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. Controls the probability threshold at which harm is blocked.
  HarmBlockThreshold threshold = 4 [(google.api.field_behavior) = REQUIRED];
}
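
// Illustrative sketch (not part of the API definition): a SafetySetting in
// proto text format that blocks a category only at the highest probability
// level; the values are hypothetical.
//
//   category: HARM_CATEGORY_DANGEROUS_CONTENT
//   threshold: BLOCK_ONLY_HIGH
//
// With BLOCK_ONLY_HIGH, content rated NEGLIGIBLE, LOW, or MEDIUM for this
// category is allowed and only HIGH is blocked, per the enum comments above.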