// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1beta3;

import "google/api/field_behavior.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1beta3/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "SafetyProto";
option java_package = "com.google.ai.generativelanguage.v1beta3";

// The category of a rating.
//
// These categories cover various kinds of harms that developers
// may wish to adjust.
enum HarmCategory {
  // Category is unspecified.
  HARM_CATEGORY_UNSPECIFIED = 0;

  // Negative or harmful comments targeting identity and/or protected attribute.
  HARM_CATEGORY_DEROGATORY = 1;

  // Content that is rude, disrespectful, or profane.
  HARM_CATEGORY_TOXICITY = 2;

  // Describes scenarios depicting violence against an individual or group, or
  // general descriptions of gore.
  HARM_CATEGORY_VIOLENCE = 3;

  // Contains references to sexual acts or other lewd content.
  HARM_CATEGORY_SEXUAL = 4;

  // Promotes unchecked medical advice.
  HARM_CATEGORY_MEDICAL = 5;

  // Dangerous content that promotes, facilitates, or encourages harmful acts.
  HARM_CATEGORY_DANGEROUS = 6;
}

// Content filtering metadata associated with processing a single request.
//
// ContentFilter contains a reason and an optional supporting string. The reason
// may be unspecified.
message ContentFilter {
  // A list of reasons why content may have been blocked.
  enum BlockedReason {
    // A blocked reason was not specified.
    BLOCKED_REASON_UNSPECIFIED = 0;

    // Content was blocked by safety settings.
    SAFETY = 1;

    // Content was blocked, but the reason is uncategorized.
    OTHER = 2;
  }

  // The reason content was blocked during request processing.
  BlockedReason reason = 1;

  // A string that describes the filtering behavior in more detail.
  optional string message = 2;
}
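
// Illustrative example (not part of the API definition): a ContentFilter for
// a request blocked by safety settings might look like this in proto text
// format; the message string below is hypothetical:
//
//   reason: SAFETY
//   message: "The prompt was blocked by safety settings."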

// Safety feedback for an entire request.
//
// This field is populated if content in the input and/or response is blocked
// due to safety settings. SafetyFeedback may not exist for every HarmCategory.
// Each SafetyFeedback will return the safety settings used by the request as
// well as the lowest HarmProbability that should be allowed in order to return
// a result.
message SafetyFeedback {
  // Safety rating evaluated from content.
  SafetyRating rating = 1;

  // Safety settings applied to the request.
  SafetySetting setting = 2;
}
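
// Illustrative example (not part of the API definition): feedback for a
// request blocked on the dangerous-content category might look like this in
// proto text format (SafetyRating and SafetySetting are defined below):
//
//   rating { category: HARM_CATEGORY_DANGEROUS probability: MEDIUM }
//   setting { category: HARM_CATEGORY_DANGEROUS threshold: BLOCK_MEDIUM_AND_ABOVE }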

// Safety rating for a piece of content.
//
// The safety rating contains the category of harm and the
// harm probability level in that category for a piece of content.
// Content is classified for safety across a number of
// harm categories and the probability of the harm classification is included
// here.
message SafetyRating {
  // The probability that a piece of content is harmful.
  //
  // The classification system gives the probability of the content being
  // unsafe. This does not indicate the severity of harm for a piece of content.
  enum HarmProbability {
    // Probability is unspecified.
    HARM_PROBABILITY_UNSPECIFIED = 0;

    // Content has a negligible chance of being unsafe.
    NEGLIGIBLE = 1;

    // Content has a low chance of being unsafe.
    LOW = 2;

    // Content has a medium chance of being unsafe.
    MEDIUM = 3;

    // Content has a high chance of being unsafe.
    HIGH = 4;
  }

  // Required. The category for this rating.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. The probability of harm for this content.
  HarmProbability probability = 4 [(google.api.field_behavior) = REQUIRED];
}
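
// Illustrative example (not part of the API definition): a rating indicating
// a medium probability that content is toxic, in proto text format:
//
//   category: HARM_CATEGORY_TOXICITY
//   probability: MEDIUM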

// Safety setting, affecting the safety-blocking behavior.
//
// Passing a safety setting for a category changes the allowed probability that
// content is blocked.
message SafetySetting {
  // Block at and beyond a specified harm probability.
  enum HarmBlockThreshold {
    // Threshold is unspecified.
    HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0;

    // Content with NEGLIGIBLE will be allowed.
    BLOCK_LOW_AND_ABOVE = 1;

    // Content with NEGLIGIBLE and LOW will be allowed.
    BLOCK_MEDIUM_AND_ABOVE = 2;

    // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
    BLOCK_ONLY_HIGH = 3;

    // All content will be allowed.
    BLOCK_NONE = 4;
  }

  // Required. The category for this setting.
  HarmCategory category = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. Controls the probability threshold at which harm is blocked.
  HarmBlockThreshold threshold = 4 [(google.api.field_behavior) = REQUIRED];
}
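
// Illustrative example (not part of the API definition): a setting that asks
// the service to block content rated MEDIUM or HIGH for the toxicity
// category, in proto text format:
//
//   category: HARM_CATEGORY_TOXICITY
//   threshold: BLOCK_MEDIUM_AND_ABOVE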