explanation_metadata.proto

// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1;

import "google/api/field_behavior.proto";
import "google/protobuf/struct.proto";
import "google/api/annotations.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "ExplanationMetadataProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";

// Metadata describing the Model's input and output for explanation.
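//
// For example (an illustrative configuration; the feature names, output name,
// baseline values and display names below are assumptions, not API defaults):
// metadata for a tabular classifier with two numeric features and one score
// output could look like the following in proto text format:
// ```
// inputs {
//   key: "age"
//   value { input_baselines { number_value: 35.0 } }
// }
// inputs {
//   key: "income"
//   value { input_baselines { number_value: 50000.0 } }
// }
// outputs {
//   key: "scores"
//   value {
//     index_display_name_mapping {
//       list_value {
//         values { string_value: "approved" }
//         values { string_value: "rejected" }
//       }
//     }
//   }
// }
// ```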
message ExplanationMetadata {
  // Metadata of the input of a feature.
  //
  // Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
  // for Models that are using Vertex AI-provided images for Tensorflow.
  message InputMetadata {
    // Domain details of the input feature value. Provides numeric information
    // about the feature, such as its range (min, max). If the feature has been
    // pre-processed, for example with z-scoring, then it provides information
    // about how to recover the original feature. For example, if the input
    // feature is an image and it has been pre-processed to obtain 0-mean and
    // stddev = 1 values, then original_mean and original_stddev refer to the
    // mean and stddev of the original feature (e.g. image tensor) from which
    // the input feature (with mean = 0 and stddev = 1) was obtained.
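    //
    // For example (illustrative values only, not defaults): a feature that was
    // z-scored from an original distribution with mean 170 and stddev 10 could
    // be described as
    // ```
    // feature_value_domain { original_mean: 170.0 original_stddev: 10.0 }
    // ```
    // so that an original value can be recovered as
    // original = normalized * original_stddev + original_mean.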
    message FeatureValueDomain {
      // The minimum permissible value for this feature.
      float min_value = 1;

      // The maximum permissible value for this feature.
      float max_value = 2;

      // If this input feature has been normalized to a mean value of 0,
      // the original_mean specifies the mean value of the domain prior to
      // normalization.
      float original_mean = 3;

      // If this input feature has been normalized to a standard deviation of
      // 1.0, the original_stddev specifies the standard deviation of the domain
      // prior to normalization.
      float original_stddev = 4;
    }

    // Visualization configurations for image explanation.
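    //
    // For example (an illustrative configuration, not API defaults): an
    // Integrated Gradients visualization that outlines the most influential
    // regions, keeps attributions between the 70th and 99.9th percentile, and
    // overlays them on a grayscale version of the original image could look
    // like
    // ```
    // visualization {
    //   type: OUTLINES
    //   polarity: POSITIVE
    //   color_map: PINK_GREEN
    //   clip_percent_lowerbound: 70.0
    //   clip_percent_upperbound: 99.9
    //   overlay_type: GRAYSCALE
    // }
    // ```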
    message Visualization {
      // Type of the image visualization. Only applicable to [Integrated
      // Gradients attribution]
      // [ExplanationParameters.integrated_gradients_attribution].
      enum Type {
        // Should not be used.
        TYPE_UNSPECIFIED = 0;

        // Shows which pixel contributed to the image prediction.
        PIXELS = 1;

        // Shows which region contributed to the image prediction by outlining
        // the region.
        OUTLINES = 2;
      }

      // Whether to only highlight pixels with positive contributions, negative
      // or both. Defaults to POSITIVE.
      enum Polarity {
        // Default value. This is the same as POSITIVE.
        POLARITY_UNSPECIFIED = 0;

        // Highlights the pixels/outlines that were most influential to the
        // model's prediction.
        POSITIVE = 1;

        // Setting polarity to negative highlights areas that do not lead to
        // the model's current prediction.
        NEGATIVE = 2;

        // Shows both positive and negative attributions.
        BOTH = 3;
      }

      // The color scheme used for highlighting areas.
      enum ColorMap {
        // Should not be used.
        COLOR_MAP_UNSPECIFIED = 0;

        // Positive: green. Negative: pink.
        PINK_GREEN = 1;

        // Viridis color map: A perceptually uniform color mapping which is
        // easier to see by those with colorblindness and progresses from yellow
        // to green to blue. Positive: yellow. Negative: blue.
        VIRIDIS = 2;

        // Positive: red. Negative: red.
        RED = 3;

        // Positive: green. Negative: green.
        GREEN = 4;

        // Positive: green. Negative: red.
        RED_GREEN = 6;

        // PiYG palette.
        PINK_WHITE_GREEN = 5;
      }

      // How the original image is displayed in the visualization.
      enum OverlayType {
        // Default value. This is the same as NONE.
        OVERLAY_TYPE_UNSPECIFIED = 0;

        // No overlay.
        NONE = 1;

        // The attributions are shown on top of the original image.
        ORIGINAL = 2;

        // The attributions are shown on top of a grayscale version of the
        // original image.
        GRAYSCALE = 3;

        // The attributions are used as a mask to reveal predictive parts of
        // the image and hide the un-predictive parts.
        MASK_BLACK = 4;
      }

      // Type of the image visualization. Only applicable to [Integrated
      // Gradients attribution]
      // [ExplanationParameters.integrated_gradients_attribution]. OUTLINES
      // shows regions of attribution, while PIXELS shows per-pixel attribution.
      // Defaults to OUTLINES.
      Type type = 1;

      // Whether to only highlight pixels with positive contributions, negative
      // or both. Defaults to POSITIVE.
      Polarity polarity = 2;

      // The color scheme used for the highlighted areas.
      //
      // Defaults to PINK_GREEN for [Integrated Gradients
      // attribution][ExplanationParameters.integrated_gradients_attribution],
      // which shows positive attributions in green and negative in pink.
      //
      // Defaults to VIRIDIS for
      // [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], which
      // highlights the most influential regions in yellow and the least
      // influential in blue.
      ColorMap color_map = 3;

      // Excludes attributions above the specified percentile from the
      // highlighted areas. Using the clip_percent_upperbound and
      // clip_percent_lowerbound together can be useful for filtering out noise
      // and making it easier to see areas of strong attribution. Defaults to
      // 99.9.
      float clip_percent_upperbound = 4;

      // Excludes attributions below the specified percentile from the
      // highlighted areas. Defaults to 62.
      float clip_percent_lowerbound = 5;

      // How the original image is displayed in the visualization.
      // Adjusting the overlay can help increase visual clarity if the original
      // image makes it difficult to view the visualization. Defaults to NONE.
      OverlayType overlay_type = 6;
    }

    // Defines how the feature is encoded to [encoded_tensor][]. Defaults to
    // IDENTITY.
    enum Encoding {
      // Default value. This is the same as IDENTITY.
      ENCODING_UNSPECIFIED = 0;

      // The tensor represents one feature.
      IDENTITY = 1;

      // The tensor represents a bag of features where each index maps to
      // a feature. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided for
      // this encoding. For example:
      // ```
      // input = [27, 6.0, 150]
      // index_feature_mapping = ["age", "height", "weight"]
      // ```
      BAG_OF_FEATURES = 2;

      // The tensor represents a bag of features where each index maps to a
      // feature. Zero values in the tensor indicate that a feature is
      // non-existent. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided
      // for this encoding. For example:
      // ```
      // input = [2, 0, 5, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      BAG_OF_FEATURES_SPARSE = 3;

      // The tensor is a list of binaries representing whether a feature exists
      // or not (1 indicates existence). [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
      // must be provided for this encoding. For example:
      // ```
      // input = [1, 0, 1, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      INDICATOR = 4;

      // The tensor is encoded into a 1-dimensional array represented by an
      // encoded tensor. [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided
      // for this encoding. For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
      // ```
      COMBINED_EMBEDDING = 5;

      // Select this encoding when the input tensor is encoded into a
      // 2-dimensional array represented by an encoded tensor.
      // [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided for this
      // encoding. The first dimension of the encoded tensor's shape is the same
      // as the input tensor's shape. For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
      //            [0.2, 0.1, 0.4, 0.3, 0.5],
      //            [0.5, 0.1, 0.3, 0.5, 0.4],
      //            [0.5, 0.3, 0.1, 0.2, 0.4],
      //            [0.4, 0.3, 0.2, 0.5, 0.1]]
      // ```
      CONCAT_EMBEDDING = 6;
    }

    // Baseline inputs for this feature.
    //
    // If no baseline is specified, Vertex AI chooses the baseline for this
    // feature. If multiple baselines are specified, Vertex AI returns the
    // average attributions across them in
    // [Attributions.baseline_attribution][].
    //
    // For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
    // of each baseline must match the shape of the input tensor. If a scalar is
    // provided, we broadcast to the same shape as the input tensor.
    //
    // For custom images, the element of the baselines must be in the same
    // format as the feature's input in the
    // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. The schema of any single instance
    // may be specified via Endpoint's DeployedModels'
    // [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
    // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
    // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
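    //
    // For example (illustrative baselines, not values Vertex AI chooses
    // automatically): for an image feature normalized to [0, 1], an all-black
    // and an all-white baseline image could be provided as scalars, which are
    // then broadcast to the input tensor's shape:
    // ```
    // input_baselines { number_value: 0.0 }
    // input_baselines { number_value: 1.0 }
    // ```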
    repeated google.protobuf.Value input_baselines = 1;

    // Name of the input tensor for this feature. Required and is only
    // applicable to Vertex AI-provided images for Tensorflow.
    string input_tensor_name = 2;

    // Defines how the feature is encoded into the input tensor. Defaults to
    // IDENTITY.
    Encoding encoding = 3;

    // Modality of the feature. Valid values are: numeric, image. Defaults to
    // numeric.
    string modality = 4;

    // The domain details of the input feature value. Like min/max, original
    // mean or standard deviation if normalized.
    FeatureValueDomain feature_value_domain = 5;

    // Specifies the index of the values of the input tensor.
    // Required when the input tensor is a sparse representation. Refer to
    // Tensorflow documentation for more details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
    string indices_tensor_name = 6;

    // Specifies the shape of the values of the input if the input is a sparse
    // representation. Refer to Tensorflow documentation for more details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
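    //
    // For example (illustrative tensor names, not part of the API): if the
    // feature is fed as a tf.sparse.SparseTensor whose values, indices and
    // dense_shape components are exposed as the tensors "ids_values",
    // "ids_indices" and "ids_shape", the metadata could be configured as
    // ```
    // input_tensor_name: "ids_values"
    // indices_tensor_name: "ids_indices"
    // dense_shape_tensor_name: "ids_shape"
    // ```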
    string dense_shape_tensor_name = 7;

    // A list of feature names for each index in the input tensor.
    // Required when the input [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES,
    // BAG_OF_FEATURES_SPARSE, or INDICATOR.
    repeated string index_feature_mapping = 8;

    // Encoded tensor is a transformation of the input tensor. Must be provided
    // if choosing [Integrated Gradients
    // attribution][ExplanationParameters.integrated_gradients_attribution] or
    // [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
    // and the input tensor is not differentiable.
    //
    // An encoded tensor is generated if the input tensor is encoded by a lookup
    // table.
    string encoded_tensor_name = 9;

    // A list of baselines for the encoded tensor.
    //
    // The shape of each baseline should match the shape of the encoded tensor.
    // If a scalar is provided, Vertex AI broadcasts to the same shape as the
    // encoded tensor.
    repeated google.protobuf.Value encoded_baselines = 10;

    // Visualization configurations for image explanation.
    Visualization visualization = 11;

    // Name of the group that the input belongs to. Features with the same group
    // name will be treated as one feature when computing attributions. Features
    // grouped together can have different shapes in value. If provided, there
    // will be a single attribution generated in
    // [featureAttributions][Attribution.feature_attributions], keyed by the
    // group name.
    string group_name = 12;
  }

  // Metadata of the prediction output to be explained.
  message OutputMetadata {
    // Defines how to map [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] to
    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name].
    //
    // If neither of the fields are specified,
    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] will not be populated.
    oneof display_name_mapping {
      // Static mapping between the index and display name.
      //
      // Use this if the outputs are a deterministic n-dimensional array, e.g. a
      // list of scores of all the classes in a pre-defined order for a
      // multi-classification Model. It's not feasible if the outputs are
      // non-deterministic, e.g. the Model produces the top-k classes or sorts
      // the outputs by their values.
      //
      // The shape of the value must be an n-dimensional array of strings. The
      // number of dimensions must match that of the outputs to be explained.
      // The [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] is populated by looking up the
      // mapping with [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
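      //
      // For example (illustrative class names): for a classifier that always
      // returns three scores in the fixed order cat, dog, bird, the mapping
      // could be
      // ```
      // index_display_name_mapping = ["cat", "dog", "bird"]
      // ```
      // so that an [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] of [2] resolves to the
      // display name "bird".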
      google.protobuf.Value index_display_name_mapping = 1;

      // Specify a field name in the prediction to look for the display name.
      //
      // Use this if the prediction contains the display names for the outputs.
      //
      // The display names in the prediction must have the same shape as the
      // outputs, so that they can be located by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] for
      // a specific output.
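      //
      // For example (an illustrative prediction format): if each prediction
      // looks like
      // ```
      // prediction = {"scores": [0.1, 0.9], "labels": ["cat", "dog"]}
      // ```
      // then setting display_name_mapping_key to "labels" maps each score to
      // the label at the same position.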
      string display_name_mapping_key = 2;
    }

    // Name of the output tensor. Required and is only applicable to
    // Vertex AI-provided images for Tensorflow.
    string output_tensor_name = 3;
  }

  // Required. Map from feature names to feature input metadata. Keys are the names of the
  // features. Values are the specification of the feature.
  //
  // An empty InputMetadata is valid. It describes a text feature which has the
  // name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline
  // of the empty feature is chosen by Vertex AI.
  //
  // For Vertex AI-provided Tensorflow images, the key can be any friendly
  // name of the feature. Once specified,
  // [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] are keyed by
  // this key (if not grouped with another feature).
  //
  // For custom images, the key must match the key in
  // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
  map<string, InputMetadata> inputs = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. Map from output names to output metadata.
  //
  // For Vertex AI-provided Tensorflow images, keys can be any user-defined
  // string that consists of any UTF-8 characters.
  //
  // For custom images, keys are the name of the output field in the prediction
  // to be explained.
  //
  // Currently only one key is allowed.
  map<string, OutputMetadata> outputs = 2 [(google.api.field_behavior) = REQUIRED];

  // Points to a YAML file stored on Google Cloud Storage describing the format
  // of the [feature attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML tabular Models always have this field populated by Vertex AI.
  // Note: The URI given on output may be different, including the URI scheme,
  // than the one given on input. The output URI will point to a location where
  // the user only has read access.
  string feature_attributions_schema_uri = 3;
}