training_pipeline.proto

// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1/encryption_spec.proto";
import "google/cloud/aiplatform/v1/io.proto";
import "google/cloud/aiplatform/v1/machine_resources.proto";
import "google/cloud/aiplatform/v1/manual_batch_tuning_parameters.proto";
import "google/cloud/aiplatform/v1/model.proto";
import "google/cloud/aiplatform/v1/pipeline_state.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
import "google/api/annotations.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "TrainingPipelineProto";
option java_package = "com.google.cloud.aiplatform.v1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
option ruby_package = "Google::Cloud::AIPlatform::V1";

// The TrainingPipeline orchestrates tasks associated with training a Model. It
// always executes the training task, and optionally may also
// export data from Vertex AI's Dataset which becomes the training input,
// [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to Vertex AI, and evaluate the
// Model.
message TrainingPipeline {
  option (google.api.resource) = {
    type: "aiplatform.googleapis.com/TrainingPipeline"
    pattern: "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}"
  };

  // Output only. Resource name of the TrainingPipeline.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Required. The user-defined name of this TrainingPipeline.
  string display_name = 2 [(google.api.field_behavior) = REQUIRED];

  // Specifies Vertex AI owned input data that may be used for training the
  // Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] should make
  // clear whether this config is used and if there are any special requirements
  // on how it should be filled. If nothing about this config is mentioned in
  // the [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], then it should be assumed that the
  // TrainingPipeline does not depend on this configuration.
  InputDataConfig input_data_config = 3;

  // Required. A Google Cloud Storage path to the YAML file that defines the
  // training task, which is responsible for producing the model artifact and
  // may also include additional auxiliary work.
  // The definition files that can be used here are found in
  // gs://google-cloud-aiplatform/schema/trainingjob/definition/.
  // Note: The URI given on output will be immutable and probably different,
  // including the URI scheme, from the one given on input. The output URI will
  // point to a location where the user only has read access.
  string training_task_definition = 4 [(google.api.field_behavior) = REQUIRED];

  // Required. The training task's parameter(s), as specified in the
  // [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s `inputs`.
  google.protobuf.Value training_task_inputs = 5 [(google.api.field_behavior) = REQUIRED];
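
  // As an illustration only (the schema URI and input keys below are
  // placeholders that depend on the chosen task definition, not values defined
  // in this file), a pipeline using a published AutoML image classification
  // definition might pair the two fields like this:
  //
  //   training_task_definition:
  //     "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml"
  //   training_task_inputs: { "multiLabel": false, "budgetMilliNodeHours": 8000 }
  //
  // The `inputs` schema inside the referenced YAML file determines which keys
  // are valid for `training_task_inputs`.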

  // Output only. The metadata information as specified in the [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s
  // `metadata`. This metadata is auxiliary runtime and final information
  // about the training task. While the pipeline is running, this information is
  // populated only on a best-effort basis. Only present if the
  // pipeline's [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] contains a `metadata` object.
  google.protobuf.Value training_task_metadata = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Describes the Model that may be uploaded (via [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel])
  // by this TrainingPipeline. The TrainingPipeline's
  // [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] should make clear whether this Model
  // description should be populated, and if there are any special requirements
  // regarding how it should be filled. If nothing is mentioned in the
  // [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], then it should be assumed that this field
  // should not be filled and that the training task either uploads the Model
  // without needing this information, or does not support uploading a Model as
  // part of the pipeline at all.
  // When the pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and
  // the trained Model has been uploaded into Vertex AI, then the
  // model_to_upload's resource [name][google.cloud.aiplatform.v1.Model.name] is populated. The Model
  // is always uploaded into the Project and Location in which this pipeline
  // runs.
  Model model_to_upload = 7;

  // Output only. The detailed state of the pipeline.
  PipelineState state = 9 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or
  // `PIPELINE_STATE_CANCELLED`.
  google.rpc.Status error = 10 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Time when the TrainingPipeline was created.
  google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Time when the TrainingPipeline for the first time entered the
  // `PIPELINE_STATE_RUNNING` state.
  google.protobuf.Timestamp start_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Time when the TrainingPipeline entered any of the following states:
  // `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`,
  // `PIPELINE_STATE_CANCELLED`.
  google.protobuf.Timestamp end_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Time when the TrainingPipeline was most recently updated.
  google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The labels with user-defined metadata to organize TrainingPipelines.
  //
  // Label keys and values can be no longer than 64 characters
  // (Unicode codepoints), can only contain lowercase letters, numeric
  // characters, underscores and dashes. International characters are allowed.
  //
  // See https://goo.gl/xmQnxf for more information and examples of labels.
  map<string, string> labels = 15;

  // Customer-managed encryption key spec for a TrainingPipeline. If set, this
  // TrainingPipeline will be secured by this key.
  //
  // Note: Model trained by this TrainingPipeline is also secured by this key if
  // [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.encryption_spec] is not set separately.
  EncryptionSpec encryption_spec = 18;
}
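
// As a rough sketch only (the schema URI, dataset ID, and names below are
// hypothetical placeholders, not values defined in this file), a minimal
// TrainingPipeline to be created could look like:
//
//   display_name: "my-training-run"
//   training_task_definition:
//     "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml"
//   training_task_inputs: { ... task-specific parameters ... }
//   input_data_config {
//     dataset_id: "1234567890123456789"
//     fraction_split { training_fraction: 0.8 validation_fraction: 0.1 test_fraction: 0.1 }
//   }
//   model_to_upload { display_name: "my-model" }
//
// Only `display_name`, `training_task_definition`, and `training_task_inputs`
// are marked REQUIRED; the remaining fields matter only when the chosen task
// definition calls for them, as described in the field comments above.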

// Specifies Vertex AI owned input data to be used for training, and
// possibly evaluating, the Model.
message InputDataConfig {
  // The instructions for how the input data should be split between the
  // training, validation, and test sets.
  // If no split type is provided, the [fraction_split][google.cloud.aiplatform.v1.InputDataConfig.fraction_split] is used by default.
  oneof split {
    // Split based on fractions defining the size of each set.
    FractionSplit fraction_split = 2;

    // Split based on the provided filters for each set.
    FilterSplit filter_split = 3;

    // Supported only for tabular Datasets.
    //
    // Split based on a predefined key.
    PredefinedSplit predefined_split = 4;

    // Supported only for tabular Datasets.
    //
    // Split based on the timestamp of the input data pieces.
    TimestampSplit timestamp_split = 5;
  }

  // Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
  //
  // The destination where the training data is to be written.
  //
  // Supported destination file formats:
  // * For non-tabular data: "jsonl".
  // * For tabular data: "csv" and "bigquery".
  //
  // The following Vertex AI environment variables are passed to containers
  // or Python modules of the training task when this field is set:
  //
  // * AIP_DATA_FORMAT : Exported data format.
  // * AIP_TRAINING_DATA_URI : Sharded exported training data URIs.
  // * AIP_VALIDATION_DATA_URI : Sharded exported validation data URIs.
  // * AIP_TEST_DATA_URI : Sharded exported test data URIs.
  oneof destination {
    // The Cloud Storage location where the training data is to be
    // written. In the given directory a new directory is created with
    // the name:
    // `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
    // where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
    // All training input data is written into that directory.
    //
    // The Vertex AI environment variables representing Cloud Storage
    // data URIs are represented in the Cloud Storage wildcard
    // format to support sharded data, e.g.: "gs://.../training-*.jsonl"
    //
    // * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
    // * AIP_TRAINING_DATA_URI =
    // "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
    //
    // * AIP_VALIDATION_DATA_URI =
    // "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
    //
    // * AIP_TEST_DATA_URI =
    // "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
    GcsDestination gcs_destination = 8;

    // Only applicable to custom training with a tabular Dataset that has a
    // BigQuery source.
    //
    // The BigQuery project location where the training data is to be written.
    // In the given project a new dataset is created with the name
    // `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
    // where the timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
    // input data is written into that dataset. In the dataset three
    // tables are created, `training`, `validation` and `test`.
    //
    // * AIP_DATA_FORMAT = "bigquery".
    // * AIP_TRAINING_DATA_URI =
    // "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
    //
    // * AIP_VALIDATION_DATA_URI =
    // "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
    //
    // * AIP_TEST_DATA_URI =
    // "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
    BigQueryDestination bigquery_destination = 10;
  }
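
  // As a sketch only (the bucket and project names are hypothetical, and the
  // destination message fields are defined in io.proto rather than in this
  // file), the two alternatives above would typically be set as either:
  //
  //   gcs_destination { output_uri_prefix: "gs://my-bucket/training-exports/" }
  //
  // or:
  //
  //   bigquery_destination { output_uri: "bq://my-project" }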

  // Required. The ID of the Dataset in the same Project and Location whose data will be
  // used to train the Model. The Dataset must use a schema compatible with the
  // Model being trained, and what is compatible should be described in the
  // TrainingPipeline's [training_task_definition]
  // [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
  // For tabular Datasets, all of their data is exported to training, to pick
  // and choose from.
  string dataset_id = 1 [(google.api.field_behavior) = REQUIRED];

  // Applicable only to Datasets that have DataItems and Annotations.
  //
  // A filter on Annotations of the Dataset. Only Annotations that both
  // match this filter and belong to DataItems not ignored by the split method
  // are used in the training, validation, or test role, respectively, depending
  // on the role of the DataItem they are on (for auto-assigned DataItems that
  // role is decided by Vertex AI). A filter with the same syntax as the one
  // used in [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] may be used, but note that
  // here it filters across all Annotations of the Dataset, and not just within
  // a single DataItem.
  string annotations_filter = 6;

  // Applicable only to custom training with Datasets that have DataItems and
  // Annotations.
  //
  // Cloud Storage URI that points to a YAML file describing the annotation
  // schema. The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // The schema files that can be used here are found in
  // gs://google-cloud-aiplatform/schema/dataset/annotation/ ; note that the
  // chosen schema must be consistent with the
  // [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the Dataset specified by
  // [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
  //
  // Only Annotations that both match this schema and belong to DataItems not
  // ignored by the split method are used in the training, validation, or test
  // role, respectively, depending on the role of the DataItem they are on.
  //
  // When used in conjunction with [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter], the Annotations used
  // for training are filtered by both [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter] and
  // [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
  string annotation_schema_uri = 9;
}
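
// Putting the pieces together, a sketch of a complete InputDataConfig (with a
// hypothetical dataset ID and bucket name chosen purely for illustration)
// could look like:
//
//   input_data_config {
//     dataset_id: "1234567890123456789"
//     fraction_split {
//       training_fraction: 0.8
//       validation_fraction: 0.1
//       test_fraction: 0.1
//     }
//     gcs_destination { output_uri_prefix: "gs://my-bucket/exports/" }
//   }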

// Assigns the input data to training, validation, and test sets as per the
// given fractions. Any of `training_fraction`, `validation_fraction` and
// `test_fraction` may optionally be provided; together they must sum to at
// most 1. If the provided fractions sum to less than 1, the remainder is
// assigned to sets as decided by Vertex AI. If none of the fractions are set,
// by default roughly 80% of data is used for training, 10% for validation,
// and 10% for test.
message FractionSplit {
  // The fraction of the input data that is to be used to train the Model.
  double training_fraction = 1;

  // The fraction of the input data that is to be used to validate the Model.
  double validation_fraction = 2;

  // The fraction of the input data that is to be used to evaluate the Model.
  double test_fraction = 3;
}
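
// For instance (an illustrative configuration, not a value taken from this
// file), providing only
//
//   fraction_split { training_fraction: 0.9 }
//
// sums to less than 1, so the remaining 10% of the data is divided between
// the validation and test sets as decided by Vertex AI, per the rules above.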

// Assigns input data to training, validation, and test sets based on the given
// filters; data pieces not matched by any filter are ignored. Currently only
// supported for Datasets containing DataItems.
// If any of the filters in this message is meant to match nothing, it can be
// set to '-' (the minus sign).
//
// Supported only for unstructured Datasets.
//
message FilterSplit {
  // Required. A filter on DataItems of the Dataset. DataItems that match
  // this filter are used to train the Model. A filter with the same syntax
  // as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. If a
  // single DataItem is matched by more than one of the FilterSplit filters,
  // then it is assigned to the first set that applies to it in the
  // training, validation, test order.
  string training_filter = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. A filter on DataItems of the Dataset. DataItems that match
  // this filter are used to validate the Model. A filter with the same syntax
  // as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. If a
  // single DataItem is matched by more than one of the FilterSplit filters,
  // then it is assigned to the first set that applies to it in the
  // training, validation, test order.
  string validation_filter = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. A filter on DataItems of the Dataset. DataItems that match
  // this filter are used to test the Model. A filter with the same syntax
  // as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] may be used. If a
  // single DataItem is matched by more than one of the FilterSplit filters,
  // then it is assigned to the first set that applies to it in the
  // training, validation, test order.
  string test_filter = 3 [(google.api.field_behavior) = REQUIRED];
}
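
// As an illustrative sketch (the label-based filters below are hypothetical;
// any filter accepted by DatasetService.ListDataItems would work), a split
// that draws on explicitly labeled DataItems and deliberately leaves the test
// set empty might look like:
//
//   filter_split {
//     training_filter: "labels.my-split=training"
//     validation_filter: "labels.my-split=validation"
//     test_filter: "-"
//   }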

// Assigns input data to training, validation, and test sets based on the
// value of a provided key.
//
// Supported only for tabular Datasets.
message PredefinedSplit {
  // Required. The key is a name of one of the Dataset's data columns.
  // The value of the key (either the label's value or value in the column)
  // must be one of {`training`, `validation`, `test`}, and it defines to which
  // set the given piece of data is assigned. If for a piece of data the key
  // is not present or has an invalid value, that piece is ignored by the
  // pipeline.
  string key = 1 [(google.api.field_behavior) = REQUIRED];
}
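
// For example (the column name is hypothetical): if the tabular Dataset has a
// column `ml_use` whose rows contain the values "training", "validation", or
// "test", the split would be configured as:
//
//   predefined_split { key: "ml_use" }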

// Assigns input data to training, validation, and test sets based on a
// provided timestamp. The youngest data pieces are assigned to the training
// set, the next to the validation set, and the oldest to the test set.
//
// Supported only for tabular Datasets.
message TimestampSplit {
  // The fraction of the input data that is to be used to train the Model.
  double training_fraction = 1;

  // The fraction of the input data that is to be used to validate the Model.
  double validation_fraction = 2;

  // The fraction of the input data that is to be used to evaluate the Model.
  double test_fraction = 3;

  // Required. The key is a name of one of the Dataset's data columns.
  // The values of the key (the values in the column) must be in RFC 3339
  // `date-time` format, where `time-offset` = `"Z"`
  // (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not
  // present or has an invalid value, that piece is ignored by the pipeline.
  string key = 4 [(google.api.field_behavior) = REQUIRED];
}
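
// As a final sketch (the column name is hypothetical), a time-based split over
// a tabular Dataset with an RFC 3339 timestamp column `event_time` could be
// configured as:
//
//   timestamp_split {
//     training_fraction: 0.8
//     validation_fraction: 0.1
//     test_fraction: 0.1
//     key: "event_time"
//   }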