feat: add saved_queries to Dataset in aiplatform v1beta1 dataset.proto

feat: add order_by to ListModelVersionsRequest in aiplatform v1beta1 model_service.proto
feat: add update_all_stopped_trials to ConvexAutomatedStoppingSpec in aiplatform v1beta1 study.proto
feat: add ReadTensorboardUsage rpc in aiplatform v1beta1 tensorboard_service.proto
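
A minimal usage sketch for one of these additions (the new order_by field on the list-model-versions request), assuming a Python client regenerated from these protos; the project, model name, and sort expression below are illustrative and not taken from this change:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()
request = aiplatform_v1beta1.ListModelVersionsRequest(
    name="projects/my-project/locations/us-central1/models/my-model",  # illustrative resource name
    order_by="create_time desc",  # new field added by this change; expression is an assumption
)
for version in client.list_model_versions(request=request):
    print(version.version_id, version.version_create_time)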

PiperOrigin-RevId: 500741013
Branch: pull/765/head
Authored by Google APIs, committed by Copybara-Service
Parent: 2028ca57b3
Commit: e8675f0880
Changed files (64), with the number of changed lines in parentheses:
1. google/cloud/aiplatform/v1beta1/annotation.proto (24)
2. google/cloud/aiplatform/v1beta1/annotation_spec.proto (10)
3. google/cloud/aiplatform/v1beta1/artifact.proto (6)
4. google/cloud/aiplatform/v1beta1/batch_prediction_job.proto (214)
5. google/cloud/aiplatform/v1beta1/completion_stats.proto (18)
6. google/cloud/aiplatform/v1beta1/context.proto (10)
7. google/cloud/aiplatform/v1beta1/custom_job.proto (64)
8. google/cloud/aiplatform/v1beta1/data_item.proto (18)
9. google/cloud/aiplatform/v1beta1/data_labeling_job.proto (41)
10. google/cloud/aiplatform/v1beta1/dataset.proto (54)
11. google/cloud/aiplatform/v1beta1/dataset_service.proto (119)
12. google/cloud/aiplatform/v1beta1/deployment_resource_pool.proto (9)
13. google/cloud/aiplatform/v1beta1/deployment_resource_pool_service.proto (35)
14. google/cloud/aiplatform/v1beta1/encryption_spec.proto (4)
15. google/cloud/aiplatform/v1beta1/endpoint.proto (89)
16. google/cloud/aiplatform/v1beta1/endpoint_service.proto (120)
17. google/cloud/aiplatform/v1beta1/entity_type.proto (23)
18. google/cloud/aiplatform/v1beta1/event.proto (3)
19. google/cloud/aiplatform/v1beta1/execution.proto (6)
20. google/cloud/aiplatform/v1beta1/explanation.proto (200)
21. google/cloud/aiplatform/v1beta1/explanation_metadata.proto (108)
22. google/cloud/aiplatform/v1beta1/feature.proto (49)
23. google/cloud/aiplatform/v1beta1/feature_monitoring_stats.proto (3)
24. google/cloud/aiplatform/v1beta1/featurestore.proto (45)
25. google/cloud/aiplatform/v1beta1/featurestore_monitoring.proto (11)
26. google/cloud/aiplatform/v1beta1/featurestore_online_service.proto (72)
27. google/cloud/aiplatform/v1beta1/featurestore_service.proto (343)
28. google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto (34)
29. google/cloud/aiplatform/v1beta1/index.proto (47)
30. google/cloud/aiplatform/v1beta1/index_endpoint.proto (107)
31. google/cloud/aiplatform/v1beta1/index_endpoint_service.proto (106)
32. google/cloud/aiplatform/v1beta1/index_service.proto (82)
33. google/cloud/aiplatform/v1beta1/io.proto (3)
34. google/cloud/aiplatform/v1beta1/job_service.proto (302)
35. google/cloud/aiplatform/v1beta1/machine_resources.proto (102)
36. google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto (10)
37. google/cloud/aiplatform/v1beta1/metadata_schema.proto (9)
38. google/cloud/aiplatform/v1beta1/metadata_service.proto (351)
39. google/cloud/aiplatform/v1beta1/metadata_store.proto (6)
40. google/cloud/aiplatform/v1beta1/migratable_resource.proto (45)
41. google/cloud/aiplatform/v1beta1/migration_service.proto (79)
42. google/cloud/aiplatform/v1beta1/model.proto (324)
43. google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto (70)
44. google/cloud/aiplatform/v1beta1/model_evaluation.proto (16)
45. google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto (22)
46. google/cloud/aiplatform/v1beta1/model_monitoring.proto (13)
47. google/cloud/aiplatform/v1beta1/model_service.proto (229)
48. google/cloud/aiplatform/v1beta1/operation.proto (9)
49. google/cloud/aiplatform/v1beta1/pipeline_job.proto (164)
50. google/cloud/aiplatform/v1beta1/pipeline_service.proto (124)
51. google/cloud/aiplatform/v1beta1/prediction_service.proto (109)
52. google/cloud/aiplatform/v1beta1/saved_query.proto (10)
53. google/cloud/aiplatform/v1beta1/specialist_pool.proto (6)
54. google/cloud/aiplatform/v1beta1/specialist_pool_service.proto (48)
55. google/cloud/aiplatform/v1beta1/study.proto (91)
56. google/cloud/aiplatform/v1beta1/tensorboard.proto (13)
57. google/cloud/aiplatform/v1beta1/tensorboard_data.proto (21)
58. google/cloud/aiplatform/v1beta1/tensorboard_experiment.proto (9)
59. google/cloud/aiplatform/v1beta1/tensorboard_run.proto (6)
60. google/cloud/aiplatform/v1beta1/tensorboard_service.proto (457)
61. google/cloud/aiplatform/v1beta1/tensorboard_time_series.proto (26)
62. google/cloud/aiplatform/v1beta1/training_pipeline.proto (164)
63. google/cloud/aiplatform/v1beta1/unmanaged_container_model.proto (7)
64. google/cloud/aiplatform/v1beta1/vizier_service.proto (72)

@ -41,8 +41,9 @@ message Annotation {
// Output only. Resource name of the Annotation.
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. The
// schema is defined as an [OpenAPI 3.0.2 Schema
// Required. Google Cloud Storage URI points to a YAML file describing
// [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. The schema
// is defined as an [OpenAPI 3.0.2 Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// The schema files that can be used here are found in
// gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@ -55,19 +56,23 @@ message Annotation {
google.protobuf.Value payload = 3 [(google.api.field_behavior) = REQUIRED];
// Output only. Timestamp when this Annotation was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Annotation was last updated.
google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
// Optional. Used to perform consistent read-modify-write updates. If not set,
// a blind "overwrite" update happens.
string etag = 8 [(google.api.field_behavior) = OPTIONAL];
// Output only. The source of the Annotation.
UserActionReference annotation_source = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
UserActionReference annotation_source = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The labels with user-defined metadata to organize your Annotations.
// Optional. The labels with user-defined metadata to organize your
// Annotations.
//
// Label keys and values can be no longer than 64 characters
// (Unicode codepoints), can only contain lowercase letters, numeric
@ -84,7 +89,8 @@ message Annotation {
// If not set, the Annotation is not visible in the UI.
//
// * "aiplatform.googleapis.com/payload_schema":
// output only, its value is the [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
// output only, its value is the
// [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
// title.
map<string, string> labels = 6 [(google.api.field_behavior) = OPTIONAL];
}

@ -44,12 +44,14 @@ message AnnotationSpec {
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Output only. Timestamp when this AnnotationSpec was created.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when AnnotationSpec was last updated.
google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
// Optional. Used to perform consistent read-modify-write updates. If not set,
// a blind "overwrite" update happens.
string etag = 5 [(google.api.field_behavior) = OPTIONAL];
}

@ -75,10 +75,12 @@ message Artifact {
map<string, string> labels = 10;
// Output only. Timestamp when this Artifact was created.
google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Artifact was last updated.
google.protobuf.Timestamp update_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// The state of this Artifact. This is a property of the Artifact, and does
// not imply or capture any ongoing process. This property is managed by

@ -40,19 +40,24 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
// on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If
// predictions for significant portion of the instances fail, the job may finish
// without attempting predictions for all remaining instances.
// A job that uses a
// [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce
// predictions on multiple [input
// instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
// If predictions for significant portion of the instances fail, the job may
// finish without attempting predictions for all remaining instances.
message BatchPredictionJob {
option (google.api.resource) = {
type: "aiplatform.googleapis.com/BatchPredictionJob"
pattern: "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}"
};
// Configures the input to [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// See [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] for Model's supported input
// formats, and how instances should be expressed via any of them.
// Configures the input to
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// See
// [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]
// for Model's supported input formats, and how instances should be expressed
// via any of them.
message InputConfig {
// Required. The source of the input.
oneof source {
@ -78,7 +83,8 @@ message BatchPredictionJob {
message InstanceConfig {
// The format of the instance that the Model accepts. Vertex AI will
// convert compatible
// [batch prediction input instance formats][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.instances_format]
// [batch prediction input instance
// formats][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.instances_format]
// to the specified format.
//
// Supported values are:
@ -92,10 +98,13 @@ message BatchPredictionJob {
// * `array`: Each input is converted to JSON array format.
// * For `bigquery`, each row is converted to an array. The order
// of columns is determined by the BigQuery column order, unless
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] is populated.
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] must be populated for specifying field orders.
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields]
// is populated.
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields]
// must be populated for specifying field orders.
// * For `jsonl`, if each line of the JSONL input is an object,
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] must be populated for specifying field orders.
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields]
// must be populated for specifying field orders.
// * Does not apply to `csv`, `file-list`, `tf-record`, or
// `tf-record-gzip`.
//
@ -104,7 +113,8 @@ message BatchPredictionJob {
//
// * For `bigquery` and `csv`, the behavior is the same as `array`. The
// order of columns is the same as defined in the file or table, unless
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] is populated.
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields]
// is populated.
// * For `jsonl`, the prediction instance format is determined by
// each line of the input.
// * For `tf-record`/`tf-record-gzip`, each record will be converted to
@ -119,10 +129,11 @@ message BatchPredictionJob {
//
// The values identified by the key field is not included in the transformed
// instances that is sent to the Model. This is similar to
// specifying this name of the field in [excluded_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.excluded_fields]. In addition,
// the batch prediction output will not include the instances. Instead the
// output will only include the value of the key field, in a field named
// `key` in the output:
// specifying this name of the field in
// [excluded_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.excluded_fields].
// In addition, the batch prediction output will not include the instances.
// Instead the output will only include the value of the key field, in a
// field named `key` in the output:
//
// * For `jsonl` output format, the output will have a `key` field
// instead of the `instance` field.
@ -136,10 +147,14 @@ message BatchPredictionJob {
// Fields that will be included in the prediction instance that is
// sent to the Model.
//
// If [instance_type][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.instance_type] is `array`, the order of field names in
// included_fields also determines the order of the values in the array.
// If
// [instance_type][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.instance_type]
// is `array`, the order of field names in included_fields also determines
// the order of the values in the array.
//
// When included_fields is populated, [excluded_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.excluded_fields] must be empty.
// When included_fields is populated,
// [excluded_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.excluded_fields]
// must be empty.
//
// The input must be JSONL with objects at each line, CSV, BigQuery
// or TfRecord.
@ -148,19 +163,25 @@ message BatchPredictionJob {
// Fields that will be excluded in the prediction instance that is
// sent to the Model.
//
// Excluded will be attached to the batch prediction output if [key_field][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.key_field]
// Excluded will be attached to the batch prediction output if
// [key_field][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.key_field]
// is not specified.
//
// When excluded_fields is populated, [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] must be empty.
// When excluded_fields is populated,
// [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields]
// must be empty.
//
// The input must be JSONL with objects at each line, CSV, BigQuery
// or TfRecord.
repeated string excluded_fields = 4;
}
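
A minimal sketch of the instance_type / included_fields behavior described above, assuming the v1beta1 Python client; the column names are illustrative, and excluded_fields is left empty because the two fields are mutually exclusive:

from google.cloud import aiplatform_v1beta1

# Ask Vertex AI to send each input row to the Model as a JSON array,
# keeping only these two columns, in this order (illustrative names).
instance_config = aiplatform_v1beta1.BatchPredictionJob.InstanceConfig(
    instance_type="array",
    included_fields=["feature_a", "feature_b"],
)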
// Configures the output of [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// See [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] for supported output
// formats, and how predictions are expressed via any of them.
// Configures the output of
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// See
// [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]
// for supported output formats, and how predictions are expressed via any of
// them.
message OutputConfig {
// Required. The destination of the output.
oneof destination {
@ -171,11 +192,13 @@ message BatchPredictionJob {
// Inside of it files `predictions_0001.<extension>`,
// `predictions_0002.<extension>`, ..., `predictions_N.<extension>`
// are created where `<extension>` depends on chosen
// [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], and N may equal 0001 and depends on the total
// number of successfully predicted instances.
// If the Model has both [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// and [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata
// defined then each such file contains predictions as per the
// [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format],
// and N may equal 0001 and depends on the total number of successfully
// predicted instances. If the Model has both
// [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// and
// [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
// schemata defined then each such file contains predictions as per the
// [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format].
// If prediction for any instance failed (partially or completely), then
// an additional `errors_0001.<extension>`, `errors_0002.<extension>`,...,
@ -194,45 +217,52 @@ message BatchPredictionJob {
// become underscores), and timestamp is in
// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
// two tables will be created, `predictions`, and `errors`.
// If the Model has both [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// and [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata
// defined then the tables have columns as follows: The `predictions`
// table contains instances for which the prediction succeeded, it
// has columns as per a concatenation of the Model's instance and
// prediction schemata. The `errors` table contains rows for which the
// prediction has failed, it has instance columns, as per the
// If the Model has both
// [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// and
// [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
// schemata defined then the tables have columns as follows: The
// `predictions` table contains instances for which the prediction
// succeeded, it has columns as per a concatenation of the Model's
// instance and prediction schemata. The `errors` table contains rows for
// which the prediction has failed, it has instance columns, as per the
// instance schema, followed by a single "errors" column, which as values
// has [google.rpc.Status][google.rpc.Status]
// represented as a STRUCT, and containing only `code` and `message`.
BigQueryDestination bigquery_destination = 3;
}
// Required. The format in which Vertex AI gives the predictions, must be one of the
// Required. The format in which Vertex AI gives the predictions, must be
// one of the
// [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
string predictions_format = 1 [(google.api.field_behavior) = REQUIRED];
}
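
A minimal OutputConfig sketch matching the description above, assuming the v1beta1 Python client; the bucket and prefix are illustrative:

from google.cloud import aiplatform_v1beta1

# Write predictions_*.jsonl (and errors_*.jsonl, if any) under a GCS prefix.
output_config = aiplatform_v1beta1.BatchPredictionJob.OutputConfig(
    gcs_destination=aiplatform_v1beta1.GcsDestination(
        output_uri_prefix="gs://my-bucket/batch-output/"  # illustrative
    ),
    predictions_format="jsonl",
)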
// Further describes this job's output.
// Supplements [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
// Supplements
// [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
message OutputInfo {
// The output location into which prediction output is written.
oneof output_location {
// Output only. The full path of the Cloud Storage directory created, into which
// the prediction output is written.
string gcs_output_directory = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The full path of the Cloud Storage directory created, into
// which the prediction output is written.
string gcs_output_directory = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The path of the BigQuery dataset created, in
// `bq://projectId.bqDatasetId`
// format, into which the prediction output is written.
string bigquery_output_dataset = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
string bigquery_output_dataset = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Output only. The name of the BigQuery table created, in
// `predictions_<timestamp>`
// format, into which the prediction output is written.
// Can be used by UI to generate the BigQuery output path, for example.
string bigquery_output_table = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
string bigquery_output_table = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Output only. Resource name of the BatchPredictionJob.
@ -250,10 +280,11 @@ message BatchPredictionJob {
// The model resource name may contain version id or version alias to specify
// the version, if no version is specified, the default version will be used.
string model = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
type: "aiplatform.googleapis.com/Model"
}];
// Output only. The version ID of the Model that produces the predictions via this job.
// Output only. The version ID of the Model that produces the predictions via
// this job.
string model_version_id = 30 [(google.api.field_behavior) = OUTPUT_ONLY];
// Contains model information necessary to perform batch prediction without
@ -261,9 +292,9 @@ message BatchPredictionJob {
// Exactly one of model and unmanaged_container_model must be set.
UnmanagedContainerModel unmanaged_container_model = 28;
// Required. Input configuration of the instances on which predictions are performed.
// The schema of any single instance may be specified via
// the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// Required. Input configuration of the instances on which predictions are
// performed. The schema of any single instance may be specified via the
// [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
InputConfig input_config = 4 [(google.api.field_behavior) = REQUIRED];
@ -273,7 +304,8 @@ message BatchPredictionJob {
InstanceConfig instance_config = 27;
// The parameters that govern the predictions. The schema of the parameters
// may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// may be specified via the
// [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
google.protobuf.Value model_parameters = 5;
@ -289,7 +321,8 @@ message BatchPredictionJob {
OutputConfig output_config = 6 [(google.api.field_behavior) = REQUIRED];
// The config of resources used by the Model during the batch prediction. If
// the Model [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types]
// the Model
// [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types]
// DEDICATED_RESOURCES this config may be provided (and the job will use these
// resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config
// must be provided.
@ -304,35 +337,50 @@ message BatchPredictionJob {
// permission on this service account.
string service_account = 29;
// Immutable. Parameters configuring the batch behavior. Currently only applicable when
// [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] are used (in other cases Vertex AI does
// the tuning itself).
ManualBatchTuningParameters manual_batch_tuning_parameters = 8 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. Parameters configuring the batch behavior. Currently only
// applicable when
// [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources]
// are used (in other cases Vertex AI does the tuning itself).
ManualBatchTuningParameters manual_batch_tuning_parameters = 8
[(google.api.field_behavior) = IMMUTABLE];
// Generate explanation with the batch prediction results.
//
// When set to `true`, the batch prediction output changes based on the
// `predictions_format` field of the
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] object:
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]
// object:
//
// * `bigquery`: output includes a column named `explanation`. The value
// is a struct that conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// is a struct that conforms to the
// [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// * `jsonl`: The JSON objects on each line include an additional entry
// keyed `explanation`. The value of the entry is a JSON object that
// conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// conforms to the
// [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// * `csv`: Generating explanations for CSV format is not supported.
//
// If this field is set to true, either the [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] or
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] must be populated.
// If this field is set to true, either the
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
// or
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
// must be populated.
bool generate_explanation = 23;
// Explanation configuration for this BatchPredictionJob. Can be
// specified only if [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] is set to `true`.
// specified only if
// [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation]
// is set to `true`.
//
// This value overrides the value of [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] are optional in the request. If a field of the
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] object is not populated, the corresponding field of
// the [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] object is inherited.
// This value overrides the value of
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec].
// All fields of
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
// are optional in the request. If a field of the
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
// object is not populated, the corresponding field of the
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
// object is inherited.
ExplanationSpec explanation_spec = 25;
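
A minimal sketch of requesting explanations with the batch output, assuming the v1beta1 Python client; resource names are illustrative and required fields such as input_config and output_config are omitted for brevity:

from google.cloud import aiplatform_v1beta1

# With generate_explanation=True and no explanation_spec here, the Model's
# explanation_spec is inherited, per the comment above.
job = aiplatform_v1beta1.BatchPredictionJob(
    display_name="my-batch-job",  # illustrative
    model="projects/my-project/locations/us-central1/models/my-model",
    generate_explanation=True,
)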
// Output only. Information further describing the output of this job.
@ -349,32 +397,39 @@ message BatchPredictionJob {
// For example, single files that can't be read.
// This field never exceeds 20 entries.
// Status details fields contain standard Google Cloud error details.
repeated google.rpc.Status partial_failures = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated google.rpc.Status partial_failures = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Information about resources that had been consumed by this job.
// Provided in real time at best effort basis, as well as a final value
// Output only. Information about resources that had been consumed by this
// job. Provided in real time at best effort basis, as well as a final value
// once the job completes.
//
// Note: This field currently may be not populated for batch predictions that
// use AutoML Models.
ResourcesConsumed resources_consumed = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
ResourcesConsumed resources_consumed = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Statistics on completed and failed prediction instances.
CompletionStats completion_stats = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
CompletionStats completion_stats = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob was created.
google.protobuf.Timestamp create_time = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 15
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob for the first time entered the
// `JOB_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob for the first time entered
// the `JOB_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 16
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob entered any of the following states:
// `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob entered any of the following
// states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 17
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the BatchPredictionJob was most recently updated.
google.protobuf.Timestamp update_time = 18 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 18
[(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize BatchPredictionJobs.
//
@ -399,5 +454,6 @@ message BatchPredictionJob {
repeated ModelMonitoringStatsAnomalies model_monitoring_stats_anomalies = 31;
// Output only. The running status of the model monitoring pipeline.
google.rpc.Status model_monitoring_status = 32 [(google.api.field_behavior) = OUTPUT_ONLY];
google.rpc.Status model_monitoring_status = 32
[(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -35,14 +35,16 @@ message CompletionStats {
// Output only. The number of entities for which any error was encountered.
int64 failed_count = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. In cases when enough errors are encountered a job, pipeline, or operation
// may be failed as a whole. Below is the number of entities for which the
// processing had not been finished (either in successful or failed state).
// Set to -1 if the number is unknown (for example, the operation failed
// before the total entity number could be collected).
// Output only. In cases when enough errors are encountered a job, pipeline,
// or operation may be failed as a whole. Below is the number of entities for
// which the processing had not been finished (either in successful or failed
// state). Set to -1 if the number is unknown (for example, the operation
// failed before the total entity number could be collected).
int64 incomplete_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The number of the successful forecast points that are generated by the
// forecasting model. This is ONLY used by the forecasting batch prediction.
int64 successful_forecast_point_count = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The number of the successful forecast points that are
// generated by the forecasting model. This is ONLY used by the forecasting
// batch prediction.
int64 successful_forecast_point_count = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -57,13 +57,15 @@ message Context {
map<string, string> labels = 9;
// Output only. Timestamp when this Context was created.
google.protobuf.Timestamp create_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Context was last updated.
google.protobuf.Timestamp update_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A list of resource names of Contexts that are parents of this Context.
// A Context may have at most 10 parent_contexts.
// Output only. A list of resource names of Contexts that are parents of this
// Context. A Context may have at most 10 parent_contexts.
repeated string parent_contexts = 12 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {

@ -60,18 +60,22 @@ message CustomJob {
JobState state = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the CustomJob was created.
google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the CustomJob for the first time entered the
// `JOB_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the CustomJob entered any of the following states:
// `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp end_time = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the CustomJob was most recently updated.
google.protobuf.Timestamp update_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 9
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Only populated when job's state is `JOB_STATE_FAILED` or
// `JOB_STATE_CANCELLED`.
@ -94,7 +98,8 @@ message CustomJob {
// Output only. URIs for accessing [interactive
// shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell)
// (one URI for each training node). Only available if
// [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] is `true`.
// [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access]
// is `true`.
//
// The keys are names of each node in the training job; for example,
// `workerpool0-0` for the primary node, `workerpool1-0` for the first node in
@ -102,15 +107,17 @@ message CustomJob {
// second worker pool.
//
// The values are the URIs for each node's interactive shell.
map<string, string> web_access_uris = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
map<string, string> web_access_uris = 16
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Represents the spec of a CustomJob.
message CustomJobSpec {
// Required. The spec of the worker pools including machine type and Docker image.
// All worker pools except the first one are optional and can be skipped by
// providing an empty value.
repeated WorkerPoolSpec worker_pool_specs = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The spec of the worker pools including machine type and Docker
// image. All worker pools except the first one are optional and can be
// skipped by providing an empty value.
repeated WorkerPoolSpec worker_pool_specs = 1
[(google.api.field_behavior) = REQUIRED];
// Scheduling options for a CustomJob.
Scheduling scheduling = 3;
@ -137,9 +144,7 @@ message CustomJobSpec {
// If this field is left unspecified, the job is not peered with any network.
string network = 5 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}
(google.api.resource_reference) = { type: "compute.googleapis.com/Network" }
];
// Optional. A list of names for the reserved ip ranges under the VPC network
@ -150,14 +155,15 @@ message CustomJobSpec {
// network.
//
// Example: ['vertex-ai-ip-range'].
repeated string reserved_ip_ranges = 13 [(google.api.field_behavior) = OPTIONAL];
repeated string reserved_ip_ranges = 13
[(google.api.field_behavior) = OPTIONAL];
// The Cloud Storage location to store the output of this CustomJob or
// HyperparameterTuningJob. For HyperparameterTuningJob,
// the baseOutputDirectory of
// each child CustomJob backing a Trial is set to a subdirectory of name
// [id][google.cloud.aiplatform.v1beta1.Trial.id] under its parent HyperparameterTuningJob's
// baseOutputDirectory.
// [id][google.cloud.aiplatform.v1beta1.Trial.id] under its parent
// HyperparameterTuningJob's baseOutputDirectory.
//
// The following Vertex AI environment variables will be passed to
// containers or python modules when this field is set:
@ -175,9 +181,9 @@ message CustomJobSpec {
// * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/<trial_id>/logs/`
GcsDestination base_output_directory = 6;
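
Inside a training container, the directory derived from base_output_directory surfaces through environment variables; a minimal sketch of reading the one named below:

import os

# Present only when base_output_directory is set on the CustomJobSpec.
tensorboard_log_dir = os.environ.get("AIP_TENSORBOARD_LOG_DIR", "")
print("writing TensorBoard logs to", tensorboard_log_dir)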
// Optional. The name of a Vertex AI [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob
// will upload Tensorboard logs.
// Format:
// Optional. The name of a Vertex AI
// [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to
// which this CustomJob will upload Tensorboard logs. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
string tensorboard = 7 [
(google.api.field_behavior) = OPTIONAL,
@ -191,7 +197,11 @@ message CustomJobSpec {
// to training containers.
//
// If set to `true`, you can access interactive shells at the URIs given
// by [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris] or [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris] (within
// by
// [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris]
// or
// [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris]
// (within
// [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]).
bool enable_web_access = 10 [(google.api.field_behavior) = OPTIONAL];
}
@ -225,8 +235,8 @@ message WorkerPoolSpec {
// The spec of a Container.
message ContainerSpec {
// Required. The URI of a container image in the Container Registry that is to be run on
// each worker replica.
// Required. The URI of a container image in the Container Registry that is to
// be run on each worker replica.
string image_uri = 1 [(google.api.field_behavior) = REQUIRED];
// The command to be invoked when the container is started.
@ -243,17 +253,17 @@ message ContainerSpec {
// The spec of a Python packaged code.
message PythonPackageSpec {
// Required. The URI of a container image in Artifact Registry that will run the
// provided Python package. Vertex AI provides a wide range of executor
// Required. The URI of a container image in Artifact Registry that will run
// the provided Python package. Vertex AI provides a wide range of executor
// images with pre-installed packages to meet users' various use cases. See
// the list of [pre-built containers for
// training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers).
// You must use an image from this list.
string executor_image_uri = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Google Cloud Storage location of the Python package files which are
// the training program and its dependent packages.
// The maximum number of package URIs is 100.
// Required. The Google Cloud Storage location of the Python package files
// which are the training program and its dependent packages. The maximum
// number of package URIs is 100.
repeated string package_uris = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The Python module name to run after installing the packages.

@ -41,10 +41,12 @@ message DataItem {
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this DataItem was created.
google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this DataItem was last updated.
google.protobuf.Timestamp update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The labels with user-defined metadata to organize your DataItems.
//
@ -59,12 +61,14 @@ message DataItem {
// and are immutable.
map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
// Required. The data that the DataItem represents (for example, an image or a text
// snippet). The schema of the payload is stored in the parent Dataset's
// [metadata schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] dataItemSchemaUri field.
// Required. The data that the DataItem represents (for example, an image or a
// text snippet). The schema of the payload is stored in the parent Dataset's
// [metadata
// schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
// dataItemSchemaUri field.
google.protobuf.Value payload = 4 [(google.api.field_behavior) = REQUIRED];
// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
// Optional. Used to perform consistent read-modify-write updates. If not set,
// a blind "overwrite" update happens.
string etag = 7 [(google.api.field_behavior) = OPTIONAL];
}

@ -50,9 +50,8 @@ message DataLabelingJob {
// Display name of a DataLabelingJob.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Dataset resource names. Right now we only support labeling from a single
// Dataset.
// Format:
// Required. Dataset resource names. Right now we only support labeling from a
// single Dataset. Format:
// `projects/{project}/locations/{location}/datasets/{dataset}`
repeated string datasets = 3 [
(google.api.field_behavior) = REQUIRED,
@ -74,14 +73,14 @@ message DataLabelingJob {
// Required. Number of labelers to work on each DataItem.
int32 labeler_count = 4 [(google.api.field_behavior) = REQUIRED];
// Required. The Google Cloud Storage location of the instruction pdf. This pdf is
// shared with labelers, and provides detailed description on how to label
// DataItems in Datasets.
// Required. The Google Cloud Storage location of the instruction pdf. This
// pdf is shared with labelers, and provides detailed description on how to
// label DataItems in Datasets.
string instruction_uri = 5 [(google.api.field_behavior) = REQUIRED];
// Required. Points to a YAML file stored on Google Cloud Storage describing the
// config for a specific type of DataLabelingJob.
// The schema files that can be used here are found in the
// Required. Points to a YAML file stored on Google Cloud Storage describing
// the config for a specific type of DataLabelingJob. The schema files that
// can be used here are found in the
// https://storage.googleapis.com/google-cloud-aiplatform bucket in the
// /schema/datalabelingjob/inputs/ folder.
string inputs_schema_uri = 6 [(google.api.field_behavior) = REQUIRED];
@ -92,22 +91,25 @@ message DataLabelingJob {
// Output only. The detailed state of the job.
JobState state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Current labeling job progress percentage scaled in interval [0, 100],
// indicating the percentage of DataItems that has been finished.
// Output only. Current labeling job progress percentage scaled in interval
// [0, 100], indicating the percentage of DataItems that has been finished.
int32 labeling_progress = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Estimated cost(in US dollars) that the DataLabelingJob has incurred to
// date.
google.type.Money current_spend = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Estimated cost(in US dollars) that the DataLabelingJob has
// incurred to date.
google.type.Money current_spend = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this DataLabelingJob was created.
google.protobuf.Timestamp create_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 9
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this DataLabelingJob was updated most recently.
google.protobuf.Timestamp update_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. DataLabelingJob errors. It is only populated when job's state is
// `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.
// Output only. DataLabelingJob errors. It is only populated when job's state
// is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.
google.rpc.Status error = 22 [(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your DataLabelingJobs.
@ -121,7 +123,8 @@ message DataLabelingJob {
// and are immutable. Following system labels exist for each DataLabelingJob:
//
// * "aiplatform.googleapis.com/schema": output only, its value is the
// [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s title.
// [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s
// title.
map<string, string> labels = 11;
// The SpecialistPools' resource names associated with this job.

@ -20,6 +20,7 @@ import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/cloud/aiplatform/v1beta1/saved_query.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
@ -49,21 +50,22 @@ message Dataset {
// The description of the Dataset.
string description = 16;
// Required. Points to a YAML file stored on Google Cloud Storage describing additional
// information about the Dataset.
// The schema is defined as an OpenAPI 3.0.2 Schema Object.
// The schema files that can be used here are found in
// gs://google-cloud-aiplatform/schema/dataset/metadata/.
// Required. Points to a YAML file stored on Google Cloud Storage describing
// additional information about the Dataset. The schema is defined as an
// OpenAPI 3.0.2 Schema Object. The schema files that can be used here are
// found in gs://google-cloud-aiplatform/schema/dataset/metadata/.
string metadata_schema_uri = 3 [(google.api.field_behavior) = REQUIRED];
// Required. Additional information about the Dataset.
google.protobuf.Value metadata = 8 [(google.api.field_behavior) = REQUIRED];
// Output only. Timestamp when this Dataset was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Dataset was last updated.
google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
@ -82,15 +84,27 @@ message Dataset {
// and are immutable. Following system labels exist for each Dataset:
//
// * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its
// value is the [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] title.
// value is the
// [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
// title.
map<string, string> labels = 7;
// All SavedQueries belong to the Dataset will be returned in List/Get
// Dataset response. The [annotation_specs][SavedQuery.annotation_specs] field
// will not be populated except for UI cases which will only use
// [annotation_spec_count][google.cloud.aiplatform.v1beta1.SavedQuery.annotation_spec_count].
// In CreateDataset request, a SavedQuery is created together if
// this field is set, up to one SavedQuery can be set in CreateDatasetRequest.
// The SavedQuery should not contain any AnnotationSpec.
repeated SavedQuery saved_queries = 9;
// Customer-managed encryption key spec for a Dataset. If set, this Dataset
// and all sub-resources of this Dataset will be secured by this key.
EncryptionSpec encryption_spec = 11;
// Output only. The resource name of the Artifact that was created in MetadataStore when
// creating the Dataset. The Artifact resource name pattern is
// Output only. The resource name of the Artifact that was created in
// MetadataStore when creating the Dataset. The Artifact resource name pattern
// is
// `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`.
string metadata_artifact = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
}
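
A minimal sketch of the new saved_queries behavior in CreateDataset, assuming a v1beta1 Python client regenerated from this change; the project, schema URI, and display names are illustrative, and other required Dataset fields (such as metadata) are omitted for brevity:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.DatasetServiceClient()
dataset = aiplatform_v1beta1.Dataset(
    display_name="my-dataset",  # illustrative
    metadata_schema_uri="gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
    # At most one SavedQuery may be supplied at creation time, and it must not
    # carry any AnnotationSpec (per the field comment above).
    saved_queries=[aiplatform_v1beta1.SavedQuery(display_name="my-query")],
)
operation = client.create_dataset(
    parent="projects/my-project/locations/us-central1",  # illustrative
    dataset=dataset,
)
created = operation.result()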
@ -114,21 +128,27 @@ message ImportDataConfig {
// considered identical if their content bytes are identical (e.g. image bytes
// or pdf bytes).
// These labels will be overridden by Annotation labels specified inside index
// file referenced by [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], e.g. jsonl file.
// file referenced by
// [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri],
// e.g. jsonl file.
map<string, string> data_item_labels = 2;
// Labels that will be applied to newly imported Annotations. If two
// Annotations are identical, one of them will be deduped. Two Annotations are
// considered identical if their [payload][google.cloud.aiplatform.v1beta1.Annotation.payload],
// [payload_schema_uri][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri] and all of their
// considered identical if their
// [payload][google.cloud.aiplatform.v1beta1.Annotation.payload],
// [payload_schema_uri][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
// and all of their
// [labels][google.cloud.aiplatform.v1beta1.Annotation.labels] are the same.
// These labels will be overridden by Annotation labels specified inside index
// file referenced by [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], e.g. jsonl file.
// file referenced by
// [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri],
// e.g. jsonl file.
map<string, string> annotation_labels = 3;
// Required. Points to a YAML file stored on Google Cloud Storage describing the import
// format. Validation will be done against the schema. The schema is defined
// as an [OpenAPI 3.0.2 Schema
// Required. Points to a YAML file stored on Google Cloud Storage describing
// the import format. Validation will be done against the schema. The schema
// is defined as an [OpenAPI 3.0.2 Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
string import_schema_uri = 4 [(google.api.field_behavior) = REQUIRED];
}
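
A minimal ImportData sketch using the label maps described above, assuming the v1beta1 Python client; the dataset name, bucket, and schema URI are illustrative:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.DatasetServiceClient()
import_config = aiplatform_v1beta1.ImportDataConfig(
    gcs_source=aiplatform_v1beta1.GcsSource(
        uris=["gs://my-bucket/import/index.jsonl"]  # illustrative index file
    ),
    import_schema_uri="gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml",
    # Applied to newly imported DataItems; overridden by labels in the index file.
    data_item_labels={"source": "batch-2023-01"},
)
operation = client.import_data(
    name="projects/my-project/locations/us-central1/datasets/1234567890",
    import_configs=[import_config],
)
operation.result()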

@ -41,10 +41,12 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// resources.
service DatasetService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a Dataset.
rpc CreateDataset(CreateDatasetRequest) returns (google.longrunning.Operation) {
rpc CreateDataset(CreateDatasetRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/datasets"
body: "dataset"
@ -82,7 +84,8 @@ service DatasetService {
}
// Deletes a Dataset.
rpc DeleteDataset(DeleteDatasetRequest) returns (google.longrunning.Operation) {
rpc DeleteDataset(DeleteDatasetRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/datasets/*}"
};
@ -128,14 +131,16 @@ service DatasetService {
}
// Searches DataItems in a Dataset.
rpc SearchDataItems(SearchDataItemsRequest) returns (SearchDataItemsResponse) {
rpc SearchDataItems(SearchDataItemsRequest)
returns (SearchDataItemsResponse) {
option (google.api.http) = {
get: "/v1beta1/{dataset=projects/*/locations/*/datasets/*}:searchDataItems"
};
}
// Lists SavedQueries in a Dataset.
rpc ListSavedQueries(ListSavedQueriesRequest) returns (ListSavedQueriesResponse) {
rpc ListSavedQueries(ListSavedQueriesRequest)
returns (ListSavedQueriesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/datasets/*}/savedQueries"
};
@ -151,7 +156,8 @@ service DatasetService {
}
// Lists Annotations belongs to a dataitem
rpc ListAnnotations(ListAnnotationsRequest) returns (ListAnnotationsResponse) {
rpc ListAnnotations(ListAnnotationsRequest)
returns (ListAnnotationsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/datasets/*/dataItems/*}/annotations"
};
@ -159,7 +165,8 @@ service DatasetService {
}
}
// Request message for [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
// Request message for
// [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
message CreateDatasetRequest {
// Required. The resource name of the Location to create the Dataset in.
// Format: `projects/{project}/locations/{location}`
@ -174,13 +181,15 @@ message CreateDatasetRequest {
Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
}
// Runtime operation information for [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
// Runtime operation information for
// [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
message CreateDatasetOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset].
// Request message for
// [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset].
message GetDatasetRequest {
// Required. The name of the Dataset resource.
string name = 1 [
@ -194,22 +203,25 @@ message GetDatasetRequest {
google.protobuf.FieldMask read_mask = 2;
}
// Request message for [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset].
// Request message for
// [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset].
message UpdateDatasetRequest {
// Required. The Dataset which replaces the resource on the server.
Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource.
// For the `FieldMask` definition, see [google.protobuf.FieldMask][google.protobuf.FieldMask].
// Updatable fields:
// For the `FieldMask` definition, see
// [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields:
//
// * `display_name`
// * `description`
// * `labels`
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
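
A minimal UpdateDataset sketch restricted to one of the updatable fields listed above, assuming the v1beta1 Python client; the dataset resource name is illustrative:

from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.DatasetServiceClient()
updated = client.update_dataset(
    dataset=aiplatform_v1beta1.Dataset(
        name="projects/my-project/locations/us-central1/datasets/1234567890",
        display_name="renamed-dataset",  # illustrative
    ),
    # Only fields named in the mask are written; here, just display_name.
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)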
// Request message for [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets].
// Request message for
// [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets].
message ListDatasetsRequest {
// Required. The name of the Dataset's parent resource.
// Format: `projects/{project}/locations/{location}`
@ -255,7 +267,8 @@ message ListDatasetsRequest {
string order_by = 6;
}
// Response message for
// [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets].
message ListDatasetsResponse {
// A list of Datasets that matches the specified filter in the request.
repeated Dataset datasets = 1;
@ -264,7 +277,8 @@ message ListDatasetsResponse {
string next_page_token = 2;
}
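A brief paging sketch, again assuming the Python client; the returned pager follows next_page_token automatically, so callers rarely read it directly (the parent resource name is a placeholder):

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    request = aiplatform_v1beta1.ListDatasetsRequest(
        parent="projects/my-project/locations/us-central1",
    )
    # Iterating the pager transparently issues follow-up requests using
    # next_page_token until every Dataset has been listed.
    for dataset in client.list_datasets(request=request):
        print(dataset.name, dataset.display_name)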
// Request message for
// [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset].
message DeleteDatasetRequest {
// Required. The resource name of the Dataset to delete.
// Format:
@ -277,7 +291,8 @@ message DeleteDatasetRequest {
];
}
// Request message for
// [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
message ImportDataRequest {
// Required. The name of the Dataset resource.
// Format:
@ -289,23 +304,25 @@ message ImportDataRequest {
}
];
// Required. The desired input locations. The contents of all input locations
// will be imported in one batch.
repeated ImportDataConfig import_configs = 2
[(google.api.field_behavior) = REQUIRED];
}
// Response message for
// [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
message ImportDataResponse {}
// Runtime operation information for
// [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
message ImportDataOperationMetadata {
// The common part of the operation metadata.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for
// [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData].
message ExportDataRequest {
// Required. The name of the Dataset resource.
// Format:
@ -321,13 +338,15 @@ message ExportDataRequest {
ExportDataConfig export_config = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for
// [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData].
message ExportDataResponse {
// All of the files that are exported in this export operation.
repeated string exported_files = 1;
}
// Runtime operation information for
// [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData].
message ExportDataOperationMetadata {
// The common part of the operation metadata.
GenericOperationMetadata generic_metadata = 1;
@ -337,7 +356,8 @@ message ExportDataOperationMetadata {
string gcs_output_directory = 2;
}
// Request message for
// [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems].
message ListDataItemsRequest {
// Required. The resource name of the Dataset to list DataItems from.
// Format:
@ -366,7 +386,8 @@ message ListDataItemsRequest {
string order_by = 6;
}
// Response message for
// [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems].
message ListDataItemsResponse {
// A list of DataItems that matches the specified filter in the request.
repeated DataItem data_items = 1;
@ -375,12 +396,13 @@ message ListDataItemsResponse {
string next_page_token = 2;
}
// Request message for
// [DatasetService.SearchDataItems][google.cloud.aiplatform.v1beta1.DatasetService.SearchDataItems].
message SearchDataItemsRequest {
// Expression that allows ranking results based on annotation's property.
message OrderByAnnotation {
// Required. Saved query of the Annotation. Only Annotations belonging to this
// saved query will be considered for ordering.
string saved_query = 1 [(google.api.field_behavior) = REQUIRED];
// A comma-separated list of annotation fields to order by, sorted in
@ -452,7 +474,8 @@ message SearchDataItemsRequest {
// belong to.
repeated string annotation_filters = 11;
// Mask specifying which fields of
// [DataItemView][google.cloud.aiplatform.v1beta1.DataItemView] to read.
google.protobuf.FieldMask field_mask = 6;
// If set, only up to this many of Annotations will be returned per
@ -470,18 +493,23 @@ message SearchDataItemsRequest {
// A token identifying a page of results for the server to return
// Typically obtained via
// [SearchDataItemsResponse.next_page_token][google.cloud.aiplatform.v1beta1.SearchDataItemsResponse.next_page_token]
// of the previous
// [DatasetService.SearchDataItems][google.cloud.aiplatform.v1beta1.DatasetService.SearchDataItems]
// call.
string page_token = 10;
}
// Response message for
// [DatasetService.SearchDataItems][google.cloud.aiplatform.v1beta1.DatasetService.SearchDataItems].
message SearchDataItemsResponse {
// The DataItemViews read.
repeated DataItemView data_item_views = 1;
// A token to retrieve next page of results.
// Pass to
// [SearchDataItemsRequest.page_token][google.cloud.aiplatform.v1beta1.SearchDataItemsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
@ -504,7 +532,8 @@ message DataItemView {
bool has_truncated_annotations = 3;
}
// Request message for
// [DatasetService.ListSavedQueries][google.cloud.aiplatform.v1beta1.DatasetService.ListSavedQueries].
message ListSavedQueriesRequest {
// Required. The resource name of the Dataset to list SavedQueries from.
// Format:
@ -533,7 +562,8 @@ message ListSavedQueriesRequest {
string order_by = 6;
}
// Response message for
// [DatasetService.ListSavedQueries][google.cloud.aiplatform.v1beta1.DatasetService.ListSavedQueries].
message ListSavedQueriesResponse {
// A list of SavedQueries that match the specified filter in the request.
repeated SavedQuery saved_queries = 1;
@ -542,7 +572,8 @@ message ListSavedQueriesResponse {
string next_page_token = 2;
}
// Request message for
// [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec].
message GetAnnotationSpecRequest {
// Required. The name of the AnnotationSpec resource.
// Format:
@ -558,7 +589,8 @@ message GetAnnotationSpecRequest {
google.protobuf.FieldMask read_mask = 2;
}
// Request message for
// [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
message ListAnnotationsRequest {
// Required. The resource name of the DataItem to list Annotations from.
// Format:
@ -587,7 +619,8 @@ message ListAnnotationsRequest {
string order_by = 6;
}
// Response message for
// [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
message ListAnnotationsResponse {
// A list of Annotations that matches the specified filter in the request.
repeated Annotation annotations = 1;

@ -42,9 +42,12 @@ message DeploymentResourcePool {
// `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Required. The underlying DedicatedResources that the DeploymentResourcePool
// uses.
DedicatedResources dedicated_resources = 2
[(google.api.field_behavior) = REQUIRED];
// Output only. Timestamp when this DeploymentResourcePool was created.
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -37,15 +37,18 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service that manages the DeploymentResourcePool resource.
service DeploymentResourcePoolService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Create a DeploymentResourcePool.
rpc CreateDeploymentResourcePool(CreateDeploymentResourcePoolRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/deploymentResourcePools"
body: "*"
};
option (google.api.method_signature) =
"parent,deployment_resource_pool,deployment_resource_pool_id";
option (google.longrunning.operation_info) = {
response_type: "DeploymentResourcePool"
metadata_type: "CreateDeploymentResourcePoolOperationMetadata"
@ -53,7 +56,8 @@ service DeploymentResourcePoolService {
}
// Get a DeploymentResourcePool.
rpc GetDeploymentResourcePool(GetDeploymentResourcePoolRequest)
returns (DeploymentResourcePool) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}"
};
@ -61,7 +65,8 @@ service DeploymentResourcePoolService {
}
// List DeploymentResourcePools in a location.
rpc ListDeploymentResourcePools(ListDeploymentResourcePoolsRequest)
returns (ListDeploymentResourcePoolsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/deploymentResourcePools"
};
@ -69,7 +74,8 @@ service DeploymentResourcePoolService {
}
// Delete a DeploymentResourcePool.
rpc DeleteDeploymentResourcePool(DeleteDeploymentResourcePoolRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}"
};
@ -81,7 +87,8 @@ service DeploymentResourcePoolService {
}
// List DeployedModels that have been deployed on this DeploymentResourcePool.
rpc QueryDeployedModels(QueryDeployedModelsRequest)
returns (QueryDeployedModelsResponse) {
option (google.api.http) = {
get: "/v1beta1/{deployment_resource_pool=projects/*/locations/*/deploymentResourcePools/*}:queryDeployedModels"
};
@ -91,8 +98,8 @@ service DeploymentResourcePoolService {
// Request message for CreateDeploymentResourcePool method.
message CreateDeploymentResourcePoolRequest {
// Required. The parent location resource where this DeploymentResourcePool
// will be created. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -101,7 +108,8 @@ message CreateDeploymentResourcePoolRequest {
];
// Required. The DeploymentResourcePool to create.
DeploymentResourcePool deployment_resource_pool = 2
[(google.api.field_behavior) = REQUIRED];
// Required. The ID to use for the DeploymentResourcePool, which
// will become the final component of the DeploymentResourcePool's resource
@ -109,7 +117,8 @@ message CreateDeploymentResourcePoolRequest {
//
// The maximum length is 63 characters, and valid characters
// are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`.
string deployment_resource_pool_id = 3
[(google.api.field_behavior) = REQUIRED];
}
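An illustrative creation sketch with the Python client. The flattened keyword arguments mirror the method_signature above; the DedicatedResources sub-fields (machine_spec, replica counts) are assumed from machine_resources.proto, and every resource name is a placeholder:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DeploymentResourcePoolServiceClient()

    pool = aiplatform_v1beta1.DeploymentResourcePool(
        dedicated_resources=aiplatform_v1beta1.DedicatedResources(
            machine_spec=aiplatform_v1beta1.MachineSpec(machine_type="n1-standard-4"),
            min_replica_count=1,
            max_replica_count=2,
        ),
    )
    operation = client.create_deployment_resource_pool(
        parent="projects/my-project/locations/us-central1",
        deployment_resource_pool=pool,
        # Must match /^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/ and be at most 63 chars.
        deployment_resource_pool_id="shared-pool-1",
    )
    created = operation.result()  # long-running operation; blocks until done
    print(created.name)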
// Runtime operation information for CreateDeploymentResourcePool method.
@ -133,8 +142,8 @@ message GetDeploymentResourcePoolRequest {
// Request message for ListDeploymentResourcePools method.
message ListDeploymentResourcePoolsRequest {
// Required. The parent Location which owns this collection of
// DeploymentResourcePools. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {

@ -29,8 +29,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Represents a customer-managed encryption key spec that can be applied to
// a top-level resource.
message EncryptionSpec {
// Required. The Cloud KMS resource identifier of the customer managed
// encryption key used to protect a resource. Has the form:
// `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`.
// The key needs to be in the same region as where the compute resource is
// created.
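A short sketch of carrying this spec on a resource, assuming the Python client, that the key-reference field is named kms_key_name, and that the enclosing resource (an Endpoint here) exposes an encryption_spec field; all names are placeholders:

    from google.cloud import aiplatform_v1beta1

    # The KMS key must live in the same region as the resource it protects.
    encryption_spec = aiplatform_v1beta1.EncryptionSpec(
        kms_key_name=(
            "projects/my-project/locations/us-central1/"
            "keyRings/my-kr/cryptoKeys/my-key"
        ),
    )
    endpoint = aiplatform_v1beta1.Endpoint(
        display_name="cmek-endpoint",
        encryption_spec=encryption_spec,
    )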

@ -52,9 +52,13 @@ message Endpoint {
string description = 3;
// Output only. The models deployed in this Endpoint.
// To add or remove DeployedModels use
// [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]
// and
// [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]
// respectively.
repeated DeployedModel deployed_models = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// A map from a DeployedModel's ID to the percentage of this Endpoint's
// traffic that should be forwarded to that DeployedModel.
@ -80,10 +84,12 @@ message Endpoint {
map<string, string> labels = 7;
// Output only. Timestamp when this Endpoint was created.
google.protobuf.Timestamp create_time = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Endpoint was last updated.
google.protobuf.Timestamp update_time = 9
[(google.api.field_behavior) = OUTPUT_ONLY];
// Customer-managed encryption key spec for an Endpoint. If set, this
// Endpoint and all sub-resources of this Endpoint will be secured by
@ -97,7 +103,8 @@ message Endpoint {
// Private services access must already be configured for the network. If left
// unspecified, the Endpoint is not peered with any network.
//
// Only one of the fields,
// [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
//
@ -105,20 +112,21 @@ message Endpoint {
// `projects/{project}/global/networks/{network}`.
// Where `{project}` is a project number, as in `12345`, and `{network}` is
// network name.
string network = 13 [
(google.api.resource_reference) = { type: "compute.googleapis.com/Network" }
];
// Deprecated: If true, expose the Endpoint via private service connect.
//
// Only one of the fields,
// [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
bool enable_private_service_connect = 17 [deprecated = true];
// Output only. Resource name of the Model Monitoring job associated with this
// Endpoint if monitoring is enabled by
// [CreateModelDeploymentMonitoringJob][]. Format:
// `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
string model_deployment_monitoring_job = 14 [
(google.api.field_behavior) = OUTPUT_ONLY,
@ -128,7 +136,8 @@ message Endpoint {
];
// Configures the request-response logging for online prediction.
PredictRequestResponseLoggingConfig predict_request_response_logging_config =
18;
}
// A deployment of a Model. Endpoints contain one or more DeployedModels.
@ -151,18 +160,19 @@ message DeployedModel {
// Format:
// `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`
string shared_resources = 17 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/DeploymentResourcePool"
}];
}
// Immutable. The ID of the DeployedModel. If not provided upon deployment,
// Vertex AI will generate a value for this ID.
//
// This value should be 1-10 characters, and valid characters are /[0-9]/.
string id = 1 [(google.api.field_behavior) = IMMUTABLE];
// Required. The resource name of the Model that this is the deployment of.
// Note that the Model may be in a different location than the DeployedModel's
// Endpoint.
//
// The resource name may contain version id or version alias to specify the
// version, if no version is specified, the default version will be deployed.
@ -181,17 +191,26 @@ message DeployedModel {
string display_name = 3;
// Output only. Timestamp when the DeployedModel was created.
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Explanation configuration for this DeployedModel.
//
// When deploying a Model using
// [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel],
// this value overrides the value of
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec].
// All fields of
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// are optional in the request. If a field of
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// is not populated, the value of the same field of
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
// is inherited. If the corresponding
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
// is not populated, all fields of the
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// will be used for the explanation configuration.
ExplanationSpec explanation_spec = 9;
// The service account that the DeployedModel's container runs as. Specify the
@ -217,10 +236,12 @@ message DeployedModel {
// Estimate your costs before enabling this option.
bool enable_access_logging = 13;
// Output only. Provide paths for users to send predict/explain/health
// requests directly to the deployed model services running on Cloud via
// private services access. This field is populated if
// [network][google.cloud.aiplatform.v1beta1.Endpoint.network] is configured.
PrivateEndpoints private_endpoints = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// PrivateEndpoints proto is used to provide paths for users to send
@ -238,8 +259,8 @@ message PrivateEndpoints {
// Output only. Http(s) path to send health check requests.
string health_http_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The name of the service attachment resource. Populated if
// private service connect is enabled.
string service_attachment = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -36,10 +36,12 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for managing Vertex AI's Endpoints.
service EndpointService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates an Endpoint.
rpc CreateEndpoint(CreateEndpointRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/endpoints"
body: "endpoint"
@ -78,7 +80,8 @@ service EndpointService {
}
// Deletes an Endpoint.
rpc DeleteEndpoint(DeleteEndpointRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/endpoints/*}"
};
@ -95,7 +98,8 @@ service EndpointService {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:deployModel"
body: "*"
};
option (google.api.method_signature) =
"endpoint,deployed_model,traffic_split";
option (google.longrunning.operation_info) = {
response_type: "DeployModelResponse"
metadata_type: "DeployModelOperationMetadata"
@ -104,12 +108,14 @@ service EndpointService {
// Undeploys a Model from an Endpoint, removing a DeployedModel from it, and
// freeing all resources it's using.
rpc UndeployModel(UndeployModelRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:undeployModel"
body: "*"
};
option (google.api.method_signature) =
"endpoint,deployed_model_id,traffic_split";
option (google.longrunning.operation_info) = {
response_type: "UndeployModelResponse"
metadata_type: "UndeployModelOperationMetadata"
@ -117,7 +123,8 @@ service EndpointService {
}
}
// Request message for
// [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
message CreateEndpointRequest {
// Required. The resource name of the Location to create the Endpoint in.
// Format: `projects/{project}/locations/{location}`
@ -142,13 +149,15 @@ message CreateEndpointRequest {
string endpoint_id = 4 [(google.api.field_behavior) = IMMUTABLE];
}
// Runtime operation information for
// [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
message CreateEndpointOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for
// [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint]
message GetEndpointRequest {
// Required. The name of the Endpoint resource.
// Format:
@ -161,10 +170,11 @@ message GetEndpointRequest {
];
}
// Request message for
// [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
message ListEndpointsRequest {
// Required. The resource name of the Location from which to list the
// Endpoints. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -172,11 +182,12 @@ message ListEndpointsRequest {
}
];
// Optional. An expression for filtering the results of the request. For field
// names both snake_case and camelCase are supported.
//
// * `endpoint` supports = and !=. `endpoint` represents the Endpoint ID,
// i.e. the last segment of the Endpoint's [resource
// name][google.cloud.aiplatform.v1beta1.Endpoint.name].
// * `display_name` supports = and !=
// * `labels` supports general map functions that is:
// * `labels.key=value` - key:value equality
@ -194,34 +205,44 @@ message ListEndpointsRequest {
// Optional. The standard list page token.
// Typically obtained via
// [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token]
// of the previous
// [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]
// call.
string page_token = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5
[(google.api.field_behavior) = OPTIONAL];
}
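A filtering and paging sketch for the request above, assuming the Python client and that the filter field follows the comments (a single label-equality clause here); resource names and label values are placeholders:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.EndpointServiceClient()
    request = aiplatform_v1beta1.ListEndpointsRequest(
        parent="projects/my-project/locations/us-central1",
        filter="labels.env=prod",
    )
    # The pager resolves next_page_token internally.
    for endpoint in client.list_endpoints(request=request):
        print(endpoint.name, endpoint.display_name)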
// Response message for
// [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
message ListEndpointsResponse {
// List of Endpoints in the requested page.
repeated Endpoint endpoints = 1;
// A token to retrieve the next page of results.
// Pass to
// [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for
// [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
message UpdateEndpointRequest {
// Required. The Endpoint which replaces the resource on the server.
Endpoint endpoint = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource. See
// [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for
// [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint].
message DeleteEndpointRequest {
// Required. The name of the Endpoint resource to be deleted.
// Format:
@ -234,7 +255,8 @@ message DeleteEndpointRequest {
];
}
// Request message for
// [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
message DeployModelRequest {
// Required. The name of the Endpoint resource into which to deploy a Model.
// Format:
@ -247,8 +269,9 @@ message DeployModelRequest {
];
// Required. The DeployedModel to be created within the Endpoint. Note that
// [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
// must be updated for the DeployedModel to start receiving traffic, either as
// part of this call, or via
// [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
DeployedModel deployed_model = 2 [(google.api.field_behavior) = REQUIRED];
@ -256,29 +279,34 @@ message DeployModelRequest {
// traffic that should be forwarded to that DeployedModel.
//
// If this field is non-empty, then the Endpoint's
// [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
// will be overwritten with it. To refer to the ID of the just being deployed
// Model, a "0" should be used, and the actual ID of the new DeployedModel
// will be filled in its place by this method. The traffic percentage values
// must add up to 100.
//
// If this field is empty, then the Endpoint's
// [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] is
// not updated.
map<string, int32> traffic_split = 3;
}
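A deployment sketch using the flattened arguments from the method_signature above (Python client). Machine sizing via dedicated_resources is assumed from machine_resources.proto, and all IDs and names are placeholders:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.EndpointServiceClient()

    deployed_model = aiplatform_v1beta1.DeployedModel(
        model="projects/my-project/locations/us-central1/models/456",
        display_name="my-deployment",
        # Assumed resource fields; see machine_resources.proto for the full shape.
        dedicated_resources=aiplatform_v1beta1.DedicatedResources(
            machine_spec=aiplatform_v1beta1.MachineSpec(machine_type="n1-standard-4"),
            min_replica_count=1,
        ),
    )
    # "0" stands for the DeployedModel created by this call; the server fills in
    # the real ID. The percentages must add up to 100.
    operation = client.deploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/789",
        deployed_model=deployed_model,
        traffic_split={"0": 100},
    )
    response = operation.result()
    print(response.deployed_model.id)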
// Response message for
// [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
message DeployModelResponse {
// The DeployedModel that had been deployed in the Endpoint.
DeployedModel deployed_model = 1;
}
// Runtime operation information for
// [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
message DeployModelOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for
// [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
message UndeployModelRequest {
// Required. The name of the Endpoint resource from which to undeploy a Model.
// Format:
@ -294,21 +322,21 @@ message UndeployModelRequest {
string deployed_model_id = 2 [(google.api.field_behavior) = REQUIRED];
// If this field is provided, then the Endpoint's
// [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
// will be overwritten with it. If last DeployedModel is being undeployed from
// the Endpoint, the [Endpoint.traffic_split] will always end up empty when
// this call returns. A DeployedModel will be successfully undeployed only if
// it doesn't have any traffic assigned to it when this method executes, or if
// this field unassigns any traffic to it.
map<string, int32> traffic_split = 3;
}
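The reverse operation, sketched with the same client and the flattened arguments from the method_signature above; the IDs are placeholders. This form works when the DeployedModel no longer receives traffic; otherwise pass a traffic_split that reassigns its share first:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.EndpointServiceClient()

    operation = client.undeploy_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/789",
        deployed_model_id="1234567890",
    )
    operation.result()  # UndeployModelResponse carries no fields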
// Response message for
// [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
message UndeployModelResponse {}
// Runtime operation information for
// [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
message UndeployModelOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;

@ -52,12 +52,15 @@ message EntityType {
string description = 2 [(google.api.field_behavior) = OPTIONAL];
// Output only. Timestamp when this EntityType was created.
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this EntityType was most recently updated.
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The labels with user-defined metadata to organize your
// EntityTypes.
//
// Label keys and values can be no longer than 64 characters
// (Unicode codepoints), can only contain lowercase letters, numeric
@ -70,19 +73,21 @@ message EntityType {
// and are immutable.
map<string, string> labels = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. Used to perform consistent read-modify-write updates. If not
// set, a blind "overwrite" update happens.
string etag = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. The default monitoring configuration for all Features with value
// type
// ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
// BOOL, STRING, DOUBLE or INT64 under this EntityType.
//
// If this is populated with
// [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot
// analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is
// disabled.
FeaturestoreMonitoringConfig monitoring_config = 8
[(google.api.field_behavior) = OPTIONAL];
// Optional. Config for data retention policy in offline storage.
// TTL in days for feature values that will be stored in offline storage.

@ -60,7 +60,8 @@ message Event {
];
// Output only. Time the Event occurred.
google.protobuf.Timestamp event_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Required. The type of the Event.
Type type = 4 [(google.api.field_behavior) = REQUIRED];

@ -87,10 +87,12 @@ message Execution {
map<string, string> labels = 10;
// Output only. Timestamp when this Execution was created.
google.protobuf.Timestamp create_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Execution was last updated.
google.protobuf.Timestamp update_time = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// The title of the schema describing the metadata.
//

@ -29,8 +29,10 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Explanation of a prediction (provided in
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
// produced by the Model on a given
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
message Explanation {
// Output only. Feature attributions grouped by predicted outputs.
//
@ -38,15 +40,21 @@ message Explanation {
// predict only one score, there is only one attribution that explains the
// predicted output. For Models that predict multiple outputs, such as
// multiclass Models that predict multiple classes, each element explains one
// specific item.
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// can be used to identify which output this attribution is explaining.
//
// If users set
// [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k],
// the attributions are sorted by
// [instance_output_value][Attributions.instance_output_value] in descending
// order. If
// [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices]
// is specified, the attributions are stored by
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// in the same order as they appear in the output_indices.
repeated Attribution attributions = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. List of the nearest neighbors for example-based explanations.
//
@ -57,54 +65,66 @@ message Explanation {
// Aggregated explanation metrics for a Model over a set of instances.
message ModelExplanation {
// Output only. Aggregated attributions explaining the Model's prediction
// outputs over the set of instances. The attributions are grouped by outputs.
//
// For Models that predict only one output, such as regression Models that
// predict only one score, there is only one attribution that explains the
// predicted output. For Models that predict multiple outputs, such as
// multiclass Models that predict multiple classes, each element explains one
// specific item.
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// can be used to identify which output this attribution is explaining.
//
// The
// [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
// [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
// and
// [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
// fields are averaged over the test data.
//
// NOTE: Currently AutoML tabular classification Models produce only one
// attribution, which averages attributions over all the classes it predicts.
// [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error]
// is not populated.
repeated Attribution mean_attributions = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Attribution that explains a particular prediction output.
message Attribution {
// Output only. Model predicted output if the input instance is constructed
// from the baselines of all the features defined in
// [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
// The field name of the output is determined by the key in
// [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
//
// If the Model's predicted output has multiple dimensions (rank > 1), this is
// the value in the output located by
// [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
//
// If there are multiple baselines, their output values are averaged.
double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Model predicted output on the corresponding [explanation
// instance][ExplainRequest.instances]. The field name of the output is
// determined by the key in
// [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
//
// If the Model predicted output has multiple dimensions, this is the value in
// the output located by
// [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Attributions of each explained feature. Features are extracted
// from the [prediction
// instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
// according to [explanation metadata for
// inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
//
// The value is a struct, whose keys are the name of the feature. The values
// are how much the feature in the
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
// contributed to the predicted result.
//
// The format of the value is determined by the feature's input format:
@ -121,11 +141,16 @@ message Attribution {
// struct. The formats of the values in the attribution struct are
// determined by the formats of the values in the feature struct.
//
// The
// [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri]
// field, pointed to by the
// [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] field of
// the
// [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
// object, points to the schema file that describes the features and their
// attribution values (if it is populated).
google.protobuf.Value feature_attributions = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The index that locates the explained prediction output.
//
@ -136,8 +161,9 @@ message Attribution {
// of the output vector. Indices start from 0.
repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The display name of the output identified by
// [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
// For example, the predicted class name by a multi-classification Model.
//
// This field is only populated iff the Model predicts display names as a
// separate field along with the explained output. The predicted display name
@ -145,20 +171,26 @@ message Attribution {
// output_index.
string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Error of
// [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
// caused by approximation used in the explanation method. Lower value means
// more precise attributions.
//
// * For Sampled Shapley
// [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
// increasing
// [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count]
// might reduce the error.
// * For Integrated Gradients
// [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
// increasing
// [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count]
// might reduce the error.
// * For [XRAI
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
// increasing
// [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count]
// might reduce the error.
//
// See [this introduction](/vertex-ai/docs/explainable-ai/overview)
// for more information.
@ -224,13 +256,14 @@ message ExplanationParameters {
int32 top_k = 4;
// If populated, only returns attributions that have
// [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// contained in output_indices. It must be an ndarray of integers, with the
// same shape of the output it's explaining.
//
// If not populated, returns attributions for
// [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k]
// indices of outputs. If neither top_k nor output_indices is populated,
// returns the argmax index of the outputs.
//
// Only applicable to Models that predict multiple outputs (e.g., multi-class
// Models that predict multiple classes).
@ -241,8 +274,8 @@ message ExplanationParameters {
// contribute to the label being predicted. A sampling strategy is used to
// approximate the value rather than considering all subsets of features.
message SampledShapleyAttribution {
// Required. The number of feature permutations to consider when approximating
// the Shapley values.
//
// Valid range of its value is [1, 50], inclusively.
int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
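A small configuration sketch, assuming the generated Python types and that these parameters are later wired into an ExplanationSpec; the counts are placeholders chosen within the documented ranges:

    from google.cloud import aiplatform_v1beta1

    parameters = aiplatform_v1beta1.ExplanationParameters(
        # path_count must be within [1, 50].
        sampled_shapley_attribution=aiplatform_v1beta1.SampledShapleyAttribution(
            path_count=10,
        ),
        # Return attributions only for the 3 highest-scoring outputs.
        top_k=3,
    )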
@ -329,16 +362,18 @@ message SmoothGradConfig {
// paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
//
// If the distribution is different per feature, set
// [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma]
// instead for each feature.
float noise_sigma = 1;
// This is similar to
// [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma],
// but provides additional flexibility. A separate noise sigma can be
// provided for each feature, which is useful if their distributions are
// different. No noise is added to features that are not set. If this field
// is unset,
// [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
// will be used for all features.
FeatureNoiseSigma feature_noise_sigma = 2;
}
@ -357,13 +392,15 @@ message FeatureNoiseSigma {
message NoiseSigmaForFeature {
// The name of the input feature for which noise sigma is provided. The
// features are defined in
// [explanation metadata
// inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
string name = 1;
// This represents the standard deviation of the Gaussian kernel that will
// be used to add noise to the feature prior to computing gradients. Similar
// to
// [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
// but represents the noise added to the current feature. Defaults to 0.1.
float sigma = 2;
}
@ -390,7 +427,8 @@ message BlurBaselineConfig {
message Examples {
oneof config {
// The configuration for the generated index, the semantics are the same as
// [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] and should
// match NearestNeighborSearchConfig.
google.protobuf.Value nearest_neighbor_search_config = 2;
// Preset config based on the desired query speed-precision trade-off
@ -439,12 +477,13 @@ message Presets {
Modality modality = 2;
}
// The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
// entries that can be overridden at [online
// explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
message ExplanationSpecOverride {
// The parameters to be overridden. Note that the
// [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] cannot be changed. If not specified,
// no parameter is overridden.
// [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method]
// cannot be changed. If not specified, no parameter is overridden.
ExplanationParameters parameters = 1;
// The metadata to be overridden. If not specified, no metadata is overridden.
@ -454,11 +493,14 @@ message ExplanationSpecOverride {
ExamplesOverride examples_override = 3;
}
// The [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] entries that can be overridden at
// [online explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
// The
// [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata]
// entries that can be overridden at [online
// explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
message ExplanationMetadataOverride {
// The [input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] entries to be
// overridden.
// The [input
// metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
// entries to be overridden.
message InputMetadataOverride {
// Baseline inputs for this feature.
//
@ -469,12 +511,14 @@ message ExplanationMetadataOverride {
repeated google.protobuf.Value input_baselines = 1;
}
// Required. Overrides the [input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] of the features.
// The key is the name of the feature to be overridden. The keys specified
// here must exist in the input metadata to be overridden. If a feature is
// not specified here, the corresponding feature's input metadata is not
// overridden.
map<string, InputMetadataOverride> inputs = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Overrides the [input
// metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] of
// the features. The key is the name of the feature to be overridden. The keys
// specified here must exist in the input metadata to be overridden. If a
// feature is not specified here, the corresponding feature's input metadata
// is not overridden.
map<string, InputMetadataOverride> inputs = 1
[(google.api.field_behavior) = REQUIRED];
}
// Overrides for example-based explanations.
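Taken together, the two override messages above let a single online Explain call adjust parameters and input baselines without redeploying; the sketch below is illustrative only, the feature key `age` and its values are made up, and the attribution method itself cannot be changed by an override.

```
import json

# Hypothetical ExplanationSpecOverride for one Explain request. The deployed
# Model is assumed to already use Sampled Shapley, since the override may tune
# parameters but cannot switch the attribution method.
explanation_spec_override = {
    "parameters": {
        "sampledShapleyAttribution": {"pathCount": 10}
    },
    "metadata": {
        "inputs": {
            "age": {                    # key must exist in the Model's input metadata
                "inputBaselines": [30]  # replacement baseline for this feature
            }
        }
    },
}
print(json.dumps(explanation_spec_override, indent=2))
```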

@ -31,8 +31,10 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
message ExplanationMetadata {
// Metadata of the input of a feature.
//
// Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
// for Models that are using Vertex AI-provided images for Tensorflow.
// Fields other than
// [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines]
// are applicable only for Models that are using Vertex AI-provided images for
// Tensorflow.
message InputMetadata {
// Domain details of the input feature value. Provides numeric information
// about the feature, such as its range (min, max). If the feature has been
@ -63,7 +65,8 @@ message ExplanationMetadata {
// Visualization configurations for image explanation.
message Visualization {
// Type of the image visualization. Only applicable to
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
// [Integrated Gradients
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
enum Type {
// Should not be used.
TYPE_UNSPECIFIED = 0;
@ -141,7 +144,8 @@ message ExplanationMetadata {
}
// Type of the image visualization. Only applicable to
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
// [Integrated Gradients
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
// OUTLINES shows regions of attribution, while PIXELS shows per-pixel
// attribution. Defaults to OUTLINES.
Type type = 1;
@ -153,12 +157,14 @@ message ExplanationMetadata {
// The color scheme used for the highlighted areas.
//
// Defaults to PINK_GREEN for
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
// [Integrated Gradients
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
// which shows positive attributions in green and negative in pink.
//
// Defaults to VIRIDIS for
// [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], which
// highlights the most influential regions in yellow and the least
// [XRAI
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
// which highlights the most influential regions in yellow and the least
// influential in blue.
ColorMap color_map = 3;
@ -188,8 +194,9 @@ message ExplanationMetadata {
IDENTITY = 1;
// The tensor represents a bag of features where each index maps to
// a feature. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided for
// this encoding. For example:
// a feature.
// [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
// must be provided for this encoding. For example:
// ```
// input = [27, 6.0, 150]
// index_feature_mapping = ["age", "height", "weight"]
@ -198,8 +205,9 @@ message ExplanationMetadata {
// The tensor represents a bag of features where each index maps to a
// feature. Zero values in the tensor indicate that the feature is
// non-existent. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided
// for this encoding. For example:
// non-existent.
// [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
// must be provided for this encoding. For example:
// ```
// input = [2, 0, 5, 0, 1]
// index_feature_mapping = ["a", "b", "c", "d", "e"]
@ -207,7 +215,8 @@ message ExplanationMetadata {
BAG_OF_FEATURES_SPARSE = 3;
// The tensor is a list of binaries representing whether a feature exists
// or not (1 indicates existence). [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
// or not (1 indicates existence).
// [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
// must be provided for this encoding. For example:
// ```
// input = [1, 0, 1, 0, 1]
@ -216,8 +225,9 @@ message ExplanationMetadata {
INDICATOR = 4;
// The tensor is encoded into a 1-dimensional array represented by an
// encoded tensor. [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided
// for this encoding. For example:
// encoded tensor.
// [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name]
// must be provided for this encoding. For example:
// ```
// input = ["This", "is", "a", "test", "."]
// encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
@ -226,9 +236,9 @@ message ExplanationMetadata {
// Select this encoding when the input tensor is encoded into a
// 2-dimensional array represented by an encoded tensor.
// [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided for this
// encoding. The first dimension of the encoded tensor's shape is the same
// as the input tensor's shape. For example:
// [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name]
// must be provided for this encoding. The first dimension of the encoded
// tensor's shape is the same as the input tensor's shape. For example:
// ```
// input = ["This", "is", "a", "test", "."]
// encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
@ -244,7 +254,8 @@ message ExplanationMetadata {
//
// If no baseline is specified, Vertex AI chooses the baseline for this
// feature. If multiple baselines are specified, Vertex AI returns the
// average attributions across them in [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
// average attributions across them in
// [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
//
// For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
// of each baseline must match the shape of the input tensor. If a scalar is
@ -252,8 +263,9 @@ message ExplanationMetadata {
//
// For custom images, the element of the baselines must be in the same
// format as the feature's input in the
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. The schema of any single instance
// may be specified via Endpoint's DeployedModels'
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][].
// The schema of any single instance may be specified via Endpoint's
// DeployedModels'
// [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
@ -287,15 +299,18 @@ message ExplanationMetadata {
string dense_shape_tensor_name = 7;
// A list of feature names for each index in the input tensor.
// Required when the input [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES,
// BAG_OF_FEATURES_SPARSE, INDICATOR.
// Required when the input
// [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding]
// is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR.
repeated string index_feature_mapping = 8;
// Encoded tensor is a transformation of the input tensor. Must be provided
// if choosing
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]
// or [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] and the
// input tensor is not differentiable.
// [Integrated Gradients
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]
// or [XRAI
// attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
// and the input tensor is not differentiable.
//
// An encoded tensor is generated if the input tensor is encoded by a lookup
// table.
@ -315,17 +330,21 @@ message ExplanationMetadata {
// name will be treated as one feature when computing attributions. Features
// grouped together can have different shapes in value. If provided, there
// will be one single attribution generated in
// [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions], keyed by the group name.
// [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions],
// keyed by the group name.
string group_name = 12;
}
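To make the encoding examples above concrete, here is a hedged sketch of one `InputMetadata` entry for a BAG_OF_FEATURES tensor; the feature names, baseline, and group name are invented, and only fields discussed in the surrounding comments are populated.

```
import json

# Hypothetical InputMetadata for a tensor holding [age, height, weight]:
# with BAG_OF_FEATURES, index_feature_mapping names each index so that
# attributions can be reported per feature.
input_metadata = {
    "encoding": "BAG_OF_FEATURES",
    "indexFeatureMapping": ["age", "height", "weight"],
    "inputBaselines": [[27, 6.0, 150]],  # one baseline matching the input shape
    "groupName": "user_profile",         # optional; groups attributions under one key
}
print(json.dumps(input_metadata, indent=2))
```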
// Metadata of the prediction output to be explained.
message OutputMetadata {
// Defines how to map [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] to
// Defines how to map
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// to
// [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name].
//
// If neither of the fields are specified,
// [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] will not be populated.
// [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
// will not be populated.
oneof display_name_mapping {
// Static mapping between the index and display name.
//
@ -337,8 +356,10 @@ message ExplanationMetadata {
//
// The shape of the value must be an n-dimensional array of strings. The
// number of dimensions must match that of the outputs to be explained.
// The [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] is populated by locating in the
// mapping with [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
// The
// [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
// is populated by locating in the mapping with
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
google.protobuf.Value index_display_name_mapping = 1;
// Specify a field name in the prediction to look for the display name.
@ -346,8 +367,9 @@ message ExplanationMetadata {
// Use this if the prediction contains the display names for the outputs.
//
// The display names in the prediction must have the same shape of the
// outputs, so that it can be located by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] for
// a specific output.
// outputs, so that it can be located by
// [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
// for a specific output.
string display_name_mapping_key = 2;
}
@ -356,21 +378,23 @@ message ExplanationMetadata {
string output_tensor_name = 3;
}
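As a sketch of the display-name mapping described above, a three-class classifier could use the static variant; the class names and output tensor name are assumptions.

```
import json

# Hypothetical OutputMetadata: Attribution.output_index indexes into this
# static list to populate Attribution.output_display_name.
output_metadata = {
    "indexDisplayNameMapping": ["cat", "dog", "bird"],  # one name per output index
    "outputTensorName": "scores",                       # assumed tensor name
}
print(json.dumps(output_metadata, indent=2))
```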
// Required. Map from feature names to feature input metadata. Keys are the name of the
// features. Values are the specification of the feature.
// Required. Map from feature names to feature input metadata. Keys are the
// name of the features. Values are the specification of the feature.
//
// An empty InputMetadata is valid. It describes a text feature which has the
// name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline
// of the empty feature is chosen by Vertex AI.
// name specified as the key in
// [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
// The baseline of the empty feature is chosen by Vertex AI.
//
// For Vertex AI-provided Tensorflow images, the key can be any friendly
// name of the feature. Once specified,
// [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] are keyed by
// this key (if not grouped with another feature).
// [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
// are keyed by this key (if not grouped with another feature).
//
// For custom images, the key must match with the key in
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
map<string, InputMetadata> inputs = 1 [(google.api.field_behavior) = REQUIRED];
map<string, InputMetadata> inputs = 1
[(google.api.field_behavior) = REQUIRED];
// Required. Map from output names to output metadata.
//
@ -381,10 +405,12 @@ message ExplanationMetadata {
// to be explained.
//
// Currently only one key is allowed.
map<string, OutputMetadata> outputs = 2 [(google.api.field_behavior) = REQUIRED];
map<string, OutputMetadata> outputs = 2
[(google.api.field_behavior) = REQUIRED];
// Points to a YAML file stored on Google Cloud Storage describing the format
// of the [feature attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
// of the [feature
// attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML tabular Models always have this field populated by Vertex AI.

@ -42,7 +42,9 @@ message Feature {
// A list of historical [Snapshot
// Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] or [Import Feature
// Analysis] [FeaturestoreMonitoringConfig.ImportFeatureAnalysis] stats
// requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] descending.
// requested by user, sorted by
// [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
// descending.
message MonitoringStatsAnomaly {
// If the objective in the request is both
// Import Feature Analysis and Snapshot Analysis, this objective could be
@ -63,7 +65,8 @@ message Feature {
Objective objective = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The stats and anomalies generated at specific timestamp.
FeatureStatsAnomaly feature_stats_anomaly = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
FeatureStatsAnomaly feature_stats_anomaly = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// An enum representing the value type of a feature.
@ -119,10 +122,12 @@ message Feature {
];
// Output only. Timestamp when this EntityType was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this EntityType was most recently updated.
google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The labels with user-defined metadata to organize your Features.
//
@ -141,11 +146,11 @@ message Feature {
// "overwrite" update happens.
string etag = 7;
// Optional. Deprecated: The custom monitoring configuration for this Feature, if not
// set, use the monitoring_config defined for the EntityType this Feature
// belongs to.
// Only Features with type ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) BOOL, STRING, DOUBLE or
// INT64 can enable monitoring.
// Optional. Deprecated: The custom monitoring configuration for this Feature,
// if not set, use the monitoring_config defined for the EntityType this
// Feature belongs to. Only Features with type
// ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
// BOOL, STRING, DOUBLE or INT64 can enable monitoring.
//
// If this is populated with
// [FeaturestoreMonitoringConfig.disabled][] = true, snapshot analysis
@ -153,15 +158,13 @@ message Feature {
// [FeaturestoreMonitoringConfig.monitoring_interval][] specified, snapshot
// analysis monitoring is enabled. Otherwise, snapshot analysis monitoring
// config is same as the EntityType's this Feature belongs to.
FeaturestoreMonitoringConfig monitoring_config = 9 [
deprecated = true,
(google.api.field_behavior) = OPTIONAL
];
FeaturestoreMonitoringConfig monitoring_config = 9
[deprecated = true, (google.api.field_behavior) = OPTIONAL];
// Optional. If not set, use the monitoring_config defined for the EntityType this
// Feature belongs to.
// Only Features with type ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) BOOL, STRING, DOUBLE or
// INT64 can enable monitoring.
// Optional. If not set, use the monitoring_config defined for the EntityType
// this Feature belongs to. Only Features with type
// ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
// BOOL, STRING, DOUBLE or INT64 can enable monitoring.
//
// If set to true, all types of data monitoring are disabled despite the
// config on EntityType.
@ -169,10 +172,14 @@ message Feature {
// Output only. A list of historical [Snapshot
// Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis]
// stats requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
// stats requested by user, sorted by
// [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
// descending.
repeated FeatureStatsAnomaly monitoring_stats = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated FeatureStatsAnomaly monitoring_stats = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The list of historical stats and anomalies with specified objectives.
repeated MonitoringStatsAnomaly monitoring_stats_anomalies = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The list of historical stats and anomalies with specified
// objectives.
repeated MonitoringStatsAnomaly monitoring_stats_anomalies = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
}
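A hedged sketch of a Feature resource reflecting the monitoring fields above: `disable_monitoring` turns off all data monitoring for this Feature regardless of the EntityType-level config. The feature description, value type, and label are illustrative.

```
import json

# Hypothetical Feature payload: monitoring is disabled explicitly, so the
# EntityType's monitoring_config is ignored for this Feature.
feature = {
    "description": "Customer age in years",
    "valueType": "INT64",          # only BOOL, STRING, DOUBLE, INT64 support monitoring
    "disableMonitoring": True,
    "labels": {"team": "growth"},  # illustrative label
}
print(json.dumps(feature, indent=2))
```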

@ -39,7 +39,8 @@ message FeatureStatsAnomaly {
// Feature importance score, only populated when cross-feature monitoring is
// enabled. For now only used to represent feature attribution score within
// range [0, 1] for
// [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] and
// [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW]
// and
// [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT].
double score = 1;

@ -45,8 +45,8 @@ message Featurestore {
// max_node_count are set to the same value, the cluster will be configured
// with a fixed number of nodes (no auto-scaling).
message Scaling {
// Required. The minimum number of nodes to scale down to. Must be greater than or
// equal to 1.
// Required. The minimum number of nodes to scale down to. Must be greater
// than or equal to 1.
int32 min_node_count = 1 [(google.api.field_behavior) = REQUIRED];
// The maximum number of nodes to scale up to. Must be greater than
@ -94,16 +94,19 @@ message Featurestore {
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Featurestore was created.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Featurestore was last updated.
google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
// Optional. Used to perform consistent read-modify-write updates. If not set,
// a blind "overwrite" update happens.
string etag = 5 [(google.api.field_behavior) = OPTIONAL];
// Optional. The labels with user-defined metadata to organize your Featurestore.
// Optional. The labels with user-defined metadata to organize your
// Featurestore.
//
// Label keys and values can be no longer than 64 characters
// (Unicode codepoints), can only contain lowercase letters, numeric
@ -116,25 +119,25 @@ message Featurestore {
// and are immutable.
map<string, string> labels = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. Config for online storage resources. The field should not co-exist with the
// field of `OnlineStoreReplicationConfig`. If both of it and
// OnlineStoreReplicationConfig are unset, the feature store will not have an
// online store and cannot be used for online serving.
OnlineServingConfig online_serving_config = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. Config for online storage resources. This field should not
// co-exist with `OnlineStoreReplicationConfig`. If both this field and
// OnlineStoreReplicationConfig are unset, the feature store will not have
// an online store and cannot be used for online serving.
OnlineServingConfig online_serving_config = 7
[(google.api.field_behavior) = OPTIONAL];
// Output only. State of the featurestore.
State state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. TTL in days for feature values that will be stored in online serving
// storage. The Feature Store online storage periodically removes obsolete
// feature values older than `online_storage_ttl_days` since the feature
// generation time.
// Note that `online_storage_ttl_days` should be less than or equal to
// `offline_storage_ttl_days` for each EntityType under a featurestore.
// If not set, default to 4000 days
// Optional. TTL in days for feature values that will be stored in online
// serving storage. The Feature Store online storage periodically removes
// obsolete feature values older than `online_storage_ttl_days` since the
// feature generation time. Note that `online_storage_ttl_days` should be less
// than or equal to `offline_storage_ttl_days` for each EntityType under a
// featurestore. If not set, defaults to 4000 days.
int32 online_storage_ttl_days = 13 [(google.api.field_behavior) = OPTIONAL];
// Optional. Customer-managed encryption key spec for data storage. If set, both of the
// online and offline data storage will be secured by this key.
// Optional. Customer-managed encryption key spec for data storage. If set,
// both of the online and offline data storage will be secured by this key.
EncryptionSpec encryption_spec = 10 [(google.api.field_behavior) = OPTIONAL];
}
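The fields above combine roughly as in the sketch below: autoscaled online serving plus the optional online TTL; the node counts, TTL, and label are illustrative values only.

```
import json

# Hypothetical Featurestore payload: autoscale between 1 and 5 nodes and keep
# online values for 90 days. min_node_count must be >= 1; when min equals max,
# the cluster runs with a fixed node count and no autoscaling.
featurestore = {
    "onlineServingConfig": {
        "scaling": {"minNodeCount": 1, "maxNodeCount": 5}
    },
    "onlineStorageTtlDays": 90,  # defaults to 4000 days when unset
    "labels": {"env": "dev"},    # illustrative label
}
print(json.dumps(featurestore, indent=2))
```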

@ -54,7 +54,8 @@ message FeaturestoreMonitoringConfig {
// running interval. The value indicates number of days.
// If both
// [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days]
// and [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval]
// and
// [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval]
// are set when creating/updating EntityTypes/Features,
// [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days]
// will be used.
@ -145,11 +146,15 @@ message FeaturestoreMonitoringConfig {
// Threshold for numerical features of anomaly detection.
// This is shared by all objectives of Featurestore Monitoring for numerical
// features (i.e. Features with type ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) DOUBLE or INT64).
// features (i.e. Features with type
// ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
// DOUBLE or INT64).
ThresholdConfig numerical_threshold_config = 3;
// Threshold for categorical features of anomaly detection.
// This is shared by all types of Featurestore Monitoring for categorical
// features (i.e. Features with type ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) BOOL or STRING).
// features (i.e. Features with type
// ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
// BOOL or STRING).
ThresholdConfig categorical_threshold_config = 4;
}
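To illustrate the two shared thresholds above, a hedged sketch of a monitoring config that flags anomalies above 0.3; the `value` sub-field of ThresholdConfig and the snapshot-analysis interval are assumptions, since their declarations are not part of this hunk.

```
import json

# Hypothetical FeaturestoreMonitoringConfig: one threshold shared by all
# numerical features (DOUBLE/INT64) and one by all categorical features
# (BOOL/STRING).
monitoring_config = {
    "snapshotAnalysis": {"monitoringIntervalDays": 1},  # assumed field shape
    "numericalThresholdConfig": {"value": 0.3},         # assumed ThresholdConfig shape
    "categoricalThresholdConfig": {"value": 0.3},
}
print(json.dumps(monitoring_config, indent=2))
```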

@ -35,12 +35,14 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for serving online feature values.
service FeaturestoreOnlineServingService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Reads Feature values of a specific entity of an EntityType. For reading
// feature values of multiple entities of an EntityType, please use
// StreamingReadFeatureValues.
rpc ReadFeatureValues(ReadFeatureValuesRequest) returns (ReadFeatureValuesResponse) {
rpc ReadFeatureValues(ReadFeatureValuesRequest)
returns (ReadFeatureValuesResponse) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:readFeatureValues"
body: "*"
@ -51,7 +53,8 @@ service FeaturestoreOnlineServingService {
// Reads Feature values for multiple entities. Depending on their size, data
// for different entities may be broken
// up across multiple responses.
rpc StreamingReadFeatureValues(StreamingReadFeatureValuesRequest) returns (stream ReadFeatureValuesResponse) {
rpc StreamingReadFeatureValues(StreamingReadFeatureValuesRequest)
returns (stream ReadFeatureValuesResponse) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:streamingReadFeatureValues"
body: "*"
@ -64,7 +67,8 @@ service FeaturestoreOnlineServingService {
// The Feature values are merged into existing entities if any. The Feature
// values to be written must have timestamp within the online storage
// retention.
rpc WriteFeatureValues(WriteFeatureValuesRequest) returns (WriteFeatureValuesResponse) {
rpc WriteFeatureValues(WriteFeatureValuesRequest)
returns (WriteFeatureValuesResponse) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:writeFeatureValues"
body: "*"
@ -73,10 +77,12 @@ service FeaturestoreOnlineServingService {
}
}
// Request message for [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues].
// Request message for
// [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues].
message WriteFeatureValuesRequest {
// Required. The resource name of the EntityType for the entities being written.
// Value format: `projects/{project}/locations/{location}/featurestores/
// Required. The resource name of the EntityType for the entities being
// written. Value format:
// `projects/{project}/locations/{location}/featurestores/
// {featurestore}/entityTypes/{entityType}`. For example,
// for a machine learning model predicting user clicks on a website, an
// EntityType ID could be `user`.
@ -87,9 +93,10 @@ message WriteFeatureValuesRequest {
}
];
// Required. The entities to be written. Up to 100,000 feature values can be written
// across all `payloads`.
repeated WriteFeatureValuesPayload payloads = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The entities to be written. Up to 100,000 feature values can be
// written across all `payloads`.
repeated WriteFeatureValuesPayload payloads = 2
[(google.api.field_behavior) = REQUIRED];
}
// Contains Feature values to be written for a specific entity.
@ -97,19 +104,20 @@ message WriteFeatureValuesPayload {
// Required. The ID of the entity.
string entity_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Feature values to be written, mapping from Feature ID to value. Up to
// 100,000 `feature_values` entries may be written across all payloads. The
// feature generation time, aligned by days, must be no older than five
// Required. Feature values to be written, mapping from Feature ID to value.
// Up to 100,000 `feature_values` entries may be written across all payloads.
// The feature generation time, aligned by days, must be no older than five
// years (1825 days) and no later than one year (366 days) in the future.
map<string, FeatureValue> feature_values = 2 [(google.api.field_behavior) = REQUIRED];
map<string, FeatureValue> feature_values = 2
[(google.api.field_behavior) = REQUIRED];
}
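As a hedged sketch of the write path described above, the `:writeFeatureValues` call (whose http binding above posts the whole request as the body) might carry a payload like this; the entity, feature IDs, and the `int64Value`/`stringValue` wrappers are illustrative, since the FeatureValue message is not part of this hunk.

```
import json

# Hypothetical WriteFeatureValuesRequest body: one entity, two feature values.
# Up to 100,000 feature values may be written across all payloads.
write_request = {
    "payloads": [
        {
            "entityId": "user_123",
            "featureValues": {
                "age": {"int64Value": "30"},      # assumed FeatureValue shape
                "country": {"stringValue": "US"},
            },
        }
    ]
}
print(json.dumps(write_request, indent=2))
```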
// Response message for [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues].
message WriteFeatureValuesResponse {
}
// Response message for
// [FeaturestoreOnlineServingService.WriteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues].
message WriteFeatureValuesResponse {}
// Request message for [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
// Request message for
// [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
message ReadFeatureValuesRequest {
// Required. The resource name of the EntityType for the entity being read.
// Value format:
@ -132,7 +140,8 @@ message ReadFeatureValuesRequest {
FeatureSelector feature_selector = 3 [(google.api.field_behavior) = REQUIRED];
}
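Correspondingly, a single-entity read per the message above could look like the sketch below; the `entityId` field and the `idMatcher` selector shape come from parts of the API outside this hunk, so treat them as assumptions, and the IDs are illustrative.

```
import json

# Hypothetical ReadFeatureValuesRequest body: the EntityType resource name is
# carried in the URL path of the :readFeatureValues call, not in the body.
read_request = {
    "entityId": "user_123",                                         # assumed field
    "featureSelector": {"idMatcher": {"ids": ["age", "country"]}},  # assumed selector shape
}
print(json.dumps(read_request, indent=2))
```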
// Response message for [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
// Response message for
// [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
message ReadFeatureValuesResponse {
// Metadata for requested Features.
message FeatureDescriptor {
@ -141,14 +150,16 @@ message ReadFeatureValuesResponse {
}
// Response header with metadata for the requested
// [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] and Features.
// [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type]
// and Features.
message Header {
// The resource name of the EntityType from the
// [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. Value format:
// [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest].
// Value format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`.
string entity_type = 1 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/EntityType"
}];
type: "aiplatform.googleapis.com/EntityType"
}];
// List of Feature metadata corresponding to each piece of
// [ReadFeatureValuesResponse.data][].
@ -179,7 +190,8 @@ message ReadFeatureValuesResponse {
// requested values for one requested Feature. If no values
// for the requested Feature exist, the corresponding cell will be empty.
// This has the same size and is in the same order as the features from the
// header [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header].
// header
// [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header].
repeated Data data = 2;
}
@ -209,13 +221,13 @@ message StreamingReadFeatureValuesRequest {
}
];
// Required. IDs of entities to read Feature values of. The maximum number of IDs is
// 100. For example, for a machine learning model predicting user clicks on a
// website, an entity ID could be `user_123`.
// Required. IDs of entities to read Feature values of. The maximum number of
// IDs is 100. For example, for a machine learning model predicting user
// clicks on a website, an entity ID could be `user_123`.
repeated string entity_ids = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Selector choosing Features of the target EntityType. Feature IDs will be
// deduplicated.
// Required. Selector choosing Features of the target EntityType. Feature IDs
// will be deduplicated.
FeatureSelector feature_selector = 3 [(google.api.field_behavior) = REQUIRED];
}

@ -42,16 +42,19 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// The service that handles CRUD and List for resources for Featurestore.
service FeaturestoreService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a new Featurestore in a given project and location.
rpc CreateFeaturestore(CreateFeaturestoreRequest) returns (google.longrunning.Operation) {
rpc CreateFeaturestore(CreateFeaturestoreRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/featurestores"
body: "featurestore"
};
option (google.api.method_signature) = "parent,featurestore";
option (google.api.method_signature) = "parent,featurestore,featurestore_id";
option (google.api.method_signature) =
"parent,featurestore,featurestore_id";
option (google.longrunning.operation_info) = {
response_type: "Featurestore"
metadata_type: "CreateFeaturestoreOperationMetadata"
@ -67,7 +70,8 @@ service FeaturestoreService {
}
// Lists Featurestores in a given project and location.
rpc ListFeaturestores(ListFeaturestoresRequest) returns (ListFeaturestoresResponse) {
rpc ListFeaturestores(ListFeaturestoresRequest)
returns (ListFeaturestoresResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/featurestores"
};
@ -75,7 +79,8 @@ service FeaturestoreService {
}
// Updates the parameters of a single Featurestore.
rpc UpdateFeaturestore(UpdateFeaturestoreRequest) returns (google.longrunning.Operation) {
rpc UpdateFeaturestore(UpdateFeaturestoreRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1beta1/{featurestore.name=projects/*/locations/*/featurestores/*}"
body: "featurestore"
@ -89,7 +94,8 @@ service FeaturestoreService {
// Deletes a single Featurestore. The Featurestore must not contain any
// EntityTypes or `force` must be set to true for the request to succeed.
rpc DeleteFeaturestore(DeleteFeaturestoreRequest) returns (google.longrunning.Operation) {
rpc DeleteFeaturestore(DeleteFeaturestoreRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/featurestores/*}"
};
@ -102,7 +108,8 @@ service FeaturestoreService {
}
// Creates a new EntityType in a given Featurestore.
rpc CreateEntityType(CreateEntityTypeRequest) returns (google.longrunning.Operation) {
rpc CreateEntityType(CreateEntityTypeRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/featurestores/*}/entityTypes"
body: "entity_type"
@ -124,7 +131,8 @@ service FeaturestoreService {
}
// Lists EntityTypes in a given Featurestore.
rpc ListEntityTypes(ListEntityTypesRequest) returns (ListEntityTypesResponse) {
rpc ListEntityTypes(ListEntityTypesRequest)
returns (ListEntityTypesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/featurestores/*}/entityTypes"
};
@ -142,7 +150,8 @@ service FeaturestoreService {
// Deletes a single EntityType. The EntityType must not have any Features
// or `force` must be set to true for the request to succeed.
rpc DeleteEntityType(DeleteEntityTypeRequest) returns (google.longrunning.Operation) {
rpc DeleteEntityType(DeleteEntityTypeRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}"
};
@ -155,7 +164,8 @@ service FeaturestoreService {
}
// Creates a new Feature in a given EntityType.
rpc CreateFeature(CreateFeatureRequest) returns (google.longrunning.Operation) {
rpc CreateFeature(CreateFeatureRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/featurestores/*/entityTypes/*}/features"
body: "feature"
@ -169,7 +179,8 @@ service FeaturestoreService {
}
// Creates a batch of Features in a given EntityType.
rpc BatchCreateFeatures(BatchCreateFeaturesRequest) returns (google.longrunning.Operation) {
rpc BatchCreateFeatures(BatchCreateFeaturesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/featurestores/*/entityTypes/*}/features:batchCreate"
body: "*"
@ -207,7 +218,8 @@ service FeaturestoreService {
}
// Deletes a single Feature.
rpc DeleteFeature(DeleteFeatureRequest) returns (google.longrunning.Operation) {
rpc DeleteFeature(DeleteFeatureRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}"
};
@ -238,7 +250,8 @@ service FeaturestoreService {
// include but are not limited to changing storage location, storage class,
// or retention policy.
// - Online serving cluster is under-provisioned.
rpc ImportFeatureValues(ImportFeatureValuesRequest) returns (google.longrunning.Operation) {
rpc ImportFeatureValues(ImportFeatureValuesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:importFeatureValues"
body: "*"
@ -256,7 +269,8 @@ service FeaturestoreService {
// instance in the batch may read Feature values of entities from one or
// more EntityTypes. Point-in-time correctness is guaranteed for Feature
// values of each read instance as of each instance's read timestamp.
rpc BatchReadFeatureValues(BatchReadFeatureValuesRequest) returns (google.longrunning.Operation) {
rpc BatchReadFeatureValues(BatchReadFeatureValuesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{featurestore=projects/*/locations/*/featurestores/*}:batchReadFeatureValues"
body: "*"
@ -269,7 +283,8 @@ service FeaturestoreService {
}
// Exports Feature values from all the entities of a target EntityType.
rpc ExportFeatureValues(ExportFeatureValuesRequest) returns (google.longrunning.Operation) {
rpc ExportFeatureValues(ExportFeatureValuesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:exportFeatureValues"
body: "*"
@ -291,7 +306,8 @@ service FeaturestoreService {
// returned from reads and exports may be inconsistent. If consistency is
// required, the caller must retry the same delete request again and wait till
// the new operation returned is marked as successfully done.
rpc DeleteFeatureValues(DeleteFeatureValuesRequest) returns (google.longrunning.Operation) {
rpc DeleteFeatureValues(DeleteFeatureValuesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{entity_type=projects/*/locations/*/featurestores/*/entityTypes/*}:deleteFeatureValues"
body: "*"
@ -313,7 +329,8 @@ service FeaturestoreService {
}
}
// Request message for [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
// Request message for
// [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
message CreateFeaturestoreRequest {
// Required. The resource name of the Location to create Featurestores.
// Format:
@ -328,8 +345,8 @@ message CreateFeaturestoreRequest {
// Required. The Featurestore to create.
Featurestore featurestore = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The ID to use for this Featurestore, which will become the final component
// of the Featurestore's resource name.
// Required. The ID to use for this Featurestore, which will become the final
// component of the Featurestore's resource name.
//
// This value may be up to 60 characters, and valid characters are
// `[a-z0-9_]`. The first character cannot be a number.
@ -338,7 +355,8 @@ message CreateFeaturestoreRequest {
string featurestore_id = 3 [(google.api.field_behavior) = REQUIRED];
}
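A sketch of a create request per the message above; the project, region, ID, and serving config are placeholders, with the ID respecting the documented `[a-z0-9_]`, first-character, and 60-character rules.

```
import json

# Hypothetical CreateFeaturestoreRequest: parent is the Location resource and
# featurestore_id becomes the final component of the new resource name.
create_request = {
    "parent": "projects/my-project/locations/us-central1",
    "featurestoreId": "my_featurestore",  # up to 60 chars, [a-z0-9_], no leading digit
    "featurestore": {
        "onlineServingConfig": {"fixedNodeCount": 1}  # assumed minimal serving config
    },
}
print(json.dumps(create_request, indent=2))
```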
// Request message for [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore].
// Request message for
// [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore].
message GetFeaturestoreRequest {
// Required. The name of the Featurestore resource.
string name = 1 [
@ -349,7 +367,8 @@ message GetFeaturestoreRequest {
];
}
// Request message for [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
// Request message for
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
message ListFeaturestoresRequest {
// Required. The resource name of the Location to list Featurestores.
// Format:
@ -389,12 +408,12 @@ message ListFeaturestoresRequest {
int32 page_size = 3;
// A page token, received from a previous
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] call.
// Provide this to retrieve the subsequent page.
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] must
// match the call that provided the page token.
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]
// must match the call that provided the page token.
string page_token = 4;
// A comma-separated list of fields to order by, sorted in ascending order.
@ -410,22 +429,24 @@ message ListFeaturestoresRequest {
google.protobuf.FieldMask read_mask = 6;
}
// Response message for [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
// Response message for
// [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
message ListFeaturestoresResponse {
// The Featurestores matching the request.
repeated Featurestore featurestores = 1;
// A token, which can be sent as [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] to
// retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// A token, which can be sent as
// [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore].
// Request message for
// [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore].
message UpdateFeaturestoreRequest {
// Required. The Featurestore's `name` field is used to identify the Featurestore to be
// updated.
// Format:
// Required. The Featurestore's `name` field is used to identify the
// Featurestore to be updated. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}`
Featurestore featurestore = 1 [(google.api.field_behavior) = REQUIRED];
@ -442,11 +463,12 @@ message UpdateFeaturestoreRequest {
// * `labels`
// * `online_serving_config.fixed_node_count`
// * `online_serving_config.scaling`
// * `online_storage_ttl_days`
// * `online_storage_ttl_days` (available in Preview)
google.protobuf.FieldMask update_mask = 2;
}
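To illustrate the update-mask semantics above, a sketch of an update touching only labels and the Preview TTL field; the resource name and values are placeholders, and the mask paths must come from the documented updatable set.

```
import json

# Hypothetical UpdateFeaturestoreRequest: only fields named in update_mask are
# changed; everything else on the Featurestore is left untouched.
update_request = {
    "featurestore": {
        "name": "projects/my-project/locations/us-central1/featurestores/my_featurestore",
        "labels": {"env": "prod"},
        "onlineStorageTtlDays": 120,
    },
    # FieldMask paths in JSON form; the proto fields are labels and
    # online_storage_ttl_days.
    "updateMask": "labels,onlineStorageTtlDays",
}
print(json.dumps(update_request, indent=2))
```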
// Request message for [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore].
// Request message for
// [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore].
message DeleteFeaturestoreRequest {
// Required. The name of the Featurestore to be deleted.
// Format:
@ -464,12 +486,13 @@ message DeleteFeaturestoreRequest {
bool force = 2;
}
// Request message for [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
// Request message for
// [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
message ImportFeatureValuesRequest {
// Defines the Feature value(s) to import.
message FeatureSpec {
// Required. ID of the Feature to import values of. This Feature must exist in the
// target EntityType, or the request will fail.
// Required. ID of the Feature to import values of. This Feature must exist
// in the target EntityType, or the request will fail.
string id = 1 [(google.api.field_behavior) = REQUIRED];
// Source column to get the Feature values from. If not set, uses the column
@ -499,8 +522,8 @@ message ImportFeatureValuesRequest {
google.protobuf.Timestamp feature_time = 7;
}
// Required. The resource name of the EntityType grouping the Features for which values
// are being imported. Format:
// Required. The resource name of the EntityType grouping the Features for
// which values are being imported. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`
string entity_type = 1 [
(google.api.field_behavior) = REQUIRED,
@ -513,10 +536,11 @@ message ImportFeatureValuesRequest {
// extracted from the column named `entity_id`.
string entity_id_field = 5;
// Required. Specifications defining which Feature values to import from the entity. The
// request fails if no feature_specs are provided, and having multiple
// feature_specs for one Feature is not allowed.
repeated FeatureSpec feature_specs = 8 [(google.api.field_behavior) = REQUIRED];
// Required. Specifications defining which Feature values to import from the
// entity. The request fails if no feature_specs are provided, and having
// multiple feature_specs for one Feature is not allowed.
repeated FeatureSpec feature_specs = 8
[(google.api.field_behavior) = REQUIRED];
// If set, data will not be imported for online serving. This
// is typically used for backfilling, where Feature generation timestamps are
@ -535,7 +559,8 @@ message ImportFeatureValuesRequest {
bool disable_ingestion_analysis = 12;
}
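A hedged sketch of an import per the request above: values come from a BigQuery table, entity IDs from the `entity_id` column, and one feature spec maps a source column onto a Feature; the `bigquerySource`, `featureTimeField`, and `disableOnlineServing` shapes are assumptions since those declarations fall outside this hunk.

```
import json

# Hypothetical ImportFeatureValuesRequest: backfill-style import with online
# serving ingestion disabled.
import_request = {
    "bigquerySource": {"inputUri": "bq://my-project.my_dataset.user_features"},  # assumed source field
    "entityIdField": "entity_id",
    "featureTimeField": "feature_timestamp",       # assumed; column holding generation time
    "featureSpecs": [
        {"id": "age", "sourceField": "age_years"}  # source column differs from Feature ID
    ],
    "disableOnlineServing": True,                  # assumed field; typical for backfills
}
print(json.dumps(import_request, indent=2))
```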
// Response message for [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
// Response message for
// [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
message ImportFeatureValuesResponse {
// Number of entities that have been imported by the operation.
int64 imported_entity_count = 1;
@ -555,25 +580,30 @@ message ImportFeatureValuesResponse {
int64 timestamp_outside_retention_rows_count = 4;
}
// Request message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
// Request message for
// [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
message BatchReadFeatureValuesRequest {
// Describe pass-through fields in read_instance source.
message PassThroughField {
// Required. The name of the field in the CSV header or the name of the column in
// BigQuery table. The naming restriction is the same as [Feature.name][google.cloud.aiplatform.v1beta1.Feature.name].
// Required. The name of the field in the CSV header or the name of the
// column in the BigQuery table. The naming restriction is the same as
// [Feature.name][google.cloud.aiplatform.v1beta1.Feature.name].
string field_name = 1 [(google.api.field_behavior) = REQUIRED];
}
// Selects Features of an EntityType to read values of and specifies read
// settings.
message EntityTypeSpec {
// Required. ID of the EntityType to select Features. The EntityType id is the
// [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id] specified
// during EntityType creation.
// Required. ID of the EntityType to select Features. The EntityType id is
// the
// [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id]
// specified during EntityType creation.
string entity_type_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Selectors choosing which Feature values to read from the EntityType.
FeatureSelector feature_selector = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Selectors choosing which Feature values to read from the
// EntityType.
FeatureSelector feature_selector = 2
[(google.api.field_behavior) = REQUIRED];
// Per-Feature settings for the batch read.
repeated DestinationFeatureSetting settings = 3;
@ -610,8 +640,8 @@ message BatchReadFeatureValuesRequest {
BigQuerySource bigquery_read_instances = 5;
}
// Required. The resource name of the Featurestore from which to query Feature values.
// Format:
// Required. The resource name of the Featurestore from which to query Feature
// values. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}`
string featurestore = 1 [
(google.api.field_behavior) = REQUIRED,
@ -621,7 +651,8 @@ message BatchReadFeatureValuesRequest {
];
// Required. Specifies output location and format.
FeatureValueDestination destination = 4 [(google.api.field_behavior) = REQUIRED];
FeatureValueDestination destination = 4
[(google.api.field_behavior) = REQUIRED];
// When not empty, the specified fields in the *_read_instances source will be
// joined as-is in the output, in addition to those fields from the
@ -632,20 +663,23 @@ message BatchReadFeatureValuesRequest {
// passed as opaque bytes.
repeated PassThroughField pass_through_fields = 8;
// Required. Specifies EntityType grouping Features to read values of and settings.
// Each EntityType referenced in
// Required. Specifies EntityType grouping Features to read values of and
// settings. Each EntityType referenced in
// [BatchReadFeatureValuesRequest.entity_type_specs] must have a column
// specifying entity IDs in the EntityType in
// [BatchReadFeatureValuesRequest.request][] .
repeated EntityTypeSpec entity_type_specs = 7 [(google.api.field_behavior) = REQUIRED];
repeated EntityTypeSpec entity_type_specs = 7
[(google.api.field_behavior) = REQUIRED];
// Optional. Excludes Feature values with feature generation timestamp before this
// timestamp. If not set, retrieve oldest values kept in Feature Store.
// Optional. Excludes Feature values with feature generation timestamp before
// this timestamp. If not set, retrieve oldest values kept in Feature Store.
// Timestamp, if present, must not have higher than millisecond precision.
google.protobuf.Timestamp start_time = 11 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.Timestamp start_time = 11
[(google.api.field_behavior) = OPTIONAL];
}
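Finally, a sketch of a batch read per the request above: read instances come from a CSV in Cloud Storage, results land in BigQuery, and one EntityType spec selects the features; the `csvReadInstances` and selector shapes are assumptions where their declarations fall outside this hunk.

```
import json

# Hypothetical BatchReadFeatureValuesRequest: point-in-time correct read of two
# features for every (entity, timestamp) row in the read-instance CSV.
batch_read_request = {
    "csvReadInstances": {"gcsSource": {"uris": ["gs://my-bucket/read_instances.csv"]}},  # assumed shape
    "destination": {
        "bigqueryDestination": {"outputUri": "bq://my-project.my_dataset.training_data"}
    },
    "entityTypeSpecs": [
        {
            "entityTypeId": "user",
            "featureSelector": {"idMatcher": {"ids": ["age", "country"]}},  # assumed selector shape
        }
    ],
}
print(json.dumps(batch_read_request, indent=2))
```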
// Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
// Request message for
// [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
message ExportFeatureValuesRequest {
// Describes exporting the latest Feature values of all entities of the
// EntityType between [start_time, snapshot_time].
@ -686,8 +720,8 @@ message ExportFeatureValuesRequest {
FullExport full_export = 7;
}
// Required. The resource name of the EntityType from which to export Feature values.
// Format:
// Required. The resource name of the EntityType from which to export Feature
// values. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
string entity_type = 1 [
(google.api.field_behavior) = REQUIRED,
@ -697,7 +731,8 @@ message ExportFeatureValuesRequest {
];
// Required. Specifies destination location and format.
FeatureValueDestination destination = 4 [(google.api.field_behavior) = REQUIRED];
FeatureValueDestination destination = 4
[(google.api.field_behavior) = REQUIRED];
// Required. Selects Features to export values of.
FeatureSelector feature_selector = 5 [(google.api.field_behavior) = REQUIRED];
@ -719,8 +754,10 @@ message DestinationFeatureSetting {
message FeatureValueDestination {
oneof destination {
// Output in BigQuery format.
// [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri] in
// [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination] must refer to a table.
// [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri]
// in
// [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination]
// must refer to a table.
BigQueryDestination bigquery_destination = 1;
// Output in TFRecord format.
@ -742,17 +779,16 @@ message FeatureValueDestination {
}
}
// Response message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
message ExportFeatureValuesResponse {
}
// Response message for
// [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
message ExportFeatureValuesResponse {}

// Response message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
message BatchReadFeatureValuesResponse {
}
// Response message for
// [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
message BatchReadFeatureValuesResponse {}
// Request message for [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType].
// Request message for
// [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType].
message CreateEntityTypeRequest {
// Required. The resource name of the Featurestore to create EntityTypes.
// Format:
@ -767,8 +803,8 @@ message CreateEntityTypeRequest {
// The EntityType to create.
EntityType entity_type = 2;
// Required. The ID to use for the EntityType, which will become the final component of
// the EntityType's resource name.
// Required. The ID to use for the EntityType, which will become the final
// component of the EntityType's resource name.
//
// This value may be up to 60 characters, and valid characters are
// `[a-z0-9_]`. The first character cannot be a number.
@ -777,7 +813,8 @@ message CreateEntityTypeRequest {
string entity_type_id = 3 [(google.api.field_behavior) = REQUIRED];
}
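// A minimal sketch of this request from the generated v1beta1 Python client
// (an assumption of this note; project, featurestore, and IDs are placeholders).
// entity_type_id follows the constraints above: up to 60 characters, [a-z0-9_],
// not starting with a number.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

operation = client.create_entity_type(
    request=aiplatform_v1beta1.CreateEntityTypeRequest(
        parent="projects/my-project/locations/us-central1/featurestores/my-store",
        entity_type_id="users",
        entity_type=aiplatform_v1beta1.EntityType(description="User entities"),
    )
)
entity_type = operation.result()  # waits on the LRO and returns the EntityType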
// Request message for [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType].
// Request message for
// [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType].
message GetEntityTypeRequest {
// Required. The name of the EntityType resource.
// Format:
@ -790,7 +827,8 @@ message GetEntityTypeRequest {
];
}
// Request message for [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes].
// Request message for
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes].
message ListEntityTypesRequest {
// Required. The resource name of the Featurestore to list EntityTypes.
// Format:
@ -829,12 +867,12 @@ message ListEntityTypesRequest {
int32 page_size = 3;
// A page token, received from a previous
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] call.
// Provide this to retrieve the subsequent page.
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] must
// match the call that provided the page token.
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]
// must match the call that provided the page token.
string page_token = 4;
// A comma-separated list of fields to order by, sorted in ascending order.
@ -851,22 +889,24 @@ message ListEntityTypesRequest {
google.protobuf.FieldMask read_mask = 6;
}
// Response message for [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes].
// Response message for
// [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes].
message ListEntityTypesResponse {
// The EntityTypes matching the request.
repeated EntityType entity_types = 1;
// A token, which can be sent as [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token] to
// retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// A token, which can be sent as
// [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
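// For orientation, a hedged sketch of paging through EntityTypes with the
// generated v1beta1 Python client; the generated pager is assumed to follow
// next_page_token / page_token automatically, so callers rarely handle the
// tokens described above by hand. Resource names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

pager = client.list_entity_types(
    request=aiplatform_v1beta1.ListEntityTypesRequest(
        parent="projects/my-project/locations/us-central1/featurestores/my-store",
        page_size=100,
    )
)
for entity_type in pager:  # iterates across all pages transparently
    print(entity_type.name)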
// Request message for [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType].
// Request message for
// [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType].
message UpdateEntityTypeRequest {
// Required. The EntityType's `name` field is used to identify the EntityType to be
// updated.
// Format:
// Required. The EntityType's `name` field is used to identify the EntityType
// to be updated. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
EntityType entity_type = 1 [(google.api.field_behavior) = REQUIRED];
@ -889,7 +929,7 @@ message UpdateEntityTypeRequest {
// * `monitoring_config.import_features_analysis.anomaly_detection_baseline`
// * `monitoring_config.numerical_threshold_config.value`
// * `monitoring_config.categorical_threshold_config.value`
// * `offline_storage_ttl_days`
// * `offline_storage_ttl_days` (available in Preview)
google.protobuf.FieldMask update_mask = 2;
}
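// A small sketch of the field-mask semantics above, assuming the generated
// v1beta1 Python client; only the paths named in update_mask are overwritten,
// and the EntityType's `name` identifies the resource to update. Names and
// values are placeholders.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

updated = client.update_entity_type(
    request=aiplatform_v1beta1.UpdateEntityTypeRequest(
        entity_type=aiplatform_v1beta1.EntityType(
            name=(
                "projects/my-project/locations/us-central1/"
                "featurestores/my-store/entityTypes/users"
            ),
            description="User entities (updated)",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    )
)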
@ -910,7 +950,8 @@ message DeleteEntityTypeRequest {
bool force = 2;
}
// Request message for [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature].
// Request message for
// [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature].
message CreateFeatureRequest {
// Required. The resource name of the EntityType to create a Feature.
// Format:
@ -925,8 +966,8 @@ message CreateFeatureRequest {
// Required. The Feature to create.
Feature feature = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The ID to use for the Feature, which will become the final component of
// the Feature's resource name.
// Required. The ID to use for the Feature, which will become the final
// component of the Feature's resource name.
//
// This value may be up to 128 characters, and valid characters are
// `[a-z0-9_]`. The first character cannot be a number.
@ -935,10 +976,11 @@ message CreateFeatureRequest {
string feature_id = 3 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
// Request message for
// [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
message BatchCreateFeaturesRequest {
// Required. The resource name of the EntityType to create the batch of Features under.
// Format:
// Required. The resource name of the EntityType to create the batch of
// Features under. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -947,20 +989,24 @@ message BatchCreateFeaturesRequest {
}
];
// Required. The request message specifying the Features to create. All Features must be
// created under the same parent EntityType. The `parent` field in each child
// request message can be omitted. If `parent` is set in a child request, then
// the value must match the `parent` value in this request message.
repeated CreateFeatureRequest requests = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The request message specifying the Features to create. All
// Features must be created under the same parent EntityType. The `parent`
// field in each child request message can be omitted. If `parent` is set in a
// child request, then the value must match the `parent` value in this request
// message.
repeated CreateFeatureRequest requests = 2
[(google.api.field_behavior) = REQUIRED];
}
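// A hedged sketch of the batch shape described above, using the generated
// v1beta1 Python client (an assumption); note that the child requests omit
// `parent`, which is taken from the batch request. IDs and value types are
// placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

parent = (
    "projects/my-project/locations/us-central1/"
    "featurestores/my-store/entityTypes/users"
)

operation = client.batch_create_features(
    request=aiplatform_v1beta1.BatchCreateFeaturesRequest(
        parent=parent,
        requests=[
            aiplatform_v1beta1.CreateFeatureRequest(
                feature_id="age",
                feature=aiplatform_v1beta1.Feature(
                    value_type=aiplatform_v1beta1.Feature.ValueType.INT64
                ),
            ),
            aiplatform_v1beta1.CreateFeatureRequest(
                feature_id="country",
                feature=aiplatform_v1beta1.Feature(
                    value_type=aiplatform_v1beta1.Feature.ValueType.STRING
                ),
            ),
        ],
    )
)
response = operation.result()  # BatchCreateFeaturesResponse with created Features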
// Response message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
// Response message for
// [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
message BatchCreateFeaturesResponse {
// The Features created.
repeated Feature features = 1;
}
// Request message for [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature].
// Request message for
// [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature].
message GetFeatureRequest {
// Required. The name of the Feature resource.
// Format:
@ -973,7 +1019,8 @@ message GetFeatureRequest {
];
}
// Request message for [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
// Request message for
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
message ListFeaturesRequest {
// Required. The resource name of the Location to list Features.
// Format:
@ -1014,12 +1061,12 @@ message ListFeaturesRequest {
int32 page_size = 3;
// A page token, received from a previous
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] call.
// Provide this to retrieve the subsequent page.
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] must
// match the call that provided the page token.
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]
// must match the call that provided the page token.
string page_token = 4;
// A comma-separated list of fields to order by, sorted in ascending order.
@ -1035,25 +1082,30 @@ message ListFeaturesRequest {
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 6;
// If set, return the most recent [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count]
// If set, return the most recent
// [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count]
// of stats for each Feature in response. Valid value is [0, 10]. If number of
// stats exists < [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count], return all
// existing stats.
// stats exists <
// [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count],
// return all existing stats.
int32 latest_stats_count = 7;
}
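// A brief sketch of latest_stats_count in use, assuming the generated v1beta1
// Python client; the value must stay within [0, 10] as noted above, and the
// parent resource name is a placeholder.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

for feature in client.list_features(
    request=aiplatform_v1beta1.ListFeaturesRequest(
        parent=(
            "projects/my-project/locations/us-central1/"
            "featurestores/my-store/entityTypes/users"
        ),
        latest_stats_count=3,  # attach up to 3 most recent stats per Feature
    )
):
    print(feature.name)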
// Response message for [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
// Response message for
// [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
message ListFeaturesResponse {
// The Features matching the request.
repeated Feature features = 1;
// A token, which can be sent as [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token] to
// retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// A token, which can be sent as
// [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
// Request message for
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
message SearchFeaturesRequest {
// Required. The resource name of the Location to search Features.
// Format:
@ -1133,16 +1185,17 @@ message SearchFeaturesRequest {
int32 page_size = 4;
// A page token, received from a previous
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures] call.
// Provide this to retrieve the subsequent page.
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures], except `page_size`, must
// match the call that provided the page token.
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures],
// except `page_size`, must match the call that provided the page token.
string page_token = 5;
}
// Response message for [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
// Response message for
// [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
message SearchFeaturesResponse {
// The Features matching the request.
//
@ -1155,13 +1208,15 @@ message SearchFeaturesResponse {
// * `update_time`
repeated Feature features = 1;
// A token, which can be sent as [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] to
// retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// A token, which can be sent as
// [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature].
// Request message for
// [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature].
message UpdateFeatureRequest {
// Required. The Feature's `name` field is used to identify the Feature to be
// updated.
@ -1185,7 +1240,8 @@ message UpdateFeatureRequest {
google.protobuf.FieldMask update_mask = 2;
}
// Request message for [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature].
// Request message for
// [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature].
message DeleteFeatureRequest {
// Required. The name of the Features to be deleted.
// Format:
@ -1272,15 +1328,17 @@ message BatchCreateFeaturesOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [FeaturestoreService.DeleteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues].
// Request message for
// [FeaturestoreService.DeleteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues].
message DeleteFeatureValuesRequest {
// Message to select entity.
// If an entity id is selected, all the feature values corresponding to the
// entity id will be deleted, including the entityId.
message SelectEntity {
// Required. Selectors choosing feature values of which entity id to be deleted from
// the EntityType.
EntityIdSelector entity_id_selector = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Selectors choosing feature values of which entity id to be
// deleted from the EntityType.
EntityIdSelector entity_id_selector = 1
[(google.api.field_behavior) = REQUIRED];
}
// Message to select time range and feature.
@ -1292,11 +1350,13 @@ message DeleteFeatureValuesRequest {
message SelectTimeRangeAndFeature {
// Required. Select feature generated within a half-inclusive time range.
// The time range is lower inclusive and upper exclusive.
google.type.Interval time_range = 1 [(google.api.field_behavior) = REQUIRED];
google.type.Interval time_range = 1
[(google.api.field_behavior) = REQUIRED];
// Required. Selectors choosing which feature values to be deleted from the
// EntityType.
FeatureSelector feature_selector = 2 [(google.api.field_behavior) = REQUIRED];
FeatureSelector feature_selector = 2
[(google.api.field_behavior) = REQUIRED];
// If set, data will not be deleted from online storage.
// When time range is older than the data in online storage, setting this to
@ -1314,8 +1374,8 @@ message DeleteFeatureValuesRequest {
SelectTimeRangeAndFeature select_time_range_and_feature = 3;
}
// Required. The resource name of the EntityType grouping the Features for which values
// are being deleted from. Format:
// Required. The resource name of the EntityType grouping the Features from
// which values are being deleted. Format:
// `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`
string entity_type = 1 [
(google.api.field_behavior) = REQUIRED,
@ -1325,10 +1385,9 @@ message DeleteFeatureValuesRequest {
];
}
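// A hedged sketch of the select_time_range_and_feature arm of the oneof above,
// assuming the generated v1beta1 Python client and the google.type Interval /
// protobuf Timestamp helpers; resource names, feature IDs, and dates are
// placeholders.
import datetime

from google.cloud import aiplatform_v1beta1
from google.protobuf import timestamp_pb2
from google.type import interval_pb2

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

start = timestamp_pb2.Timestamp()
start.FromDatetime(datetime.datetime(2022, 1, 1, tzinfo=datetime.timezone.utc))
end = timestamp_pb2.Timestamp()
end.FromDatetime(datetime.datetime(2022, 2, 1, tzinfo=datetime.timezone.utc))

request = aiplatform_v1beta1.DeleteFeatureValuesRequest(
    entity_type=(
        "projects/my-project/locations/us-central1/"
        "featurestores/my-store/entityTypes/users"
    ),
    select_time_range_and_feature=(
        aiplatform_v1beta1.DeleteFeatureValuesRequest.SelectTimeRangeAndFeature(
            # Lower-inclusive, upper-exclusive range, as documented above.
            time_range=interval_pb2.Interval(start_time=start, end_time=end),
            feature_selector=aiplatform_v1beta1.FeatureSelector(
                id_matcher=aiplatform_v1beta1.IdMatcher(ids=["age"])
            ),
        )
    ),
)
client.delete_feature_values(request=request).result()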
// Response message for [FeaturestoreService.DeleteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues].
message DeleteFeatureValuesResponse {
}
// Response message for
// [FeaturestoreService.DeleteFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues].
message DeleteFeatureValuesResponse {}
// Selector for entityId. Getting ids from the given source.
message EntityIdSelector {

@ -66,8 +66,8 @@ message HyperparameterTuningJob {
// before the whole job fails.
int32 max_failed_trial_count = 7;
// Required. The spec of a trial job. The same spec applies to the CustomJobs created
// in all the trials.
// Required. The spec of a trial job. The same spec applies to the CustomJobs
// created in all the trials.
CustomJobSpec trial_job_spec = 8 [(google.api.field_behavior) = REQUIRED];
// Output only. Trials of the HyperparameterTuningJob.
@ -77,18 +77,24 @@ message HyperparameterTuningJob {
JobState state = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob was created.
google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob for the first time entered the
// `JOB_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob entered any of the following states:
// `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob was most recently updated.
google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob first entered the
// `JOB_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob entered any of the
// following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`,
// `JOB_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the HyperparameterTuningJob was most recently
// updated.
google.protobuf.Timestamp update_time = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Only populated when job's state is JOB_STATE_FAILED or
// JOB_STATE_CANCELLED.

@ -64,10 +64,10 @@ message Index {
// The description of the Index.
string description = 3;
// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
// information about the Index, that is specific to it. Unset if the Index
// does not have any additional information.
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Immutable. Points to a YAML file stored on Google Cloud Storage describing
// additional information about the Index, that is specific to it. Unset if
// the Index does not have any additional information. The schema is defined
// as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
@ -75,13 +75,15 @@ message Index {
string metadata_schema_uri = 4 [(google.api.field_behavior) = IMMUTABLE];
// Additional information about the Index; the schema of the metadata can
// be found in [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
// be found in
// [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
google.protobuf.Value metadata = 6;
// Output only. The pointers to DeployedIndexes created from this Index.
// An Index can be only deleted if all its DeployedIndexes had been undeployed
// first.
repeated DeployedIndexRef deployed_indexes = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated DeployedIndexRef deployed_indexes = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
@ -97,24 +99,27 @@ message Index {
map<string, string> labels = 9;
// Output only. Timestamp when this Index was created.
google.protobuf.Timestamp create_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Index was most recently updated.
// This also includes any update to the contents of the Index.
// Note that Operations working on this Index may have their
// [Operations.metadata.generic_metadata.update_time]
// [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] a little after the value of this
// timestamp, yet that does not mean their results are not already reflected
// in the Index. Result of any successfully completed Operation on the Index
// is reflected in it.
google.protobuf.Timestamp update_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
// [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] a
// little after the value of this timestamp, yet that does not mean their
// results are not already reflected in the Index. Result of any successfully
// completed Operation on the Index is reflected in it.
google.protobuf.Timestamp update_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Stats of the index resource.
IndexStats index_stats = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
// Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be
// used by default.
IndexUpdateMethod index_update_method = 16 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. The update method to use with this Index. If not set,
// BATCH_UPDATE will be used by default.
IndexUpdateMethod index_update_method = 16
[(google.api.field_behavior) = IMMUTABLE];
}
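// A sketch of creating an Index whose `metadata` carries the Matching Engine
// configuration, assuming the generated v1beta1 Python client. The schema URI
// and the metadata keys (contentsDeltaUri, config.dimensions,
// approximateNeighborsCount) follow the published Matching Engine metadata
// schema and are illustrative assumptions here, not a complete configuration.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

index = aiplatform_v1beta1.Index(
    display_name="my-index",
    # Assumed schema URI; proto-plus converts the dict into the
    # google.protobuf.Value `metadata` field.
    metadata_schema_uri=(
        "gs://google-cloud-aiplatform/schema/matchingengine/metadata/"
        "nearest_neighbor_search_1.0.0.yaml"
    ),
    metadata={
        "contentsDeltaUri": "gs://my-bucket/embeddings/",
        "config": {"dimensions": 128, "approximateNeighborsCount": 150},
    },
    index_update_method=aiplatform_v1beta1.Index.IndexUpdateMethod.BATCH_UPDATE,
)

operation = client.create_index(
    request=aiplatform_v1beta1.CreateIndexRequest(
        parent="projects/my-project/locations/us-central1", index=index
    )
)
created_index = operation.result()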
// A datapoint of Index.
@ -151,14 +156,14 @@ message IndexDatapoint {
// [NearestNeighborSearchConfig.dimensions].
repeated float feature_vector = 2 [(google.api.field_behavior) = REQUIRED];
// Optional. List of Restrict of the datapoint, used to perform "restricted searches"
// where boolean rule are used to filter the subset of the database eligible
// for matching.
// See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering
// Optional. List of Restrict of the datapoint, used to perform "restricted
// searches" where boolean rule are used to filter the subset of the database
// eligible for matching. See:
// https://cloud.google.com/vertex-ai/docs/matching-engine/filtering
repeated Restriction restricts = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. CrowdingTag of the datapoint, the number of neighbors to return in each
// crowding can be configured during query.
// Optional. CrowdingTag of the datapoint; the number of neighbors to return
// in each crowding can be configured during query.
CrowdingTag crowding_tag = 5 [(google.api.field_behavior) = OPTIONAL];
}
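// A minimal sketch of building a datapoint with restricts and a crowding tag
// and upserting it, assuming the generated v1beta1 Python client; the nested
// Restriction/CrowdingTag field names (namespace, allow_list,
// crowding_attribute) come from the full proto rather than this excerpt, and
// the index name, IDs, and vector values are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

datapoint = aiplatform_v1beta1.IndexDatapoint(
    datapoint_id="item-001",
    feature_vector=[0.12, -0.37, 0.98],  # length must match the index dimensions
    restricts=[
        aiplatform_v1beta1.IndexDatapoint.Restriction(
            namespace="color", allow_list=["red"]
        )
    ],
    crowding_tag=aiplatform_v1beta1.IndexDatapoint.CrowdingTag(
        crowding_attribute="brand-42"
    ),
)

client.upsert_datapoints(
    request=aiplatform_v1beta1.UpsertDatapointsRequest(
        index="projects/my-project/locations/us-central1/indexes/1234567890",
        datapoints=[datapoint],
    )
)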

@ -49,7 +49,8 @@ message IndexEndpoint {
string description = 3;
// Output only. The indexes deployed in this endpoint.
repeated DeployedIndex deployed_indexes = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated DeployedIndex deployed_indexes = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
@ -65,13 +66,15 @@ message IndexEndpoint {
map<string, string> labels = 6;
// Output only. Timestamp when this IndexEndpoint was created.
google.protobuf.Timestamp create_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this IndexEndpoint was last updated.
// This timestamp is not updated when the endpoint's DeployedIndexes are
// updated, e.g. due to updates of the original Indexes they are the
// deployments of.
google.protobuf.Timestamp update_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The full name of the Google Compute Engine
// [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
@ -90,15 +93,15 @@ message IndexEndpoint {
// network name.
string network = 9 [(google.api.field_behavior) = OPTIONAL];
// Optional. Deprecated: If true, expose the IndexEndpoint via private service connect.
// Optional. Deprecated: If true, expose the IndexEndpoint via private service
// connect.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] or
// Only one of the fields,
// [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect],
// can be set.
bool enable_private_service_connect = 10 [
deprecated = true,
(google.api.field_behavior) = OPTIONAL
];
bool enable_private_service_connect = 10
[deprecated = true, (google.api.field_behavior) = OPTIONAL];
}
// A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes.
@ -123,20 +126,25 @@ message DeployedIndex {
string display_name = 3;
// Output only. Timestamp when the DeployedIndex was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Provides paths for users to send requests directly to the deployed index
// services running on Cloud via private services access. This field is
// populated if [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] is configured.
IndexPrivateEndpoints private_endpoints = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The DeployedIndex may depend on various data on its original Index.
// Additionally when certain changes to the original Index are being done
// (e.g. when what the Index contains is being changed) the DeployedIndex may
// be asynchronously updated in the background to reflect this changes.
// If this timestamp's value is at least the [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] of the
// original Index, it means that this DeployedIndex and the original Index are
// in sync. If this timestamp is older, then to see which updates this
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Provides paths for users to send requests directly to the
// deployed index services running on Cloud via private services access. This
// field is populated if
// [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] is
// configured.
IndexPrivateEndpoints private_endpoints = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The DeployedIndex may depend on various data on its original
// Index. Additionally when certain changes to the original Index are being
// done (e.g. when what the Index contains is being changed) the DeployedIndex
// may be asynchronously updated in the background to reflect these changes. If
// this timestamp's value is at least the
// [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] of
// the original Index, it means that this DeployedIndex and the original Index
// are in sync. If this timestamp is older, then to see which updates this
// DeployedIndex already contains (and which not), one must
// [list][Operations.ListOperations] [Operations][Operation]
// [working][Operation.name] on the original Index. Only
@ -144,30 +152,41 @@ message DeployedIndex {
// [Operations.metadata.generic_metadata.update_time]
// [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
// equal or before this sync time are contained in this DeployedIndex.
google.protobuf.Timestamp index_sync_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp index_sync_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. A description of resources that the DeployedIndex uses, which to large
// degree are decided by Vertex AI, and optionally allows only a modest
// Optional. A description of resources that the DeployedIndex uses, which to
// a large degree are decided by Vertex AI, and optionally allows only a modest
// additional configuration.
// If min_replica_count is not set, the default value is 2 (we don't provide
// SLA when min_replica_count=1). If max_replica_count is not set, the
// default value is min_replica_count. The max allowed replica count is
// 1000.
AutomaticResources automatic_resources = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. A description of resources that are dedicated to the DeployedIndex, and
// that need a higher degree of manual configuration.
// If min_replica_count is not set, the default value is 2 (we don't provide
// SLA when min_replica_count=1). If max_replica_count is not set, the
// default value is min_replica_count. The max allowed replica count is
// 1000.
AutomaticResources automatic_resources = 7
[(google.api.field_behavior) = OPTIONAL];
// Optional. A description of resources that are dedicated to the
// DeployedIndex, and that need a higher degree of manual configuration. If
// min_replica_count is not set, the default value is 2 (we don't provide SLA
// when min_replica_count=1). If max_replica_count is not set, the default
// value is min_replica_count. The max allowed replica count is 1000.
//
// Available machine types for SMALL shard:
// e2-standard-2 and all machine types available for MEDIUM and LARGE shard.
//
// Available machine types for MEDIUM shard:
// e2-standard-16 and all machine types available for LARGE shard.
//
// Available machine types for LARGE shard:
// e2-standard-32, e2-highmem-16, n2d-standard-32.
//
// Available machine types:
// n1-standard-16
// n1-standard-32
DedicatedResources dedicated_resources = 16 [(google.api.field_behavior) = OPTIONAL];
// n1-standard-16 and n1-standard-32 are still available, but we recommend
// e2-standard-16 and e2-standard-32 for cost efficiency.
DedicatedResources dedicated_resources = 16
[(google.api.field_behavior) = OPTIONAL];
// Optional. If true, private endpoint's access logs are sent to StackDriver Logging.
// Optional. If true, private endpoint's access logs are sent to StackDriver
// Logging.
//
// These logs are like standard server access logs, containing
// information like timestamp and latency for each MatchRequest.
@ -178,7 +197,8 @@ message DeployedIndex {
bool enable_access_logging = 8 [(google.api.field_behavior) = OPTIONAL];
// Optional. If set, the authentication is enabled for the private endpoint.
DeployedIndexAuthConfig deployed_index_auth_config = 9 [(google.api.field_behavior) = OPTIONAL];
DeployedIndexAuthConfig deployed_index_auth_config = 9
[(google.api.field_behavior) = OPTIONAL];
// Optional. A list of reserved ip ranges under the VPC network that can be
// used for this DeployedIndex.
@ -190,7 +210,8 @@ message DeployedIndex {
// The value should be the name of the address
// (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
// Example: 'vertex-ai-ip-range'.
repeated string reserved_ip_ranges = 10 [(google.api.field_behavior) = OPTIONAL];
repeated string reserved_ip_ranges = 10
[(google.api.field_behavior) = OPTIONAL];
// Optional. The deployment group can be no longer than 64 characters (eg:
// 'test', 'prod'). If not set, we will use the 'default' deployment group.
@ -239,7 +260,7 @@ message IndexPrivateEndpoints {
// Output only. The ip address used to send match gRPC requests.
string match_grpc_address = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The name of the service attachment resource. Populated if private service
// connect is enabled.
// Output only. The name of the service attachment resource. Populated if
// private service connect is enabled.
string service_attachment = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -36,10 +36,12 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for managing Vertex AI's IndexEndpoints.
service IndexEndpointService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates an IndexEndpoint.
rpc CreateIndexEndpoint(CreateIndexEndpointRequest) returns (google.longrunning.Operation) {
rpc CreateIndexEndpoint(CreateIndexEndpointRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/indexEndpoints"
body: "index_endpoint"
@ -60,7 +62,8 @@ service IndexEndpointService {
}
// Lists IndexEndpoints in a Location.
rpc ListIndexEndpoints(ListIndexEndpointsRequest) returns (ListIndexEndpointsResponse) {
rpc ListIndexEndpoints(ListIndexEndpointsRequest)
returns (ListIndexEndpointsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/indexEndpoints"
};
@ -77,7 +80,8 @@ service IndexEndpointService {
}
// Deletes an IndexEndpoint.
rpc DeleteIndexEndpoint(DeleteIndexEndpointRequest) returns (google.longrunning.Operation) {
rpc DeleteIndexEndpoint(DeleteIndexEndpointRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}"
};
@ -105,7 +109,8 @@ service IndexEndpointService {
// Undeploys an Index from an IndexEndpoint, removing a DeployedIndex from it,
// and freeing all resources it's using.
rpc UndeployIndex(UndeployIndexRequest) returns (google.longrunning.Operation) {
rpc UndeployIndex(UndeployIndexRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{index_endpoint=projects/*/locations/*/indexEndpoints/*}:undeployIndex"
body: "*"
@ -118,7 +123,8 @@ service IndexEndpointService {
}
// Update an existing DeployedIndex under an IndexEndpoint.
rpc MutateDeployedIndex(MutateDeployedIndexRequest) returns (google.longrunning.Operation) {
rpc MutateDeployedIndex(MutateDeployedIndexRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{index_endpoint=projects/*/locations/*/indexEndpoints/*}:mutateDeployedIndex"
body: "deployed_index"
@ -131,7 +137,8 @@ service IndexEndpointService {
}
}
// Request message for [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint].
// Request message for
// [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint].
message CreateIndexEndpointRequest {
// Required. The resource name of the Location to create the IndexEndpoint in.
// Format: `projects/{project}/locations/{location}`
@ -153,7 +160,8 @@ message CreateIndexEndpointOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint]
// Request message for
// [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint]
message GetIndexEndpointRequest {
// Required. The name of the IndexEndpoint resource.
// Format:
@ -166,10 +174,11 @@ message GetIndexEndpointRequest {
];
}
// Request message for [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
// Request message for
// [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
message ListIndexEndpointsRequest {
// Required. The resource name of the Location from which to list the IndexEndpoints.
// Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location from which to list the
// IndexEndpoints. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -177,8 +186,8 @@ message ListIndexEndpointsRequest {
}
];
// Optional. An expression for filtering the results of the request. For field names
// both snake_case and camelCase are supported.
// Optional. An expression for filtering the results of the request. For field
// names both snake_case and camelCase are supported.
//
// * `index_endpoint` supports = and !=. `index_endpoint` represents the
// IndexEndpoint ID, ie. the last segment of the IndexEndpoint's
@ -202,34 +211,44 @@ message ListIndexEndpointsRequest {
// Optional. The standard list page token.
// Typically obtained via
// [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token] of the previous
// [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints] call.
// [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token]
// of the previous
// [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]
// call.
string page_token = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.FieldMask read_mask = 5
[(google.api.field_behavior) = OPTIONAL];
}
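// An illustrative sketch of the filter field above, assuming the generated
// v1beta1 Python client; the filter expression and parent are placeholders,
// and both snake_case and camelCase field names are accepted per the comment.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexEndpointServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

for endpoint in client.list_index_endpoints(
    request=aiplatform_v1beta1.ListIndexEndpointsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='display_name="my-endpoint"',
    )
):
    print(endpoint.name)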
// Response message for [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
// Response message for
// [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
message ListIndexEndpointsResponse {
// List of IndexEndpoints in the requested page.
repeated IndexEndpoint index_endpoints = 1;
// A token to retrieve next page of results.
// Pass to [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token] to obtain that page.
// Pass to
// [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint].
// Request message for
// [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint].
message UpdateIndexEndpointRequest {
// Required. The IndexEndpoint which replaces the resource on the server.
IndexEndpoint index_endpoint = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource. See [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource. See
// [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint].
// Request message for
// [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint].
message DeleteIndexEndpointRequest {
// Required. The name of the IndexEndpoint resource to be deleted.
// Format:
@ -242,10 +261,11 @@ message DeleteIndexEndpointRequest {
];
}
// Request message for [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
// Request message for
// [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
message DeployIndexRequest {
// Required. The name of the IndexEndpoint resource into which to deploy an Index.
// Format:
// Required. The name of the IndexEndpoint resource into which to deploy an
// Index. Format:
// `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`
string index_endpoint = 1 [
(google.api.field_behavior) = REQUIRED,
@ -258,13 +278,15 @@ message DeployIndexRequest {
DeployedIndex deployed_index = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
// Response message for
// [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
message DeployIndexResponse {
// The DeployedIndex that had been deployed in the IndexEndpoint.
DeployedIndex deployed_index = 1;
}
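// A hedged sketch of deploying an Index with AutomaticResources, assuming the
// generated v1beta1 Python client; replica counts mirror the defaults
// described earlier (min defaults to 2, max defaults to min), and all resource
// names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexEndpointServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

deployed_index = aiplatform_v1beta1.DeployedIndex(
    id="deployed_index_1",
    index="projects/my-project/locations/us-central1/indexes/1234567890",
    automatic_resources=aiplatform_v1beta1.AutomaticResources(
        min_replica_count=2, max_replica_count=4
    ),
)

operation = client.deploy_index(
    request=aiplatform_v1beta1.DeployIndexRequest(
        index_endpoint=(
            "projects/my-project/locations/us-central1/indexEndpoints/987654321"
        ),
        deployed_index=deployed_index,
    )
)
response = operation.result()  # DeployIndexResponse carrying the DeployedIndex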
// Runtime operation information for [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
// Runtime operation information for
// [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
message DeployIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
@ -273,10 +295,11 @@ message DeployIndexOperationMetadata {
string deployed_index_id = 2;
}
// Request message for [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
// Request message for
// [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
message UndeployIndexRequest {
// Required. The name of the IndexEndpoint resource from which to undeploy an Index.
// Format:
// Required. The name of the IndexEndpoint resource from which to undeploy an
// Index. Format:
// `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`
string index_endpoint = 1 [
(google.api.field_behavior) = REQUIRED,
@ -285,25 +308,27 @@ message UndeployIndexRequest {
}
];
// Required. The ID of the DeployedIndex to be undeployed from the IndexEndpoint.
// Required. The ID of the DeployedIndex to be undeployed from the
// IndexEndpoint.
string deployed_index_id = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
message UndeployIndexResponse {
// Response message for
// [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
message UndeployIndexResponse {}
}
// Runtime operation information for [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
// Runtime operation information for
// [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
message UndeployIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
// Request message for
// [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
message MutateDeployedIndexRequest {
// Required. The name of the IndexEndpoint resource into which to deploy an Index.
// Format:
// Required. The name of the IndexEndpoint resource into which to deploy an
// Index. Format:
// `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`
string index_endpoint = 1 [
(google.api.field_behavior) = REQUIRED,
@ -318,7 +343,8 @@ message MutateDeployedIndexRequest {
DeployedIndex deployed_index = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
// Response message for
// [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
message MutateDeployedIndexResponse {
// The DeployedIndex that had been updated in the IndexEndpoint.
DeployedIndex deployed_index = 1;

@ -36,7 +36,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for creating and managing Vertex AI's Index resources.
service IndexService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates an Index.
rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) {
@ -82,7 +83,8 @@ service IndexService {
// Deletes an Index.
// An Index can only be deleted when all its
// [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed.
// [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
// had been undeployed.
rpc DeleteIndex(DeleteIndexRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/indexes/*}"
@ -95,7 +97,8 @@ service IndexService {
}
// Add/update Datapoints into an Index.
rpc UpsertDatapoints(UpsertDatapointsRequest) returns (UpsertDatapointsResponse) {
rpc UpsertDatapoints(UpsertDatapointsRequest)
returns (UpsertDatapointsResponse) {
option (google.api.http) = {
post: "/v1beta1/{index=projects/*/locations/*/indexes/*}:upsertDatapoints"
body: "*"
@ -103,7 +106,8 @@ service IndexService {
}
// Remove Datapoints from an Index.
rpc RemoveDatapoints(RemoveDatapointsRequest) returns (RemoveDatapointsResponse) {
rpc RemoveDatapoints(RemoveDatapointsRequest)
returns (RemoveDatapointsResponse) {
option (google.api.http) = {
post: "/v1beta1/{index=projects/*/locations/*/indexes/*}:removeDatapoints"
body: "*"
@ -111,7 +115,8 @@ service IndexService {
}
}
// Request message for [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
// Request message for
// [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
message CreateIndexRequest {
// Required. The resource name of the Location to create the Index in.
// Format: `projects/{project}/locations/{location}`
@ -126,16 +131,19 @@ message CreateIndexRequest {
Index index = 2 [(google.api.field_behavior) = REQUIRED];
}
// Runtime operation information for [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
// Runtime operation information for
// [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
message CreateIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
// The operation metadata with regard to Matching Engine Index operation.
NearestNeighborSearchOperationMetadata nearest_neighbor_search_operation_metadata = 2;
NearestNeighborSearchOperationMetadata
nearest_neighbor_search_operation_metadata = 2;
}
// Request message for [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex]
// Request message for
// [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex]
message GetIndexRequest {
// Required. The name of the Index resource.
// Format:
@ -148,7 +156,8 @@ message GetIndexRequest {
];
}
// Request message for [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
// Request message for
// [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
message ListIndexesRequest {
// Required. The resource name of the Location from which to list the Indexes.
// Format: `projects/{project}/locations/{location}`
@ -167,44 +176,54 @@ message ListIndexesRequest {
// The standard list page token.
// Typically obtained via
// [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] of the previous
// [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] call.
// [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token]
// of the previous
// [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
// Response message for
// [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
message ListIndexesResponse {
// List of indexes in the requested page.
repeated Index indexes = 1;
// A token to retrieve next page of results.
// Pass to [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] to obtain that page.
// Pass to
// [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
// Request message for
// [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
message UpdateIndexRequest {
// Required. The Index which updates the resource on the server.
Index index = 1 [(google.api.field_behavior) = REQUIRED];
// The update mask applies to the resource.
// For the `FieldMask` definition, see [google.protobuf.FieldMask][google.protobuf.FieldMask].
// For the `FieldMask` definition, see
// [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2;
}
// Runtime operation information for [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
// Runtime operation information for
// [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
message UpdateIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
// The operation metadata with regard to Matching Engine Index operation.
NearestNeighborSearchOperationMetadata nearest_neighbor_search_operation_metadata = 2;
NearestNeighborSearchOperationMetadata
nearest_neighbor_search_operation_metadata = 2;
}
// Request message for [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex].
// Request message for
// [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex].
message DeleteIndexRequest {
// Required. The name of the Index resource to be deleted.
// Format:
@ -217,7 +236,8 @@ message DeleteIndexRequest {
];
}
// Request message for [IndexService.UpsertDatapoints][google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints]
// Request message for
// [IndexService.UpsertDatapoints][google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints]
message UpsertDatapointsRequest {
// Required. The name of the Index resource to be updated.
// Format:
@ -233,12 +253,12 @@ message UpsertDatapointsRequest {
repeated IndexDatapoint datapoints = 2;
}
// Response message for [IndexService.UpsertDatapoints][google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints]
message UpsertDatapointsResponse {
// Response message for
// [IndexService.UpsertDatapoints][google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints]
message UpsertDatapointsResponse {}
}
// Request message for [IndexService.RemoveDatapoints][google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints]
// Request message for
// [IndexService.RemoveDatapoints][google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints]
message RemoveDatapointsRequest {
// Required. The name of the Index resource to be updated.
// Format:
@ -254,10 +274,9 @@ message RemoveDatapointsRequest {
repeated string datapoint_ids = 2;
}
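// A short sketch of datapoint removal, assuming the generated v1beta1 Python
// client; removal is keyed purely by datapoint ID and the response message is
// empty. The index name and IDs are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

client.remove_datapoints(
    request=aiplatform_v1beta1.RemoveDatapointsRequest(
        index="projects/my-project/locations/us-central1/indexes/1234567890",
        datapoint_ids=["item-001", "item-002"],
    )
)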
// Response message for [IndexService.RemoveDatapoints][google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints]
message RemoveDatapointsResponse {
}
// Response message for
// [IndexService.RemoveDatapoints][google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints]
message RemoveDatapointsResponse {}
// Runtime operation metadata with regard to Matching Engine Index.
message NearestNeighborSearchOperationMetadata {
@ -325,9 +344,10 @@ message NearestNeighborSearchOperationMetadata {
// The validation stats of the content (per file) to be inserted or
// updated on the Matching Engine Index resource. Populated if
// contentsDeltaUri is provided as part of [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. Please note
// that, currently for those files that are broken or has unsupported file
// format, we will not have the stats for those files.
// contentsDeltaUri is provided as part of
// [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. Please
// note that, currently, for those files that are broken or have an unsupported
// file format, we will not have the stats for those files.
repeated ContentValidationStats content_validation_stats = 1;
// The ingested data size in bytes.

@ -48,7 +48,8 @@ message GcsSource {
// The Google Cloud Storage location where the output is to be written to.
message GcsDestination {
// Required. Google Cloud Storage URI to output directory. If the uri doesn't end with
// Required. Google Cloud Storage URI to output directory. If the uri doesn't
// end with
// '/', a '/' will be automatically appended. The directory is created if it
// doesn't exist.
string output_uri_prefix = 1 [(google.api.field_behavior) = REQUIRED];

@ -42,7 +42,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for creating and managing Vertex AI's jobs.
service JobService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a CustomJob. A created CustomJob will immediately be
// attempted to run.
@ -71,7 +72,8 @@ service JobService {
}
// Deletes a CustomJob.
rpc DeleteCustomJob(DeleteCustomJobRequest) returns (google.longrunning.Operation) {
rpc DeleteCustomJob(DeleteCustomJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/customJobs/*}"
};
@ -85,13 +87,16 @@ service JobService {
// Cancels a CustomJob.
// Starts asynchronous cancellation on the CustomJob. The server
// makes a best effort to cancel the job, but success is not
// guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or
// other methods to check whether the cancellation succeeded or whether the
// guaranteed. Clients can use
// [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]
// or other methods to check whether the cancellation succeeded or whether the
// job completed despite cancellation. On successful cancellation,
// the CustomJob is not deleted; instead it becomes a job with
// a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to
// `CANCELLED`.
// a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value
// with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding
// to `Code.CANCELLED`, and
// [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set
// to `CANCELLED`.
rpc CancelCustomJob(CancelCustomJobRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/customJobs/*}:cancel"
@ -101,7 +106,8 @@ service JobService {
}
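Illustrative sketch, not part of this diff: because cancellation is best effort, a caller would typically follow CancelCustomJob with GetCustomJob and inspect state and error, as described above. The generated Python client is assumed; project, location, and job ID are placeholders.

# Hypothetical example: cancel a CustomJob and verify the outcome.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
name = "projects/my-project/locations/us-central1/customJobs/1234567890"

client.cancel_custom_job(name=name)  # starts asynchronous cancellation

job = client.get_custom_job(name=name)
if job.state == aiplatform_v1beta1.JobState.JOB_STATE_CANCELLED:
    # On successful cancellation the job is kept, with error.code == 1
    # (Code.CANCELLED) as documented above.
    print("cancelled:", job.error)
else:
    print("job finished or is still winding down, state:", job.state)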
// Creates a DataLabelingJob.
rpc CreateDataLabelingJob(CreateDataLabelingJobRequest) returns (DataLabelingJob) {
rpc CreateDataLabelingJob(CreateDataLabelingJobRequest)
returns (DataLabelingJob) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/dataLabelingJobs"
body: "data_labeling_job"
@ -118,7 +124,8 @@ service JobService {
}
// Lists DataLabelingJobs in a Location.
rpc ListDataLabelingJobs(ListDataLabelingJobsRequest) returns (ListDataLabelingJobsResponse) {
rpc ListDataLabelingJobs(ListDataLabelingJobsRequest)
returns (ListDataLabelingJobsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/dataLabelingJobs"
};
@ -126,7 +133,8 @@ service JobService {
}
// Deletes a DataLabelingJob.
rpc DeleteDataLabelingJob(DeleteDataLabelingJobRequest) returns (google.longrunning.Operation) {
rpc DeleteDataLabelingJob(DeleteDataLabelingJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}"
};
@ -138,7 +146,8 @@ service JobService {
}
// Cancels a DataLabelingJob. Success of cancellation is not guaranteed.
rpc CancelDataLabelingJob(CancelDataLabelingJobRequest) returns (google.protobuf.Empty) {
rpc CancelDataLabelingJob(CancelDataLabelingJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}:cancel"
body: "*"
@ -147,7 +156,8 @@ service JobService {
}
// Creates a HyperparameterTuningJob
rpc CreateHyperparameterTuningJob(CreateHyperparameterTuningJobRequest) returns (HyperparameterTuningJob) {
rpc CreateHyperparameterTuningJob(CreateHyperparameterTuningJobRequest)
returns (HyperparameterTuningJob) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/hyperparameterTuningJobs"
body: "hyperparameter_tuning_job"
@ -156,7 +166,8 @@ service JobService {
}
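Illustrative sketch, not part of this diff: a HyperparameterTuningJob built with dict-style field construction. The field names follow study.proto and custom_job.proto, but the metric, parameter range, machine type, and container image are invented placeholders, and the generated Python client is an assumption.

# Hypothetical example: create a HyperparameterTuningJob.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

hp_job = aiplatform_v1beta1.HyperparameterTuningJob(
    display_name="example-hp-tuning",
    max_trial_count=8,
    parallel_trial_count=2,
    study_spec={
        "metrics": [{
            "metric_id": "accuracy",
            "goal": aiplatform_v1beta1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
        }],
        "parameters": [{
            "parameter_id": "learning_rate",
            "double_value_spec": {"min_value": 1e-4, "max_value": 1e-1},
            "scale_type": aiplatform_v1beta1.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
        }],
    },
    trial_job_spec={
        "worker_pool_specs": [{
            "machine_spec": {"machine_type": "n1-standard-4"},
            "replica_count": 1,
            "container_spec": {"image_uri": "gcr.io/my-project/trainer:latest"},
        }],
    },
)

created = client.create_hyperparameter_tuning_job(
    parent="projects/my-project/locations/us-central1",
    hyperparameter_tuning_job=hp_job,
)
print(created.name)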
// Gets a HyperparameterTuningJob
rpc GetHyperparameterTuningJob(GetHyperparameterTuningJobRequest) returns (HyperparameterTuningJob) {
rpc GetHyperparameterTuningJob(GetHyperparameterTuningJobRequest)
returns (HyperparameterTuningJob) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}"
};
@ -164,7 +175,8 @@ service JobService {
}
// Lists HyperparameterTuningJobs in a Location.
rpc ListHyperparameterTuningJobs(ListHyperparameterTuningJobsRequest) returns (ListHyperparameterTuningJobsResponse) {
rpc ListHyperparameterTuningJobs(ListHyperparameterTuningJobsRequest)
returns (ListHyperparameterTuningJobsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/hyperparameterTuningJobs"
};
@ -172,7 +184,8 @@ service JobService {
}
// Deletes a HyperparameterTuningJob.
rpc DeleteHyperparameterTuningJob(DeleteHyperparameterTuningJobRequest) returns (google.longrunning.Operation) {
rpc DeleteHyperparameterTuningJob(DeleteHyperparameterTuningJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}"
};
@ -186,14 +199,19 @@ service JobService {
// Cancels a HyperparameterTuningJob.
// Starts asynchronous cancellation on the HyperparameterTuningJob. The server
// makes a best effort to cancel the job, but success is not
// guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or
// other methods to check whether the cancellation succeeded or whether the
// guaranteed. Clients can use
// [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
// or other methods to check whether the cancellation succeeded or whether the
// job completed despite cancellation. On successful cancellation,
// the HyperparameterTuningJob is not deleted; instead it becomes a job with
// a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
// of 1, corresponding to `Code.CANCELLED`, and
// [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to `CANCELLED`.
rpc CancelHyperparameterTuningJob(CancelHyperparameterTuningJobRequest) returns (google.protobuf.Empty) {
// a
// [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
// value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and
// [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
// is set to `CANCELLED`.
rpc CancelHyperparameterTuningJob(CancelHyperparameterTuningJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}:cancel"
body: "*"
@ -203,7 +221,8 @@ service JobService {
// Creates a BatchPredictionJob. A BatchPredictionJob, once created, will
// immediately be attempted to start.
rpc CreateBatchPredictionJob(CreateBatchPredictionJobRequest) returns (BatchPredictionJob) {
rpc CreateBatchPredictionJob(CreateBatchPredictionJobRequest)
returns (BatchPredictionJob) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/batchPredictionJobs"
body: "batch_prediction_job"
@ -212,7 +231,8 @@ service JobService {
}
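Illustrative sketch, not part of this diff: creating a BatchPredictionJob that reads JSONL instances from Cloud Storage and writes predictions back to Cloud Storage. The generated Python client is assumed; the model name and bucket URIs are placeholders.

# Hypothetical example: create a BatchPredictionJob reading JSONL from GCS.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

bp_job = aiplatform_v1beta1.BatchPredictionJob(
    display_name="example-batch-prediction",
    model="projects/my-project/locations/us-central1/models/1234567890",
    input_config={
        "instances_format": "jsonl",
        "gcs_source": {"uris": ["gs://example-bucket/input/instances.jsonl"]},
    },
    output_config={
        "predictions_format": "jsonl",
        "gcs_destination": {"output_uri_prefix": "gs://example-bucket/output"},
    },
)

created = client.create_batch_prediction_job(
    parent="projects/my-project/locations/us-central1",
    batch_prediction_job=bp_job,
)
print(created.name, created.state)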
// Gets a BatchPredictionJob
rpc GetBatchPredictionJob(GetBatchPredictionJobRequest) returns (BatchPredictionJob) {
rpc GetBatchPredictionJob(GetBatchPredictionJobRequest)
returns (BatchPredictionJob) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}"
};
@ -220,7 +240,8 @@ service JobService {
}
// Lists BatchPredictionJobs in a Location.
rpc ListBatchPredictionJobs(ListBatchPredictionJobsRequest) returns (ListBatchPredictionJobsResponse) {
rpc ListBatchPredictionJobs(ListBatchPredictionJobsRequest)
returns (ListBatchPredictionJobsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/batchPredictionJobs"
};
@ -229,7 +250,8 @@ service JobService {
// Deletes a BatchPredictionJob. Can only be called on jobs that already
// finished.
rpc DeleteBatchPredictionJob(DeleteBatchPredictionJobRequest) returns (google.longrunning.Operation) {
rpc DeleteBatchPredictionJob(DeleteBatchPredictionJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}"
};
@ -244,13 +266,16 @@ service JobService {
//
// Starts asynchronous cancellation on the BatchPredictionJob. The server
// makes the best effort to cancel the job, but success is not
// guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or
// other methods to check whether the cancellation succeeded or whether the
// guaranteed. Clients can use
// [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
// or other methods to check whether the cancellation succeeded or whether the
// job completed despite cancellation. On a successful cancellation,
// the BatchPredictionJob is not deleted; instead its
// [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
// outputted by the job are not deleted.
rpc CancelBatchPredictionJob(CancelBatchPredictionJobRequest) returns (google.protobuf.Empty) {
// [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
// is set to `CANCELLED`. Any files already outputted by the job are not
// deleted.
rpc CancelBatchPredictionJob(CancelBatchPredictionJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}:cancel"
body: "*"
@ -260,25 +285,32 @@ service JobService {
// Creates a ModelDeploymentMonitoringJob. It will run periodically on a
// configured interval.
rpc CreateModelDeploymentMonitoringJob(CreateModelDeploymentMonitoringJobRequest) returns (ModelDeploymentMonitoringJob) {
rpc CreateModelDeploymentMonitoringJob(
CreateModelDeploymentMonitoringJobRequest)
returns (ModelDeploymentMonitoringJob) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/modelDeploymentMonitoringJobs"
body: "model_deployment_monitoring_job"
};
option (google.api.method_signature) = "parent,model_deployment_monitoring_job";
option (google.api.method_signature) =
"parent,model_deployment_monitoring_job";
}
// Searches Model Monitoring Statistics generated within a given time window.
rpc SearchModelDeploymentMonitoringStatsAnomalies(SearchModelDeploymentMonitoringStatsAnomaliesRequest) returns (SearchModelDeploymentMonitoringStatsAnomaliesResponse) {
rpc SearchModelDeploymentMonitoringStatsAnomalies(
SearchModelDeploymentMonitoringStatsAnomaliesRequest)
returns (SearchModelDeploymentMonitoringStatsAnomaliesResponse) {
option (google.api.http) = {
post: "/v1beta1/{model_deployment_monitoring_job=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:searchModelDeploymentMonitoringStatsAnomalies"
body: "*"
};
option (google.api.method_signature) = "model_deployment_monitoring_job,deployed_model_id";
option (google.api.method_signature) =
"model_deployment_monitoring_job,deployed_model_id";
}
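Illustrative sketch, not part of this diff: a stats-anomalies search for one deployed model. The generated Python client is assumed, as is the proto-plus renaming of the objective's `type` field to `type_`; the objective type, job name, and deployed model ID are placeholders.

# Hypothetical example: search drift anomalies for one deployed model.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

Objective = (
    aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest
    .StatsAnomaliesObjective
)
request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
    model_deployment_monitoring_job=(
        "projects/my-project/locations/us-central1/"
        "modelDeploymentMonitoringJobs/1234567890"
    ),
    deployed_model_id="9876543210",
    objectives=[
        Objective(
            type_=aiplatform_v1beta1.ModelDeploymentMonitoringObjectiveType.RAW_FEATURE_DRIFT,
            top_feature_count=10,
        )
    ],
)

# The method returns a pager; iterating yields per-feature stats anomalies.
for stats in client.search_model_deployment_monitoring_stats_anomalies(request=request):
    print(stats)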
// Gets a ModelDeploymentMonitoringJob.
rpc GetModelDeploymentMonitoringJob(GetModelDeploymentMonitoringJobRequest) returns (ModelDeploymentMonitoringJob) {
rpc GetModelDeploymentMonitoringJob(GetModelDeploymentMonitoringJobRequest)
returns (ModelDeploymentMonitoringJob) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
};
@ -286,7 +318,9 @@ service JobService {
}
// Lists ModelDeploymentMonitoringJobs in a Location.
rpc ListModelDeploymentMonitoringJobs(ListModelDeploymentMonitoringJobsRequest) returns (ListModelDeploymentMonitoringJobsResponse) {
rpc ListModelDeploymentMonitoringJobs(
ListModelDeploymentMonitoringJobsRequest)
returns (ListModelDeploymentMonitoringJobsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/modelDeploymentMonitoringJobs"
};
@ -294,12 +328,15 @@ service JobService {
}
// Updates a ModelDeploymentMonitoringJob.
rpc UpdateModelDeploymentMonitoringJob(UpdateModelDeploymentMonitoringJobRequest) returns (google.longrunning.Operation) {
rpc UpdateModelDeploymentMonitoringJob(
UpdateModelDeploymentMonitoringJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1beta1/{model_deployment_monitoring_job.name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
body: "model_deployment_monitoring_job"
};
option (google.api.method_signature) = "model_deployment_monitoring_job,update_mask";
option (google.api.method_signature) =
"model_deployment_monitoring_job,update_mask";
option (google.longrunning.operation_info) = {
response_type: "ModelDeploymentMonitoringJob"
metadata_type: "UpdateModelDeploymentMonitoringJobOperationMetadata"
@ -307,7 +344,9 @@ service JobService {
}
// Deletes a ModelDeploymentMonitoringJob.
rpc DeleteModelDeploymentMonitoringJob(DeleteModelDeploymentMonitoringJobRequest) returns (google.longrunning.Operation) {
rpc DeleteModelDeploymentMonitoringJob(
DeleteModelDeploymentMonitoringJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}"
};
@ -320,8 +359,11 @@ service JobService {
// Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
// makes a best effort to cancel the job. Will set
// [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
rpc PauseModelDeploymentMonitoringJob(PauseModelDeploymentMonitoringJobRequest) returns (google.protobuf.Empty) {
// [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
// to 'PAUSED'.
rpc PauseModelDeploymentMonitoringJob(
PauseModelDeploymentMonitoringJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:pause"
body: "*"
@ -332,7 +374,9 @@ service JobService {
// Resumes a paused ModelDeploymentMonitoringJob. It will start to run from
// the next scheduled time. A deleted ModelDeploymentMonitoringJob can't be
// resumed.
rpc ResumeModelDeploymentMonitoringJob(ResumeModelDeploymentMonitoringJobRequest) returns (google.protobuf.Empty) {
rpc ResumeModelDeploymentMonitoringJob(
ResumeModelDeploymentMonitoringJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}:resume"
body: "*"
@ -341,7 +385,8 @@ service JobService {
}
}
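Illustrative sketch, not part of this diff: pausing and later resuming a monitoring job with the generated Python client; the job name is a placeholder.

# Hypothetical example: pause a monitoring job, then resume it later.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
name = (
    "projects/my-project/locations/us-central1/"
    "modelDeploymentMonitoringJobs/1234567890"
)

client.pause_model_deployment_monitoring_job(name=name)   # state becomes PAUSED
# ... some time later; note a deleted job cannot be resumed.
client.resume_model_deployment_monitoring_job(name=name)  # runs at the next scheduled time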
// Request message for [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob].
// Request message for
// [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob].
message CreateCustomJobRequest {
// Required. The resource name of the Location to create the CustomJob in.
// Format: `projects/{project}/locations/{location}`
@ -356,7 +401,8 @@ message CreateCustomJobRequest {
CustomJob custom_job = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob].
// Request message for
// [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob].
message GetCustomJobRequest {
// Required. The name of the CustomJob resource.
// Format:
@ -369,7 +415,8 @@ message GetCustomJobRequest {
];
}
// Request message for [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs].
// Request message for
// [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs].
message ListCustomJobsRequest {
// Required. The resource name of the Location to list the CustomJobs from.
// Format: `projects/{project}/locations/{location}`
@ -407,25 +454,31 @@ message ListCustomJobsRequest {
// The standard list page token.
// Typically obtained via
// [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] of the previous
// [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] call.
// [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token]
// of the previous
// [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
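Illustrative sketch, not part of this diff: the generated Python client wraps this page_size/page_token plumbing in a pager that follows next_page_token automatically, so listing usually looks like the loop below; the parent value is a placeholder.

# Hypothetical example: list CustomJobs, letting the pager follow next_page_token.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
parent = "projects/my-project/locations/us-central1"

# The pager re-issues the request with ListCustomJobsResponse.next_page_token
# until the token comes back empty.
for job in client.list_custom_jobs(parent=parent):
    print(job.name, job.state)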
// Response message for [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]
// Response message for
// [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]
message ListCustomJobsResponse {
// List of CustomJobs in the requested page.
repeated CustomJob custom_jobs = 1;
// A token to retrieve the next page of results.
// Pass to [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] to obtain that page.
// Pass to
// [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob].
// Request message for
// [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob].
message DeleteCustomJobRequest {
// Required. The name of the CustomJob resource to be deleted.
// Format:
@ -438,7 +491,8 @@ message DeleteCustomJobRequest {
];
}
// Request message for [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob].
// Request message for
// [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob].
message CancelCustomJobRequest {
// Required. The name of the CustomJob to cancel.
// Format:
@ -451,7 +505,8 @@ message CancelCustomJobRequest {
];
}
// Request message for [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob].
// Request message for
// [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob].
message CreateDataLabelingJobRequest {
// Required. The parent of the DataLabelingJob.
// Format: `projects/{project}/locations/{location}`
@ -463,10 +518,12 @@ message CreateDataLabelingJobRequest {
];
// Required. The DataLabelingJob to create.
DataLabelingJob data_labeling_job = 2 [(google.api.field_behavior) = REQUIRED];
DataLabelingJob data_labeling_job = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob].
// Request message for
// [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob].
message GetDataLabelingJobRequest {
// Required. The name of the DataLabelingJob.
// Format:
@ -479,7 +536,8 @@ message GetDataLabelingJobRequest {
];
}
// Request message for [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs].
// Request message for
// [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs].
message ListDataLabelingJobsRequest {
// Required. The parent of the DataLabelingJob.
// Format: `projects/{project}/locations/{location}`
@ -530,7 +588,8 @@ message ListDataLabelingJobsRequest {
string order_by = 6;
}
// Response message for [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs].
// Response message for
// [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs].
message ListDataLabelingJobsResponse {
// A list of DataLabelingJobs that match the specified filter in the
// request.
@ -540,7 +599,8 @@ message ListDataLabelingJobsResponse {
string next_page_token = 2;
}
// Request message for [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob].
// Request message for
// [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob].
message DeleteDataLabelingJobRequest {
// Required. The name of the DataLabelingJob to be deleted.
// Format:
@ -553,7 +613,8 @@ message DeleteDataLabelingJobRequest {
];
}
// Request message for [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob].
// Request message for
// [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob].
message CancelDataLabelingJobRequest {
// Required. The name of the DataLabelingJob.
// Format:
@ -566,10 +627,12 @@ message CancelDataLabelingJobRequest {
];
}
// Request message for [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob].
// Request message for
// [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob].
message CreateHyperparameterTuningJobRequest {
// Required. The resource name of the Location to create the HyperparameterTuningJob in.
// Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location to create the
// HyperparameterTuningJob in. Format:
// `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -578,10 +641,12 @@ message CreateHyperparameterTuningJobRequest {
];
// Required. The HyperparameterTuningJob to create.
HyperparameterTuningJob hyperparameter_tuning_job = 2 [(google.api.field_behavior) = REQUIRED];
HyperparameterTuningJob hyperparameter_tuning_job = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob].
// Request message for
// [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob].
message GetHyperparameterTuningJobRequest {
// Required. The name of the HyperparameterTuningJob resource.
// Format:
@ -594,10 +659,12 @@ message GetHyperparameterTuningJobRequest {
];
}
// Request message for [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs].
// Request message for
// [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs].
message ListHyperparameterTuningJobsRequest {
// Required. The resource name of the Location to list the HyperparameterTuningJobs
// from. Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location to list the
// HyperparameterTuningJobs from. Format:
// `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -632,27 +699,33 @@ message ListHyperparameterTuningJobsRequest {
// The standard list page token.
// Typically obtained via
// [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] of the previous
// [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] call.
// [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token]
// of the previous
// [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
// Response message for
// [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
message ListHyperparameterTuningJobsResponse {
// List of HyperparameterTuningJobs in the requested page.
// [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] of the jobs will be not be returned.
// [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]
// of the jobs will not be returned.
repeated HyperparameterTuningJob hyperparameter_tuning_jobs = 1;
// A token to retrieve the next page of results.
// Pass to [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] to obtain that
// page.
// Pass to
// [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob].
// Request message for
// [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob].
message DeleteHyperparameterTuningJobRequest {
// Required. The name of the HyperparameterTuningJob resource to be deleted.
// Format:
@ -665,7 +738,8 @@ message DeleteHyperparameterTuningJobRequest {
];
}
// Request message for [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob].
// Request message for
// [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob].
message CancelHyperparameterTuningJobRequest {
// Required. The name of the HyperparameterTuningJob to cancel.
// Format:
@ -678,10 +752,11 @@ message CancelHyperparameterTuningJobRequest {
];
}
// Request message for [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
// Request message for
// [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
message CreateBatchPredictionJobRequest {
// Required. The resource name of the Location to create the BatchPredictionJob in.
// Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location to create the
// BatchPredictionJob in. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -690,10 +765,12 @@ message CreateBatchPredictionJobRequest {
];
// Required. The BatchPredictionJob to create.
BatchPredictionJob batch_prediction_job = 2 [(google.api.field_behavior) = REQUIRED];
BatchPredictionJob batch_prediction_job = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob].
// Request message for
// [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob].
message GetBatchPredictionJobRequest {
// Required. The name of the BatchPredictionJob resource.
// Format:
@ -706,7 +783,8 @@ message GetBatchPredictionJobRequest {
];
}
// Request message for [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs].
// Request message for
// [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs].
message ListBatchPredictionJobsRequest {
// Required. The resource name of the Location to list the BatchPredictionJobs
// from. Format: `projects/{project}/locations/{location}`
@ -745,26 +823,31 @@ message ListBatchPredictionJobsRequest {
// The standard list page token.
// Typically obtained via
// [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] of the previous
// [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] call.
// [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token]
// of the previous
// [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]
// Response message for
// [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]
message ListBatchPredictionJobsResponse {
// List of BatchPredictionJobs in the requested page.
repeated BatchPredictionJob batch_prediction_jobs = 1;
// A token to retrieve the next page of results.
// Pass to [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] to obtain that
// page.
// Pass to
// [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob].
// Request message for
// [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob].
message DeleteBatchPredictionJobRequest {
// Required. The name of the BatchPredictionJob resource to be deleted.
// Format:
@ -777,7 +860,8 @@ message DeleteBatchPredictionJobRequest {
];
}
// Request message for [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
// Request message for
// [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
message CancelBatchPredictionJobRequest {
// Required. The name of the BatchPredictionJob to cancel.
// Format:
@ -803,7 +887,8 @@ message CreateModelDeploymentMonitoringJobRequest {
];
// Required. The ModelDeploymentMonitoringJob to create
ModelDeploymentMonitoringJob model_deployment_monitoring_job = 2 [(google.api.field_behavior) = REQUIRED];
ModelDeploymentMonitoringJob model_deployment_monitoring_job = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for
@ -814,11 +899,12 @@ message SearchModelDeploymentMonitoringStatsAnomaliesRequest {
ModelDeploymentMonitoringObjectiveType type = 1;
// If set, all attribution scores between
// [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] and
// [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] are
// fetched, and page token doesn't take effect in this case.
// Only used to retrieve attribution score for the top Features which has
// the highest attribution score in the latest monitoring run.
// [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time]
// and
// [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time]
// are fetched, and the page token doesn't take effect in this case. Only
// used to retrieve attribution scores for the top Features that have the
// highest attribution score in the latest monitoring run.
int32 top_feature_count = 4;
}
@ -843,7 +929,8 @@ message SearchModelDeploymentMonitoringStatsAnomaliesRequest {
string feature_display_name = 3;
// Required. Objectives of the stats to retrieve.
repeated StatsAnomaliesObjective objectives = 4 [(google.api.field_behavior) = REQUIRED];
repeated StatsAnomaliesObjective objectives = 4
[(google.api.field_behavior) = REQUIRED];
// The standard list page size.
int32 page_size = 5;
@ -949,18 +1036,18 @@ message ListModelDeploymentMonitoringJobsResponse {
// Request message for
// [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob].
message UpdateModelDeploymentMonitoringJobRequest {
// Required. The model monitoring configuration which replaces the resource on the
// server.
ModelDeploymentMonitoringJob model_deployment_monitoring_job = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask is used to specify the fields to be overwritten in the
// ModelDeploymentMonitoringJob resource by the update.
// The fields specified in the update_mask are relative to the resource, not
// the full request. A field will be overwritten if it is in the mask. If the
// user does not provide a mask then only the non-empty fields present in the
// request will be overwritten. Set the update_mask to `*` to override all
// fields.
// For the objective config, the user can either provide the update mask for
// Required. The model monitoring configuration which replaces the resource on
// the server.
ModelDeploymentMonitoringJob model_deployment_monitoring_job = 1
[(google.api.field_behavior) = REQUIRED];
// Required. The update mask is used to specify the fields to be overwritten
// in the ModelDeploymentMonitoringJob resource by the update. The fields
// specified in the update_mask are relative to the resource, not the full
// request. A field will be overwritten if it is in the mask. If the user does
// not provide a mask then only the non-empty fields present in the request
// will be overwritten. Set the update_mask to `*` to override all fields. For
// the objective config, the user can either provide the update mask for
// model_deployment_monitoring_objective_configs or any combination of its
// nested fields, such as:
// model_deployment_monitoring_objective_configs.objective_config.training_dataset.
@ -980,7 +1067,8 @@ message UpdateModelDeploymentMonitoringJobRequest {
// * `model_deployment_monitoring_objective_configs.objective_config.training_dataset`
// * `model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`
// * `model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
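Illustrative sketch, not part of this diff: an update that overwrites only display_name by naming it in the update_mask, matching the method signature shown earlier in this change; the generated Python client and the job name are assumptions/placeholders.

# Hypothetical example: update only the display_name of a monitoring job.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

job = aiplatform_v1beta1.ModelDeploymentMonitoringJob(
    name=(
        "projects/my-project/locations/us-central1/"
        "modelDeploymentMonitoringJobs/1234567890"
    ),
    display_name="renamed-monitoring-job",
)

# Only the fields named in the mask are overwritten on the server.
operation = client.update_model_deployment_monitoring_job(
    model_deployment_monitoring_job=job,
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
updated = operation.result()  # long-running operation; wait for completion
print(updated.display_name)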
// Request message for

@ -37,14 +37,18 @@ message MachineSpec {
// See the [list of machine types supported for custom
// training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
//
// For [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] this field is optional, and the default
// value is `n1-standard-2`. For [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] or as part of
// [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] this field is required.
// For [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] this
// field is optional, and the default value is `n1-standard-2`. For
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] or
// as part of [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec]
// this field is required.
string machine_type = 1 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. The type of accelerator(s) that may be attached to the machine as per
// Immutable. The type of accelerator(s) that may be attached to the machine
// as per
// [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count].
AcceleratorType accelerator_type = 2 [(google.api.field_behavior) = IMMUTABLE];
AcceleratorType accelerator_type = 2
[(google.api.field_behavior) = IMMUTABLE];
// The number of accelerators to attach to the machine.
int32 accelerator_count = 3;
@ -53,14 +57,16 @@ message MachineSpec {
// A description of resources that are dedicated to a DeployedModel, and
// that need a higher degree of manual configuration.
message DedicatedResources {
// Required. Immutable. The specification of a single machine used by the prediction.
// Required. Immutable. The specification of a single machine used by the
// prediction.
MachineSpec machine_spec = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.field_behavior) = IMMUTABLE
];
// Required. Immutable. The minimum number of machine replicas this DeployedModel will be always
// deployed on. This value must be greater than or equal to 1.
// Required. Immutable. The minimum number of machine replicas this
// DeployedModel will always be deployed on. This value must be greater than
// or equal to 1.
//
// If traffic against the DeployedModel increases, it may dynamically be
// deployed onto more replicas, and as traffic decreases, some of these extra
@ -70,14 +76,15 @@ message DedicatedResources {
(google.api.field_behavior) = IMMUTABLE
];
// Immutable. The maximum number of replicas this DeployedModel may be deployed on when
// the traffic against it increases. If the requested value is too large,
// the deployment will error, but if deployment succeeds then the ability
// to scale the model to that many replicas is guaranteed (barring service
// outages). If traffic against the DeployedModel increases beyond what its
// replicas at maximum may handle, a portion of the traffic will be dropped.
// If this value is not provided, will use [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] as the
// default value.
// Immutable. The maximum number of replicas this DeployedModel may be
// deployed on when the traffic against it increases. If the requested value
// is too large, the deployment will error, but if deployment succeeds then
// the ability to scale the model to that many replicas is guaranteed (barring
// service outages). If traffic against the DeployedModel increases beyond
// what its replicas at maximum may handle, a portion of the traffic will be
// dropped. If this value is not provided,
// [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]
// will be used as the default value.
//
// The value of this field impacts the charge against Vertex CPU and GPU
// quotas. Specifically, you will be charged for (max_replica_count *
@ -90,44 +97,49 @@ message DedicatedResources {
// target value (default to 60 if not set). At most one entry is allowed per
// metric.
//
// If [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is
// above 0, the autoscaling will be based on both CPU utilization and
// If
// [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
// is above 0, the autoscaling will be based on both CPU utilization and
// accelerator's duty cycle metrics, scaling up when either metric exceeds
// its target value and scaling down when both metrics are under their target
// values. The default target value is 60 for both metrics.
//
// If [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is
// 0, the autoscaling will be based on CPU utilization metric only with
// If
// [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
// is 0, the autoscaling will be based on CPU utilization metric only with
// default target value 60 if not explicitly set.
//
// For example, in the case of Online Prediction, if you want to override
// target CPU utilization to 80, you should set
// [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name]
// to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
// [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target] to `80`.
repeated AutoscalingMetricSpec autoscaling_metric_specs = 4 [(google.api.field_behavior) = IMMUTABLE];
// [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target]
// to `80`.
repeated AutoscalingMetricSpec autoscaling_metric_specs = 4
[(google.api.field_behavior) = IMMUTABLE];
}
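Illustrative sketch, not part of this diff: assembling DedicatedResources with the CPU-utilization autoscaling override described above (the metric name is quoted from that comment); the machine type, accelerator, and replica counts are placeholders, and the generated Python client is assumed.

# Hypothetical example: DedicatedResources with a custom CPU autoscaling target.
from google.cloud import aiplatform_v1beta1

dedicated = aiplatform_v1beta1.DedicatedResources(
    machine_spec=aiplatform_v1beta1.MachineSpec(
        machine_type="n1-standard-4",
        accelerator_type=aiplatform_v1beta1.AcceleratorType.NVIDIA_TESLA_T4,
        accelerator_count=1,
    ),
    min_replica_count=1,
    max_replica_count=3,  # also bounds the CPU/GPU quota charged, as noted above
    autoscaling_metric_specs=[
        aiplatform_v1beta1.AutoscalingMetricSpec(
            metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
            target=80,
        )
    ],
)
print(dedicated)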
// A description of resources that to a large degree are decided by Vertex AI,
// and require only a modest additional configuration.
// Each Model supporting these resources documents its specific guidelines.
message AutomaticResources {
// Immutable. The minimum number of replicas this DeployedModel will be always deployed
// on. If traffic against it increases, it may dynamically be deployed onto
// more replicas up to [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], and as traffic decreases, some
// of these extra replicas may be freed.
// If the requested value is too large, the deployment will error.
// Immutable. The minimum number of replicas this DeployedModel will always
// be deployed on. If traffic against it increases, it may dynamically be
// deployed onto more replicas up to
// [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count],
// and as traffic decreases, some of these extra replicas may be freed. If the
// requested value is too large, the deployment will error.
int32 min_replica_count = 1 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. The maximum number of replicas this DeployedModel may be deployed on when
// the traffic against it increases. If the requested value is too large,
// the deployment will error, but if deployment succeeds then the ability
// to scale the model to that many replicas is guaranteed (barring service
// outages). If traffic against the DeployedModel increases beyond what its
// replicas at maximum may handle, a portion of the traffic will be dropped.
// If this value is not provided, a no upper bound for scaling under heavy
// traffic will be assume, though Vertex AI may be unable to scale beyond
// certain replica number.
// Immutable. The maximum number of replicas this DeployedModel may be
// deployed on when the traffic against it increases. If the requested value
// is too large, the deployment will error, but if deployment succeeds then
// the ability to scale the model to that many replicas is guaranteed (barring
// service outages). If traffic against the DeployedModel increases beyond
// what its replicas at maximum may handle, a portion of the traffic will be
// dropped. If this value is not provided, no upper bound for scaling under
// heavy traffic will be assumed, though Vertex AI may be unable to scale
// beyond a certain replica number.
int32 max_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE];
}
@ -140,21 +152,21 @@ message BatchDedicatedResources {
(google.api.field_behavior) = IMMUTABLE
];
// Immutable. The number of machine replicas used at the start of the batch operation.
// If not set, Vertex AI decides starting number, not greater than
// Immutable. The number of machine replicas used at the start of the batch
// operation. If not set, Vertex AI decides the starting number, not greater than
// [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count]
int32 starting_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. The maximum number of machine replicas the batch operation may be scaled
// to. The default value is 10.
// Immutable. The maximum number of machine replicas the batch operation may
// be scaled to. The default value is 10.
int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE];
}
// Statistics information about resource consumption.
message ResourcesConsumed {
// Output only. The number of replica hours used. Note that many replicas may run in
// parallel, and additionally any given work may be queued for some time.
// Therefore this value is not strictly related to wall time.
// Output only. The number of replica hours used. Note that many replicas may
// run in parallel, and additionally any given work may be queued for some
// time. Therefore this value is not strictly related to wall time.
double replica_hours = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@ -179,8 +191,8 @@ message NfsMount {
// the source mount path in the form of `server:path`
string path = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Destination mount path. The NFS will be mounted for the user under
// /mnt/nfs/<mount_point>
// Required. Destination mount path. The NFS will be mounted for the user
// under /mnt/nfs/<mount_point>
string mount_point = 3 [(google.api.field_behavior) = REQUIRED];
}

@ -28,11 +28,11 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Manual batch tuning parameters.
message ManualBatchTuningParameters {
// Immutable. The number of the records (e.g. instances) of the operation given in
// each batch to a machine replica. Machine type, and size of a single
// record should be considered when setting this parameter, higher value
// speeds up the batch operation's execution, but too high value will result
// in a whole batch not fitting in a machine's memory, and the whole
// Immutable. The number of records (e.g. instances) of the operation given
// in each batch to a machine replica. Machine type and the size of a single
// record should be considered when setting this parameter: a higher value
// speeds up the batch operation's execution, but too high a value will
// result in a whole batch not fitting in a machine's memory, and the whole
// operation will fail.
// The default value is 64.
int32 batch_size = 1 [(google.api.field_behavior) = IMMUTABLE];
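Illustrative sketch, not part of this diff: BatchDedicatedResources and ManualBatchTuningParameters as they might be built for a BatchPredictionJob; the machine type, replica counts, and batch size are placeholders, and the generated Python client is assumed.

# Hypothetical example: resources and batch size for a batch prediction job.
from google.cloud import aiplatform_v1beta1

batch_resources = aiplatform_v1beta1.BatchDedicatedResources(
    machine_spec=aiplatform_v1beta1.MachineSpec(machine_type="n1-standard-4"),
    starting_replica_count=2,   # optional; the service picks one if unset
    max_replica_count=10,       # defaults to 10 if unset
)
tuning = aiplatform_v1beta1.ManualBatchTuningParameters(batch_size=32)

# Both would typically be set on BatchPredictionJob.dedicated_resources and
# BatchPredictionJob.manual_batch_tuning_parameters respectively.
print(batch_resources, tuning)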

@ -58,9 +58,9 @@ message MetadataSchema {
// allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc.
string schema_version = 2;
// Required. The raw YAML string representation of the MetadataSchema. The combination
// of [MetadataSchema.version] and the schema name given by `title` in
// [MetadataSchema.schema] must be unique within a MetadataStore.
// Required. The raw YAML string representation of the MetadataSchema. The
// combination of [MetadataSchema.version] and the schema name given by
// `title` in [MetadataSchema.schema] must be unique within a MetadataStore.
//
// The schema is defined as an OpenAPI 3.0.2
// [MetadataSchema
@ -72,7 +72,8 @@ message MetadataSchema {
MetadataSchemaType schema_type = 4;
// Output only. Timestamp when this MetadataSchema was created.
google.protobuf.Timestamp create_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Description of the Metadata Schema
string description = 6;
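Illustrative sketch, not part of this diff: registering a MetadataSchema via MetadataService.CreateMetadataSchema (its method signature appears later in this change); the YAML body, store name, and schema ID are invented, and the generated Python client is assumed.

# Hypothetical example: register a simple artifact MetadataSchema.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

schema_yaml = """\
title: example.Dataset
type: object
properties:
  rows:
    type: integer
"""

schema = aiplatform_v1beta1.MetadataSchema(
    schema_version="0.0.1",  # must match ^[0-9]+[.][0-9]+[.][0-9]+$
    schema=schema_yaml,
    schema_type=aiplatform_v1beta1.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
    description="Example dataset artifact type.",
)

created = client.create_metadata_schema(
    parent="projects/my-project/locations/us-central1/metadataStores/default",
    metadata_schema=schema,
    metadata_schema_id="example-dataset",
)
print(created.name)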

@ -42,15 +42,18 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Service for reading and writing metadata entries.
service MetadataService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Initializes a MetadataStore, including allocation of resources.
rpc CreateMetadataStore(CreateMetadataStoreRequest) returns (google.longrunning.Operation) {
rpc CreateMetadataStore(CreateMetadataStoreRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/metadataStores"
body: "metadata_store"
};
option (google.api.method_signature) = "parent,metadata_store,metadata_store_id";
option (google.api.method_signature) =
"parent,metadata_store,metadata_store_id";
option (google.longrunning.operation_info) = {
response_type: "MetadataStore"
metadata_type: "CreateMetadataStoreOperationMetadata"
@ -66,7 +69,8 @@ service MetadataService {
}
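Illustrative sketch, not part of this diff: CreateMetadataStore returns a long-running operation whose response is the MetadataStore, so the caller waits on the operation; the generated Python client, project, and store ID are assumptions/placeholders.

# Hypothetical example: create a MetadataStore and block on the operation.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

operation = client.create_metadata_store(
    parent="projects/my-project/locations/us-central1",
    metadata_store=aiplatform_v1beta1.MetadataStore(
        description="Example store for experiment lineage."
    ),
    metadata_store_id="example-store",
)
store = operation.result()  # waits for the long-running operation to finish
print(store.name)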
// Lists MetadataStores for a Location.
rpc ListMetadataStores(ListMetadataStoresRequest) returns (ListMetadataStoresResponse) {
rpc ListMetadataStores(ListMetadataStoresRequest)
returns (ListMetadataStoresResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/metadataStores"
};
@ -75,7 +79,8 @@ service MetadataService {
// Deletes a single MetadataStore and all its child resources (Artifacts,
// Executions, and Contexts).
rpc DeleteMetadataStore(DeleteMetadataStoreRequest) returns (google.longrunning.Operation) {
rpc DeleteMetadataStore(DeleteMetadataStoreRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/metadataStores/*}"
};
@ -121,7 +126,8 @@ service MetadataService {
}
// Deletes an Artifact.
rpc DeleteArtifact(DeleteArtifactRequest) returns (google.longrunning.Operation) {
rpc DeleteArtifact(DeleteArtifactRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}"
};
@ -133,7 +139,8 @@ service MetadataService {
}
// Purges Artifacts.
rpc PurgeArtifacts(PurgeArtifactsRequest) returns (google.longrunning.Operation) {
rpc PurgeArtifacts(PurgeArtifactsRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/artifacts:purge"
body: "*"
@ -180,7 +187,8 @@ service MetadataService {
}
// Deletes a stored Context.
rpc DeleteContext(DeleteContextRequest) returns (google.longrunning.Operation) {
rpc DeleteContext(DeleteContextRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}"
};
@ -192,7 +200,8 @@ service MetadataService {
}
// Purges Contexts.
rpc PurgeContexts(PurgeContextsRequest) returns (google.longrunning.Operation) {
rpc PurgeContexts(PurgeContextsRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/contexts:purge"
body: "*"
@ -207,7 +216,8 @@ service MetadataService {
// Adds a set of Artifacts and Executions to a Context. If any of the
// Artifacts or Executions have already been added to a Context, they are
// simply skipped.
rpc AddContextArtifactsAndExecutions(AddContextArtifactsAndExecutionsRequest) returns (AddContextArtifactsAndExecutionsResponse) {
rpc AddContextArtifactsAndExecutions(AddContextArtifactsAndExecutionsRequest)
returns (AddContextArtifactsAndExecutionsResponse) {
option (google.api.http) = {
post: "/v1beta1/{context=projects/*/locations/*/metadataStores/*/contexts/*}:addContextArtifactsAndExecutions"
body: "*"
@ -220,7 +230,8 @@ service MetadataService {
// simply skipped. If this call would create a cycle or cause any Context to
// have more than 10 parents, the request will fail with an INVALID_ARGUMENT
// error.
rpc AddContextChildren(AddContextChildrenRequest) returns (AddContextChildrenResponse) {
rpc AddContextChildren(AddContextChildrenRequest)
returns (AddContextChildrenResponse) {
option (google.api.http) = {
post: "/v1beta1/{context=projects/*/locations/*/metadataStores/*/contexts/*}:addContextChildren"
body: "*"
@ -231,7 +242,8 @@ service MetadataService {
// Remove a set of child Contexts from a parent Context. If any of the
// child Contexts were NOT added to the parent Context, they are
// simply skipped.
rpc RemoveContextChildren(RemoveContextChildrenRequest) returns (RemoveContextChildrenResponse) {
rpc RemoveContextChildren(RemoveContextChildrenRequest)
returns (RemoveContextChildrenResponse) {
option (google.api.http) = {
post: "/v1beta1/{context=projects/*/locations/*/metadataStores/*/contexts/*}:removeContextChildren"
body: "*"
@ -241,7 +253,8 @@ service MetadataService {
// Retrieves Artifacts and Executions within the specified Context, connected
// by Event edges and returned as a LineageSubgraph.
rpc QueryContextLineageSubgraph(QueryContextLineageSubgraphRequest) returns (LineageSubgraph) {
rpc QueryContextLineageSubgraph(QueryContextLineageSubgraphRequest)
returns (LineageSubgraph) {
option (google.api.http) = {
get: "/v1beta1/{context=projects/*/locations/*/metadataStores/*/contexts/*}:queryContextLineageSubgraph"
};
@ -283,7 +296,8 @@ service MetadataService {
}
// Deletes an Execution.
rpc DeleteExecution(DeleteExecutionRequest) returns (google.longrunning.Operation) {
rpc DeleteExecution(DeleteExecutionRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}"
};
@ -295,7 +309,8 @@ service MetadataService {
}
// Purges Executions.
rpc PurgeExecutions(PurgeExecutionsRequest) returns (google.longrunning.Operation) {
rpc PurgeExecutions(PurgeExecutionsRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/executions:purge"
body: "*"
@ -311,7 +326,8 @@ service MetadataService {
// Artifact was used as an input or output for an Execution. If an Event
// already exists between the Execution and the Artifact, the Event is
// skipped.
rpc AddExecutionEvents(AddExecutionEventsRequest) returns (AddExecutionEventsResponse) {
rpc AddExecutionEvents(AddExecutionEventsRequest)
returns (AddExecutionEventsResponse) {
option (google.api.http) = {
post: "/v1beta1/{execution=projects/*/locations/*/metadataStores/*/executions/*}:addExecutionEvents"
body: "*"
@ -322,7 +338,8 @@ service MetadataService {
// Obtains the set of input and output Artifacts for this Execution, in the
// form of LineageSubgraph that also contains the Execution and connecting
// Events.
rpc QueryExecutionInputsAndOutputs(QueryExecutionInputsAndOutputsRequest) returns (LineageSubgraph) {
rpc QueryExecutionInputsAndOutputs(QueryExecutionInputsAndOutputsRequest)
returns (LineageSubgraph) {
option (google.api.http) = {
get: "/v1beta1/{execution=projects/*/locations/*/metadataStores/*/executions/*}:queryExecutionInputsAndOutputs"
};
@ -330,12 +347,14 @@ service MetadataService {
}
// Creates a MetadataSchema.
rpc CreateMetadataSchema(CreateMetadataSchemaRequest) returns (MetadataSchema) {
rpc CreateMetadataSchema(CreateMetadataSchemaRequest)
returns (MetadataSchema) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/metadataSchemas"
body: "metadata_schema"
};
option (google.api.method_signature) = "parent,metadata_schema,metadata_schema_id";
option (google.api.method_signature) =
"parent,metadata_schema,metadata_schema_id";
}
// Retrieves a specific MetadataSchema.
@ -347,7 +366,8 @@ service MetadataService {
}
// Lists MetadataSchemas.
rpc ListMetadataSchemas(ListMetadataSchemasRequest) returns (ListMetadataSchemasResponse) {
rpc ListMetadataSchemas(ListMetadataSchemasRequest)
returns (ListMetadataSchemasResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/metadataSchemas"
};
@ -356,7 +376,8 @@ service MetadataService {
// Retrieves lineage of an Artifact represented through Artifacts and
// Executions connected by Event edges and returned as a LineageSubgraph.
rpc QueryArtifactLineageSubgraph(QueryArtifactLineageSubgraphRequest) returns (LineageSubgraph) {
rpc QueryArtifactLineageSubgraph(QueryArtifactLineageSubgraphRequest)
returns (LineageSubgraph) {
option (google.api.http) = {
get: "/v1beta1/{artifact=projects/*/locations/*/metadataStores/*/artifacts/*}:queryArtifactLineageSubgraph"
};
@ -364,7 +385,8 @@ service MetadataService {
}
}
// Request message for [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore].
// Request message for
// [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore].
message CreateMetadataStoreRequest {
// Required. The resource name of the Location where the MetadataStore should
// be created.
@ -390,13 +412,15 @@ message CreateMetadataStoreRequest {
string metadata_store_id = 3;
}
// Details of operations that perform [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore].
// Details of operations that perform
// [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore].
message CreateMetadataStoreOperationMetadata {
// Operation metadata for creating a MetadataStore.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore].
// Request message for
// [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore].
message GetMetadataStoreRequest {
// Required. The resource name of the MetadataStore to retrieve.
// Format:
@ -409,7 +433,8 @@ message GetMetadataStoreRequest {
];
}
// Request message for [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores].
// Request message for
// [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores].
message ListMetadataStoresRequest {
// Required. The Location whose MetadataStores should be listed.
// Format:
@ -427,8 +452,8 @@ message ListMetadataStoresRequest {
int32 page_size = 2;
// A page token, received from a previous
// [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] call. Provide this to retrieve the
// subsequent page.
// [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other provided parameters must match the call that
// provided the page token. (Otherwise the request will fail with
@ -436,18 +461,21 @@ message ListMetadataStoresRequest {
string page_token = 3;
}
// Response message for [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores].
// Response message for
// [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores].
message ListMetadataStoresResponse {
// The MetadataStores found for the Location.
repeated MetadataStore metadata_stores = 1;
// A token, which can be sent as
// [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] to retrieve the next
// page. If this field is not populated, there are no subsequent pages.
// [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token]
// to retrieve the next page. If this field is not populated, there are no
// subsequent pages.
string next_page_token = 2;
}
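A minimal sketch of how the ListMetadataStores paging contract above is typically consumed through the generated Python client (google.cloud.aiplatform_v1beta1); the GAPIC pager re-sends the request with next_page_token until it is empty. The project, region, and endpoint values are placeholders.

from google.cloud import aiplatform_v1beta1

# Placeholder values; substitute your own project and region.
PARENT = "projects/my-project/locations/us-central1"

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Iterating the pager fetches subsequent pages transparently by passing the
# returned next_page_token back as page_token.
request = aiplatform_v1beta1.ListMetadataStoresRequest(parent=PARENT, page_size=100)
for store in client.list_metadata_stores(request=request):
    print(store.name)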
// Request message for [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore].
// Request message for
// [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore].
message DeleteMetadataStoreRequest {
// Required. The resource name of the MetadataStore to delete.
// Format:
@ -463,13 +491,15 @@ message DeleteMetadataStoreRequest {
bool force = 2 [deprecated = true];
}
// Details of operations that perform [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore].
// Details of operations that perform
// [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore].
message DeleteMetadataStoreOperationMetadata {
// Operation metadata for deleting a MetadataStore.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact].
// Request message for
// [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact].
message CreateArtifactRequest {
// Required. The resource name of the MetadataStore where the Artifact should
// be created.
@ -495,7 +525,8 @@ message CreateArtifactRequest {
string artifact_id = 3;
}
// Request message for [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact].
// Request message for
// [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact].
message GetArtifactRequest {
// Required. The resource name of the Artifact to retrieve.
// Format:
@ -508,7 +539,8 @@ message GetArtifactRequest {
];
}
// Request message for [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts].
// Request message for
// [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts].
message ListArtifactsRequest {
// Required. The MetadataStore whose Artifacts should be listed.
// Format:
@ -524,7 +556,8 @@ message ListArtifactsRequest {
// Must be in range 1-1000, inclusive. Defaults to 100.
int32 page_size = 2;
// A page token, received from a previous [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]
// A page token, received from a previous
// [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other provided parameters must match the call that
@ -571,36 +604,42 @@ message ListArtifactsRequest {
string order_by = 5;
}
// Response message for [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts].
// Response message for
// [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts].
message ListArtifactsResponse {
// The Artifacts retrieved from the MetadataStore.
repeated Artifact artifacts = 1;
// A token, which can be sent as [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token]
// A token, which can be sent as
// [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token]
// to retrieve the next page.
// If this field is not populated, there are no subsequent pages.
string next_page_token = 2;
}
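A sketch of passing the filter and order_by fields described above through the generated Python client; the filter string is only illustrative (see the ListArtifactsRequest comments for the supported grammar), and all resource names are placeholders.

from google.cloud import aiplatform_v1beta1

PARENT = "projects/my-project/locations/us-central1/metadataStores/default"

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Illustrative filter and ordering; adjust to the documented filter syntax.
request = aiplatform_v1beta1.ListArtifactsRequest(
    parent=PARENT,
    filter='display_name = "my-artifact"',
    order_by="create_time desc",
    page_size=50,
)
for artifact in client.list_artifacts(request=request):
    print(artifact.name, artifact.display_name)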
// Request message for [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact].
// Request message for
// [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact].
message UpdateArtifactRequest {
// Required. The Artifact containing updates.
// The Artifact's [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] field is used to identify the Artifact to
// be updated.
// Format:
// The Artifact's
// [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] field is
// used to identify the Artifact to be updated. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`
Artifact artifact = 1 [(google.api.field_behavior) = REQUIRED];
// Optional. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = OPTIONAL];
// If set to true, and the [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not found, a new [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is
// created.
// If set to true, and the
// [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not found, a new
// [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is created.
bool allow_missing = 3;
}
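A sketch of the upsert behavior implied by allow_missing: with the flag set, UpdateArtifact creates the Artifact when the name does not resolve instead of failing. update_mask is omitted because the comment above notes it is not yet supported; the resource names are placeholders.

from google.cloud import aiplatform_v1beta1

ARTIFACT_NAME = (
    "projects/my-project/locations/us-central1/"
    "metadataStores/default/artifacts/my-artifact"
)

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Artifact.name identifies the Artifact to update; allow_missing=True turns a
# missing Artifact into a create rather than an error.
artifact = aiplatform_v1beta1.Artifact(
    name=ARTIFACT_NAME,
    display_name="training-dataset-snapshot",
)
request = aiplatform_v1beta1.UpdateArtifactRequest(artifact=artifact, allow_missing=True)
updated = client.update_artifact(request=request)
print(updated.name)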
// Request message for [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact].
// Request message for
// [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact].
message DeleteArtifactRequest {
// Required. The resource name of the Artifact to delete.
// Format:
@ -618,7 +657,8 @@ message DeleteArtifactRequest {
string etag = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Request message for [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
// Request message for
// [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
message PurgeArtifactsRequest {
// Required. The metadata store to purge Artifacts from.
// Format:
@ -640,7 +680,8 @@ message PurgeArtifactsRequest {
bool force = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Response message for [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
// Response message for
// [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
message PurgeArtifactsResponse {
// The number of Artifacts that this request deleted (or, if `force` is false,
// the number of Artifacts that will be deleted). This can be an estimate.
@ -650,21 +691,22 @@ message PurgeArtifactsResponse {
// Only populated if `force` is set to false. The maximum number of samples is
// 100 (it is possible to return fewer).
repeated string purge_sample = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Artifact"
}];
type: "aiplatform.googleapis.com/Artifact"
}];
}
// Details of operations that perform [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
// Details of operations that perform
// [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts].
message PurgeArtifactsMetadata {
// Operation metadata for purging Artifacts.
GenericOperationMetadata generic_metadata = 1;
}
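A sketch of the two-phase purge flow the force flag enables: a dry run with force=False reports purge_count and up to 100 purge_sample names, and only a second call with force=True deletes. PurgeArtifacts is a long-running operation, so result() is used to wait for the response. The filter string and resource names are placeholders.

from google.cloud import aiplatform_v1beta1

PARENT = "projects/my-project/locations/us-central1/metadataStores/default"

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Dry run: with force=False nothing is deleted; the response reports how many
# Artifacts match and returns up to 100 sample resource names.
dry_run = client.purge_artifacts(
    request=aiplatform_v1beta1.PurgeArtifactsRequest(
        parent=PARENT, filter='display_name = "temporary"', force=False
    )
).result()
print(dry_run.purge_count, list(dry_run.purge_sample))

# Actual deletion, only after the dry run has been reviewed.
client.purge_artifacts(
    request=aiplatform_v1beta1.PurgeArtifactsRequest(
        parent=PARENT, filter='display_name = "temporary"', force=True
    )
).result()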
// Request message for [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext].
// Request message for
// [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext].
message CreateContextRequest {
// Required. The resource name of the MetadataStore where the Context should be
// created.
// Format:
// Required. The resource name of the MetadataStore where the Context should
// be created. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -686,7 +728,8 @@ message CreateContextRequest {
string context_id = 3;
}
// Request message for [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext].
// Request message for
// [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext].
message GetContextRequest {
// Required. The resource name of the Context to retrieve.
// Format:
@ -699,7 +742,8 @@ message GetContextRequest {
];
}
// Request message for [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]
// Request message for
// [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]
message ListContextsRequest {
// Required. The MetadataStore whose Contexts should be listed.
// Format:
@ -715,7 +759,8 @@ message ListContextsRequest {
// Must be in range 1-1000, inclusive. Defaults to 100.
int32 page_size = 2;
// A page token, received from a previous [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]
// A page token, received from a previous
// [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other provided parameters must match the call that
@ -766,36 +811,41 @@ message ListContextsRequest {
string order_by = 5;
}
// Response message for [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts].
// Response message for
// [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts].
message ListContextsResponse {
// The Contexts retrieved from the MetadataStore.
repeated Context contexts = 1;
// A token, which can be sent as [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token]
// A token, which can be sent as
// [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token]
// to retrieve the next page.
// If this field is not populated, there are no subsequent pages.
string next_page_token = 2;
}
// Request message for [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext].
// Request message for
// [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext].
message UpdateContextRequest {
// Required. The Context containing updates.
// The Context's [Context.name][google.cloud.aiplatform.v1beta1.Context.name] field is used to identify the Context to be
// updated.
// Format:
// The Context's [Context.name][google.cloud.aiplatform.v1beta1.Context.name]
// field is used to identify the Context to be updated. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
Context context = 1 [(google.api.field_behavior) = REQUIRED];
// Optional. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = OPTIONAL];
// If set to true, and the [Context][google.cloud.aiplatform.v1beta1.Context] is not found, a new [Context][google.cloud.aiplatform.v1beta1.Context] is
// If set to true, and the [Context][google.cloud.aiplatform.v1beta1.Context]
// is not found, a new [Context][google.cloud.aiplatform.v1beta1.Context] is
// created.
bool allow_missing = 3;
}
// Request message for [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
// Request message for
// [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
message DeleteContextRequest {
// Required. The resource name of the Context to delete.
// Format:
@ -817,7 +867,8 @@ message DeleteContextRequest {
string etag = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Request message for [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
// Request message for
// [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
message PurgeContextsRequest {
// Required. The metadata store to purge Contexts from.
// Format:
@ -839,7 +890,8 @@ message PurgeContextsRequest {
bool force = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Response message for [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
// Response message for
// [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
message PurgeContextsResponse {
// The number of Contexts that this request deleted (or, if `force` is false,
// the number of Contexts that will be deleted). This can be an estimate.
@ -849,21 +901,22 @@ message PurgeContextsResponse {
// Only populated if `force` is set to false. The maximum number of samples is
// 100 (it is possible to return fewer).
repeated string purge_sample = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Context"
}];
type: "aiplatform.googleapis.com/Context"
}];
}
// Details of operations that perform [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
// Details of operations that perform
// [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts].
message PurgeContextsMetadata {
// Operation metadata for purging Contexts.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions].
// Request message for
// [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions].
message AddContextArtifactsAndExecutionsRequest {
// Required. The resource name of the Context that the Artifacts and Executions
// belong to.
// Format:
// Required. The resource name of the Context that the Artifacts and
// Executions belong to. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
string context = 1 [
(google.api.field_behavior) = REQUIRED,
@ -877,8 +930,8 @@ message AddContextArtifactsAndExecutionsRequest {
// Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`
repeated string artifacts = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Artifact"
}];
type: "aiplatform.googleapis.com/Artifact"
}];
// The resource names of the Executions to associate with the
// Context.
@ -886,16 +939,16 @@ message AddContextArtifactsAndExecutionsRequest {
// Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`
repeated string executions = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Execution"
}];
type: "aiplatform.googleapis.com/Execution"
}];
}
// Response message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions].
message AddContextArtifactsAndExecutionsResponse {
// Response message for
// [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions].
message AddContextArtifactsAndExecutionsResponse {}
}
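A sketch of attributing existing Artifacts and Executions to a Context with this RPC; because the response message is empty, success is signalled simply by the call not raising. All resource names are placeholders.

from google.cloud import aiplatform_v1beta1

STORE = "projects/my-project/locations/us-central1/metadataStores/default"

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Associates already-created Artifacts and Executions with a Context.
client.add_context_artifacts_and_executions(
    request=aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest(
        context=f"{STORE}/contexts/my-experiment-run",
        artifacts=[f"{STORE}/artifacts/my-artifact"],
        executions=[f"{STORE}/executions/my-training-step"],
    )
)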
// Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren].
// Request message for
// [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren].
message AddContextChildrenRequest {
// Required. The resource name of the parent Context.
//
@ -910,14 +963,13 @@ message AddContextChildrenRequest {
// The resource names of the child Contexts.
repeated string child_contexts = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Context"
}];
type: "aiplatform.googleapis.com/Context"
}];
}
// Response message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren].
message AddContextChildrenResponse {
}
// Response message for
// [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren].
message AddContextChildrenResponse {}
// Request message for
// [MetadataService.DeleteContextChildrenRequest][].
@ -935,16 +987,16 @@ message RemoveContextChildrenRequest {
// The resource names of the child Contexts.
repeated string child_contexts = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Context"
}];
type: "aiplatform.googleapis.com/Context"
}];
}
// Response message for [MetadataService.RemoveContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren].
message RemoveContextChildrenResponse {
// Response message for
// [MetadataService.RemoveContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren].
message RemoveContextChildrenResponse {}
}
// Request message for [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph].
// Request message for
// [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph].
message QueryContextLineageSubgraphRequest {
// Required. The resource name of the Context whose Artifacts and Executions
// should be retrieved as a LineageSubgraph.
@ -962,7 +1014,8 @@ message QueryContextLineageSubgraphRequest {
];
}
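A sketch of retrieving the LineageSubgraph for a Context and walking its Event edges; the Context name is a placeholder, and the field names used come from the LineageSubgraph message (artifacts, executions, events).

from google.cloud import aiplatform_v1beta1

CONTEXT = (
    "projects/my-project/locations/us-central1/"
    "metadataStores/default/contexts/my-experiment-run"
)

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

subgraph = client.query_context_lineage_subgraph(
    request=aiplatform_v1beta1.QueryContextLineageSubgraphRequest(context=CONTEXT)
)

# The subgraph lists Artifacts and Executions plus the Event edges that
# connect them.
print(len(subgraph.artifacts), len(subgraph.executions))
for event in subgraph.events:
    print(event.execution, "->", event.artifact)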
// Request message for [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution].
// Request message for
// [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution].
message CreateExecutionRequest {
// Required. The resource name of the MetadataStore where the Execution should
// be created.
@ -989,7 +1042,8 @@ message CreateExecutionRequest {
string execution_id = 3;
}
// Request message for [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution].
// Request message for
// [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution].
message GetExecutionRequest {
// Required. The resource name of the Execution to retrieve.
// Format:
@ -1002,7 +1056,8 @@ message GetExecutionRequest {
];
}
// Request message for [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions].
// Request message for
// [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions].
message ListExecutionsRequest {
// Required. The MetadataStore whose Executions should be listed.
// Format:
@ -1018,7 +1073,8 @@ message ListExecutionsRequest {
// Must be in range 1-1000, inclusive. Defaults to 100.
int32 page_size = 2;
// A page token, received from a previous [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]
// A page token, received from a previous
// [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other provided parameters must match the call that
@ -1065,36 +1121,42 @@ message ListExecutionsRequest {
string order_by = 5;
}
// Response message for [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions].
// Response message for
// [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions].
message ListExecutionsResponse {
// The Executions retrieved from the MetadataStore.
repeated Execution executions = 1;
// A token, which can be sent as [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token]
// A token, which can be sent as
// [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token]
// to retrieve the next page.
// If this field is not populated, there are no subsequent pages.
string next_page_token = 2;
}
// Request message for [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution].
// Request message for
// [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution].
message UpdateExecutionRequest {
// Required. The Execution containing updates.
// The Execution's [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] field is used to identify the Execution
// to be updated.
// Format:
// The Execution's
// [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] field is
// used to identify the Execution to be updated. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`
Execution execution = 1 [(google.api.field_behavior) = REQUIRED];
// Optional. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = OPTIONAL];
// If set to true, and the [Execution][google.cloud.aiplatform.v1beta1.Execution] is not found, a new [Execution][google.cloud.aiplatform.v1beta1.Execution]
// is created.
// If set to true, and the
// [Execution][google.cloud.aiplatform.v1beta1.Execution] is not found, a new
// [Execution][google.cloud.aiplatform.v1beta1.Execution] is created.
bool allow_missing = 3;
}
// Request message for [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution].
// Request message for
// [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution].
message DeleteExecutionRequest {
// Required. The resource name of the Execution to delete.
// Format:
@ -1112,7 +1174,8 @@ message DeleteExecutionRequest {
string etag = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Request message for [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
// Request message for
// [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
message PurgeExecutionsRequest {
// Required. The metadata store to purge Executions from.
// Format:
@ -1134,7 +1197,8 @@ message PurgeExecutionsRequest {
bool force = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Response message for [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
// Response message for
// [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
message PurgeExecutionsResponse {
// The number of Executions that this request deleted (or, if `force` is
// false, the number of Executions that will be deleted). This can be an
@ -1145,17 +1209,19 @@ message PurgeExecutionsResponse {
// Only populated if `force` is set to false. The maximum number of samples is
// 100 (it is possible to return fewer).
repeated string purge_sample = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Execution"
}];
type: "aiplatform.googleapis.com/Execution"
}];
}
// Details of operations that perform [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
// Details of operations that perform
// [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions].
message PurgeExecutionsMetadata {
// Operation metadata for purging Executions.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents].
// Request message for
// [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents].
message AddExecutionEventsRequest {
// Required. The resource name of the Execution that the Events connect
// Artifacts with.
@ -1172,16 +1238,15 @@ message AddExecutionEventsRequest {
repeated Event events = 2;
}
// Response message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents].
message AddExecutionEventsResponse {
// Response message for
// [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents].
message AddExecutionEventsResponse {}
}
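A sketch of connecting an Execution to its input and output Artifacts through Event edges. Resource names are placeholders, and note one assumption: the generated proto-plus Python types commonly expose the Event field named `type` as `type_`, which is the spelling used below.

from google.cloud import aiplatform_v1beta1

STORE = "projects/my-project/locations/us-central1/metadataStores/default"

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

# Each Event edge records whether the Artifact was consumed (INPUT) or
# produced (OUTPUT) by the Execution.
events = [
    aiplatform_v1beta1.Event(
        artifact=f"{STORE}/artifacts/raw-dataset",
        type_=aiplatform_v1beta1.Event.Type.INPUT,
    ),
    aiplatform_v1beta1.Event(
        artifact=f"{STORE}/artifacts/trained-model",
        type_=aiplatform_v1beta1.Event.Type.OUTPUT,
    ),
]
client.add_execution_events(
    request=aiplatform_v1beta1.AddExecutionEventsRequest(
        execution=f"{STORE}/executions/my-training-step",
        events=events,
    )
)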
// Request message for [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs].
// Request message for
// [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs].
message QueryExecutionInputsAndOutputsRequest {
// Required. The resource name of the Execution whose input and output Artifacts should
// be retrieved as a LineageSubgraph.
// Format:
// Required. The resource name of the Execution whose input and output
// Artifacts should be retrieved as a LineageSubgraph. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`
string execution = 1 [
(google.api.field_behavior) = REQUIRED,
@ -1191,11 +1256,11 @@ message QueryExecutionInputsAndOutputsRequest {
];
}
// Request message for [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
// Request message for
// [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
message CreateMetadataSchemaRequest {
// Required. The resource name of the MetadataStore where the MetadataSchema should
// be created.
// Format:
// Required. The resource name of the MetadataStore where the MetadataSchema
// should be created. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -1218,7 +1283,8 @@ message CreateMetadataSchemaRequest {
string metadata_schema_id = 3;
}
// Request message for [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema].
// Request message for
// [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema].
message GetMetadataSchemaRequest {
// Required. The resource name of the MetadataSchema to retrieve.
// Format:
@ -1231,7 +1297,8 @@ message GetMetadataSchemaRequest {
];
}
// Request message for [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
// Request message for
// [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
message ListMetadataSchemasRequest {
// Required. The MetadataStore whose MetadataSchemas should be listed.
// Format:
@ -1249,8 +1316,8 @@ message ListMetadataSchemasRequest {
int32 page_size = 2;
// A page token, received from a previous
// [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] call. Provide this to retrieve the
// next page.
// [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]
// call. Provide this to retrieve the next page.
//
// When paginating, all other provided parameters must match the call that
// provided the page token. (Otherwise the request will fail with
@ -1261,22 +1328,24 @@ message ListMetadataSchemasRequest {
string filter = 4;
}
// Response message for [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
// Response message for
// [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
message ListMetadataSchemasResponse {
// The MetadataSchemas found for the MetadataStore.
repeated MetadataSchema metadata_schemas = 1;
// A token, which can be sent as
// [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token] to retrieve the next
// page. If this field is not populated, there are no subsequent pages.
// [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token]
// to retrieve the next page. If this field is not populated, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph].
// Request message for
// [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph].
message QueryArtifactLineageSubgraphRequest {
// Required. The resource name of the Artifact whose Lineage needs to be retrieved as a
// LineageSubgraph.
// Format:
// Required. The resource name of the Artifact whose Lineage needs to be
// retrieved as a LineageSubgraph. Format:
// `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`
//
// The request may error with FAILED_PRECONDITION if the number of Artifacts,

google/cloud/aiplatform/v1beta1/metadata_store.proto
@ -47,10 +47,12 @@ message MetadataStore {
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this MetadataStore was created.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this MetadataStore was last updated.
google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Customer-managed encryption key spec for a Metadata Store. If set, this
// Metadata Store and all sub-resources of this Metadata Store are secured

google/cloud/aiplatform/v1beta1/migratable_resource.proto
@ -65,9 +65,9 @@ message MigratableResource {
// Full resource name of ml engine model Version.
// Format: `projects/{project}/models/{model}/versions/{version}`.
string version = 2 [(google.api.resource_reference) = {
type: "ml.googleapis.com/Version"
}];
string version = 2 [
(google.api.resource_reference) = { type: "ml.googleapis.com/Version" }
];
}
// Represents one Model in automl.googleapis.com.
@ -75,9 +75,9 @@ message MigratableResource {
// Full resource name of automl Model.
// Format:
// `projects/{project}/locations/{location}/models/{model}`.
string model = 1 [(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}];
string model = 1 [
(google.api.resource_reference) = { type: "automl.googleapis.com/Model" }
];
// The Model's display name in automl.googleapis.com.
string model_display_name = 3;
@ -89,8 +89,8 @@ message MigratableResource {
// Format:
// `projects/{project}/locations/{location}/datasets/{dataset}`.
string dataset = 1 [(google.api.resource_reference) = {
type: "automl.googleapis.com/Dataset"
}];
type: "automl.googleapis.com/Dataset"
}];
// The Dataset's display name in automl.googleapis.com.
string dataset_display_name = 4;
@ -104,8 +104,8 @@ message MigratableResource {
// Format:
// `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`.
string annotated_dataset = 1 [(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/AnnotatedDataset"
}];
type: "datalabeling.googleapis.com/AnnotatedDataset"
}];
// The AnnotatedDataset's display name in datalabeling.googleapis.com.
string annotated_dataset_display_name = 3;
@ -115,8 +115,8 @@ message MigratableResource {
// Format:
// `projects/{project}/datasets/{dataset}`.
string dataset = 1 [(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/Dataset"
}];
type: "datalabeling.googleapis.com/Dataset"
}];
// The Dataset's display name in datalabeling.googleapis.com.
string dataset_display_name = 4;
@ -128,23 +128,28 @@ message MigratableResource {
oneof resource {
// Output only. Represents one Version in ml.googleapis.com.
MlEngineModelVersion ml_engine_model_version = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
MlEngineModelVersion ml_engine_model_version = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Model in automl.googleapis.com.
AutomlModel automl_model = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Dataset in automl.googleapis.com.
AutomlDataset automl_dataset = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
AutomlDataset automl_dataset = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Dataset in datalabeling.googleapis.com.
DataLabelingDataset data_labeling_dataset = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
DataLabelingDataset data_labeling_dataset = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Output only. Timestamp when the last migration attempt on this MigratableResource
// started. Will not be set if there's no migration attempt on this
// MigratableResource.
google.protobuf.Timestamp last_migrate_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when the last migration attempt on this
// MigratableResource started. Will not be set if there's no migration attempt
// on this MigratableResource.
google.protobuf.Timestamp last_migrate_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this MigratableResource was last updated.
google.protobuf.Timestamp last_update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp last_update_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
}

google/cloud/aiplatform/v1beta1/migration_service.proto
@ -37,12 +37,14 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
service MigrationService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Searches all of the resources in automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com that can be migrated to
// Vertex AI's given location.
rpc SearchMigratableResources(SearchMigratableResourcesRequest) returns (SearchMigratableResourcesResponse) {
rpc SearchMigratableResources(SearchMigratableResourcesRequest)
returns (SearchMigratableResourcesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:search"
body: "*"
@ -52,7 +54,8 @@ service MigrationService {
// Batch migrates resources from ml.googleapis.com, automl.googleapis.com,
// and datalabeling.googleapis.com to Vertex AI.
rpc BatchMigrateResources(BatchMigrateResourcesRequest) returns (google.longrunning.Operation) {
rpc BatchMigrateResources(BatchMigrateResourcesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:batchMigrate"
body: "*"
@ -65,12 +68,12 @@ service MigrationService {
}
}
// Request message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
// Request message for
// [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
message SearchMigratableResourcesRequest {
// Required. The location that the migratable resources should be searched from.
// It's the Vertex AI location that the resources can be migrated to, not
// the resources' original location.
// Format:
// Required. The location that the migratable resources should be searched
// from. It's the Vertex AI location that the resources can be migrated to,
// not the resources' original location. Format:
// `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -89,7 +92,8 @@ message SearchMigratableResourcesRequest {
// A filter for your search. You can use the following types of filters:
//
// * Resource type filters. The following strings filter for a specific type
// of [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]:
// of
// [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]:
// * `ml_engine_model_version:*`
// * `automl_model:*`
// * `automl_dataset:*`
@ -101,7 +105,8 @@ message SearchMigratableResourcesRequest {
string filter = 4;
}
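A sketch of calling SearchMigratableResources with one of the resource-type filters listed above; the parent is the Vertex AI location the resources would migrate to, and the project and region are placeholders.

from google.cloud import aiplatform_v1beta1

# The parent is the Vertex AI destination location, not the resources' origin.
PARENT = "projects/my-project/locations/us-central1"

client = aiplatform_v1beta1.MigrationServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

request = aiplatform_v1beta1.SearchMigratableResourcesRequest(
    parent=PARENT,
    filter="ml_engine_model_version:*",  # one of the resource type filters above
)
for resource in client.search_migratable_resources(request=request):
    print(resource.ml_engine_model_version.version)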
// Response message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
// Response message for
// [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
message SearchMigratableResourcesResponse {
// All migratable resources that can be migrated to the
// location specified in the request.
@ -113,7 +118,8 @@ message SearchMigratableResourcesResponse {
string next_page_token = 2;
}
// Request message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
// Request message for
// [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesRequest {
// Required. The location of the migrated resource will live in.
// Format: `projects/{project}/locations/{location}`
@ -127,7 +133,8 @@ message BatchMigrateResourcesRequest {
// Required. The request messages specifying the resources to migrate.
// They must be in the same location as the destination.
// Up to 50 resources can be migrated in one batch.
repeated MigrateResourceRequest migrate_resource_requests = 2 [(google.api.field_behavior) = REQUIRED];
repeated MigrateResourceRequest migrate_resource_requests = 2
[(google.api.field_behavior) = REQUIRED];
}
// Config of migrating one resource from automl.googleapis.com,
@ -135,9 +142,8 @@ message BatchMigrateResourcesRequest {
message MigrateResourceRequest {
// Config for migrating version in ml.googleapis.com to Vertex AI's Model.
message MigrateMlEngineModelVersionConfig {
// Required. The ml.googleapis.com endpoint that this model version should be migrated
// from.
// Example values:
// Required. The ml.googleapis.com endpoint that this model version should
// be migrated from. Example values:
//
// * ml.googleapis.com
//
@ -152,9 +158,7 @@ message MigrateResourceRequest {
// Format: `projects/{project}/models/{model}/versions/{version}`.
string model_version = 2 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "ml.googleapis.com/Version"
}
(google.api.resource_reference) = { type: "ml.googleapis.com/Version" }
];
// Required. Display name of the model in Vertex AI.
@ -169,9 +173,7 @@ message MigrateResourceRequest {
// `projects/{project}/locations/{location}/models/{model}`.
string model = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}
(google.api.resource_reference) = { type: "automl.googleapis.com/Model" }
];
// Optional. Display name of the model in Vertex AI.
@ -228,15 +230,18 @@ message MigrateResourceRequest {
// System will pick a display name if unspecified.
string dataset_display_name = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to
// Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong
// to the datalabeling Dataset.
repeated MigrateDataLabelingAnnotatedDatasetConfig migrate_data_labeling_annotated_dataset_configs = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. Configs for migrating AnnotatedDataset in
// datalabeling.googleapis.com to Vertex AI's SavedQuery. The specified
// AnnotatedDatasets have to belong to the datalabeling Dataset.
repeated MigrateDataLabelingAnnotatedDatasetConfig
migrate_data_labeling_annotated_dataset_configs = 3
[(google.api.field_behavior) = OPTIONAL];
}
oneof request {
// Config for migrating Version in ml.googleapis.com to Vertex AI's Model.
MigrateMlEngineModelVersionConfig migrate_ml_engine_model_version_config = 1;
MigrateMlEngineModelVersionConfig migrate_ml_engine_model_version_config =
1;
// Config for migrating Model in automl.googleapis.com to Vertex AI's
// Model.
@ -252,7 +257,8 @@ message MigrateResourceRequest {
}
}
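A sketch of building a single MigrateResourceRequest for an ml.googleapis.com model version and submitting it through BatchMigrateResources, which returns a long-running operation (the comment above caps a batch at 50 requests). All project, model, and region values are placeholders.

from google.cloud import aiplatform_v1beta1

PARENT = "projects/my-project/locations/us-central1"

client = aiplatform_v1beta1.MigrationServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

migrate_request = aiplatform_v1beta1.MigrateResourceRequest(
    migrate_ml_engine_model_version_config=(
        aiplatform_v1beta1.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
            endpoint="ml.googleapis.com",
            model_version="projects/my-project/models/my-model/versions/v1",
            model_display_name="my-model-v1",
        )
    )
)

# BatchMigrateResources is a long-running operation; result() blocks until the
# migration finishes and returns a BatchMigrateResourcesResponse.
operation = client.batch_migrate_resources(
    request=aiplatform_v1beta1.BatchMigrateResourcesRequest(
        parent=PARENT, migrate_resource_requests=[migrate_request]
    )
)
response = operation.result()
for migrated in response.migrate_resource_responses:
    print(migrated.model or migrated.dataset)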
// Response message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
// Response message for
// [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesResponse {
// Successfully migrated resources.
repeated MigrateResourceResponse migrate_resource_responses = 1;
@ -264,13 +270,13 @@ message MigrateResourceResponse {
oneof migrated_resource {
// Migrated Dataset's resource name.
string dataset = 1 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Dataset"
}];
type: "aiplatform.googleapis.com/Dataset"
}];
// Migrated Model's resource name.
string model = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
type: "aiplatform.googleapis.com/Model"
}];
}
// Before migration, the identifier in ml.googleapis.com,
@ -278,7 +284,8 @@ message MigrateResourceResponse {
MigratableResource migratable_resource = 3;
}
// Runtime operation information for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
// Runtime operation information for
// [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesOperationMetadata {
// Represents a partial result in batch migration operation for one
// [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest].
@ -292,13 +299,13 @@ message BatchMigrateResourcesOperationMetadata {
// Migrated model resource name.
string model = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
type: "aiplatform.googleapis.com/Model"
}];
// Migrated dataset resource name.
string dataset = 4 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Dataset"
}];
type: "aiplatform.googleapis.com/Dataset"
}];
}
// It's the same as the value in

google/cloud/aiplatform/v1beta1/model.proto
@ -50,12 +50,15 @@ message Model {
// Model artifact and any of its supported files. Will be exported to the
// location specified by the `artifactDestination` field of the
// [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
// [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config]
// object.
ARTIFACT = 1;
// The container image that is to be used when deploying this Model. Will
// be exported to the location specified by the `imageDestination` field
// of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
// of the
// [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config]
// object.
IMAGE = 2;
}
@ -83,7 +86,8 @@ message Model {
string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The content of this Model that may be exported.
repeated ExportableContent exportable_contents = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated ExportableContent exportable_contents = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Identifies a type of Model's prediction resources.
@ -91,16 +95,20 @@ message Model {
// Should not be used.
DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0;
// Resources that are dedicated to the [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel], and that need a
// higher degree of manual configuration.
// Resources that are dedicated to the
// [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel], and that
// need a higher degree of manual configuration.
DEDICATED_RESOURCES = 1;
// Resources that to large degree are decided by Vertex AI, and require
// only a modest additional configuration.
AUTOMATIC_RESOURCES = 2;
// Resources that can be shared by multiple [DeployedModels][google.cloud.aiplatform.v1beta1.DeployedModel].
// A pre-configured [DeploymentResourcePool][google.cloud.aiplatform.v1beta1.DeploymentResourcePool] is required.
// Resources that can be shared by multiple
// [DeployedModels][google.cloud.aiplatform.v1beta1.DeployedModel]. A
// pre-configured
// [DeploymentResourcePool][google.cloud.aiplatform.v1beta1.DeploymentResourcePool]
// is required.
SHARED_RESOURCES = 3;
}
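A sketch of inspecting a Model's supported deployment resource types before choosing how to deploy it, using the DeploymentResourcesType enum defined above; the model resource name is a placeholder.

from google.cloud import aiplatform_v1beta1

MODEL_NAME = "projects/my-project/locations/us-central1/models/1234567890"

client = aiplatform_v1beta1.ModelServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

model = client.get_model(name=MODEL_NAME)

# supported_deployment_resources_types is output-only; an empty list means the
# Model cannot be deployed to an Endpoint for online prediction.
types = aiplatform_v1beta1.Model.DeploymentResourcesType
if types.SHARED_RESOURCES in model.supported_deployment_resources_types:
    print("Model can share a pre-configured DeploymentResourcePool.")
elif types.DEDICATED_RESOURCES in model.supported_deployment_resources_types:
    print("Model requires a DedicatedResources configuration.")
elif not model.supported_deployment_resources_types:
    print("Online deployment not supported; consider a BatchPredictionJob.")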
@ -128,10 +136,12 @@ message Model {
repeated string version_aliases = 29;
// Output only. Timestamp when this version was created.
google.protobuf.Timestamp version_create_time = 31 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp version_create_time = 31
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this version was most recently updated.
google.protobuf.Timestamp version_update_time = 32 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp version_update_time = 32
[(google.api.field_behavior) = OUTPUT_ONLY];
// Required. The display name of the Model.
// The name can be up to 128 characters long and can consist of any UTF-8
@ -146,13 +156,15 @@ message Model {
// The schemata that describe formats of the Model's predictions and
// explanations as given and returned via
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] and [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
// and
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
PredictSchemata predict_schemata = 4;
// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
// information about the Model, that is specific to it. Unset if the Model
// does not have any additional information.
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Immutable. Points to a YAML file stored on Google Cloud Storage describing
// additional information about the Model, that is specific to it. Unset if
// the Model does not have any additional information. The schema is defined
// as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML Models always have this field populated by Vertex AI, if no
// additional metadata is needed, this field is set to an empty string.
@ -161,17 +173,19 @@ message Model {
// point to a location where the user only has a read access.
string metadata_schema_uri = 5 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. An additional information about the Model; the schema of the metadata can
// be found in [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
// Immutable. An additional information about the Model; the schema of the
// metadata can be found in
// [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
// Unset if the Model does not have any additional information.
google.protobuf.Value metadata = 6 [(google.api.field_behavior) = IMMUTABLE];
// Output only. The formats in which this Model may be exported. If empty, this Model is
// not available for export.
repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The formats in which this Model may be exported. If empty,
// this Model is not available for export.
repeated ExportFormat supported_export_formats = 20
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The resource name of the TrainingPipeline that uploaded this Model, if
// any.
// Output only. The resource name of the TrainingPipeline that uploaded this
// Model, if any.
string training_pipeline = 7 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -179,35 +193,43 @@ message Model {
}
];
// Input only. The specification of the container that is to be used when deploying
// this Model. The specification is ingested upon
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied
// and stored internally by Vertex AI.
// Input only. The specification of the container that is to be used when
// deploying this Model. The specification is ingested upon
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel],
// and all binaries it contains are copied and stored internally by Vertex AI.
// Not present for AutoML Models.
ModelContainerSpec container_spec = 9 [(google.api.field_behavior) = INPUT_ONLY];
ModelContainerSpec container_spec = 9
[(google.api.field_behavior) = INPUT_ONLY];
// Immutable. The path to the directory containing the Model artifact and any of its
// supporting files.
// Not present for AutoML Models.
// Immutable. The path to the directory containing the Model artifact and any
// of its supporting files. Not present for AutoML Models.
string artifact_uri = 26 [(google.api.field_behavior) = IMMUTABLE];
// Output only. When this Model is deployed, its prediction resources are described by the
// `prediction_resources` field of the [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object.
// Because not all Models support all resource configuration types, the
// configuration types this Model supports are listed here. If no
// Output only. When this Model is deployed, its prediction resources are
// described by the `prediction_resources` field of the
// [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
// object. Because not all Models support all resource configuration types,
// the configuration types this Model supports are listed here. If no
// configuration types are listed, the Model cannot be deployed to an
// [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and does not support
// online predictions ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). Such a Model can serve predictions by
// using a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], if it has at least one entry each in
// [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] and
// online predictions
// ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
// or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]).
// Such a Model can serve predictions by using a
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob],
// if it has at least one entry each in
// [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]
// and
// [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
repeated DeploymentResourcesType supported_deployment_resources_types = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated DeploymentResourcesType supported_deployment_resources_types = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The formats this Model supports in
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] exists, the instances
// should be given as per that schema.
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
// If
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// exists, the instances should be given as per that schema.
//
// The possible formats are:
//
@ -218,11 +240,13 @@ message Model {
// * `csv`
// The CSV format, where each instance is a single comma-separated line.
// The first line in the file is the header, containing comma-separated field
// names. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
// names. Uses
// [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `tf-record`
// The TFRecord format, where each instance is a single record in tfrecord
// syntax. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
// syntax. Uses
// [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `tf-record-gzip`
// Similar to `tf-record`, but the file is gzipped. Uses
@ -235,23 +259,31 @@ message Model {
// * `file-list`
// Each line of the file is the location of an instance to process, uses
// `gcs_source` field of the
// [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] object.
// [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig]
// object.
//
//
// If this Model doesn't support any of these formats it means it cannot be
// used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
// predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// used with a
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types],
// it could serve online predictions by using
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
// or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
repeated string supported_input_storage_formats = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated string supported_input_storage_formats = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The formats this Model supports in
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. If both
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and
// [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] exist, the predictions
// are returned together with their instances. In other words, the
// prediction has the original instance data first, followed
// by the actual prediction content (as per the schema).
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
// If both
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// and
// [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]
// exist, the predictions are returned together with their instances. In other
// words, the prediction has the original instance data first, followed by the
// actual prediction content (as per the schema).
//
// The possible formats are:
//
@ -272,43 +304,57 @@ message Model {
//
//
// If this Model doesn't support any of these formats it means it cannot be
// used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
// predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// used with a
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types],
// it could serve online predictions by using
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
// or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
repeated string supported_output_storage_formats = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated string supported_output_storage_formats = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Model was uploaded into Vertex AI.
google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Model was most recently updated.
google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The pointers to DeployedModels created from this Model. Note that
// Model could have been deployed to Endpoints in different Locations.
repeated DeployedModelRef deployed_models = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The pointers to DeployedModels created from this Model. Note
// that Model could have been deployed to Endpoints in different Locations.
repeated DeployedModelRef deployed_models = 15
[(google.api.field_behavior) = OUTPUT_ONLY];
// The default explanation specification for this Model.
//
// The Model can be used for [requesting
// explanation][PredictionService.Explain] after being
// [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] if it is populated.
// The Model can be used for [batch
// [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] if
// it is populated. The Model can be used for [batch
// explanation][BatchPredictionJob.generate_explanation] if it is populated.
//
// All fields of the explanation_spec can be overridden by
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], or
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model],
// or
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
// of
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
//
// If the default explanation specification is not set for this Model, this
// Model can still be used for [requesting
// explanation][PredictionService.Explain] by setting
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] and for [batch
// explanation][BatchPredictionJob.generate_explanation] by setting
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model]
// and for [batch explanation][BatchPredictionJob.generate_explanation] by
// setting
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
// of
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
ExplanationSpec explanation_spec = 23;
@ -329,23 +375,28 @@ message Model {
// Model and all sub-resources of this Model will be secured by this key.
EncryptionSpec encryption_spec = 24;
// Output only. Source of a model. It can either be automl training pipeline, custom
// training pipeline, BigQuery ML, or existing Vertex AI Model.
ModelSourceInfo model_source_info = 38 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Source of a model. It can either be automl training pipeline,
// custom training pipeline, BigQuery ML, or existing Vertex AI Model.
ModelSourceInfo model_source_info = 38
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The resource name of the Artifact that was created in MetadataStore when
// creating the Model. The Artifact resource name pattern is
// Output only. The resource name of the Artifact that was created in
// MetadataStore when creating the Model. The Artifact resource name pattern
// is
// `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`.
string metadata_artifact = 44 [(google.api.field_behavior) = OUTPUT_ONLY];
}
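As an illustration of the `supported_input_storage_formats` / `supported_output_storage_formats` fields documented above, the sketch below inspects them before deciding whether a Model can be used for batch prediction. It is a minimal sketch, assuming the Python client generated from these protos (`google-cloud-aiplatform`); the project, location, and model IDs are placeholders.

```python
# Hypothetical usage sketch; resource names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
model = client.get_model(
    name="projects/my-project/locations/us-central1/models/123"
)

# Output-only lists populated by the service for this Model.
print(model.supported_input_storage_formats)
print(model.supported_output_storage_formats)

if not model.supported_input_storage_formats:
    # Per the comments above: no batch formats means the Model cannot be used
    # with a BatchPredictionJob, though it may still serve online predictions.
    print("Model cannot be used with a BatchPredictionJob")
```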
// Contains the schemata used in Model's predictions and explanations via
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] and
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict],
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]
// and [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
message PredictSchemata {
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
// of a single instance, which are used in [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
// [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] and
// Immutable. Points to a YAML file stored on Google Cloud Storage describing
// the format of a single instance, which is used in
// [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
// [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
// and
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
@ -355,9 +406,11 @@ message PredictSchemata {
// point to a location where the user only has read access.
string instance_schema_uri = 1 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the
// parameters of prediction and explanation via
// [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] and
// Immutable. Points to a YAML file stored on Google Cloud Storage describing
// the parameters of prediction and explanation via
// [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters],
// [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters]
// and
// [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
@ -368,9 +421,12 @@ message PredictSchemata {
// point to a location where the user only has read access.
string parameters_schema_uri = 2 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
// of a single prediction produced by this Model, which are returned via
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], and
// Immutable. Points to a YAML file stored on Google Cloud Storage describing
// the format of a single prediction produced by this Model, which is
// returned via
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions],
// [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations],
// and
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
@ -385,14 +441,16 @@ message PredictSchemata {
// message correspond to fields in the [Kubernetes Container v1 core
// specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
message ModelContainerSpec {
// Required. Immutable. URI of the Docker image to be used as the custom container for serving
// predictions. This URI must identify an image in Artifact Registry or
// Container Registry. Learn more about the [container publishing
// Required. Immutable. URI of the Docker image to be used as the custom
// container for serving predictions. This URI must identify an image in
// Artifact Registry or Container Registry. Learn more about the [container
// publishing
// requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
// including permissions requirements for the Vertex AI Service Agent.
//
// The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
// internally, and this original path is afterwards not used.
// The container image is ingested upon
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel],
// stored internally, and this original path is afterwards not used.
//
// To learn about the requirements for the Docker image itself, see
// [Custom container
@ -406,18 +464,20 @@ message ModelContainerSpec {
(google.api.field_behavior) = IMMUTABLE
];
// Immutable. Specifies the command that runs when the container starts. This overrides
// the container's
// Immutable. Specifies the command that runs when the container starts. This
// overrides the container's
// [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
// Specify this field as an array of executable and arguments, similar to a
// Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
//
// If you do not specify this field, then the container's `ENTRYPOINT` runs,
// in conjunction with the [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the
// container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
// if either exists. If this field is not specified and the container does not
// have an `ENTRYPOINT`, then refer to the Docker documentation about [how
// `CMD` and `ENTRYPOINT`
// in conjunction with the
// [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or
// the container's
// [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either
// exists. If this field is not specified and the container does not have an
// `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and
// `ENTRYPOINT`
// interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
//
// If you specify this field, then you can also specify the `args` field to
@ -429,9 +489,10 @@ message ModelContainerSpec {
//
// In this field, you can reference [environment variables set by Vertex
// AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// and environment variables set in the
// [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You
// cannot reference environment variables set in the Docker image. In order
// for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
@ -444,16 +505,16 @@ message ModelContainerSpec {
// API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
repeated string command = 2 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. Specifies arguments for the command that runs when the container starts.
// This overrides the container's
// Immutable. Specifies arguments for the command that runs when the container
// starts. This overrides the container's
// [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
// this field as an array of executable and arguments, similar to a Docker
// `CMD`'s "default parameters" form.
//
// If you don't specify this field but do specify the
// [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the
// `command` field runs without any additional arguments. See the
// [Kubernetes documentation about how the
// [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command]
// field, then the command from the `command` field runs without any
// additional arguments. See the [Kubernetes documentation about how the
// `command` and `args` fields interact with a container's `ENTRYPOINT` and
// `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
//
@ -467,9 +528,10 @@ message ModelContainerSpec {
// In this field, you can reference [environment variables
// set by Vertex
// AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// and environment variables set in the
// [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You
// cannot reference environment variables set in the Docker image. In order
// for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
@ -482,14 +544,16 @@ message ModelContainerSpec {
// API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
repeated string args = 3 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. List of environment variables to set in the container. After the container
// starts running, code running in the container can read these environment
// variables.
// Immutable. List of environment variables to set in the container. After the
// container starts running, code running in the container can read these
// environment variables.
//
// Additionally, the [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
// [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later
// entries in this list can also reference earlier entries. For example, the
// following example sets the variable `VAR_2` to have the value `foo bar`:
// Additionally, the
// [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
// [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can
// reference these variables. Later entries in this list can also reference
// earlier entries. For example, the following example sets the variable
// `VAR_2` to have the value `foo bar`:
//
// ```json
// [
@ -535,11 +599,11 @@ message ModelContainerSpec {
// API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. HTTP path on the container to send prediction requests to. Vertex AI
// forwards requests sent using
// [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
// path on the container's IP address and port. Vertex AI then returns the
// container's response in the API response.
// Immutable. HTTP path on the container to send prediction requests to.
// Vertex AI forwards requests sent using
// [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
// to this path on the container's IP address and port. Vertex AI then returns
// the container's response in the API response.
//
// For example, if you set this field to `/foo`, then when Vertex AI
// receives a prediction request, it forwards the request body in a POST
@ -548,7 +612,8 @@ message ModelContainerSpec {
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// you [deploy this Model to an
// Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
@ -558,7 +623,9 @@ message ModelContainerSpec {
// as the [`AIP_ENDPOINT_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// * <var>DEPLOYED_MODEL</var>:
// [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the
// `DeployedModel`.
// (Vertex AI makes this value available to your container code
// as the [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
@ -576,7 +643,8 @@ message ModelContainerSpec {
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// you [deploy this Model to an
// Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
@ -586,7 +654,9 @@ message ModelContainerSpec {
// as the [`AIP_ENDPOINT_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// * <var>DEPLOYED_MODEL</var>:
// [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the
// `DeployedModel`.
// (Vertex AI makes this value available to your container code as the
// [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
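To tie the `ModelContainerSpec` fields above together, here is a minimal sketch that builds a container spec with an entrypoint override, environment variable expansion, a serving port, and custom predict/health routes. It assumes the Python types generated from these protos; the image URI, bucket path, and variable names are placeholders, not a canonical sample.

```python
# Hypothetical container spec; URIs and values are placeholders.
from google.cloud import aiplatform_v1beta1

container_spec = aiplatform_v1beta1.ModelContainerSpec(
    image_uri="us-docker.pkg.dev/my-project/my-repo/my-server:latest",
    # "exec"-form ENTRYPOINT override plus default arguments.
    command=["python3", "server.py"],
    args=["--model-dir", "$(AIP_STORAGE_URI)"],  # references a Vertex AI-set variable
    env=[
        aiplatform_v1beta1.EnvVar(name="VAR_1", value="foo"),
        aiplatform_v1beta1.EnvVar(name="VAR_2", value="$(VAR_1) bar"),  # expands to "foo bar"
    ],
    ports=[aiplatform_v1beta1.Port(container_port=8080)],
    predict_route="/foo",
    health_route="/health",
)

model = aiplatform_v1beta1.Model(
    display_name="my-custom-model",
    artifact_uri="gs://my-bucket/model-artifacts/",
    container_spec=container_spec,
)
```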

@ -118,20 +118,27 @@ message ModelDeploymentMonitoringJob {
JobState state = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Schedule state when the monitoring job is in Running state.
MonitoringScheduleState schedule_state = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
MonitoringScheduleState schedule_state = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Latest triggered monitoring pipeline metadata.
LatestMonitoringPipelineMetadata latest_monitoring_pipeline_metadata = 25 [(google.api.field_behavior) = OUTPUT_ONLY];
LatestMonitoringPipelineMetadata latest_monitoring_pipeline_metadata = 25
[(google.api.field_behavior) = OUTPUT_ONLY];
// Required. The config for monitoring objectives. This is a per DeployedModel config.
// Each DeployedModel needs to be configured separately.
repeated ModelDeploymentMonitoringObjectiveConfig model_deployment_monitoring_objective_configs = 6 [(google.api.field_behavior) = REQUIRED];
// Required. The config for monitoring objectives. This is a per DeployedModel
// config. Each DeployedModel needs to be configured separately.
repeated ModelDeploymentMonitoringObjectiveConfig
model_deployment_monitoring_objective_configs = 6
[(google.api.field_behavior) = REQUIRED];
// Required. Schedule config for running the monitoring job.
ModelDeploymentMonitoringScheduleConfig model_deployment_monitoring_schedule_config = 7 [(google.api.field_behavior) = REQUIRED];
ModelDeploymentMonitoringScheduleConfig
model_deployment_monitoring_schedule_config = 7
[(google.api.field_behavior) = REQUIRED];
// Required. Sample Strategy for logging.
SamplingStrategy logging_sampling_strategy = 8 [(google.api.field_behavior) = REQUIRED];
SamplingStrategy logging_sampling_strategy = 8
[(google.api.field_behavior) = REQUIRED];
// Alert config for model monitoring.
ModelMonitoringAlertConfig model_monitoring_alert_config = 15;
@ -142,10 +149,12 @@ message ModelDeploymentMonitoringJob {
// requests.
string predict_instance_schema_uri = 9;
// Sample Predict instance, same format as [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
// Sample Predict instance, same format as
// [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
// this can be set as a replacement of
// [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. If not set,
// we will generate predict schema from collected predict requests.
// [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri].
// If not set, we will generate predict schema from collected predict
// requests.
google.protobuf.Value sample_predict_instance = 19;
// YAML schema file uri describing the format of a single instance that you
@ -161,12 +170,13 @@ message ModelDeploymentMonitoringJob {
// fields in predict instance formatted as string.
string analysis_instance_schema_uri = 16;
// Output only. The created bigquery tables for the job under customer project. Customer
// could do their own query & analysis. There could be 4 log tables in
// maximum:
// Output only. The created bigquery tables for the job under customer
// project. Customer could do their own query & analysis. There could be 4 log
// tables in maximum:
// 1. Training data logging predict request/response
// 2. Serving data logging predict request/response
repeated ModelDeploymentMonitoringBigQueryTable bigquery_tables = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated ModelDeploymentMonitoringBigQueryTable bigquery_tables = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// The TTL of BigQuery tables in user projects which stores logs.
// A day is the basic unit of the TTL and we take the ceil of TTL/86400(a
@ -184,14 +194,18 @@ message ModelDeploymentMonitoringJob {
map<string, string> labels = 11;
// Output only. Timestamp when this ModelDeploymentMonitoringJob was created.
google.protobuf.Timestamp create_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this ModelDeploymentMonitoringJob was updated most recently.
google.protobuf.Timestamp update_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this ModelDeploymentMonitoringJob was updated
// most recently.
google.protobuf.Timestamp update_time = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this monitoring pipeline will be scheduled to run for the
// next round.
google.protobuf.Timestamp next_schedule_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this monitoring pipeline will be scheduled to
// run for the next round.
google.protobuf.Timestamp next_schedule_time = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// Stats anomalies base folder path.
GcsDestination stats_anomalies_base_directory = 20;
@ -263,17 +277,19 @@ message ModelDeploymentMonitoringObjectiveConfig {
// The config for scheduling monitoring job.
message ModelDeploymentMonitoringScheduleConfig {
// Required. The model monitoring job scheduling interval. It will be rounded up to next
// full hour. This defines how often the monitoring jobs are triggered.
google.protobuf.Duration monitor_interval = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The model monitoring job scheduling interval. It will be rounded
// up to the next full hour. This defines how often the monitoring jobs are
// triggered.
google.protobuf.Duration monitor_interval = 1
[(google.api.field_behavior) = REQUIRED];
// The time window of the prediction data being included in each prediction
// dataset. This window specifies how long the data should be collected from
// historical model results for each run. If not set,
// [ModelDeploymentMonitoringScheduleConfig.monitor_interval][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringScheduleConfig.monitor_interval] will be used.
// e.g. If currently the cutoff time is 2022-01-08 14:30:00 and the
// monitor_window is set to be 3600, then data from 2022-01-08 13:30:00
// to 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the
// [ModelDeploymentMonitoringScheduleConfig.monitor_interval][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringScheduleConfig.monitor_interval]
// will be used. e.g. If currently the cutoff time is 2022-01-08 14:30:00 and
// the monitor_window is set to be 3600, then data from 2022-01-08 13:30:00 to
// 2022-01-08 14:30:00 will be retrieved and aggregated to calculate the
// monitoring statistics.
google.protobuf.Duration monitor_window = 2;
}
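A short sketch of the `monitor_interval` / `monitor_window` semantics described above: an hourly job that aggregates the previous hour of prediction data. It assumes the generated Python types; the durations are placeholders chosen to match the example in the comment.

```python
# Hypothetical schedule config; values are placeholders.
from google.cloud import aiplatform_v1beta1
from google.protobuf import duration_pb2

schedule_config = aiplatform_v1beta1.ModelDeploymentMonitoringScheduleConfig(
    # Rounded up to the next full hour by the service.
    monitor_interval=duration_pb2.Duration(seconds=3600),
    # Each run looks back one hour from the cutoff time (e.g. 13:30 to 14:30).
    monitor_window=duration_pb2.Duration(seconds=3600),
)
```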

@ -58,8 +58,8 @@ message ModelEvaluation {
string display_name = 10;
// Points to a YAML file stored on Google Cloud Storage describing the
// [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is
// defined as an OpenAPI 3.0.2 [Schema
// [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this
// ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
string metrics_schema_uri = 2;
@ -68,12 +68,13 @@ message ModelEvaluation {
google.protobuf.Value metrics = 3;
// Output only. Timestamp when this ModelEvaluation was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// All possible [dimensions][ModelEvaluationSlice.slice.dimension] of
// ModelEvaluationSlices. The dimensions can be used as the filter of the
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] request, in the form of
// `slice.dimension = <dimension>`.
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]
// request, in the form of `slice.dimension = <dimension>`.
repeated string slice_dimensions = 5;
// Aggregated explanation metrics for the Model's prediction output over the
@ -82,8 +83,9 @@ message ModelEvaluation {
//
ModelExplanation model_explanation = 8;
// Describes the values of [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] that are used for explaining
// the predicted values on the evaluated data.
// Describes the values of
// [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] that are
// used for explaining the predicted values on the evaluated data.
repeated ModelEvaluationExplanationSpec explanation_specs = 9;
// The metadata of the ModelEvaluation.

@ -42,8 +42,10 @@ message ModelEvaluationSlice {
// Output only. The dimension of the slice.
// Well-known dimensions are:
// * `annotationSpec`: This slice is on the test data that has either
// ground truth or prediction with [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name]
// equals to [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value].
// ground truth or prediction with
// [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name]
// equals to
// [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value].
string dimension = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The value of the dimension in this slice.
@ -56,16 +58,20 @@ message ModelEvaluationSlice {
// Output only. The slice of the test data that is used to evaluate the Model.
Slice slice = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Points to a YAML file stored on Google Cloud Storage describing the
// [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The
// schema is defined as an OpenAPI 3.0.2 [Schema
// Output only. Points to a YAML file stored on Google Cloud Storage
// describing the
// [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] of
// this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2
// [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
string metrics_schema_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored
// in [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri]
// Output only. Sliced evaluation metrics of the Model. The schema of the
// metrics is stored in
// [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri]
google.protobuf.Value metrics = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this ModelEvaluationSlice was created.
google.protobuf.Timestamp create_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -59,8 +59,8 @@ message ModelMonitoringObjectiveConfig {
oneof data_source {
// The resource name of the Dataset used to train this Model.
string dataset = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Dataset"
}];
type: "aiplatform.googleapis.com/Dataset"
}];
// The Google Cloud Storage uri of the unmanaged Dataset used to train
// this Model.
@ -135,8 +135,10 @@ message ModelMonitoringObjectiveConfig {
// The config for integrating with Vertex Explainable AI. Only applicable if
// the Model has explanation_spec populated.
message ExplanationConfig {
// Output from [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] for Model Monitoring baseline dataset,
// which can be used to generate baseline attribution scores.
// Output from
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]
// for Model Monitoring baseline dataset, which can be used to generate
// baseline attribution scores.
message ExplanationBaseline {
// The storage format of the predictions generated by the BatchPrediction job.
enum PredictionFormat {
@ -178,7 +180,8 @@ message ModelMonitoringObjectiveConfig {
TrainingDataset training_dataset = 1;
// The config for skew between training data and prediction data.
TrainingPredictionSkewDetectionConfig training_prediction_skew_detection_config = 2;
TrainingPredictionSkewDetectionConfig
training_prediction_skew_detection_config = 2;
// The config for drift of prediction data.
PredictionDriftDetectionConfig prediction_drift_detection_config = 3;

@ -40,7 +40,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for managing Vertex AI's machine learning Models.
service ModelService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Uploads a Model artifact into Vertex AI.
rpc UploadModel(UploadModelRequest) returns (google.longrunning.Operation) {
@ -72,7 +73,8 @@ service ModelService {
}
// Lists versions of the specified model.
rpc ListModelVersions(ListModelVersionsRequest) returns (ListModelVersionsResponse) {
rpc ListModelVersions(ListModelVersionsRequest)
returns (ListModelVersionsResponse) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/models/*}:listVersions"
};
@ -89,7 +91,8 @@ service ModelService {
}
// Incrementally update the dataset used for an examples model.
rpc UpdateExplanationDataset(UpdateExplanationDatasetRequest) returns (google.longrunning.Operation) {
rpc UpdateExplanationDataset(UpdateExplanationDatasetRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{model=projects/*/locations/*/models/*}:updateExplanationDataset"
body: "*"
@ -103,9 +106,12 @@ service ModelService {
// Deletes a Model.
//
// A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
// [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
// [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
// A model cannot be deleted if any
// [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
// [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the
// model in its
// [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
// field.
rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/models/*}"
@ -121,8 +127,10 @@ service ModelService {
//
// Model version can only be deleted if there are no [DeployedModels][]
// created from it. Deleting the only version in the Model is not allowed. Use
// [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for deleting the Model instead.
rpc DeleteModelVersion(DeleteModelVersionRequest) returns (google.longrunning.Operation) {
// [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for
// deleting the Model instead.
rpc DeleteModelVersion(DeleteModelVersionRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/models/*}:deleteVersion"
};
@ -144,7 +152,8 @@ service ModelService {
// Exports a trained, exportable Model to a location specified by the
// user. A Model is considered to be exportable if it has at least one
// [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
// [supported export
// format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
rpc ExportModel(ExportModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/models/*}:export"
@ -158,7 +167,8 @@ service ModelService {
}
// Imports an externally generated ModelEvaluation.
rpc ImportModelEvaluation(ImportModelEvaluationRequest) returns (ModelEvaluation) {
rpc ImportModelEvaluation(ImportModelEvaluationRequest)
returns (ModelEvaluation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/models/*}/evaluations:import"
body: "*"
@ -167,7 +177,8 @@ service ModelService {
}
// Imports a list of externally generated ModelEvaluationSlice.
rpc BatchImportModelEvaluationSlices(BatchImportModelEvaluationSlicesRequest) returns (BatchImportModelEvaluationSlicesResponse) {
rpc BatchImportModelEvaluationSlices(BatchImportModelEvaluationSlicesRequest)
returns (BatchImportModelEvaluationSlicesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/models/*/evaluations/*}/slices:batchImport"
body: "*"
@ -184,7 +195,8 @@ service ModelService {
}
// Lists ModelEvaluations in a Model.
rpc ListModelEvaluations(ListModelEvaluationsRequest) returns (ListModelEvaluationsResponse) {
rpc ListModelEvaluations(ListModelEvaluationsRequest)
returns (ListModelEvaluationsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/models/*}/evaluations"
};
@ -192,7 +204,8 @@ service ModelService {
}
// Gets a ModelEvaluationSlice.
rpc GetModelEvaluationSlice(GetModelEvaluationSliceRequest) returns (ModelEvaluationSlice) {
rpc GetModelEvaluationSlice(GetModelEvaluationSliceRequest)
returns (ModelEvaluationSlice) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/slices/*}"
};
@ -200,7 +213,8 @@ service ModelService {
}
// Lists ModelEvaluationSlices in a ModelEvaluation.
rpc ListModelEvaluationSlices(ListModelEvaluationSlicesRequest) returns (ListModelEvaluationSlicesResponse) {
rpc ListModelEvaluationSlices(ListModelEvaluationSlicesRequest)
returns (ListModelEvaluationSlicesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/models/*/evaluations/*}/slices"
};
@ -208,7 +222,8 @@ service ModelService {
}
}
// Request message for [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel].
// Request message for
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel].
message UploadModelRequest {
// Required. The resource name of the Location into which to upload the Model.
// Format: `projects/{project}/locations/{location}`
@ -219,8 +234,8 @@ message UploadModelRequest {
}
];
// Optional. The resource name of the model into which to upload the version. Only
// specify this field when uploading a new version.
// Optional. The resource name of the model into which to upload the version.
// Only specify this field when uploading a new version.
string parent_model = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. The ID to use for the uploaded Model, which will become the final
@ -243,25 +258,30 @@ message UploadModelRequest {
string service_account = 6 [(google.api.field_behavior) = OPTIONAL];
}
// Details of [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation.
// Details of
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]
// operation.
message UploadModelOperationMetadata {
// The common part of the operation metadata.
GenericOperationMetadata generic_metadata = 1;
}
// Response message of [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation.
// Response message of
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]
// operation.
message UploadModelResponse {
// The name of the uploaded Model resource.
// Format: `projects/{project}/locations/{location}/models/{model}`
string model = 1 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
type: "aiplatform.googleapis.com/Model"
}];
// Output only. The version ID of the model that is uploaded.
string model_version_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
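The sketch below shows the `UploadModel` flow documented above, including `parent_model` for uploading a new version of an existing Model and reading `model_version_id` from the operation result. It is a hedged sketch against the generated Python client; all resource names and the image URI are placeholders.

```python
# Hypothetical upload of a new Model version; names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
operation = client.upload_model(
    request=aiplatform_v1beta1.UploadModelRequest(
        parent="projects/my-project/locations/us-central1",
        parent_model="projects/my-project/locations/us-central1/models/123",
        model=aiplatform_v1beta1.Model(
            display_name="my-custom-model",
            artifact_uri="gs://my-bucket/model-artifacts/",
            container_spec=aiplatform_v1beta1.ModelContainerSpec(
                image_uri="us-docker.pkg.dev/my-project/my-repo/my-server:latest",
            ),
        ),
    )
)
response = operation.result()  # long-running operation
print(response.model, response.model_version_id)
```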
// Request message for [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel].
// Request message for
// [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel].
message GetModelRequest {
// Required. The name of the Model resource.
// Format: `projects/{project}/locations/{location}/models/{model}`
@ -283,7 +303,8 @@ message GetModelRequest {
];
}
// Request message for [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels].
// Request message for
// [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels].
message ListModelsRequest {
// Required. The resource name of the Location to list the Models from.
// Format: `projects/{project}/locations/{location}`
@ -298,7 +319,8 @@ message ListModelsRequest {
// both snake_case and camelCase are supported.
//
// * `model` supports = and !=. `model` represents the Model ID,
// i.e. the last segment of the Model's [resource name][google.cloud.aiplatform.v1beta1.Model.name].
// i.e. the last segment of the Model's [resource
// name][google.cloud.aiplatform.v1beta1.Model.name].
// * `display_name` supports = and !=
// * `labels` supports general map functions that is:
// * `labels.key=value` - key:value equality
@ -317,25 +339,31 @@ message ListModelsRequest {
// The standard list page token.
// Typically obtained via
// [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] of the previous
// [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] call.
// [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token]
// of the previous
// [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
// Response message for
// [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
message ListModelsResponse {
// List of Models in the requested page.
repeated Model models = 1;
// A token to retrieve next page of results.
// Pass to [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] to obtain that page.
// Pass to
// [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
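As a usage note for the list/filter/pagination fields above, this sketch lists Models with a filter from the documented grammar and lets the client pager follow `next_page_token`. It assumes the generated Python client; names and the filter value are placeholders.

```python
# Hypothetical listing with a filter; names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
pager = client.list_models(
    request=aiplatform_v1beta1.ListModelsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='display_name="my-custom-model"',
        page_size=50,
    )
)
for model in pager:  # pagination handled via next_page_token
    print(model.name, model.version_id)
```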
// Request message for [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions].
// Request message for
// [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions].
message ListModelVersionsRequest {
// Required. The name of the model to list versions for.
string name = 1 [
@ -350,8 +378,8 @@ message ListModelVersionsRequest {
// The standard list page token.
// Typically obtained via
// [ListModelVersionsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsResponse.next_page_token] of the previous
// [ModelService.ListModelversions][] call.
// [ListModelVersionsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsResponse.next_page_token]
// of the previous [ModelService.ListModelversions][] call.
string page_token = 3;
// An expression for filtering the results of the request. For field names
@ -369,9 +397,20 @@ message ListModelVersionsRequest {
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
// A comma-separated list of fields to order by, sorted in ascending order.
// Use "desc" after a field name for descending.
// Supported fields:
//
// * `create_time`
// * `update_time`
//
// Example: `update_time asc, create_time desc`.
string order_by = 6;
}
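The new `order_by` field added above can be exercised as in the sketch below: list the versions of one Model, newest first. This assumes a client generation that already includes the field (it is introduced in this change); the model resource name is a placeholder.

```python
# Hypothetical use of the new order_by field; names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
versions = client.list_model_versions(
    request=aiplatform_v1beta1.ListModelVersionsRequest(
        name="projects/my-project/locations/us-central1/models/123",
        order_by="create_time desc",  # supported fields: create_time, update_time
    )
)
for version in versions:
    print(version.name, version.version_id)
```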
// Response message for [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]
// Response message for
// [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]
message ListModelVersionsResponse {
// List of Model versions in the requested page.
// In the returned Model name field, the version ID (rather than the revision tag) will
@ -379,11 +418,14 @@ message ListModelVersionsResponse {
repeated Model models = 1;
// A token to retrieve the next page of results.
// Pass to [ListModelVersionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsRequest.page_token] to obtain that page.
// Pass to
// [ListModelVersionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel].
// Request message for
// [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel].
message UpdateModelRequest {
// Required. The Model which replaces the resource on the server.
// When Model Versioning is enabled, the model.name will be used to determine
@ -406,8 +448,10 @@ message UpdateModelRequest {
Model model = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource.
// For the `FieldMask` definition, see [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
// For the `FieldMask` definition, see
// [google.protobuf.FieldMask][google.protobuf.FieldMask].
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
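A brief sketch of `UpdateModel` with the required `update_mask`: only the fields named in the mask are written. It assumes the generated Python client; the model name and the updated description are placeholders.

```python
# Hypothetical partial update; names and values are placeholders.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
updated = client.update_model(
    request=aiplatform_v1beta1.UpdateModelRequest(
        model=aiplatform_v1beta1.Model(
            name="projects/my-project/locations/us-central1/models/123",
            description="Updated description",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    )
)
print(updated.description)
```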
// Request message for
@ -433,7 +477,8 @@ message UpdateExplanationDatasetOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel].
// Request message for
// [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel].
message DeleteModelRequest {
// Required. The name of the Model resource to be deleted.
// Format: `projects/{project}/locations/{location}/models/{model}`
@ -445,10 +490,11 @@ message DeleteModelRequest {
];
}
// Request message for [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion].
// Request message for
// [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion].
message DeleteModelVersionRequest {
// Required. The name of the model version to be deleted, with a version ID explicitly
// included.
// Required. The name of the model version to be deleted, with a version ID
// explicitly included.
//
// Example: `projects/{project}/locations/{location}/models/{model}@1234`
string name = 1 [
@ -459,7 +505,8 @@ message DeleteModelVersionRequest {
];
}
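Deleting a single version uses the versioned name format shown above (`...@<version_id>`). A minimal sketch, assuming the generated Python client; the resource name is a placeholder.

```python
# Hypothetical version deletion; the versioned name is a placeholder.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
operation = client.delete_model_version(
    name="projects/my-project/locations/us-central1/models/123@2"
)
operation.result()  # wait for the long-running delete to finish
```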
// Request message for [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases].
// Request message for
// [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases].
message MergeVersionAliasesRequest {
// Required. The name of the model version to merge aliases, with a version ID
// explicitly included.
@ -487,12 +534,14 @@ message MergeVersionAliasesRequest {
repeated string version_aliases = 2 [(google.api.field_behavior) = REQUIRED];
}
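For `MergeVersionAliases`, the sketch below points an alias at a specific version and reads back the resulting alias list. It is a hedged sketch assuming the generated Python client exposes this RPC as shown; names and the alias value are placeholders.

```python
# Hypothetical alias merge; names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
model = client.merge_version_aliases(
    request=aiplatform_v1beta1.MergeVersionAliasesRequest(
        name="projects/my-project/locations/us-central1/models/123@2",
        version_aliases=["default"],
    )
)
print(model.version_aliases)
```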
// Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel].
// Request message for
// [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel].
message ExportModelRequest {
// Output configuration for the Model export.
message OutputConfig {
// The ID of the format in which the Model must be exported. Each Model
// lists the [export formats it supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
// lists the [export formats it
// supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
// If no value is provided here, then the first from the list of the Model's
// supported formats is used by default.
string export_format_id = 1;
@ -528,39 +577,43 @@ message ExportModelRequest {
OutputConfig output_config = 2 [(google.api.field_behavior) = REQUIRED];
}
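An `ExportModel` sketch using the `OutputConfig` defined above: pick one of the Model's supported export formats and write the artifacts to Cloud Storage. This assumes the generated Python client; the format ID, bucket, and model name are placeholders (the valid IDs come from the Model's `supported_export_formats`).

```python
# Hypothetical export; format ID and destinations are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
operation = client.export_model(
    request=aiplatform_v1beta1.ExportModelRequest(
        name="projects/my-project/locations/us-central1/models/123",
        output_config=aiplatform_v1beta1.ExportModelRequest.OutputConfig(
            export_format_id="custom-trained",
            artifact_destination=aiplatform_v1beta1.GcsDestination(
                output_uri_prefix="gs://my-bucket/exports/"
            ),
        ),
    )
)
operation.result()  # OutputInfo in the operation metadata carries the created paths
```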
// Details of [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation.
// Details of
// [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
// operation.
message ExportModelOperationMetadata {
// Further describes the output of the ExportModel. Supplements
// [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig].
message OutputInfo {
// Output only. If the Model artifact is being exported to Google Cloud Storage this is
// the full path of the directory created, into which the Model files are
// being written to.
// Output only. If the Model artifact is being exported to Google Cloud
// Storage, this is the full path of the directory created, into which the
// Model files are written.
string artifact_output_uri = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. If the Model image is being exported to Google Container Registry or
// Artifact Registry this is the full path of the image created.
// Output only. If the Model image is being exported to Google Container
// Registry or Artifact Registry this is the full path of the image created.
string image_output_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// The common part of the operation metadata.
GenericOperationMetadata generic_metadata = 1;
// Output only. Information further describing the output of this Model export.
// Output only. Information further describing the output of this Model
// export.
OutputInfo output_info = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Response message of [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset] operation.
message UpdateExplanationDatasetResponse {
}
// Response message of [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation.
message ExportModelResponse {
// Response message of
// [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset]
// operation.
message UpdateExplanationDatasetResponse {}
}
// Response message of
// [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
// operation.
message ExportModelResponse {}
// Request message for [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation]
// Request message for
// [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation]
message ImportModelEvaluationRequest {
// Required. The name of the parent model resource.
// Format: `projects/{project}/locations/{location}/models/{model}`
@ -575,7 +628,8 @@ message ImportModelEvaluationRequest {
ModelEvaluation model_evaluation = 2 [(google.api.field_behavior) = REQUIRED];
}
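To import an externally generated evaluation as described above, the sketch below builds the `metrics` value (a `google.protobuf.Value`) from a plain dict and attaches it to a `ModelEvaluation`. It assumes the generated Python client; the schema URI, metric keys, and resource names are placeholders.

```python
# Hypothetical evaluation import; URIs, metrics, and names are placeholders.
from google.cloud import aiplatform_v1beta1
from google.protobuf import json_format, struct_pb2

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
metrics = json_format.ParseDict({"auPrc": 0.95}, struct_pb2.Value())
evaluation = client.import_model_evaluation(
    request=aiplatform_v1beta1.ImportModelEvaluationRequest(
        parent="projects/my-project/locations/us-central1/models/123",
        model_evaluation=aiplatform_v1beta1.ModelEvaluation(
            display_name="offline-eval",
            metrics_schema_uri="gs://my-bucket/schemata/classification_metrics.yaml",
            metrics=metrics,
        ),
    )
)
print(evaluation.name)
```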
// Request message for [ModelService.BatchImportModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices]
// Request message for
// [ModelService.BatchImportModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices]
message BatchImportModelEvaluationSlicesRequest {
// Required. The name of the parent ModelEvaluation resource.
// Format:
@ -588,16 +642,21 @@ message BatchImportModelEvaluationSlicesRequest {
];
// Required. Model evaluation slice resource to be imported.
repeated ModelEvaluationSlice model_evaluation_slices = 2 [(google.api.field_behavior) = REQUIRED];
repeated ModelEvaluationSlice model_evaluation_slices = 2
[(google.api.field_behavior) = REQUIRED];
}
// Response message for [ModelService.BatchImportModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices]
// Response message for
// [ModelService.BatchImportModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices]
message BatchImportModelEvaluationSlicesResponse {
// Output only. List of imported [ModelEvaluationSlice.name][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.name].
repeated string imported_model_evaluation_slices = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. List of imported
// [ModelEvaluationSlice.name][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.name].
repeated string imported_model_evaluation_slices = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Request message for [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation].
// Request message for
// [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation].
message GetModelEvaluationRequest {
// Required. The name of the ModelEvaluation resource.
// Format:
@ -610,7 +669,8 @@ message GetModelEvaluationRequest {
];
}
// Request message for [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
// Request message for
// [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
message ListModelEvaluationsRequest {
// Required. The resource name of the Model to list the ModelEvaluations from.
// Format: `projects/{project}/locations/{location}/models/{model}`
@ -629,25 +689,31 @@ message ListModelEvaluationsRequest {
// The standard list page token.
// Typically obtained via
// [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] of the previous
// [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] call.
// [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token]
// of the previous
// [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
// Response message for
// [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
message ListModelEvaluationsResponse {
// List of ModelEvaluations in the requested page.
repeated ModelEvaluation model_evaluations = 1;
// A token to retrieve next page of results.
// Pass to [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] to obtain that page.
// Pass to
// [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice].
// Request message for
// [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice].
message GetModelEvaluationSliceRequest {
// Required. The name of the ModelEvaluationSlice resource.
// Format:
@ -660,10 +726,11 @@ message GetModelEvaluationSliceRequest {
];
}
// Request message for [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
// Request message for
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
message ListModelEvaluationSlicesRequest {
// Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices
// from. Format:
// Required. The resource name of the ModelEvaluation to list the
// ModelEvaluationSlices from. Format:
// `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -682,21 +749,25 @@ message ListModelEvaluationSlicesRequest {
// The standard list page token.
// Typically obtained via
// [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] of the previous
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] call.
// [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token]
// of the previous
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
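The `slice.dimension = <dimension>` filter mentioned earlier for ModelEvaluationSlices can be used as in the sketch below. It assumes the generated Python client; the evaluation resource name and the dimension value are placeholders.

```python
# Hypothetical slice listing by dimension; names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()  # use a regional api_endpoint in practice
slices = client.list_model_evaluation_slices(
    request=aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
        parent=(
            "projects/my-project/locations/us-central1/"
            "models/123/evaluations/456"
        ),
        filter='slice.dimension = "annotationSpec"',
    )
)
for s in slices:
    print(s.slice.dimension, s.slice.value)
```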
// Response message for [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
// Response message for
// [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
message ListModelEvaluationSlicesResponse {
// List of ModelEvaluationSlices in the requested page.
repeated ModelEvaluationSlice model_evaluation_slices = 1;
// A token to retrieve next page of results.
// Pass to [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] to obtain that
// page.
// Pass to
// [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
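
The page_token / next_page_token comments above follow the usual list-pagination contract. A minimal, non-authoritative sketch of consuming every page, assuming the generated Python client shipped in the google-cloud-aiplatform package (aiplatform_v1beta1) and a placeholder evaluation resource name:

# Sketch only: paging through ModelEvaluationSlices. The parent resource
# name is a placeholder; the returned pager re-issues the request with
# page_token = next_page_token until all pages are exhausted.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()
request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
    parent=(
        "projects/my-project/locations/us-central1/"
        "models/123/evaluations/456"
    ),
)
for evaluation_slice in client.list_model_evaluation_slices(request=request):
    print(evaluation_slice.name)
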

@ -34,15 +34,18 @@ message GenericOperationMetadata {
// E.g. single files that couldn't be read.
// This field should never exceed 20 entries.
// Status details field will contain standard Google Cloud error details.
repeated google.rpc.Status partial_failures = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated google.rpc.Status partial_failures = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the operation was created.
google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the operation was updated for the last time.
// If the operation has finished (successfully or not), this is the finish
// time.
google.protobuf.Timestamp update_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Details of operations that perform deletes of any entities.

@ -62,16 +62,18 @@ message PipelineJob {
}
}
// Deprecated. Use [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] instead. The runtime
// parameters of the PipelineJob. The parameters will be passed into
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] to replace the placeholders at runtime.
// This field is used by pipelines built using
// `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as
// pipelines built using Kubeflow Pipelines SDK 1.8 or lower.
// Deprecated. Use
// [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values]
// instead. The runtime parameters of the PipelineJob. The parameters will
// be passed into
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
// to replace the placeholders at runtime. This field is used by pipelines
// built using `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower,
// such as pipelines built using Kubeflow Pipelines SDK 1.8 or lower.
map<string, Value> parameters = 1 [deprecated = true];
// Required. A path in a Cloud Storage bucket, which will be treated as the root
// output directory of the pipeline. It is used by the system to
// Required. A path in a Cloud Storage bucket, which will be treated as the
// root output directory of the pipeline. It is used by the system to
// generate the paths of output artifacts. The artifact paths are generated
// with a sub-path pattern `{job_id}/{task_id}/{output_key}` under the
// specified output directory. The service account specified in this
@ -80,10 +82,12 @@ message PipelineJob {
string gcs_output_directory = 2 [(google.api.field_behavior) = REQUIRED];
// The runtime parameters of the PipelineJob. The parameters will be
// passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] to replace the placeholders
// at runtime. This field is used by pipelines built using
// `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built
// using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL.
// passed into
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
// to replace the placeholders at runtime. This field is used by pipelines
// built using `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as
// pipelines built using Kubeflow Pipelines SDK 1.9 or higher and the v2
// DSL.
map<string, google.protobuf.Value> parameter_values = 3;
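
To make the RuntimeConfig comments above concrete, here is a minimal sketch of creating a PipelineJob with a gcs_output_directory root and parameter_values, assuming the generated aiplatform_v1beta1 Python client; the project, bucket, parameter names, and pipeline.json path are placeholders.

# Sketch only: PipelineJob with RuntimeConfig. parameter_values replaces
# placeholders in pipeline_spec at runtime (schema_version 2.1.0 pipelines);
# gcs_output_directory is the required artifact output root.
import json
from google.cloud import aiplatform_v1beta1

with open("pipeline.json") as f:  # assumed: a compiled pipeline spec
    compiled_spec = json.load(f)

client = aiplatform_v1beta1.PipelineServiceClient()
job = aiplatform_v1beta1.PipelineJob(
    display_name="demo-run",
    pipeline_spec=compiled_spec,
    runtime_config=aiplatform_v1beta1.PipelineJob.RuntimeConfig(
        gcs_output_directory="gs://my-bucket/pipeline-root",
        parameter_values={"learning_rate": 0.01, "epochs": 5},
    ),
)
created = client.create_pipeline_job(
    request=aiplatform_v1beta1.CreatePipelineJobRequest(
        parent="projects/my-project/locations/us-central1",
        pipeline_job=job,
        pipeline_job_id="demo-run-001",
    )
)
print(created.name)
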
// Represents the failure policy of a pipeline. Currently, the default of a
@ -108,16 +112,20 @@ message PipelineJob {
string display_name = 2;
// Output only. Pipeline creation time.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Pipeline start time.
google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Pipeline end time.
google.protobuf.Timestamp end_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp end_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this PipelineJob was most recently updated.
google.protobuf.Timestamp update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// The spec of the pipeline.
google.protobuf.Struct pipeline_spec = 7;
@ -172,27 +180,32 @@ message PipelineJob {
// resources being launched, if applied, such as Vertex AI
// Training or Dataflow job. If left unspecified, the workload is not peered
// with any network.
string network = 18 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];
string network = 18 [
(google.api.resource_reference) = { type: "compute.googleapis.com/Network" }
];
// A template uri from where the [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec], if empty, will
// be downloaded.
// A template uri from where the
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec],
// if empty, will be downloaded.
string template_uri = 19;
// Output only. Pipeline template metadata. Will fill up fields if
// [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] is from a supported template registry.
PipelineTemplateMetadata template_metadata = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
// [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri]
// is from a supported template registry.
PipelineTemplateMetadata template_metadata = 20
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Pipeline template metadata if [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] is from a supported
// template registry. Currently, the only supported registry is Artifact
// Registry.
// Pipeline template metadata if
// [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri]
// is from a supported template registry. Currently, the only supported registry
// is Artifact Registry.
message PipelineTemplateMetadata {
// The version_name in artifact registry.
//
// Will always be presented in output if the [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] is
// from a supported template registry.
// Will always be presented in output if the
// [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri]
// is from a supported template registry.
//
// Format is "sha256:abcdef123456...".
string version = 3;
@ -207,7 +220,8 @@ message PipelineJobDetail {
Context pipeline_run_context = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The runtime details of the tasks under the pipeline.
repeated PipelineTaskDetail task_details = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated PipelineTaskDetail task_details = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The runtime detail of a task execution.
@ -215,17 +229,17 @@ message PipelineTaskDetail {
// A single record of the task status.
message PipelineTaskStatus {
// Output only. Update time of this status.
google.protobuf.Timestamp update_time = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The state of the task.
State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The error that occurred during the state. May be set when the state is
// any of the non-final states (PENDING/RUNNING/CANCELLING) or the FAILED state.
// If the state is FAILED, the error here is final and not going to be
// retried.
// If the state is a non-final state, the error indicates a system-error
// being retried.
// Output only. The error that occurred during the state. May be set when
// the state is any of the non-final states (PENDING/RUNNING/CANCELLING) or
// FAILED state. If the state is FAILED, the error here is final and not
// going to be retried. If the state is a non-final state, the error
// indicates a system-error being retried.
google.rpc.Status error = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@ -266,15 +280,16 @@ message PipelineTaskDetail {
// Specifies that the task was not triggered because the task's trigger
// policy is not satisfied. The trigger policy is specified in the
// `condition` field of [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec].
// `condition` field of
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec].
NOT_TRIGGERED = 9;
}
// Output only. The system generated ID of the task.
int64 task_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The id of the parent task if the task is within a component scope.
// Empty if the task is at the root level.
// Output only. The id of the parent task if the task is within a component
// scope. Empty if the task is at the root level.
int64 parent_task_id = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The user specified name of the task that is defined in
@ -282,16 +297,20 @@ message PipelineTaskDetail {
string task_name = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Task create time.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Task start time.
google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Task end time.
google.protobuf.Timestamp end_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp end_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The detailed execution info.
PipelineTaskExecutorDetail executor_detail = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
PipelineTaskExecutorDetail executor_detail = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. State of the task.
State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
@ -303,15 +322,18 @@ message PipelineTaskDetail {
// Only populated when the task's state is FAILED or CANCELLED.
google.rpc.Status error = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A list of task status. This field keeps a record of task status evolving
// over time.
repeated PipelineTaskStatus pipeline_task_status = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A list of task status. This field keeps a record of task
// status evolving over time.
repeated PipelineTaskStatus pipeline_task_status = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The runtime input artifacts of the task.
map<string, ArtifactList> inputs = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
map<string, ArtifactList> inputs = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The runtime output artifacts of the task.
map<string, ArtifactList> outputs = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
map<string, ArtifactList> outputs = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The runtime detail of a pipeline executor.
@ -319,7 +341,9 @@ message PipelineTaskExecutorDetail {
// The detail of a container execution. It contains the job names of the
// lifecycle of a container execution.
message ContainerDetail {
// Output only. The name of the [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the main container execution.
// Output only. The name of the
// [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the main
// container execution.
string main_job = 1 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -327,10 +351,11 @@ message PipelineTaskExecutorDetail {
}
];
// Output only. The name of the [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the pre-caching-check container
// execution. This job will be available if the
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] specifies the `pre_caching_check` hook in
// the lifecycle events.
// Output only. The name of the
// [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the
// pre-caching-check container execution. This job will be available if the
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
// specifies the `pre_caching_check` hook in the lifecycle events.
string pre_caching_check_job = 2 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -338,23 +363,29 @@ message PipelineTaskExecutorDetail {
}
];
// Output only. The names of the previously failed [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the main container
// executions. The list includes all attempts in chronological order.
repeated string failed_main_jobs = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The names of the previously failed
// [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the main
// container executions. The list includes all attempts in chronological
// order.
repeated string failed_main_jobs = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The names of the previously failed [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the
// Output only. The names of the previously failed
// [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for the
// pre-caching-check container executions. This job will be available if the
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] specifies the `pre_caching_check` hook in
// the lifecycle events.
// The list includes all attempts in chronological order.
repeated string failed_pre_caching_check_jobs = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
// specifies the `pre_caching_check` hook in the lifecycle events. The list
// includes all attempts in chronological order.
repeated string failed_pre_caching_check_jobs = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The detailed info for a custom job executor.
message CustomJobDetail {
option deprecated = true;
// Output only. The name of the [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob].
// Output only. The name of the
// [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob].
string job = 1 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -365,12 +396,11 @@ message PipelineTaskExecutorDetail {
oneof details {
// Output only. The detailed info for a container executor.
ContainerDetail container_detail = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
ContainerDetail container_detail = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The detailed info for a custom job executor.
CustomJobDetail custom_job_detail = 2 [
deprecated = true,
(google.api.field_behavior) = OUTPUT_ONLY
];
CustomJobDetail custom_job_detail = 2
[deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
}
}

@ -39,11 +39,13 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// `PipelineJob` resources (used for Vertex AI Pipelines).
service PipelineService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a TrainingPipeline. A newly created TrainingPipeline will be
// attempted to run right away.
rpc CreateTrainingPipeline(CreateTrainingPipelineRequest) returns (TrainingPipeline) {
rpc CreateTrainingPipeline(CreateTrainingPipelineRequest)
returns (TrainingPipeline) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
body: "training_pipeline"
@ -52,7 +54,8 @@ service PipelineService {
}
// Gets a TrainingPipeline.
rpc GetTrainingPipeline(GetTrainingPipelineRequest) returns (TrainingPipeline) {
rpc GetTrainingPipeline(GetTrainingPipelineRequest)
returns (TrainingPipeline) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
};
@ -60,7 +63,8 @@ service PipelineService {
}
// Lists TrainingPipelines in a Location.
rpc ListTrainingPipelines(ListTrainingPipelinesRequest) returns (ListTrainingPipelinesResponse) {
rpc ListTrainingPipelines(ListTrainingPipelinesRequest)
returns (ListTrainingPipelinesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
};
@ -68,7 +72,8 @@ service PipelineService {
}
// Deletes a TrainingPipeline.
rpc DeleteTrainingPipeline(DeleteTrainingPipelineRequest) returns (google.longrunning.Operation) {
rpc DeleteTrainingPipeline(DeleteTrainingPipelineRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
};
@ -82,14 +87,19 @@ service PipelineService {
// Cancels a TrainingPipeline.
// Starts asynchronous cancellation on the TrainingPipeline. The server
// makes a best effort to cancel the pipeline, but success is not
// guaranteed. Clients can use [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or
// other methods to check whether the cancellation succeeded or whether the
// guaranteed. Clients can use
// [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]
// or other methods to check whether the cancellation succeeded or whether the
// pipeline completed despite cancellation. On successful cancellation,
// the TrainingPipeline is not deleted; instead it becomes a pipeline with
// a [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to
// `CANCELLED`.
rpc CancelTrainingPipeline(CancelTrainingPipelineRequest) returns (google.protobuf.Empty) {
// a
// [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error]
// value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and
// [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state]
// is set to `CANCELLED`.
rpc CancelTrainingPipeline(CancelTrainingPipelineRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}:cancel"
body: "*"
@ -103,7 +113,8 @@ service PipelineService {
post: "/v1beta1/{parent=projects/*/locations/*}/pipelineJobs"
body: "pipeline_job"
};
option (google.api.method_signature) = "parent,pipeline_job,pipeline_job_id";
option (google.api.method_signature) =
"parent,pipeline_job,pipeline_job_id";
}
// Gets a PipelineJob.
@ -115,7 +126,8 @@ service PipelineService {
}
// Lists PipelineJobs in a Location.
rpc ListPipelineJobs(ListPipelineJobsRequest) returns (ListPipelineJobsResponse) {
rpc ListPipelineJobs(ListPipelineJobsRequest)
returns (ListPipelineJobsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/pipelineJobs"
};
@ -123,7 +135,8 @@ service PipelineService {
}
// Deletes a PipelineJob.
rpc DeletePipelineJob(DeletePipelineJobRequest) returns (google.longrunning.Operation) {
rpc DeletePipelineJob(DeletePipelineJobRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}"
};
@ -137,14 +150,18 @@ service PipelineService {
// Cancels a PipelineJob.
// Starts asynchronous cancellation on the PipelineJob. The server
// makes a best effort to cancel the pipeline, but success is not
// guaranteed. Clients can use [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] or
// other methods to check whether the cancellation succeeded or whether the
// guaranteed. Clients can use
// [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]
// or other methods to check whether the cancellation succeeded or whether the
// pipeline completed despite cancellation. On successful cancellation,
// the PipelineJob is not deleted; instead it becomes a pipeline with
// a [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] is set to
// `CANCELLED`.
rpc CancelPipelineJob(CancelPipelineJobRequest) returns (google.protobuf.Empty) {
// a [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error]
// value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`, and
// [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] is
// set to `CANCELLED`.
rpc CancelPipelineJob(CancelPipelineJobRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}:cancel"
body: "*"
@ -153,10 +170,11 @@ service PipelineService {
}
}
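
The cancellation semantics documented above (a best-effort CancelPipelineJob, then GetPipelineJob until the job reaches a terminal state such as CANCELLED) can be sketched as a small polling loop. This assumes the generated aiplatform_v1beta1 Python client and a placeholder job name.

# Sketch only: best-effort cancellation followed by polling, mirroring the
# CancelPipelineJob documentation above.
import time
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PipelineServiceClient()
name = "projects/my-project/locations/us-central1/pipelineJobs/demo-run-001"

client.cancel_pipeline_job(
    request=aiplatform_v1beta1.CancelPipelineJobRequest(name=name)
)

terminal_states = {
    aiplatform_v1beta1.PipelineState.PIPELINE_STATE_SUCCEEDED,
    aiplatform_v1beta1.PipelineState.PIPELINE_STATE_FAILED,
    aiplatform_v1beta1.PipelineState.PIPELINE_STATE_CANCELLED,
}
while True:
    job = client.get_pipeline_job(
        request=aiplatform_v1beta1.GetPipelineJobRequest(name=name)
    )
    if job.state in terminal_states:
        # On successful cancellation, state is CANCELLED and error.code is 1.
        print("final state:", job.state, "error code:", job.error.code)
        break
    time.sleep(10)
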
// Request message for [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline].
// Request message for
// [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline].
message CreateTrainingPipelineRequest {
// Required. The resource name of the Location to create the TrainingPipeline in.
// Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location to create the TrainingPipeline
// in. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -165,10 +183,12 @@ message CreateTrainingPipelineRequest {
];
// Required. The TrainingPipeline to create.
TrainingPipeline training_pipeline = 2 [(google.api.field_behavior) = REQUIRED];
TrainingPipeline training_pipeline = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline].
// Request message for
// [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline].
message GetTrainingPipelineRequest {
// Required. The name of the TrainingPipeline resource.
// Format:
@ -181,10 +201,11 @@ message GetTrainingPipelineRequest {
];
}
// Request message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines].
// Request message for
// [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines].
message ListTrainingPipelinesRequest {
// Required. The resource name of the Location to list the TrainingPipelines from.
// Format: `projects/{project}/locations/{location}`
// Required. The resource name of the Location to list the TrainingPipelines
// from. Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -219,25 +240,31 @@ message ListTrainingPipelinesRequest {
// The standard list page token.
// Typically obtained via
// [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] of the previous
// [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] call.
// [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token]
// of the previous
// [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]
// call.
string page_token = 4;
// Mask specifying which fields to read.
google.protobuf.FieldMask read_mask = 5;
}
// Response message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]
// Response message for
// [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]
message ListTrainingPipelinesResponse {
// List of TrainingPipelines in the requested page.
repeated TrainingPipeline training_pipelines = 1;
// A token to retrieve the next page of results.
// Pass to [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] to obtain that page.
// Pass to
// [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline].
// Request message for
// [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline].
message DeleteTrainingPipelineRequest {
// Required. The name of the TrainingPipeline resource to be deleted.
// Format:
@ -250,7 +277,8 @@ message DeleteTrainingPipelineRequest {
];
}
// Request message for [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline].
// Request message for
// [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline].
message CancelTrainingPipelineRequest {
// Required. The name of the TrainingPipeline to cancel.
// Format:
@ -263,7 +291,8 @@ message CancelTrainingPipelineRequest {
];
}
// Request message for [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob].
// Request message for
// [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob].
message CreatePipelineJobRequest {
// Required. The resource name of the Location to create the PipelineJob in.
// Format: `projects/{project}/locations/{location}`
@ -286,7 +315,8 @@ message CreatePipelineJobRequest {
string pipeline_job_id = 3;
}
// Request message for [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob].
// Request message for
// [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob].
message GetPipelineJobRequest {
// Required. The name of the PipelineJob resource.
// Format:
@ -299,7 +329,8 @@ message GetPipelineJobRequest {
];
}
// Request message for [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs].
// Request message for
// [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs].
message ListPipelineJobsRequest {
// Required. The resource name of the Location to list the PipelineJobs from.
// Format: `projects/{project}/locations/{location}`
@ -351,8 +382,10 @@ message ListPipelineJobsRequest {
// The standard list page token.
// Typically obtained via
// [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] of the previous
// [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] call.
// [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token]
// of the previous
// [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]
// call.
string page_token = 4;
// A comma-separated list of fields to order by. The default sort order is in
@ -374,17 +407,21 @@ message ListPipelineJobsRequest {
google.protobuf.FieldMask read_mask = 7;
}
// Response message for [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]
// Response message for
// [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]
message ListPipelineJobsResponse {
// List of PipelineJobs in the requested page.
repeated PipelineJob pipeline_jobs = 1;
// A token to retrieve the next page of results.
// Pass to [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] to obtain that page.
// Pass to
// [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token]
// to obtain that page.
string next_page_token = 2;
}
// Request message for [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob].
// Request message for
// [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob].
message DeletePipelineJobRequest {
// Required. The name of the PipelineJob resource to be deleted.
// Format:
@ -397,7 +434,8 @@ message DeletePipelineJobRequest {
];
}
// Request message for [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob].
// Request message for
// [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob].
message CancelPipelineJobRequest {
// Required. The name of the PipelineJob to cancel.
// Format:

@ -35,7 +35,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for online predictions and explanations.
service PredictionService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Perform an online prediction.
rpc Predict(PredictRequest) returns (PredictResponse) {
@ -50,11 +51,13 @@ service PredictionService {
//
// The response includes the following HTTP headers:
//
// * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
// * `X-Vertex-AI-Endpoint-Id`: ID of the
// [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
// prediction.
//
// * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
// that served this prediction.
// * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
// [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served
// this prediction.
rpc RawPredict(RawPredictRequest) returns (google.api.HttpBody) {
option (google.api.http) = {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:rawPredict"
@ -65,10 +68,12 @@ service PredictionService {
// Perform an online explanation.
//
// If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
// the corresponding DeployModel must have
// If
// [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
// is specified, the corresponding DeployModel must have
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
// populated. If
// [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
// is not specified, all DeployedModels must have
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// populated. Only deployed AutoML tabular Models have
@ -78,11 +83,13 @@ service PredictionService {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:explain"
body: "*"
};
option (google.api.method_signature) = "endpoint,instances,parameters,deployed_model_id";
option (google.api.method_signature) =
"endpoint,instances,parameters,deployed_model_id";
}
}
// Request message for [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
// Request message for
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
message PredictRequest {
// Required. The name of the Endpoint requested to serve the prediction.
// Format:
@ -100,23 +107,28 @@ message PredictRequest {
// in case of AutoML Models, or, in case of customer created Models, the
// behaviour is as documented by that Model.
// The schema of any single instance may be specified via Endpoint's
// DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// DeployedModels'
// [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
repeated google.protobuf.Value instances = 2 [(google.api.field_behavior) = REQUIRED];
repeated google.protobuf.Value instances = 2
[(google.api.field_behavior) = REQUIRED];
// The parameters that govern the prediction. The schema of the parameters may
// be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// be specified via Endpoint's DeployedModels' [Model's
// ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
google.protobuf.Value parameters = 3;
}
// Response message for [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
// Response message for
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
message PredictResponse {
// The predictions that are the output of the predictions call.
// The schema of any single prediction may be specified via Endpoint's
// DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// DeployedModels' [Model's
// ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri].
repeated google.protobuf.Value predictions = 1;
@ -124,8 +136,8 @@ message PredictResponse {
// ID of the Endpoint's DeployedModel that served this prediction.
string deployed_model_id = 2;
// Output only. The resource name of the Model which is deployed as the DeployedModel that
// this prediction hits.
// Output only. The resource name of the Model which is deployed as the
// DeployedModel that this prediction hits.
string model = 3 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -133,16 +145,18 @@ message PredictResponse {
}
];
// Output only. The version ID of the Model which is deployed as the DeployedModel that
// this prediction hits.
// Output only. The version ID of the Model which is deployed as the
// DeployedModel that this prediction hits.
string model_version_id = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of the Model which is deployed as
// the DeployedModel that this prediction hits.
// Output only. The [display
// name][google.cloud.aiplatform.v1beta1.Model.display_name] of the Model
// which is deployed as the DeployedModel that this prediction hits.
string model_display_name = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
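
Putting the PredictRequest and PredictResponse fields above together, a minimal sketch with the generated aiplatform_v1beta1 Python client; the regional endpoint, resource name, and instance payload are placeholders and must match the deployed Model's schemata.

# Sketch only: online prediction. Instances/parameters are converted to
# google.protobuf.Value; their shape must follow the Model's schemata.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
response = client.predict(
    request=aiplatform_v1beta1.PredictRequest(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",
        instances=[{"feature_a": 1.0, "feature_b": "x"}],
        parameters={"confidence_threshold": 0.5},
    )
)
# Response metadata identifies which DeployedModel / Model version served it.
print(response.deployed_model_id, response.model, response.model_version_id)
for prediction in response.predictions:
    print(prediction)
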
// Request message for [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict].
// Request message for
// [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict].
message RawPredictRequest {
// Required. The name of the Endpoint requested to serve the prediction.
// Format:
@ -156,21 +170,24 @@ message RawPredictRequest {
// The prediction input. Supports HTTP headers and arbitrary data payload.
//
// A [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] may have an upper limit on the number of instances it
// supports per request. When this limit is exceeded for an AutoML model,
// the [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] method returns an error.
// When this limit is exceeded for a custom-trained model, the behavior varies
// depending on the model.
// A [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] may have
// an upper limit on the number of instances it supports per request. When
// this limit is exceeded for an AutoML model, the
// [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]
// method returns an error. When this limit is exceeded for a custom-trained
// model, the behavior varies depending on the model.
//
// You can specify the schema for each instance in the
// [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// field when you create a [Model][google.cloud.aiplatform.v1beta1.Model]. This schema applies when you deploy the
// `Model` as a `DeployedModel` to an [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and use the `RawPredict`
// method.
// field when you create a [Model][google.cloud.aiplatform.v1beta1.Model].
// This schema applies when you deploy the `Model` as a `DeployedModel` to an
// [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and use the
// `RawPredict` method.
google.api.HttpBody http_body = 2;
}
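
For RawPredict, the payload travels as a google.api.HttpBody rather than typed instances. A non-authoritative sketch, again assuming the generated aiplatform_v1beta1 Python client and placeholder names:

# Sketch only: RawPredict sends an arbitrary payload via google.api.HttpBody.
# Per the comments above, the HTTP response also carries the
# X-Vertex-AI-Endpoint-Id and X-Vertex-AI-Deployed-Model-Id headers.
import json
from google.api import httpbody_pb2
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
body = httpbody_pb2.HttpBody(
    content_type="application/json",
    data=json.dumps({"instances": [{"feature_a": 1.0}]}).encode("utf-8"),
)
response = client.raw_predict(
    request=aiplatform_v1beta1.RawPredictRequest(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",
        http_body=body,
    )
)
print(response.content_type, response.data)
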
// Request message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
// Request message for
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
message ExplainRequest {
// Required. The name of the Endpoint requested to serve the explanation.
// Format:
@ -188,21 +205,24 @@ message ExplainRequest {
// in case of AutoML Models, or, in case of customer created Models, the
// behaviour is as documented by that Model.
// The schema of any single instance may be specified via Endpoint's
// DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// DeployedModels'
// [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
repeated google.protobuf.Value instances = 2 [(google.api.field_behavior) = REQUIRED];
repeated google.protobuf.Value instances = 2
[(google.api.field_behavior) = REQUIRED];
// The parameters that govern the prediction. The schema of the parameters may
// be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// be specified via Endpoint's DeployedModels' [Model's
// ][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
google.protobuf.Value parameters = 4;
// If specified, overrides the
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of the DeployedModel.
// Can be used for explaining prediction results with different
// configurations, such as:
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// of the DeployedModel. Can be used for explaining prediction results with
// different configurations, such as:
// - Explaining top-5 predictions results as opposed to top-1;
// - Increasing path count or step count of the attribution methods to reduce
// approximate errors;
@ -210,22 +230,27 @@ message ExplainRequest {
ExplanationSpecOverride explanation_spec_override = 5;
// If specified, this ExplainRequest will be served by the chosen
// DeployedModel, overriding [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split].
// DeployedModel, overriding
// [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split].
string deployed_model_id = 3;
}
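
A sketch of the deployed_model_id override described above, which pins the explanation to one DeployedModel instead of following Endpoint.traffic_split (generated aiplatform_v1beta1 Python client assumed; names and IDs are placeholders):

# Sketch only: request explanations from a specific DeployedModel by setting
# deployed_model_id, overriding the Endpoint's traffic_split.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
response = client.explain(
    request=aiplatform_v1beta1.ExplainRequest(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",
        instances=[{"feature_a": 1.0, "feature_b": "x"}],
        deployed_model_id="4567890123456789012",  # placeholder DeployedModel ID
    )
)
for explanation in response.explanations:
    print(explanation.attributions)
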
// Response message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
// Response message for
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
message ExplainResponse {
// The explanations of the Model's [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
// The explanations of the Model's
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
//
// It has the same number of elements as [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
// to be explained.
// It has the same number of elements as
// [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] to be
// explained.
repeated Explanation explanations = 1;
// ID of the Endpoint's DeployedModel that served this explanation.
string deployed_model_id = 2;
// The predictions that are the output of the predictions call.
// Same as [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
// Same as
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
repeated google.protobuf.Value predictions = 3;
}

@ -49,10 +49,12 @@ message SavedQuery {
google.protobuf.Value metadata = 12;
// Output only. Timestamp when this SavedQuery was created.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when SavedQuery was last updated.
google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Filters on the Annotations in the dataset.
string annotation_filter = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
@ -79,7 +81,7 @@ message SavedQuery {
// "overwrite" update happens.
string etag = 8;
// Output only. If the Annotations belonging to the SavedQuery can be used for AutoML
// training.
// Output only. If the Annotations belonging to the SavedQuery can be used for
// AutoML training.
bool support_automl_training = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -49,13 +49,15 @@ message SpecialistPool {
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Output only. The number of managers in this SpecialistPool.
int32 specialist_managers_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
int32 specialist_managers_count = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// The email addresses of the managers in the SpecialistPool.
repeated string specialist_manager_emails = 4;
// Output only. The resource name of the pending data labeling jobs.
repeated string pending_data_labeling_jobs = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated string pending_data_labeling_jobs = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// The email addresses of workers in the SpecialistPool.
repeated string specialist_worker_emails = 7;

@ -41,10 +41,12 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// CrowdCompute console.
service SpecialistPoolService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a SpecialistPool.
rpc CreateSpecialistPool(CreateSpecialistPoolRequest) returns (google.longrunning.Operation) {
rpc CreateSpecialistPool(CreateSpecialistPoolRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/specialistPools"
body: "specialist_pool"
@ -65,7 +67,8 @@ service SpecialistPoolService {
}
// Lists SpecialistPools in a Location.
rpc ListSpecialistPools(ListSpecialistPoolsRequest) returns (ListSpecialistPoolsResponse) {
rpc ListSpecialistPools(ListSpecialistPoolsRequest)
returns (ListSpecialistPoolsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/specialistPools"
};
@ -73,7 +76,8 @@ service SpecialistPoolService {
}
// Deletes a SpecialistPool as well as all Specialists in the pool.
rpc DeleteSpecialistPool(DeleteSpecialistPoolRequest) returns (google.longrunning.Operation) {
rpc DeleteSpecialistPool(DeleteSpecialistPoolRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/specialistPools/*}"
};
@ -85,7 +89,8 @@ service SpecialistPoolService {
}
// Updates a SpecialistPool.
rpc UpdateSpecialistPool(UpdateSpecialistPoolRequest) returns (google.longrunning.Operation) {
rpc UpdateSpecialistPool(UpdateSpecialistPoolRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1beta1/{specialist_pool.name=projects/*/locations/*/specialistPools/*}"
body: "specialist_pool"
@ -98,7 +103,8 @@ service SpecialistPoolService {
}
}
// Request message for [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
// Request message for
// [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
message CreateSpecialistPoolRequest {
// Required. The parent Project name for the new SpecialistPool.
// The form is `projects/{project}/locations/{location}`.
@ -120,7 +126,8 @@ message CreateSpecialistPoolOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
// Request message for
// [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
message GetSpecialistPoolRequest {
// Required. The name of the SpecialistPool resource.
// The form is
@ -133,7 +140,8 @@ message GetSpecialistPoolRequest {
];
}
// Request message for [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
// Request message for
// [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
message ListSpecialistPoolsRequest {
// Required. The name of the SpecialistPool's parent resource.
// Format: `projects/{project}/locations/{location}`
@ -148,16 +156,19 @@ message ListSpecialistPoolsRequest {
int32 page_size = 2;
// The standard list page token.
// Typically obtained by [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] of
// the previous [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] call. Return
// first page if empty.
// Typically obtained by
// [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token]
// of the previous
// [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]
// call. Return first page if empty.
string page_token = 3;
// Mask specifying which fields to read. FieldMask represents a set of
google.protobuf.FieldMask read_mask = 4;
}
// Response message for [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
// Response message for
// [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
message ListSpecialistPoolsResponse {
// A list of SpecialistPools that match the specified filter in the request.
repeated SpecialistPool specialist_pools = 1;
@ -166,7 +177,8 @@ message ListSpecialistPoolsResponse {
string next_page_token = 2;
}
// Request message for [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
// Request message for
// [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
message DeleteSpecialistPoolRequest {
// Required. The resource name of the SpecialistPool to delete. Format:
// `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`
@ -183,20 +195,22 @@ message DeleteSpecialistPoolRequest {
bool force = 2;
}
// Request message for [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
// Request message for
// [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
message UpdateSpecialistPoolRequest {
// Required. The SpecialistPool which replaces the resource on the server.
SpecialistPool specialist_pool = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The update mask applies to the resource.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
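
A sketch of the update_mask pattern for UpdateSpecialistPool: only the fields named in the mask are written, and the returned long-running operation resolves to the updated pool (generated aiplatform_v1beta1 Python client assumed; names are placeholders).

# Sketch only: partial update of a SpecialistPool via update_mask.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.SpecialistPoolServiceClient()
pool = aiplatform_v1beta1.SpecialistPool(
    name="projects/my-project/locations/us-central1/specialistPools/my-pool",
    display_name="Relabeled pool",
)
operation = client.update_specialist_pool(
    request=aiplatform_v1beta1.UpdateSpecialistPoolRequest(
        specialist_pool=pool,
        # Only the paths listed here are applied to the server resource.
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
)
updated = operation.result()  # blocks until the long-running operation completes
print(updated.name)
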
// Runtime operation metadata for
// [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
message UpdateSpecialistPoolOperationMetadata {
// Output only. The name of the SpecialistPool to which the specialists are being added.
// Format:
// Output only. The name of the SpecialistPool to which the specialists are
// being added. Format:
// `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`
string specialist_pool = 1 [
(google.api.field_behavior) = OUTPUT_ONLY,

@ -67,7 +67,8 @@ message Study {
State state = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time at which the study was created.
google.protobuf.Timestamp create_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A human readable reason why the Study is inactive.
// This should be empty if a study is ACTIVE or COMPLETED.
@ -86,7 +87,8 @@ message Trial {
// A message representing a parameter to be tuned.
message Parameter {
// Output only. The ID of the parameter. The parameter should be defined in
// [StudySpec's Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters].
// [StudySpec's
// Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters].
string parameter_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The value of the parameter.
@ -139,17 +141,21 @@ message Trial {
// Output only. A list of measurements that are strictly lexicographically
// ordered by their induced tuples (steps, elapsed_duration).
// These are used for early stopping computations.
repeated Measurement measurements = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated Measurement measurements = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the Trial was started.
google.protobuf.Timestamp start_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`.
google.protobuf.Timestamp end_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the Trial's status changed to `SUCCEEDED` or
// `INFEASIBLE`.
google.protobuf.Timestamp end_time = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The identifier of the client that originally requested this Trial.
// Each client is identified by a unique client_id. When a client
// asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client
// Output only. The identifier of the client that originally requested this
// Trial. Each client is identified by a unique client_id. When a client asks
// for a suggestion, Vertex AI Vizier will assign it a Trial. The client
// should evaluate the Trial, complete it, and report back to Vertex AI
// Vizier. If a suggestion is asked again by the same client_id before the Trial is
// completed, the same Trial will be returned. Multiple clients with
@ -173,9 +179,11 @@ message Trial {
// Output only. URIs for accessing [interactive
// shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell)
// (one URI for each training node). Only available if this trial is part of
// a [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob] and the job's
// [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] field
// is `true`.
// a
// [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob]
// and the job's
// [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access]
// field is `true`.
//
// The keys are names of each node used for the trial; for example,
// `workerpool0-0` for the primary node, `workerpool1-0` for the first node in
@ -183,7 +191,8 @@ message Trial {
// second worker pool.
//
// The values are the URIs for each node's interactive shell.
map<string, string> web_access_uris = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
map<string, string> web_access_uris = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Represents specification of a Study.
@ -202,8 +211,8 @@ message StudySpec {
MINIMIZE = 2;
}
// Required. The ID of the metric. Must not contain whitespaces and must be unique
// amongst all MetricSpecs.
// Required. The ID of the metric. Must not contain whitespaces and must be
// unique amongst all MetricSpecs.
string metric_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The optimization goal of the metric.
@ -298,8 +307,8 @@ message StudySpec {
// Represents the spec to match categorical values from parent parameter.
message CategoricalValueCondition {
// Required. Matches values of the parent parameter of 'CATEGORICAL' type.
// All values must exist in `categorical_value_spec` of parent
// Required. Matches values of the parent parameter of 'CATEGORICAL'
// type. All values must exist in `categorical_value_spec` of parent
// parameter.
repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
}
@ -357,8 +366,8 @@ message StudySpec {
DiscreteValueSpec discrete_value_spec = 5;
}
// Required. The ID of the parameter. Must not contain whitespaces and must be unique
// amongst all ParameterSpecs.
// Required. The ID of the parameter. Must not contain whitespaces and must
// be unique amongst all ParameterSpecs.
string parameter_id = 1 [(google.api.field_behavior) = REQUIRED];
// How the parameter should be scaled.
@ -379,9 +388,11 @@ message StudySpec {
// Trial. Early stopping is requested for the current Trial if there is very
// low probability to exceed the optimal value found so far.
message DecayCurveAutomatedStoppingSpec {
// True if [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration] is used as the x-axis of each
// Trials Decay Curve. Otherwise, [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count] will be used
// as the x-axis.
// True if
// [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]
// is used as the x-axis of each Trials Decay Curve. Otherwise,
// [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count]
// will be used as the x-axis.
bool use_elapsed_duration = 1;
}
@ -392,9 +403,10 @@ message StudySpec {
// values reported by the Trial in each measurement.
message MedianAutomatedStoppingSpec {
// True if median automated stopping rule applies on
// [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]. It means that elapsed_duration
// field of latest measurement of current Trial is used to compute median
// objective value for each completed Trials.
// [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration].
// This means that the elapsed_duration field of the latest measurement of
// the current Trial is used to compute the median objective value for each
// completed Trial.
bool use_elapsed_duration = 1;
}
@ -444,6 +456,15 @@ message StudySpec {
// min_num_steps are overloaded to contain max_elapsed_seconds and
// min_elapsed_seconds.
bool use_elapsed_duration = 5;
// ConvexAutomatedStoppingSpec by default only updates the trials that need
// to be early stopped using a newly trained auto-regressive model. When
// this flag is set to True, all stopped trials from the beginning are
// potentially updated in terms of their `final_measurement`. Also, note
// that the training logic of autoregressive models is different in this
// case. Enabling this option has shown better results and this may be the
// default option in the future.
optional bool update_all_stopped_trials = 6;
}
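A hedged sketch of setting the new flag from Python, assuming the generated google.cloud.aiplatform_v1beta1 package exposes ConvexAutomatedStoppingSpec nested under StudySpec as in this proto; wiring the spec into a full StudySpec and HyperparameterTuningJob is omitted:

from google.cloud import aiplatform_v1beta1 as aip

# Only the two flags shown above are set; other stopping-spec fields keep defaults.
stopping_spec = aip.StudySpec.ConvexAutomatedStoppingSpec(
    use_elapsed_duration=False,       # keep step_count as the x-axis of the decay curve
    update_all_stopped_trials=True,   # allow final_measurement of already-stopped trials to be revised
)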
// Configuration for ConvexStopPolicy.
@ -495,7 +516,8 @@ message StudySpec {
bool disable_transfer_learning = 1;
// Output only. Names of previously completed studies
repeated string prior_study_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated string prior_study_names = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The available search algorithms for the Study.
@ -574,7 +596,8 @@ message StudySpec {
repeated MetricSpec metrics = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The set of parameters to tune.
repeated ParameterSpec parameters = 2 [(google.api.field_behavior) = REQUIRED];
repeated ParameterSpec parameters = 2
[(google.api.field_behavior) = REQUIRED];
// The search algorithm specified for the Study.
Algorithm algorithm = 3;
@ -606,14 +629,16 @@ message Measurement {
double value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Output only. Time that the Trial has been running at the point of this Measurement.
google.protobuf.Duration elapsed_duration = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time that the Trial has been running at the point of this
// Measurement.
google.protobuf.Duration elapsed_duration = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The number of steps the machine learning model has been trained for.
// Must be non-negative.
// Output only. The number of steps the machine learning model has been
// trained for. Must be non-negative.
int64 step_count = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A list of metrics got by evaluating the objective functions using suggested
// Parameter values.
// Output only. A list of metrics got by evaluating the objective functions
// using suggested Parameter values.
repeated Metric metrics = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -54,18 +54,21 @@ message Tensorboard {
// this key.
EncryptionSpec encryption_spec = 11;
// Output only. Consumer project Cloud Storage path prefix used to store blob data, which
// can either be a bucket or directory. Does not end with a '/'.
string blob_storage_path_prefix = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Consumer project Cloud Storage path prefix used to store blob
// data, which can either be a bucket or directory. Does not end with a '/'.
string blob_storage_path_prefix = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The number of Runs stored in this Tensorboard.
int32 run_count = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Tensorboard was created.
google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Tensorboard was last updated.
google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your Tensorboards.
//

@ -30,19 +30,21 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// All the data stored in a TensorboardTimeSeries.
message TimeSeriesData {
// Required. The ID of the TensorboardTimeSeries, which will become the final component
// of the TensorboardTimeSeries' resource name
string tensorboard_time_series_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The ID of the TensorboardTimeSeries, which will become the final
// component of the TensorboardTimeSeries' resource name
string tensorboard_time_series_id = 1
[(google.api.field_behavior) = REQUIRED];
// Required. Immutable. The value type of this time series. All the values in this time series data
// must match this value type.
// Required. Immutable. The value type of this time series. All the values in
// this time series data must match this value type.
TensorboardTimeSeries.ValueType value_type = 2 [
(google.api.field_behavior) = REQUIRED,
(google.api.field_behavior) = IMMUTABLE
];
// Required. Data points in this time series.
repeated TimeSeriesDataPoint values = 3 [(google.api.field_behavior) = REQUIRED];
repeated TimeSeriesDataPoint values = 3
[(google.api.field_behavior) = REQUIRED];
}
// A TensorboardTimeSeries data point.
@ -78,7 +80,8 @@ message TensorboardTensor {
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto
bytes value = 1 [(google.api.field_behavior) = REQUIRED];
// Optional. Version number of TensorProto used to serialize [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value].
// Optional. Version number of TensorProto used to serialize
// [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value].
int32 version_number = 2 [(google.api.field_behavior) = OPTIONAL];
}
@ -91,8 +94,8 @@ message TensorboardBlobSequence {
// One blob (e.g, image, graph) viewable on a blob metric plot.
message TensorboardBlob {
// Output only. A URI safe key uniquely identifying a blob. Can be used to locate the blob
// stored in the Cloud Storage bucket of the consumer project.
// Output only. A URI safe key uniquely identifying a blob. Can be used to
// locate the blob stored in the Cloud Storage bucket of the consumer project.
string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The bytes of the blob is not present unless it's returned by the

@ -48,10 +48,12 @@ message TensorboardExperiment {
string description = 3;
// Output only. Timestamp when this TensorboardExperiment was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this TensorboardExperiment was last updated.
google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your Datasets.
//
@ -73,6 +75,7 @@ message TensorboardExperiment {
// "overwrite" update happens.
string etag = 7;
// Immutable. Source of the TensorboardExperiment. Example: a custom training job.
// Immutable. Source of the TensorboardExperiment. Example: a custom training
// job.
string source = 8 [(google.api.field_behavior) = IMMUTABLE];
}

@ -50,10 +50,12 @@ message TensorboardRun {
string description = 3;
// Output only. Timestamp when this TensorboardRun was created.
google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this TensorboardRun was last updated.
google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your TensorboardRuns.
//

@ -40,10 +40,12 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// TensorboardService
service TensorboardService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a Tensorboard.
rpc CreateTensorboard(CreateTensorboardRequest) returns (google.longrunning.Operation) {
rpc CreateTensorboard(CreateTensorboardRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/tensorboards"
body: "tensorboard"
@ -63,8 +65,18 @@ service TensorboardService {
option (google.api.method_signature) = "name";
}
// Returns a list of monthly active users for a given TensorBoard instance.
rpc ReadTensorboardUsage(ReadTensorboardUsageRequest)
returns (ReadTensorboardUsageResponse) {
option (google.api.http) = {
get: "/v1beta1/{tensorboard=projects/*/locations/*/tensorboards/*}:readUsage"
};
option (google.api.method_signature) = "tensorboard";
}
// Updates a Tensorboard.
rpc UpdateTensorboard(UpdateTensorboardRequest) returns (google.longrunning.Operation) {
rpc UpdateTensorboard(UpdateTensorboardRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1beta1/{tensorboard.name=projects/*/locations/*/tensorboards/*}"
body: "tensorboard"
@ -77,7 +89,8 @@ service TensorboardService {
}
// Lists Tensorboards in a Location.
rpc ListTensorboards(ListTensorboardsRequest) returns (ListTensorboardsResponse) {
rpc ListTensorboards(ListTensorboardsRequest)
returns (ListTensorboardsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*}/tensorboards"
};
@ -85,7 +98,8 @@ service TensorboardService {
}
// Deletes a Tensorboard.
rpc DeleteTensorboard(DeleteTensorboardRequest) returns (google.longrunning.Operation) {
rpc DeleteTensorboard(DeleteTensorboardRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/tensorboards/*}"
};
@ -97,16 +111,19 @@ service TensorboardService {
}
// Creates a TensorboardExperiment.
rpc CreateTensorboardExperiment(CreateTensorboardExperimentRequest) returns (TensorboardExperiment) {
rpc CreateTensorboardExperiment(CreateTensorboardExperimentRequest)
returns (TensorboardExperiment) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*}/experiments"
body: "tensorboard_experiment"
};
option (google.api.method_signature) = "parent,tensorboard_experiment,tensorboard_experiment_id";
option (google.api.method_signature) =
"parent,tensorboard_experiment,tensorboard_experiment_id";
}
// Gets a TensorboardExperiment.
rpc GetTensorboardExperiment(GetTensorboardExperimentRequest) returns (TensorboardExperiment) {
rpc GetTensorboardExperiment(GetTensorboardExperimentRequest)
returns (TensorboardExperiment) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}"
};
@ -114,7 +131,8 @@ service TensorboardService {
}
// Updates a TensorboardExperiment.
rpc UpdateTensorboardExperiment(UpdateTensorboardExperimentRequest) returns (TensorboardExperiment) {
rpc UpdateTensorboardExperiment(UpdateTensorboardExperimentRequest)
returns (TensorboardExperiment) {
option (google.api.http) = {
patch: "/v1beta1/{tensorboard_experiment.name=projects/*/locations/*/tensorboards/*/experiments/*}"
body: "tensorboard_experiment"
@ -123,7 +141,8 @@ service TensorboardService {
}
// Lists TensorboardExperiments in a Location.
rpc ListTensorboardExperiments(ListTensorboardExperimentsRequest) returns (ListTensorboardExperimentsResponse) {
rpc ListTensorboardExperiments(ListTensorboardExperimentsRequest)
returns (ListTensorboardExperimentsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*}/experiments"
};
@ -131,7 +150,8 @@ service TensorboardService {
}
// Deletes a TensorboardExperiment.
rpc DeleteTensorboardExperiment(DeleteTensorboardExperimentRequest) returns (google.longrunning.Operation) {
rpc DeleteTensorboardExperiment(DeleteTensorboardExperimentRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}"
};
@ -143,16 +163,19 @@ service TensorboardService {
}
// Creates a TensorboardRun.
rpc CreateTensorboardRun(CreateTensorboardRunRequest) returns (TensorboardRun) {
rpc CreateTensorboardRun(CreateTensorboardRunRequest)
returns (TensorboardRun) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs"
body: "tensorboard_run"
};
option (google.api.method_signature) = "parent,tensorboard_run,tensorboard_run_id";
option (google.api.method_signature) =
"parent,tensorboard_run,tensorboard_run_id";
}
// Batch create TensorboardRuns.
rpc BatchCreateTensorboardRuns(BatchCreateTensorboardRunsRequest) returns (BatchCreateTensorboardRunsResponse) {
rpc BatchCreateTensorboardRuns(BatchCreateTensorboardRunsRequest)
returns (BatchCreateTensorboardRunsResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs:batchCreate"
body: "*"
@ -169,7 +192,8 @@ service TensorboardService {
}
// Updates a TensorboardRun.
rpc UpdateTensorboardRun(UpdateTensorboardRunRequest) returns (TensorboardRun) {
rpc UpdateTensorboardRun(UpdateTensorboardRunRequest)
returns (TensorboardRun) {
option (google.api.http) = {
patch: "/v1beta1/{tensorboard_run.name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}"
body: "tensorboard_run"
@ -178,7 +202,8 @@ service TensorboardService {
}
// Lists TensorboardRuns in a Location.
rpc ListTensorboardRuns(ListTensorboardRunsRequest) returns (ListTensorboardRunsResponse) {
rpc ListTensorboardRuns(ListTensorboardRunsRequest)
returns (ListTensorboardRunsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs"
};
@ -186,7 +211,8 @@ service TensorboardService {
}
// Deletes a TensorboardRun.
rpc DeleteTensorboardRun(DeleteTensorboardRunRequest) returns (google.longrunning.Operation) {
rpc DeleteTensorboardRun(DeleteTensorboardRunRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}"
};
@ -198,7 +224,8 @@ service TensorboardService {
}
// Batch create TensorboardTimeSeries that belong to a TensorboardExperiment.
rpc BatchCreateTensorboardTimeSeries(BatchCreateTensorboardTimeSeriesRequest) returns (BatchCreateTensorboardTimeSeriesResponse) {
rpc BatchCreateTensorboardTimeSeries(BatchCreateTensorboardTimeSeriesRequest)
returns (BatchCreateTensorboardTimeSeriesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs/*/timeSeries:batchCreate"
body: "*"
@ -207,7 +234,8 @@ service TensorboardService {
}
// Creates a TensorboardTimeSeries.
rpc CreateTensorboardTimeSeries(CreateTensorboardTimeSeriesRequest) returns (TensorboardTimeSeries) {
rpc CreateTensorboardTimeSeries(CreateTensorboardTimeSeriesRequest)
returns (TensorboardTimeSeries) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/timeSeries"
body: "tensorboard_time_series"
@ -216,7 +244,8 @@ service TensorboardService {
}
// Gets a TensorboardTimeSeries.
rpc GetTensorboardTimeSeries(GetTensorboardTimeSeriesRequest) returns (TensorboardTimeSeries) {
rpc GetTensorboardTimeSeries(GetTensorboardTimeSeriesRequest)
returns (TensorboardTimeSeries) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}"
};
@ -224,16 +253,19 @@ service TensorboardService {
}
// Updates a TensorboardTimeSeries.
rpc UpdateTensorboardTimeSeries(UpdateTensorboardTimeSeriesRequest) returns (TensorboardTimeSeries) {
rpc UpdateTensorboardTimeSeries(UpdateTensorboardTimeSeriesRequest)
returns (TensorboardTimeSeries) {
option (google.api.http) = {
patch: "/v1beta1/{tensorboard_time_series.name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}"
body: "tensorboard_time_series"
};
option (google.api.method_signature) = "tensorboard_time_series,update_mask";
option (google.api.method_signature) =
"tensorboard_time_series,update_mask";
}
// Lists TensorboardTimeSeries in a Location.
rpc ListTensorboardTimeSeries(ListTensorboardTimeSeriesRequest) returns (ListTensorboardTimeSeriesResponse) {
rpc ListTensorboardTimeSeries(ListTensorboardTimeSeriesRequest)
returns (ListTensorboardTimeSeriesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/timeSeries"
};
@ -241,7 +273,8 @@ service TensorboardService {
}
// Deletes a TensorboardTimeSeries.
rpc DeleteTensorboardTimeSeries(DeleteTensorboardTimeSeriesRequest) returns (google.longrunning.Operation) {
rpc DeleteTensorboardTimeSeries(DeleteTensorboardTimeSeriesRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}"
};
@ -254,10 +287,12 @@ service TensorboardService {
// Reads multiple TensorboardTimeSeries' data. The data point number limit is
// 1000 for scalars, 100 for tensors and blob references. If the number of
// data points stored is less than the limit, all data will be returned.
// Otherwise, that limit number of data points will be randomly selected from
// data points stored is less than the limit, all data is returned.
// Otherwise, that many data points are randomly selected from
// this time series and returned.
rpc BatchReadTensorboardTimeSeriesData(BatchReadTensorboardTimeSeriesDataRequest) returns (BatchReadTensorboardTimeSeriesDataResponse) {
rpc BatchReadTensorboardTimeSeriesData(
BatchReadTensorboardTimeSeriesDataRequest)
returns (BatchReadTensorboardTimeSeriesDataResponse) {
option (google.api.http) = {
get: "/v1beta1/{tensorboard=projects/*/locations/*/tensorboards/*}/experiments/*/runs/*/timeSeries:batchRead"
};
@ -265,11 +300,12 @@ service TensorboardService {
}
// Reads a TensorboardTimeSeries' data. By default, if the number of data
// points stored is less than 1000, all data will be returned. Otherwise, 1000
// data points will be randomly selected from this time series and returned.
// points stored is less than 1000, all data is returned. Otherwise, 1000
// data points are randomly selected from this time series and returned.
// This value can be changed by changing max_data_points, which can't be
// greater than 10k.
rpc ReadTensorboardTimeSeriesData(ReadTensorboardTimeSeriesDataRequest) returns (ReadTensorboardTimeSeriesDataResponse) {
rpc ReadTensorboardTimeSeriesData(ReadTensorboardTimeSeriesDataRequest)
returns (ReadTensorboardTimeSeriesDataResponse) {
option (google.api.http) = {
get: "/v1beta1/{tensorboard_time_series=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}:read"
};
@ -280,7 +316,8 @@ service TensorboardService {
// This is to allow reading blob data stored in consumer project's Cloud
// Storage bucket without users having to obtain Cloud Storage access
// permission.
rpc ReadTensorboardBlobData(ReadTensorboardBlobDataRequest) returns (stream ReadTensorboardBlobDataResponse) {
rpc ReadTensorboardBlobData(ReadTensorboardBlobDataRequest)
returns (stream ReadTensorboardBlobDataResponse) {
option (google.api.http) = {
get: "/v1beta1/{time_series=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}:readBlobData"
};
@ -288,20 +325,21 @@ service TensorboardService {
}
// Write time series data points of multiple TensorboardTimeSeries in multiple
// TensorboardRun's. If any data fail to be ingested, an error will be
// returned.
rpc WriteTensorboardExperimentData(WriteTensorboardExperimentDataRequest) returns (WriteTensorboardExperimentDataResponse) {
// TensorboardRuns. If any data fail to be ingested, an error is returned.
rpc WriteTensorboardExperimentData(WriteTensorboardExperimentDataRequest)
returns (WriteTensorboardExperimentDataResponse) {
option (google.api.http) = {
post: "/v1beta1/{tensorboard_experiment=projects/*/locations/*/tensorboards/*/experiments/*}:write"
body: "*"
};
option (google.api.method_signature) = "tensorboard_experiment,write_run_data_requests";
option (google.api.method_signature) =
"tensorboard_experiment,write_run_data_requests";
}
// Write time series data points into multiple TensorboardTimeSeries under
// a TensorboardRun. If any data fail to be ingested, an error will be
// returned.
rpc WriteTensorboardRunData(WriteTensorboardRunDataRequest) returns (WriteTensorboardRunDataResponse) {
// a TensorboardRun. If any data fail to be ingested, an error is returned.
rpc WriteTensorboardRunData(WriteTensorboardRunDataRequest)
returns (WriteTensorboardRunDataResponse) {
option (google.api.http) = {
post: "/v1beta1/{tensorboard_run=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}:write"
body: "*"
@ -311,7 +349,8 @@ service TensorboardService {
// Exports a TensorboardTimeSeries' data. Data is returned in paginated
// responses.
rpc ExportTensorboardTimeSeriesData(ExportTensorboardTimeSeriesDataRequest) returns (ExportTensorboardTimeSeriesDataResponse) {
rpc ExportTensorboardTimeSeriesData(ExportTensorboardTimeSeriesDataRequest)
returns (ExportTensorboardTimeSeriesDataResponse) {
option (google.api.http) = {
post: "/v1beta1/{tensorboard_time_series=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}:exportTensorboardTimeSeries"
body: "*"
@ -320,7 +359,8 @@ service TensorboardService {
}
}
// Request message for [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard].
// Request message for
// [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard].
message CreateTensorboardRequest {
// Required. The resource name of the Location to create the Tensorboard in.
// Format: `projects/{project}/locations/{location}`
@ -335,7 +375,8 @@ message CreateTensorboardRequest {
Tensorboard tensorboard = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard].
// Request message for
// [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard].
message GetTensorboardRequest {
// Required. The name of the Tensorboard resource.
// Format:
@ -348,7 +389,42 @@ message GetTensorboardRequest {
];
}
// Request message for [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
// Request message for [TensorboardService.ReadTensorboardUsage][].
message ReadTensorboardUsageRequest {
// Required. The name of the Tensorboard resource.
// Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
string tensorboard = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Tensorboard"
}
];
}
// Response message for [TensorboardService.ReadTensorboardUsage][].
message ReadTensorboardUsageResponse {
// Per user usage data.
message PerUserUsageData {
// User's username
string username = 1;
// Number of times the user has read data within the Tensorboard.
int64 view_count = 2;
}
// Per month usage data
message PerMonthUsageData {
// Usage data for each user in the given month.
repeated PerUserUsageData user_usage_data = 1;
}
// Maps year-month (YYYYMM) string to per month usage data.
map<string, PerMonthUsageData> monthly_usage_data = 1;
}
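A minimal sketch of calling the new RPC and walking the usage map, assuming a client regenerated from these protos (TensorboardServiceClient) and a hypothetical Tensorboard name:

from google.cloud import aiplatform_v1beta1 as aip

client = aip.TensorboardServiceClient()

response = client.read_tensorboard_usage(
    request=aip.ReadTensorboardUsageRequest(
        tensorboard="projects/my-project/locations/us-central1/tensorboards/123"
    )
)

# monthly_usage_data maps "YYYYMM" strings to per-month usage.
for month, usage in response.monthly_usage_data.items():
    for user in usage.user_usage_data:
        print(month, user.username, user.view_count)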
// Request message for
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
message ListTensorboardsRequest {
// Required. The resource name of the Location to list Tensorboards.
// Format:
@ -364,18 +440,18 @@ message ListTensorboardsRequest {
string filter = 2;
// The maximum number of Tensorboards to return. The service may return
// fewer than this value. If unspecified, at most 100 Tensorboards will be
// returned. The maximum value is 100; values above 100 will be coerced to
// fewer than this value. If unspecified, at most 100 Tensorboards are
// returned. The maximum value is 100; values above 100 are coerced to
// 100.
int32 page_size = 3;
// A page token, received from a previous
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] call.
// Provide this to retrieve the subsequent page.
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] must
// match the call that provided the page token.
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
// must match the call that provided the page token.
string page_token = 4;
// Field to use to sort the list.
@ -385,26 +461,30 @@ message ListTensorboardsRequest {
google.protobuf.FieldMask read_mask = 6;
}
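For the pagination fields above, a sketch assuming the generated client's list pager follows next_page_token automatically (parent name is hypothetical):

from google.cloud import aiplatform_v1beta1 as aip

client = aip.TensorboardServiceClient()

request = aip.ListTensorboardsRequest(
    parent="projects/my-project/locations/us-central1",
    page_size=100,  # values above 100 are coerced to 100
)

# Iterating the pager yields Tensorboard resources across pages.
for tensorboard in client.list_tensorboards(request=request):
    print(tensorboard.name)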
// Response message for [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
// Response message for
// [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
message ListTensorboardsResponse {
// The Tensorboards matching the request.
repeated Tensorboard tensorboards = 1;
// A token, which can be sent as [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token]
// A token, which can be sent as
// [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard].
// Request message for
// [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard].
message UpdateTensorboardRequest {
// Required. Field mask is used to specify the fields to be overwritten in the
// Tensorboard resource by the update.
// The fields specified in the update_mask are relative to the resource, not
// the full request. A field will be overwritten if it is in the mask. If the
// user does not provide a mask then all fields will be overwritten if new
// the full request. A field is overwritten if it's in the mask. If the
// user does not provide a mask then all fields are overwritten if new
// values are specified.
google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 1
[(google.api.field_behavior) = REQUIRED];
// Required. The Tensorboard's `name` field is used to identify the
// Tensorboard to be updated. Format:
@ -412,7 +492,8 @@ message UpdateTensorboardRequest {
Tensorboard tensorboard = 2 [(google.api.field_behavior) = REQUIRED];
}
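A sketch of an update that overwrites only the masked field, assuming the generated client; display_name is used as the changed field purely for illustration and is not shown in this excerpt:

from google.cloud import aiplatform_v1beta1 as aip
from google.protobuf import field_mask_pb2

client = aip.TensorboardServiceClient()

# The name identifies the resource to update; only masked fields are overwritten.
updated = aip.Tensorboard(
    name="projects/my-project/locations/us-central1/tensorboards/123",
    display_name="renamed tensorboard",
)

operation = client.update_tensorboard(
    request=aip.UpdateTensorboardRequest(
        tensorboard=updated,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
)
tensorboard = operation.result()  # UpdateTensorboard is a long-running operation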
// Request message for [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard].
// Request message for
// [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard].
message DeleteTensorboardRequest {
// Required. The name of the Tensorboard to be deleted.
// Format:
@ -425,10 +506,11 @@ message DeleteTensorboardRequest {
];
}
// Request message for [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment].
// Request message for
// [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment].
message CreateTensorboardExperimentRequest {
// Required. The resource name of the Tensorboard to create the TensorboardExperiment
// in. Format:
// Required. The resource name of the Tensorboard to create the
// TensorboardExperiment in. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -440,15 +522,16 @@ message CreateTensorboardExperimentRequest {
// The TensorboardExperiment to create.
TensorboardExperiment tensorboard_experiment = 2;
// Required. The ID to use for the Tensorboard experiment, which will become the final
// component of the Tensorboard experiment's resource name.
// Required. The ID to use for the Tensorboard experiment, which becomes the
// final component of the Tensorboard experiment's resource name.
//
// This value should be 1-128 characters, and valid characters
// are /[a-z][0-9]-/.
string tensorboard_experiment_id = 3 [(google.api.field_behavior) = REQUIRED];
}
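A sketch of creating an experiment with an explicit ID, assuming the generated client; display_name on TensorboardExperiment is an assumption, not shown in this excerpt:

from google.cloud import aiplatform_v1beta1 as aip

client = aip.TensorboardServiceClient()

experiment = client.create_tensorboard_experiment(
    request=aip.CreateTensorboardExperimentRequest(
        parent="projects/my-project/locations/us-central1/tensorboards/123",
        tensorboard_experiment=aip.TensorboardExperiment(display_name="my experiment"),
        tensorboard_experiment_id="exp-001",  # 1-128 chars drawn from [a-z][0-9]-
    )
)
print(experiment.name)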
// Request message for [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment].
// Request message for
// [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment].
message GetTensorboardExperimentRequest {
// Required. The name of the TensorboardExperiment resource.
// Format:
@ -461,10 +544,11 @@ message GetTensorboardExperimentRequest {
];
}
// Request message for [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
// Request message for
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
message ListTensorboardExperimentsRequest {
// Required. The resource name of the Tensorboard to list TensorboardExperiments.
// Format:
// Required. The resource name of the Tensorboard to list
// TensorboardExperiments. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -478,17 +562,17 @@ message ListTensorboardExperimentsRequest {
// The maximum number of TensorboardExperiments to return. The service may
// return fewer than this value. If unspecified, at most 50
// TensorboardExperiments will be returned. The maximum value is 1000; values
// above 1000 will be coerced to 1000.
// TensorboardExperiments are returned. The maximum value is 1000; values
// above 1000 are coerced to 1000.
int32 page_size = 3;
// A page token, received from a previous
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] call.
// Provide this to retrieve the subsequent page.
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] must
// match the call that provided the page token.
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]
// must match the call that provided the page token.
string page_token = 4;
// Field to use to sort the list.
@ -498,34 +582,40 @@ message ListTensorboardExperimentsRequest {
google.protobuf.FieldMask read_mask = 6;
}
// Response message for [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
// Response message for
// [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
message ListTensorboardExperimentsResponse {
// The TensorboardExperiments matching the request.
repeated TensorboardExperiment tensorboard_experiments = 1;
// A token, which can be sent as
// [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token] to retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment].
// Request message for
// [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment].
message UpdateTensorboardExperimentRequest {
// Required. Field mask is used to specify the fields to be overwritten in the
// TensorboardExperiment resource by the update.
// The fields specified in the update_mask are relative to the resource, not
// the full request. A field will be overwritten if it is in the mask. If the
// user does not provide a mask then all fields will be overwritten if new
// the full request. A field is overwritten if it's in the mask. If the
// user does not provide a mask then all fields are overwritten if new
// values are specified.
google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 1
[(google.api.field_behavior) = REQUIRED];
// Required. The TensorboardExperiment's `name` field is used to identify the
// TensorboardExperiment to be updated. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
TensorboardExperiment tensorboard_experiment = 2 [(google.api.field_behavior) = REQUIRED];
TensorboardExperiment tensorboard_experiment = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment].
// Request message for
// [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment].
message DeleteTensorboardExperimentRequest {
// Required. The name of the TensorboardExperiment to be deleted.
// Format:
@ -538,7 +628,8 @@ message DeleteTensorboardExperimentRequest {
];
}
// Request message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
// Request message for
// [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
message BatchCreateTensorboardRunsRequest {
// Required. The resource name of the TensorboardExperiment to create the
// TensorboardRuns in. Format:
@ -554,19 +645,22 @@ message BatchCreateTensorboardRunsRequest {
// Required. The request message specifying the TensorboardRuns to create.
// A maximum of 1000 TensorboardRuns can be created in a batch.
repeated CreateTensorboardRunRequest requests = 2 [(google.api.field_behavior) = REQUIRED];
repeated CreateTensorboardRunRequest requests = 2
[(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
// Response message for
// [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
message BatchCreateTensorboardRunsResponse {
// The created TensorboardRuns.
repeated TensorboardRun tensorboard_runs = 1;
}
// Request message for [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun].
// Request message for
// [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun].
message CreateTensorboardRunRequest {
// Required. The resource name of the TensorboardExperiment to create the TensorboardRun
// in. Format:
// Required. The resource name of the TensorboardExperiment to create the
// TensorboardRun in. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -578,7 +672,7 @@ message CreateTensorboardRunRequest {
// Required. The TensorboardRun to create.
TensorboardRun tensorboard_run = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The ID to use for the Tensorboard run, which will become the final
// Required. The ID to use for the Tensorboard run, which becomes the final
// component of the Tensorboard run's resource name.
//
// This value should be 1-128 characters, and valid characters
@ -586,7 +680,8 @@ message CreateTensorboardRunRequest {
string tensorboard_run_id = 3 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun].
// Request message for
// [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun].
message GetTensorboardRunRequest {
// Required. The name of the TensorboardRun resource.
// Format:
@ -599,7 +694,8 @@ message GetTensorboardRunRequest {
];
}
// Request message for [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
// Request message for
// [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
message ReadTensorboardBlobDataRequest {
// Required. The resource name of the TensorboardTimeSeries to list Blobs.
// Format:
@ -615,16 +711,18 @@ message ReadTensorboardBlobDataRequest {
repeated string blob_ids = 2;
}
// Response message for [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
// Response message for
// [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
message ReadTensorboardBlobDataResponse {
// Blob messages containing blob bytes.
repeated TensorboardBlob blobs = 1;
}
// Request message for [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
// Request message for
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
message ListTensorboardRunsRequest {
// Required. The resource name of the TensorboardExperiment to list TensorboardRuns.
// Format:
// Required. The resource name of the TensorboardExperiment to list
// TensorboardRuns. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -637,18 +735,18 @@ message ListTensorboardRunsRequest {
string filter = 2;
// The maximum number of TensorboardRuns to return. The service may return
// fewer than this value. If unspecified, at most 50 TensorboardRuns will be
// returned. The maximum value is 1000; values above 1000 will be coerced to
// fewer than this value. If unspecified, at most 50 TensorboardRuns are
// returned. The maximum value is 1000; values above 1000 are coerced to
// 1000.
int32 page_size = 3;
// A page token, received from a previous
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] call.
// Provide this to retrieve the subsequent page.
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] must
// match the call that provided the page token.
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]
// must match the call that provided the page token.
string page_token = 4;
// Field to use to sort the list.
@ -658,34 +756,39 @@ message ListTensorboardRunsRequest {
google.protobuf.FieldMask read_mask = 6;
}
// Response message for [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
// Response message for
// [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
message ListTensorboardRunsResponse {
// The TensorboardRuns matching the request.
repeated TensorboardRun tensorboard_runs = 1;
// A token, which can be sent as [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token] to
// retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// A token, which can be sent as
// [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun].
// Request message for
// [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun].
message UpdateTensorboardRunRequest {
// Required. Field mask is used to specify the fields to be overwritten in the
// TensorboardRun resource by the update.
// The fields specified in the update_mask are relative to the resource, not
// the full request. A field will be overwritten if it is in the mask. If the
// user does not provide a mask then all fields will be overwritten if new
// the full request. A field is overwritten if it's in the mask. If the
// user does not provide a mask then all fields are overwritten if new
// values are specified.
google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 1
[(google.api.field_behavior) = REQUIRED];
// Required. The TensorboardRun's `name` field is used to identify the TensorboardRun to
// be updated. Format:
// Required. The TensorboardRun's `name` field is used to identify the
// TensorboardRun to be updated. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`
TensorboardRun tensorboard_run = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun].
// Request message for
// [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun].
message DeleteTensorboardRunRequest {
// Required. The name of the TensorboardRun to be deleted.
// Format:
@ -698,7 +801,8 @@ message DeleteTensorboardRunRequest {
];
}
// Request message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
// Request message for
// [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
message BatchCreateTensorboardTimeSeriesRequest {
// Required. The resource name of the TensorboardExperiment to create the
// TensorboardTimeSeries in.
@ -714,18 +818,21 @@ message BatchCreateTensorboardTimeSeriesRequest {
}
];
// Required. The request message specifying the TensorboardTimeSeries to create.
// A maximum of 1000 TensorboardTimeSeries can be created in a batch.
repeated CreateTensorboardTimeSeriesRequest requests = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The request message specifying the TensorboardTimeSeries to
// create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
repeated CreateTensorboardTimeSeriesRequest requests = 2
[(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
// Response message for
// [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
message BatchCreateTensorboardTimeSeriesResponse {
// The created TensorboardTimeSeries.
repeated TensorboardTimeSeries tensorboard_time_series = 1;
}
// Request message for [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
// Request message for
// [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
message CreateTensorboardTimeSeriesRequest {
// Required. The resource name of the TensorboardRun to create the
// TensorboardTimeSeries in.
@ -738,17 +845,20 @@ message CreateTensorboardTimeSeriesRequest {
}
];
// Optional. The user specified unique ID to use for the TensorboardTimeSeries, which
// will become the final component of the TensorboardTimeSeries's resource
// name.
// This value should match "[a-z0-9][a-z0-9-]{0, 127}"
string tensorboard_time_series_id = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. The user specified unique ID to use for the
// TensorboardTimeSeries, which becomes the final component of the
// TensorboardTimeSeries's resource name. This value should match
// "[a-z0-9][a-z0-9-]{0, 127}"
string tensorboard_time_series_id = 3
[(google.api.field_behavior) = OPTIONAL];
// Required. The TensorboardTimeSeries to create.
TensorboardTimeSeries tensorboard_time_series = 2 [(google.api.field_behavior) = REQUIRED];
TensorboardTimeSeries tensorboard_time_series = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries].
// Request message for
// [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries].
message GetTensorboardTimeSeriesRequest {
// Required. The name of the TensorboardTimeSeries resource.
// Format:
@ -761,10 +871,11 @@ message GetTensorboardTimeSeriesRequest {
];
}
// Request message for [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
// Request message for
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
message ListTensorboardTimeSeriesRequest {
// Required. The resource name of the TensorboardRun to list TensorboardTimeSeries.
// Format:
// Required. The resource name of the TensorboardRun to list
// TensorboardTimeSeries. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@ -778,17 +889,17 @@ message ListTensorboardTimeSeriesRequest {
// The maximum number of TensorboardTimeSeries to return. The service may
// return fewer than this value. If unspecified, at most 50
// TensorboardTimeSeries will be returned. The maximum value is 1000; values
// above 1000 will be coerced to 1000.
// TensorboardTimeSeries are returned. The maximum value is 1000; values
// above 1000 are coerced to 1000.
int32 page_size = 3;
// A page token, received from a previous
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] call.
// Provide this to retrieve the subsequent page.
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] must
// match the call that provided the page token.
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]
// must match the call that provided the page token.
string page_token = 4;
// Field to use to sort the list.
@ -798,35 +909,41 @@ message ListTensorboardTimeSeriesRequest {
google.protobuf.FieldMask read_mask = 6;
}
// Response message for [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
// Response message for
// [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
message ListTensorboardTimeSeriesResponse {
// The TensorboardTimeSeries matching the request.
repeated TensorboardTimeSeries tensorboard_time_series = 1;
// A token, which can be sent as
// [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] to retrieve the next page.
// If this field is omitted, there are no subsequent pages.
// [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token]
// to retrieve the next page. If this field is omitted, there are no
// subsequent pages.
string next_page_token = 2;
}
// Request message for [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries].
// Request message for
// [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries].
message UpdateTensorboardTimeSeriesRequest {
// Required. Field mask is used to specify the fields to be overwritten in the
// TensorboardTimeSeries resource by the update.
// The fields specified in the update_mask are relative to the resource, not
// the full request. A field will be overwritten if it is in the mask. If the
// user does not provide a mask then all fields will be overwritten if new
// the full request. A field is overwritten if it's in the mask. If the
// user does not provide a mask then all fields are overwritten if new
// values are specified.
google.protobuf.FieldMask update_mask = 1 [(google.api.field_behavior) = REQUIRED];
google.protobuf.FieldMask update_mask = 1
[(google.api.field_behavior) = REQUIRED];
// Required. The TensorboardTimeSeries' `name` field is used to identify the
// TensorboardTimeSeries to be updated.
// Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`
TensorboardTimeSeries tensorboard_time_series = 2 [(google.api.field_behavior) = REQUIRED];
TensorboardTimeSeries tensorboard_time_series = 2
[(google.api.field_behavior) = REQUIRED];
}
// Request message for [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries].
// Request message for
// [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries].
message DeleteTensorboardTimeSeriesRequest {
// Required. The name of the TensorboardTimeSeries to be deleted.
// Format:
@ -842,11 +959,12 @@ message DeleteTensorboardTimeSeriesRequest {
// Request message for
// [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData].
message BatchReadTensorboardTimeSeriesDataRequest {
// Required. The resource name of the Tensorboard containing TensorboardTimeSeries to
// read data from. Format:
// Required. The resource name of the Tensorboard containing
// TensorboardTimeSeries to read data from. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`.
// The TensorboardTimeSeries referenced by [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] must be sub
// resources of this Tensorboard.
// The TensorboardTimeSeries referenced by
// [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series]
// must be sub resources of this Tensorboard.
string tensorboard = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@ -854,7 +972,8 @@ message BatchReadTensorboardTimeSeriesDataRequest {
}
];
// Required. The resource names of the TensorboardTimeSeries to read data from. Format:
// Required. The resource names of the TensorboardTimeSeries to read data
// from. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`
repeated string time_series = 2 [
(google.api.field_behavior) = REQUIRED,
@ -871,7 +990,8 @@ message BatchReadTensorboardTimeSeriesDataResponse {
repeated TimeSeriesData time_series_data = 1;
}
// Request message for [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
// Request message for
// [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
message ReadTensorboardTimeSeriesDataRequest {
// Required. The resource name of the TensorboardTimeSeries to read data from.
// Format:
@ -893,13 +1013,15 @@ message ReadTensorboardTimeSeriesDataRequest {
string filter = 3;
}
// Response message for [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
// Response message for
// [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
message ReadTensorboardTimeSeriesDataResponse {
// The returned time series data.
TimeSeriesData time_series_data = 1;
}
// Request message for [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
// Request message for
// [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
message WriteTensorboardExperimentDataRequest {
// Required. The resource name of the TensorboardExperiment to write data to.
// Format:
@ -912,15 +1034,16 @@ message WriteTensorboardExperimentDataRequest {
];
// Required. Requests containing per-run TensorboardTimeSeries data to write.
repeated WriteTensorboardRunDataRequest write_run_data_requests = 2 [(google.api.field_behavior) = REQUIRED];
repeated WriteTensorboardRunDataRequest write_run_data_requests = 2
[(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
message WriteTensorboardExperimentDataResponse {
}
// Response message for
// [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
message WriteTensorboardExperimentDataResponse {}
// Request message for [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
// Request message for
// [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
message WriteTensorboardRunDataRequest {
// Required. The resource name of the TensorboardRun to write data to.
// Format:
@ -937,18 +1060,19 @@ message WriteTensorboardRunDataRequest {
// Repeated writes to the same step will overwrite the existing value for that
// step.
// The upper limit of data points per write request is 5000.
repeated TimeSeriesData time_series_data = 2 [(google.api.field_behavior) = REQUIRED];
repeated TimeSeriesData time_series_data = 2
[(google.api.field_behavior) = REQUIRED];
}
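A sketch of a scalar write that stays under the 5000-points-per-request limit, assuming the generated client and that the scalar point message is exposed as Scalar (defined in tensorboard_data.proto, not shown here); all resource names are hypothetical:

from google.cloud import aiplatform_v1beta1 as aip

client = aip.TensorboardServiceClient()

# One TimeSeriesData per TensorboardTimeSeries; points carry a step and a scalar value.
loss = aip.TimeSeriesData(
    tensorboard_time_series_id="loss",
    value_type=aip.TensorboardTimeSeries.ValueType.SCALAR,
    values=[
        aip.TimeSeriesDataPoint(step=1, scalar=aip.Scalar(value=0.42)),
        aip.TimeSeriesDataPoint(step=2, scalar=aip.Scalar(value=0.37)),
    ],
)

client.write_tensorboard_run_data(
    request=aip.WriteTensorboardRunDataRequest(
        tensorboard_run=(
            "projects/my-project/locations/us-central1/tensorboards/123"
            "/experiments/exp-001/runs/run-1"
        ),
        time_series_data=[loss],  # at most 5000 points per write request
    )
)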
// Response message for [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
message WriteTensorboardRunDataResponse {
}
// Response message for
// [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
message WriteTensorboardRunDataResponse {}
// Request message for [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData].
// Request message for
// [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData].
message ExportTensorboardTimeSeriesDataRequest {
// Required. The resource name of the TensorboardTimeSeries to export data from.
// Format:
// Required. The resource name of the TensorboardTimeSeries to export data
// from. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`
string tensorboard_time_series = 1 [
(google.api.field_behavior) = REQUIRED,
@ -961,8 +1085,8 @@ message ExportTensorboardTimeSeriesDataRequest {
string filter = 2;
// The maximum number of data points to return per page.
// The default page_size will be 1000. Values must be between 1 and 10000.
// Values above 10000 will be coerced to 10000.
// The default page_size is 1000. Values must be between 1 and 10000.
// Values above 10000 are coerced to 10000.
int32 page_size = 3;
// A page token, received from a previous
@ -975,12 +1099,13 @@ message ExportTensorboardTimeSeriesDataRequest {
string page_token = 4;
// Field to use to sort the TensorboardTimeSeries' data.
// By default, TensorboardTimeSeries' data will be returned in a pseudo random
// By default, TensorboardTimeSeries' data is returned in a pseudo random
// order.
string order_by = 5;
}
// Response message for [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData].
// Response message for
// [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData].
message ExportTensorboardTimeSeriesDataResponse {
// The returned time series data points.
repeated TimeSeriesDataPoint time_series_data_points = 1;
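For ExportTensorboardTimeSeriesData, page_size above 10000 is coerced and page_token drives pagination; with the generated Python client the returned pager handles page tokens automatically. A hedged sketch (the resource name and order_by value are placeholders):

```python
# Sketch only: the pager iterates TimeSeriesDataPoint items across pages automatically.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
time_series = (
    "projects/my-project/locations/us-central1/tensorboards/123/"
    "experiments/exp1/runs/run1/timeSeries/456"
)

pager = client.export_tensorboard_time_series_data(
    request=aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest(
        tensorboard_time_series=time_series,
        page_size=20_000,  # values above 10000 are coerced to 10000, per the comment above
        order_by="step",   # assumed sort field; the default order is pseudo random
    )
)
for point in pager:
    print(point.step, point.wall_time)
```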

@ -37,16 +37,19 @@ message TensorboardTimeSeries {
// Describes metadata for a TensorboardTimeSeries.
message Metadata {
// Output only. Max step index of all data points within a TensorboardTimeSeries.
// Output only. Max step index of all data points within a
// TensorboardTimeSeries.
int64 max_step = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Max wall clock timestamp of all data points within a
// TensorboardTimeSeries.
google.protobuf.Timestamp max_wall_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp max_wall_time = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The largest blob sequence length (number of blobs) of all data points in
// this time series, if its ValueType is BLOB_SEQUENCE.
int64 max_blob_sequence_length = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The largest blob sequence length (number of blobs) of all
// data points in this time series, if its ValueType is BLOB_SEQUENCE.
int64 max_blob_sequence_length = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// An enum representing the value type of a TensorboardTimeSeries.
@ -85,22 +88,25 @@ message TensorboardTimeSeries {
];
// Output only. Timestamp when this TensorboardTimeSeries was created.
google.protobuf.Timestamp create_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this TensorboardTimeSeries was last updated.
google.protobuf.Timestamp update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Used to perform a consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
string etag = 7;
// Immutable. Name of the plugin this time series pertain to. Such as Scalar, Tensor,
// Blob
// Immutable. Name of the plugin this time series pertains to, such as Scalar,
// Tensor, or Blob.
string plugin_name = 8 [(google.api.field_behavior) = IMMUTABLE];
// Data of the current plugin, with the size limited to 65KB.
bytes plugin_data = 9;
// Output only. Scalar, Tensor, or Blob metadata for this TensorboardTimeSeries.
// Output only. Scalar, Tensor, or Blob metadata for this
// TensorboardTimeSeries.
Metadata metadata = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
}
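A hedged sketch of constructing the TensorboardTimeSeries message above and creating it through the v1beta1 TensorboardService; the create RPC, display_name, and value_type sit outside the hunks shown here, so treat them as assumptions drawn from the wider API surface.

```python
# Sketch only: CreateTensorboardTimeSeriesRequest and the parent format are assumed
# from the wider v1beta1 surface, not from this diff.
from google.cloud import aiplatform_v1beta1

series = aiplatform_v1beta1.TensorboardTimeSeries(
    display_name="training loss",
    value_type=aiplatform_v1beta1.TensorboardTimeSeries.ValueType.SCALAR,
    plugin_name="scalars",  # plugin this time series pertains to (Scalar/Tensor/Blob)
)

client = aiplatform_v1beta1.TensorboardServiceClient()
created = client.create_tensorboard_time_series(
    request=aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
        parent=(
            "projects/my-project/locations/us-central1/tensorboards/123/"
            "experiments/exp1/runs/run1"
        ),
        tensorboard_time_series=series,
    )
)
print(created.name)  # metadata.max_step etc. are OUTPUT_ONLY and filled by the service
```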

@ -37,8 +37,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// The TrainingPipeline orchestrates tasks associated with training a Model. It
// always executes the training task, and optionally may also
// export data from Vertex AI's Dataset which becomes the training input,
// [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to Vertex AI, and evaluate the
// Model.
// [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model
// to Vertex AI, and evaluate the Model.
message TrainingPipeline {
option (google.api.resource) = {
type: "aiplatform.googleapis.com/TrainingPipeline"
@ -52,17 +52,20 @@ message TrainingPipeline {
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Specifies Vertex AI owned input data that may be used for training the
// Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make
// clear whether this config is used and if there are any special requirements
// on how it should be filled. If nothing about this config is mentioned in
// the [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that the
// TrainingPipeline does not depend on this configuration.
// Model. The TrainingPipeline's
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
// should make clear whether this config is used and if there are any special
// requirements on how it should be filled. If nothing about this config is
// mentioned in the
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition],
// then it should be assumed that the TrainingPipeline does not depend on this
// configuration.
InputDataConfig input_data_config = 3;
// Required. A Google Cloud Storage path to the YAML file that defines the training task
// which is responsible for producing the model artifact, and may also include
// additional auxiliary work.
// The definition files that can be used here are found in
// Required. A Google Cloud Storage path to the YAML file that defines the
// training task which is responsible for producing the model artifact, and
// may also include additional auxiliary work. The definition files that can
// be used here are found in
// gs://google-cloud-aiplatform/schema/trainingjob/definition/.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
@ -70,28 +73,37 @@ message TrainingPipeline {
string training_task_definition = 4 [(google.api.field_behavior) = REQUIRED];
// Required. The training task's parameter(s), as specified in the
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s `inputs`.
google.protobuf.Value training_task_inputs = 5 [(google.api.field_behavior) = REQUIRED];
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
// `inputs`.
google.protobuf.Value training_task_inputs = 5
[(google.api.field_behavior) = REQUIRED];
// Output only. The metadata information as specified in the [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
// Output only. The metadata information as specified in the
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
// `metadata`. This metadata is an auxiliary runtime and final information
// about the training task. While the pipeline is running this information is
// populated only at a best effort basis. Only present if the
// pipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] contains `metadata` object.
google.protobuf.Value training_task_metadata = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
// Describes the Model that may be uploaded (via [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel])
// pipeline's
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
// contains `metadata` object.
google.protobuf.Value training_task_metadata = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Describes the Model that may be uploaded (via
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel])
// by this TrainingPipeline. The TrainingPipeline's
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make clear whether this Model
// description should be populated, and if there are any special requirements
// regarding how it should be filled. If nothing is mentioned in the
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that this field
// should not be filled and the training task either uploads the Model without
// a need of this information, or that training task does not support
// uploading a Model as part of the pipeline.
// When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
// should make clear whether this Model description should be populated, and
// if there are any special requirements regarding how it should be filled. If
// nothing is mentioned in the
// [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition],
// then it should be assumed that this field should not be filled and the
// training task either uploads the Model without a need of this information,
// or that training task does not support uploading a Model as part of the
// pipeline. When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and
// the trained Model had been uploaded into Vertex AI, then the
// model_to_upload's resource [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model
// model_to_upload's resource
// [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model
// is always uploaded into the Project and Location in which this pipeline
// is.
Model model_to_upload = 7;
@ -103,31 +115,36 @@ message TrainingPipeline {
// `[a-z0-9_-]`. The first character cannot be a number or hyphen.
string model_id = 22 [(google.api.field_behavior) = OPTIONAL];
// Optional. When specify this field, the `model_to_upload` will not be uploaded as a
// new model, instead, it will become a new version of this `parent_model`.
// Optional. When this field is specified, the `model_to_upload` will not be
// uploaded as a new model; instead, it will become a new version of this
// `parent_model`.
string parent_model = 21 [(google.api.field_behavior) = OPTIONAL];
// Output only. The detailed state of the pipeline.
PipelineState state = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or
// `PIPELINE_STATE_CANCELLED`.
// Output only. Only populated when the pipeline's state is
// `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`.
google.rpc.Status error = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the TrainingPipeline was created.
google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the TrainingPipeline for the first time entered the
// `PIPELINE_STATE_RUNNING` state.
google.protobuf.Timestamp start_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the TrainingPipeline entered any of the following states:
// `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`,
// Output only. Time when the TrainingPipeline entered any of the following
// states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`,
// `PIPELINE_STATE_CANCELLED`.
google.protobuf.Timestamp end_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp end_time = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the TrainingPipeline was most recently updated.
google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize TrainingPipelines.
//
@ -142,7 +159,8 @@ message TrainingPipeline {
// TrainingPipeline will be secured by this key.
//
// Note: Model trained by this TrainingPipeline is also secured by this key if
// [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec] is not set separately.
// [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec]
// is not set separately.
EncryptionSpec encryption_spec = 18;
}
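A hedged sketch of creating a TrainingPipeline with the fields described above, via the v1beta1 PipelineService (not shown in this diff); the definition file name and the shape of training_task_inputs are placeholders, not values from this commit.

```python
# Sketch only: pick a real definition file from the bucket documented above; the
# inputs dict below is illustrative and depends on the chosen task definition.
from google.cloud import aiplatform_v1beta1
from google.protobuf import json_format, struct_pb2

training_task_inputs = json_format.ParseDict(
    {"exampleParameter": "example-value"},  # placeholder inputs for the chosen task
    struct_pb2.Value(),
)

pipeline = aiplatform_v1beta1.TrainingPipeline(
    display_name="example-training-pipeline",
    training_task_definition=(
        "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
        "<task>_1.0.0.yaml"  # placeholder; use one of the published definition files
    ),
    training_task_inputs=training_task_inputs,
    input_data_config=aiplatform_v1beta1.InputDataConfig(dataset_id="1234567890"),
)

client = aiplatform_v1beta1.PipelineServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
created = client.create_training_pipeline(
    request=aiplatform_v1beta1.CreateTrainingPipelineRequest(
        parent="projects/my-project/locations/us-central1",
        training_pipeline=pipeline,
    )
)
print(created.name, created.state)
```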
@ -151,7 +169,9 @@ message TrainingPipeline {
message InputDataConfig {
// The instructions how the input data should be split between the
// training, validation and test sets.
// If no split type is provided, the [fraction_split][google.cloud.aiplatform.v1beta1.InputDataConfig.fraction_split] is used by default.
// If no split type is provided, the
// [fraction_split][google.cloud.aiplatform.v1beta1.InputDataConfig.fraction_split]
// is used by default.
oneof split {
// Split based on fractions defining the size of each set.
FractionSplit fraction_split = 2;
@ -235,9 +255,9 @@ message InputDataConfig {
BigQueryDestination bigquery_destination = 10;
}
// Required. The ID of the Dataset in the same Project and Location which data will be
// used to train the Model. The Dataset must use schema compatible with
// Model being trained, and what is compatible should be described in the
// Required. The ID of the Dataset in the same Project and Location whose data
// will be used to train the Model. The Dataset must use a schema compatible
// with the Model being trained, and what is compatible should be described in the
// used TrainingPipeline's [training_task_definition]
// [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
// For tabular Datasets, all their data is exported to training, to pick
@ -251,9 +271,9 @@ message InputDataConfig {
// are used in respectively training, validation or test role, depending on
// the role of the DataItem they are on (for the auto-assigned that role is
// decided by Vertex AI). A filter with same syntax as the one used in
// [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] may be used, but note
// here it filters across all Annotations of the Dataset, and not just within
// a single DataItem.
// [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]
// may be used, but note here it filters across all Annotations of the
// Dataset, and not just within a single DataItem.
string annotations_filter = 6;
// Applicable only to custom training with Datasets that have DataItems and
@ -265,30 +285,41 @@ message InputDataConfig {
// The schema files that can be used here are found in
// gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
// chosen schema must be consistent with
// [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] of the Dataset specified by
// [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] of
// the Dataset specified by
// [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id].
//
// Only Annotations that both match this schema and belong to DataItems not
// ignored by the split method are used in respectively training, validation
// or test role, depending on the role of the DataItem they are on.
//
// When used in conjunction with [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], the Annotations used
// for training are filtered by both [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] and
// When used in conjunction with
// [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter],
// the Annotations used for training are filtered by both
// [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter]
// and
// [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri].
string annotation_schema_uri = 9;
// Only applicable to Datasets that have SavedQueries.
//
// The ID of a SavedQuery (annotation set) under the Dataset specified by
// [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id] used for filtering Annotations for training.
// [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]
// used for filtering Annotations for training.
//
// Only Annotations that are associated with this SavedQuery are used in
// respectively training. When used in conjunction with
// [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], the Annotations used for training are filtered by
// both [saved_query_id][google.cloud.aiplatform.v1beta1.InputDataConfig.saved_query_id] and [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter].
// [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter],
// the Annotations used for training are filtered by both
// [saved_query_id][google.cloud.aiplatform.v1beta1.InputDataConfig.saved_query_id]
// and
// [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter].
//
// Only one of [saved_query_id][google.cloud.aiplatform.v1beta1.InputDataConfig.saved_query_id] and [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri] should be
// specified as both of them represent the same thing: problem type.
// Only one of
// [saved_query_id][google.cloud.aiplatform.v1beta1.InputDataConfig.saved_query_id]
// and
// [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]
// should be specified as both of them represent the same thing: problem type.
string saved_query_id = 7;
// Whether to persist the ML use assignment to data item system labels.
@ -323,26 +354,29 @@ message FractionSplit {
message FilterSplit {
// Required. A filter on DataItems of the Dataset. DataItems that match
// this filter are used to train the Model. A filter with same syntax
// as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a
// single DataItem is matched by more than one of the FilterSplit filters,
// then it is assigned to the first set that applies to it in the
// training, validation, test order.
// as the one used in
// [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]
// may be used. If a single DataItem is matched by more than one of the
// FilterSplit filters, then it is assigned to the first set that applies to
// it in the training, validation, test order.
string training_filter = 1 [(google.api.field_behavior) = REQUIRED];
// Required. A filter on DataItems of the Dataset. DataItems that match
// this filter are used to validate the Model. A filter with same syntax
// as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a
// single DataItem is matched by more than one of the FilterSplit filters,
// then it is assigned to the first set that applies to it in the
// training, validation, test order.
// as the one used in
// [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]
// may be used. If a single DataItem is matched by more than one of the
// FilterSplit filters, then it is assigned to the first set that applies to
// it in the training, validation, test order.
string validation_filter = 2 [(google.api.field_behavior) = REQUIRED];
// Required. A filter on DataItems of the Dataset. DataItems that match
// this filter are used to test the Model. A filter with same syntax
// as the one used in [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] may be used. If a
// single DataItem is matched by more than one of the FilterSplit filters,
// then it is assigned to the first set that applies to it in the
// training, validation, test order.
// as the one used in
// [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]
// may be used. If a single DataItem is matched by more than one of the
// FilterSplit filters, then it is assigned to the first set that applies to
// it in the training, validation, test order.
string test_filter = 3 [(google.api.field_behavior) = REQUIRED];
}
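A hedged sketch of an InputDataConfig that uses the FilterSplit above; the filter strings follow the ListDataItems filter syntax referenced in the comments, and the specific label keys are illustrative assumptions.

```python
# Sketch only: dataset ID and label-based filters are placeholders.
from google.cloud import aiplatform_v1beta1

input_data_config = aiplatform_v1beta1.InputDataConfig(
    dataset_id="1234567890",  # Dataset in the same Project and Location as the pipeline
    annotations_filter='labels.quality="reviewed"',  # optional extra Annotation filter
    filter_split=aiplatform_v1beta1.FilterSplit(
        training_filter='labels.aiplatform.googleapis.com/ml_use="training"',
        validation_filter='labels.aiplatform.googleapis.com/ml_use="validation"',
        test_filter='labels.aiplatform.googleapis.com/ml_use="test"',
    ),
)
# A DataItem matched by more than one filter lands in the first matching set,
# in training, validation, test order, as documented on FilterSplit above.
```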

@ -37,7 +37,8 @@ message UnmanagedContainerModel {
// Contains the schemata used in Model's predictions and explanations
PredictSchemata predict_schemata = 2;
// Input only. The specification of the container that is to be used when deploying
// this Model.
ModelContainerSpec container_spec = 3 [(google.api.field_behavior) = INPUT_ONLY];
// Input only. The specification of the container that is to be used when
// deploying this Model.
ModelContainerSpec container_spec = 3
[(google.api.field_behavior) = INPUT_ONLY];
}

@ -41,7 +41,8 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// learning architectures.
service VizierService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates a Study. A resource name will be generated after creation of the
// Study.
@ -92,7 +93,8 @@ service VizierService {
// operation associated with the generation of Trial suggestions.
// When this long-running operation succeeds, it will contain
// a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
rpc SuggestTrials(SuggestTrialsRequest) returns (google.longrunning.Operation) {
rpc SuggestTrials(SuggestTrialsRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/studies/*}/trials:suggest"
body: "*"
@ -157,7 +159,8 @@ service VizierService {
// long-running operation. When the operation is successful,
// it will contain a
// [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
rpc CheckTrialEarlyStoppingState(CheckTrialEarlyStoppingStateRequest) returns (google.longrunning.Operation) {
rpc CheckTrialEarlyStoppingState(CheckTrialEarlyStoppingStateRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{trial_name=projects/*/locations/*/studies/*/trials/*}:checkTrialEarlyStoppingState"
body: "*"
@ -180,7 +183,8 @@ service VizierService {
// optimal Trials for single-objective Study. The definition of
// pareto-optimal can be checked in wiki page.
// https://en.wikipedia.org/wiki/Pareto_efficiency
rpc ListOptimalTrials(ListOptimalTrialsRequest) returns (ListOptimalTrialsResponse) {
rpc ListOptimalTrials(ListOptimalTrialsRequest)
returns (ListOptimalTrialsResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/studies/*}/trials:listOptimalTrials"
body: "*"
@ -189,7 +193,8 @@ service VizierService {
}
}
// Request message for [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy].
// Request message for
// [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy].
message GetStudyRequest {
// Required. The name of the Study resource.
// Format: `projects/{project}/locations/{location}/studies/{study}`
@ -201,7 +206,8 @@ message GetStudyRequest {
];
}
// Request message for [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy].
// Request message for
// [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy].
message CreateStudyRequest {
// Required. The resource name of the Location to create the CustomJob in.
// Format: `projects/{project}/locations/{location}`
@ -216,7 +222,8 @@ message CreateStudyRequest {
Study study = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
// Request message for
// [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
message ListStudiesRequest {
// Required. The resource name of the Location to list the Study from.
// Format: `projects/{project}/locations/{location}`
@ -236,7 +243,8 @@ message ListStudiesRequest {
int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Response message for [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
// Response message for
// [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
message ListStudiesResponse {
// The studies associated with the project.
repeated Study studies = 1;
@ -247,7 +255,8 @@ message ListStudiesResponse {
string next_page_token = 2;
}
// Request message for [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy].
// Request message for
// [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy].
message DeleteStudyRequest {
// Required. The name of the Study resource to be deleted.
// Format: `projects/{project}/locations/{location}/studies/{study}`
@ -259,7 +268,8 @@ message DeleteStudyRequest {
];
}
// Request message for [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy].
// Request message for
// [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy].
message LookupStudyRequest {
// Required. The resource name of the Location to get the Study from.
// Format: `projects/{project}/locations/{location}`
@ -274,7 +284,8 @@ message LookupStudyRequest {
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
// Request message for
// [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
message SuggestTrialsRequest {
// Required. The project and location that the Study belongs to.
// Format: `projects/{project}/locations/{location}/studies/{study}`
@ -296,7 +307,8 @@ message SuggestTrialsRequest {
string client_id = 3 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
// Response message for
// [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
message SuggestTrialsResponse {
// A list of Trials.
repeated Trial trials = 1;
@ -324,7 +336,8 @@ message SuggestTrialsMetadata {
string client_id = 2;
}
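A hedged sketch of driving the SuggestTrials long-running operation described above with the v1beta1 Python client; suggestion_count is part of SuggestTrialsRequest but hidden by the collapsed hunk, so it is an assumption here.

```python
# Sketch only: study name, worker ID, and suggestion_count are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.VizierServiceClient()
operation = client.suggest_trials(
    request=aiplatform_v1beta1.SuggestTrialsRequest(
        parent="projects/my-project/locations/us-central1/studies/42",
        suggestion_count=3,  # assumed field, not visible in this diff
        client_id="worker-0",  # identifies the worker requesting the suggestions
    )
)
response = operation.result()  # SuggestTrialsResponse once the LRO succeeds
for trial in response.trials:
    print(trial.name, trial.parameters)
```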
// Request message for [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial].
// Request message for
// [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial].
message CreateTrialRequest {
// Required. The resource name of the Study to create the Trial in.
// Format: `projects/{project}/locations/{location}/studies/{study}`
@ -339,7 +352,8 @@ message CreateTrialRequest {
Trial trial = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial].
// Request message for
// [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial].
message GetTrialRequest {
// Required. The name of the Trial resource.
// Format:
@ -352,7 +366,8 @@ message GetTrialRequest {
];
}
// Request message for [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
// Request message for
// [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
message ListTrialsRequest {
// Required. The resource name of the Study to list the Trial from.
// Format: `projects/{project}/locations/{location}/studies/{study}`
@ -372,7 +387,8 @@ message ListTrialsRequest {
int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Response message for [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
// Response message for
// [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
message ListTrialsResponse {
// The Trials associated with the Study.
repeated Trial trials = 1;
@ -383,7 +399,8 @@ message ListTrialsResponse {
string next_page_token = 2;
}
// Request message for [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement].
// Request message for
// [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement].
message AddTrialMeasurementRequest {
// Required. The name of the trial to add measurement.
// Format:
@ -399,7 +416,8 @@ message AddTrialMeasurementRequest {
Measurement measurement = 3 [(google.api.field_behavior) = REQUIRED];
}
// Request message for [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial].
// Request message for
// [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial].
message CompleteTrialRequest {
// Required. The Trial's name.
// Format:
@ -425,7 +443,8 @@ message CompleteTrialRequest {
string infeasible_reason = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Request message for [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial].
// Request message for
// [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial].
message DeleteTrialRequest {
// Required. The Trial's name.
// Format:
@ -438,7 +457,8 @@ message DeleteTrialRequest {
];
}
// Request message for [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
// Request message for
// [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
message CheckTrialEarlyStoppingStateRequest {
// Required. The Trial's name.
// Format:
@ -451,7 +471,8 @@ message CheckTrialEarlyStoppingStateRequest {
];
}
// Response message for [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
// Response message for
// [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
message CheckTrialEarlyStoppingStateResponse {
// True if the Trial should stop.
bool should_stop = 1;
@ -471,7 +492,8 @@ message CheckTrialEarlyStoppingStateMetatdata {
string trial = 3;
}
// Request message for [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial].
// Request message for
// [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial].
message StopTrialRequest {
// Required. The Trial's name.
// Format:
@ -484,7 +506,8 @@ message StopTrialRequest {
];
}
// Request message for [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
// Request message for
// [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
message ListOptimalTrialsRequest {
// Required. The name of the Study that the optimal Trial belongs to.
string parent = 1 [
@ -495,7 +518,8 @@ message ListOptimalTrialsRequest {
];
}
// Response message for [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
// Response message for
// [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
message ListOptimalTrialsResponse {
// The pareto-optimal Trials for multiple objective Study or the
// optimal trial for single objective Study. The definition of
