feat: aiplatform v1beta1

BREAKING CHANGE: sampled_shapley_attribution moved into a oneof in explanation.proto.
BREAKING CHANGE: field min renamed to min_value in ExplanationMetadata.InputMetadata.FeatureValueDomain in explanation_metadata.proto.
BREAKING CHANGE: field max renamed to max_value in ExplanationMetadata.InputMetadata.FeatureValueDomain in explanation_metadata.proto.

PiperOrigin-RevId: 340440278
pull/627/head
Google APIs 4 years ago committed by Copybara-Service
parent 2d93f74422
commit c570f55ea7
  1. 9
      google/cloud/aiplatform/v1beta1/BUILD.bazel
  2. 24
      google/cloud/aiplatform/v1beta1/aiplatform_v1beta1.yaml
  3. 40
      google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
  4. 21
      google/cloud/aiplatform/v1beta1/custom_job.proto
  5. 5
      google/cloud/aiplatform/v1beta1/data_labeling_job.proto
  6. 2
      google/cloud/aiplatform/v1beta1/dataset.proto
  7. 11
      google/cloud/aiplatform/v1beta1/endpoint.proto
  8. 213
      google/cloud/aiplatform/v1beta1/explanation.proto
  9. 303
      google/cloud/aiplatform/v1beta1/explanation_metadata.proto
  10. 11
      google/cloud/aiplatform/v1beta1/machine_resources.proto
  11. 147
      google/cloud/aiplatform/v1beta1/migratable_resource.proto
  12. 270
      google/cloud/aiplatform/v1beta1/migration_service.proto
  13. 229
      google/cloud/aiplatform/v1beta1/model.proto
  14. 2
      google/cloud/aiplatform/v1beta1/operation.proto
  15. 17
      google/cloud/aiplatform/v1beta1/prediction_service.proto
  16. 53
      google/cloud/aiplatform/v1beta1/study.proto
  17. 49
      google/cloud/aiplatform/v1beta1/training_pipeline.proto

@ -43,6 +43,8 @@ proto_library(
"job_state.proto",
"machine_resources.proto",
"manual_batch_tuning_parameters.proto",
"migratable_resource.proto",
"migration_service.proto",
"model.proto",
"model_evaluation.proto",
"model_evaluation_slice.proto",
@ -125,6 +127,7 @@ java_gapic_test(
"com.google.cloud.aiplatform.v1beta1.DatasetServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.EndpointServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.JobServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.MigrationServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.ModelServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.PipelineServiceClientTest",
"com.google.cloud.aiplatform.v1beta1.PredictionServiceClientTest",
@ -176,9 +179,9 @@ go_gapic_library(
service_yaml = "aiplatform_v1beta1.yaml",
deps = [
":aiplatform_go_proto",
"@com_google_cloud_go//longrunning/autogen:go_default_library",
"//google/longrunning:longrunning_go_proto",
"@com_google_cloud_go//longrunning:go_default_library",
"@com_google_cloud_go//longrunning/autogen:go_default_library",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
],
@ -282,10 +285,10 @@ load(
nodejs_gapic_library(
name = "aiplatform_nodejs_gapic",
package_name = "@google-cloud/aiplatform",
src = ":aiplatform_proto_with_info",
grpc_service_config = "aiplatform_grpc_service_config.json",
package = "google.cloud.aiplatform.v1beta1",
package_name = "@google-cloud/aiplatform",
service_yaml = "aiplatform_v1beta1.yaml",
deps = [],
)
@ -368,8 +371,8 @@ csharp_grpc_library(
csharp_gapic_library(
name = "aiplatform_csharp_gapic",
srcs = [":aiplatform_proto_with_info"],
grpc_service_config = "aiplatform_grpc_service_config.json",
common_resources_config = "@gax_dotnet//:Google.Api.Gax/ResourceNames/CommonResourcesConfig.json",
grpc_service_config = "aiplatform_grpc_service_config.json",
deps = [
":aiplatform_csharp_grpc",
":aiplatform_csharp_proto",

@ -7,12 +7,15 @@ apis:
- name: google.cloud.aiplatform.v1beta1.DatasetService
- name: google.cloud.aiplatform.v1beta1.EndpointService
- name: google.cloud.aiplatform.v1beta1.JobService
- name: google.cloud.aiplatform.v1beta1.MigrationService
- name: google.cloud.aiplatform.v1beta1.ModelService
- name: google.cloud.aiplatform.v1beta1.PipelineService
- name: google.cloud.aiplatform.v1beta1.PredictionService
- name: google.cloud.aiplatform.v1beta1.SpecialistPoolService
types:
- name: google.cloud.aiplatform.v1beta1.BatchMigrateResourcesOperationMetadata
- name: google.cloud.aiplatform.v1beta1.BatchMigrateResourcesResponse
- name: google.cloud.aiplatform.v1beta1.CreateDatasetOperationMetadata
- name: google.cloud.aiplatform.v1beta1.CreateEndpointOperationMetadata
- name: google.cloud.aiplatform.v1beta1.CreateSpecialistPoolOperationMetadata
@ -37,11 +40,10 @@ documentation:
Train high-quality custom machine learning models with minimum effort and
machine learning expertise.
overview: |-
Cloud AI Platform is a suite of machine learning tools that enables
developers to train high-quality models specific to their business needs.
It offers both novices and experts the best workbench for machine learning
development by leveraging Google's state-of-the-art transfer learning and
Neural Architecture Search technology.
AI Platform (Unified) enables data scientists, developers, and AI newcomers
to create custom machine learning models specific to their business needs
by leveraging Google's state-of-the-art transfer learning and innovative
AI research.
backend:
rules:
@ -51,6 +53,10 @@ backend:
deadline: 60.0
- selector: 'google.cloud.aiplatform.v1beta1.JobService.*'
deadline: 60.0
- selector: google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources
deadline: 60.0
- selector: google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources
deadline: 60.0
- selector: 'google.cloud.aiplatform.v1beta1.ModelService.*'
deadline: 60.0
- selector: 'google.cloud.aiplatform.v1beta1.PipelineService.*'
@ -78,6 +84,14 @@ authentication:
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: 'google.cloud.aiplatform.v1beta1.ModelService.*'
oauth:
canonical_scopes: |-

@ -19,6 +19,7 @@ package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/completion_stats.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/cloud/aiplatform/v1beta1/job_state.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
@ -193,28 +194,29 @@ message BatchPredictionJob {
// Generate explanation along with the batch prediction results.
//
// This can only be set to true for AutoML tabular Models, and only when the
// output destination is BigQuery. When it's true, the batch prediction
// output will include a column named `feature_attributions`.
//
// For AutoML tabular Models, the value of the `feature_attributions` column
// is a struct that maps from string to number. The keys in the map are the
// names of the features. The values in the map are how much the features
// contribute to the predicted result. Features are defined as follows:
//
// * A scalar column defines a feature of the same name as the column.
//
// * A struct column defines multiple features, one feature per leaf field.
// The feature name is the fully qualified path for the leaf field,
// separated by ".". For example a column `key1` in the format of
// {"value1": {"prop1": number}, "value2": number} defines two features:
// `key1.value1.prop1` and `key1.value2`
//
// Attributions of each feature is represented as an extra column in the
// batch prediction output BigQuery table.
// When it's true, the batch prediction output will change based on the
// [output format][BatchPredictionJob.output_config.predictions_format]:
//
// * `bigquery`: output will include a column named `explanation`. The value
// is a struct that conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// * `jsonl`: The JSON objects on each line will include an additional entry
// keyed `explanation`. The value of the entry is a JSON object that
// conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
// * `csv`: Generating explanations for CSV format is not supported.
bool generate_explanation = 23;
// Explanation configuration for this BatchPredictionJob. Can only be
// specified if [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] is set to `true`. It is invalid to
// specify it with generate_explanation set to false or unset.
//
// This value overrides the value of [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] are optional in the request. If a field of
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] is not populated, the value of the same field of
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is inherited. The corresponding
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] must be populated, otherwise explanation for
// this Model is not allowed.
ExplanationSpec explanation_spec = 25;
// Output only. Information further describing the output of this job.
OutputInfo output_info = 9 [(google.api.field_behavior) = OUTPUT_ONLY];

@ -92,6 +92,24 @@ message CustomJobSpec {
// Scheduling options for a CustomJob.
Scheduling scheduling = 3;
// Specifies the service account for workload run-as account.
// Users submitting jobs must have act-as permission on this run-as account.
string service_account = 4;
// The full name of the Compute Engine
// [network](/compute/docs/networks-and-firewalls#networks) to which the Job
// should be peered. For example, projects/12345/global/networks/myVPC.
//
// [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
// is of the form projects/{project}/global/networks/{network}.
// Where {project} is a project number, as in '12345', and {network} is
// network name.
//
// Private services access must already be configured for the network. If left
// unspecified, the job is not peered with any network.
string network = 5;
// The Google Cloud Storage location to store the output of this CustomJob or
// HyperparameterTuningJob. For HyperparameterTuningJob,
// [base_output_directory][CustomJob.job_spec.base_output_directory] of
@ -134,6 +152,9 @@ message WorkerPoolSpec {
// Required. The number of worker replicas to use for this worker pool.
int64 replica_count = 2 [(google.api.field_behavior) = REQUIRED];
// Disk spec.
DiskSpec disk_spec = 5;
}
// The spec of a Container.

@ -23,6 +23,7 @@ import "google/cloud/aiplatform/v1beta1/job_state.proto";
import "google/cloud/aiplatform/v1beta1/specialist_pool.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
import "google/type/money.proto";
import "google/api/annotations.proto";
@ -104,6 +105,10 @@ message DataLabelingJob {
// Output only. Timestamp when this DataLabelingJob was updated most recently.
google.protobuf.Timestamp update_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. DataLabelingJob errors. It is only populated when job's state is
// `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.
google.rpc.Status error = 22 [(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your DataLabelingJobs.
//
// Label keys and values can be no longer than 64 characters

@ -116,7 +116,7 @@ message ExportDataConfig {
// The Google Cloud Storage location where the output is to be written to.
// In the given directory a new directory will be created with name:
// `export-data-<dataset-display-name>-<timestamp-of-export-call>` where
// timestamp is in YYYYMMDDHHMMSS format. All export
// timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
// output will be written into that directory. Inside that directory,
// annotations with the same schema will be grouped into sub directories
// which are named with the corresponding annotations' schema title. Inside

@ -127,10 +127,17 @@ message DeployedModel {
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is inherited. The corresponding
// [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] must be populated, otherwise explanation for
// this Model is not allowed.
//
// Currently, only AutoML tabular Models support explanation_spec.
ExplanationSpec explanation_spec = 9;
// The service account that the DeployedModel's container runs as. Specify the
// email address of the service account. If this service account is not
// specified, the container runs as a service account that doesn't have access
// to the resource project.
//
// Users deploying the Model must have the `iam.serviceAccounts.actAs`
// permission on this service account.
string service_account = 11;
// If true, the container of the DeployedModel instances will send `stderr`
// and `stdout` streams to Stackdriver Logging.
//

@ -26,10 +26,8 @@ option java_multiple_files = true;
option java_outer_classname = "ExplanationProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// Explanation of a [prediction][ExplainResponse.predictions] produced by the
// Model on a given [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
//
// Currently, only AutoML tabular Models support explanation.
// Explanation of a prediction (provided in [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
// produced by the Model on a given [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
message Explanation {
// Output only. Feature attributions grouped by predicted outputs.
//
@ -40,12 +38,15 @@ message Explanation {
// specific item. [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which
// output this attribution is explaining.
//
// If users set [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], the attributions are sorted
// by [instance_output_value][Attributions.instance_output_value] in
// descending order. If [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] is specified,
// the attributions are stored by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] in the same
// order as they appear in the output_indices.
repeated Attribution attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Aggregated explanation metrics for a Model over a set of instances.
//
// Currently, only AutoML tabular Models support aggregated explanation.
message ModelExplanation {
// Output only. Aggregated attributions explaining the Model's prediction outputs over the
// set of instances. The attributions are grouped by outputs.
@ -75,8 +76,8 @@ message Attribution {
// The field name of the output is determined by the key in
// [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
//
// If the Model predicted output is a tensor value (for example, an ndarray),
// this is the value in the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
// If the Model's predicted output has multiple dimensions (rank > 1), this is
// the value in the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
//
// If there are multiple baselines, their output values are averaged.
double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
@ -85,13 +86,13 @@ message Attribution {
// instance][ExplainRequest.instances]. The field name of the output is
// determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
//
// If the Model predicted output is a tensor value (for example, an ndarray),
// this is the value in the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
// If the Model predicted output has multiple dimensions, this is the value in
// the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Attributions of each explained feature. Features are extracted from
// the [prediction instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] according to
// [explanation input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
// [explanation metadata for inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
//
// The value is a struct, whose keys are the name of the feature. The values
// are how much the feature in the [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
@ -120,10 +121,10 @@ message Attribution {
// Output only. The index that locates the explained prediction output.
//
// If the prediction output is a scalar value, output_index is not populated.
// If the prediction output is a tensor value (for example, an ndarray),
// the length of output_index is the same as the number of dimensions of the
// output. The i-th element in output_index is the element index of the i-th
// dimension of the output vector. Indexes start from 0.
// If the prediction output has multiple dimensions, the length of the
// output_index list is the same as the number of dimensions of the output.
// The i-th element in output_index is the element index of the i-th dimension
// of the output vector. Indices start from 0.
repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index], e.g. the
@ -138,17 +139,30 @@ message Attribution {
// Output only. Error of [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] caused by approximation used in the
// explanation method. Lower value means more precise attributions.
//
// For Sampled Shapley
// [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
// increasing [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] might reduce
// the error.
// * For [Sampled Shapley
// attribution][ExplanationParameters.sampled_shapley_attribution], increasing
// [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] may reduce the error.
// * For [Integrated Gradients
// attribution][ExplanationParameters.integrated_gradients_attribution],
// increasing [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] may
// reduce the error.
// * For [XRAI
// attribution][ExplanationParameters.xrai_attribution], increasing
// [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] may reduce the error.
//
// Refer to the AI Explanations Whitepaper for more details:
// https://storage.googleapis.com/cloud-ai-whitepapers/AI%20Explainability%20Whitepaper.pdf
double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Name of the explain output. Specified as the key in
// [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Specification of Model explanation.
//
// Currently, only AutoML tabular Models support explanation.
message ExplanationSpec {
// Required. Parameters that configure explaining of the Model's predictions.
ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];
@ -159,10 +173,48 @@ message ExplanationSpec {
// Parameters to configure explaining for Model's predictions.
message ExplanationParameters {
// An attribution method that approximates Shapley values for features that
// contribute to the label being predicted. A sampling strategy is used to
// approximate the value rather than considering all subsets of features.
SampledShapleyAttribution sampled_shapley_attribution = 1;
oneof method {
// An attribution method that approximates Shapley values for features that
// contribute to the label being predicted. A sampling strategy is used to
// approximate the value rather than considering all subsets of features.
// Refer to this paper for model details: https://arxiv.org/abs/1306.4265.
SampledShapleyAttribution sampled_shapley_attribution = 1;
// An attribution method that computes Aumann-Shapley values taking
// advantage of the model's fully differentiable structure. Refer to this
// paper for more details: https://arxiv.org/abs/1703.01365
IntegratedGradientsAttribution integrated_gradients_attribution = 2;
// An attribution method that redistributes Integrated Gradients
// attribution to segmented regions, taking advantage of the model's fully
// differentiable structure. Refer to this paper for
// more details: https://arxiv.org/abs/1906.02825
//
// XRAI currently performs better on natural images, like a picture of a
// house or an animal. If the images are taken in artificial environments,
// like a lab or manufacturing line, or from diagnostic equipment, like
// x-rays or quality-control cameras, use Integrated Gradients instead.
XraiAttribution xrai_attribution = 3;
}
// If populated, returns attributions for the top K indices of outputs
// (defaults to 1). Only applies to Models that predict more than one output
// (e.g., multi-class Models). When set to -1, returns explanations for all
// outputs.
int32 top_k = 4;
// If populated, only returns attributions that have
// [output_index][Attributions.output_index] contained in output_indices. It
// must be an ndarray of integers, with the same shape of the output it's
// explaining.
//
// If not populated, returns attributions for [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] indices of outputs.
// If neither top_k nor output_indices is populated, returns the argmax
// index of the outputs.
//
// Only applicable to Models that predict multiple outputs (e.g., multi-class
// Models that predict multiple classes).
google.protobuf.ListValue output_indices = 5;
}
// An attribution method that approximates Shapley values for features that
@ -175,3 +227,114 @@ message SampledShapleyAttribution {
// Valid range of its value is [1, 50], inclusively.
int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
}
// An attribution method that computes the Aumann-Shapley value, taking
// advantage of the model's fully differentiable structure. Refer to this
// paper for more details: https://arxiv.org/abs/1703.01365
message IntegratedGradientsAttribution {
// Required. The number of steps for approximating the path integral.
// A good value to start with is 50; gradually increase it until the
// sum-to-diff property is within the desired error range.
//
// Valid range of its value is [1, 100], inclusive.
int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
// Config for SmoothGrad approximation of gradients.
//
// When enabled, the gradients are approximated by averaging the gradients
// from noisy samples in the vicinity of the inputs. Adding
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;
}
// An explanation method that redistributes Integrated Gradients
// attributions to segmented regions, taking advantage of the model's fully
// differentiable structure. Refer to this paper for more details:
// https://arxiv.org/abs/1906.02825
//
// Only supports image Models ([modality][InputMetadata.modality] is IMAGE).
message XraiAttribution {
// Required. The number of steps for approximating the path integral.
// A good value to start with is 50; gradually increase it until the
// sum-to-diff property is met within the desired error range.
//
// Valid range of its value is [1, 100], inclusive.
int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
// Config for SmoothGrad approximation of gradients.
//
// When enabled, the gradients are approximated by averaging the gradients
// from noisy samples in the vicinity of the inputs. Adding
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;
}
// Config for SmoothGrad approximation of gradients.
//
// When enabled, the gradients are approximated by averaging the gradients from
// noisy samples in the vicinity of the inputs. Adding noise can help improve
// the computed gradients. Refer to this paper for more details:
// https://arxiv.org/pdf/1706.03825.pdf
message SmoothGradConfig {
// Represents the standard deviation of the Gaussian kernel
// that will be used to add noise to the interpolated inputs
// prior to computing gradients. At most one of the two fields in this
// oneof may be set.
oneof GradientNoiseSigma {
// This is a single float value and will be used to add noise to all the
// features. Use this field when all features are normalized to have the
// same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
// features are normalized to have 0-mean and 1-variance. Refer to
// this doc for more details about normalization:
// https://developers.google.com/machine-learning/data-prep/transform/normalization
//
// For best results the recommended value is about 10% - 20% of the standard
// deviation of the input feature. Refer to section 3.2 of the SmoothGrad
// paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
//
// If the distribution is different per feature, set
// [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] instead
// for each feature.
float noise_sigma = 1;
// This is similar to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], but
// provides additional flexibility. A separate noise sigma can be provided
// for each feature, which is useful if their distributions are different.
// No noise is added to features that are not set. If this field is unset,
// [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] will be used for all
// features.
FeatureNoiseSigma feature_noise_sigma = 2;
}
// The number of gradient samples to use for
// approximation. The higher this number, the more accurate the gradient
// is, but the runtime complexity increases by this factor as well.
// Valid range of its value is [1, 50]. Defaults to 3.
int32 noisy_sample_count = 3;
}
// Noise sigma by features. Noise sigma represents the standard deviation of
// the Gaussian kernel that will be used to add noise to interpolated inputs
// prior to computing gradients.
message FeatureNoiseSigma {
// Noise sigma for a single feature.
message NoiseSigmaForFeature {
// The name of the input feature for which noise sigma is provided. The
// features are defined in
// [explanation metadata inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
string name = 1;
// This represents the standard deviation of the Gaussian kernel that will
// be used to add noise to the feature prior to computing gradients. Similar
// to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] but represents the
// noise added to the current feature. Defaults to 0.1.
float sigma = 2;
}
// Noise sigma per feature. No noise is added to features that are not set.
repeated NoiseSigmaForFeature noise_sigma = 1;
}

@ -28,7 +28,219 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
// Metadata describing the Model's input and output for explanation.
message ExplanationMetadata {
// Metadata of the input of a feature.
//
// Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
// for Models that are using AI Platform-provided images for Tensorflow.
message InputMetadata {
// Domain details of the input feature value. Provides numeric information
// about the feature, such as its range (min, max). If the feature has been
// pre-processed, for example with z-scoring, then it provides information
// about how to recover the original feature value. For example, if the input
// feature is an image that has been pre-processed to obtain 0-mean and
// stddev = 1 values, then original_mean and original_stddev refer to the
// mean and stddev of the original feature (e.g. image tensor) from which the
// input feature (with mean = 0 and stddev = 1) was obtained.
message FeatureValueDomain {
// The minimum permissible value for this feature.
float min_value = 1;
// The maximum permissible value for this feature.
float max_value = 2;
// If this input feature has been normalized to a mean value of 0,
// the original_mean specifies the mean value of the domain prior to
// normalization.
float original_mean = 3;
// If this input feature has been normalized to a standard deviation of
// 1.0, the original_stddev specifies the standard deviation of the domain
// prior to normalization.
float original_stddev = 4;
}
// Visualization configurations for image explanation.
message Visualization {
// Type of the image visualization. Only applicable to [Integrated
// Gradients attribution]
// [ExplanationParameters.integrated_gradients_attribution].
enum Type {
// Should not be used.
TYPE_UNSPECIFIED = 0;
// Shows which pixels contributed to the image prediction.
PIXELS = 1;
// Shows which regions contributed to the image prediction by outlining
// each region.
OUTLINES = 2;
}
// Whether to only highlight pixels with positive contributions, negative
// contributions, or both. Defaults to POSITIVE.
enum Polarity {
// Default value. This is the same as POSITIVE.
POLARITY_UNSPECIFIED = 0;
// Highlights the pixels/outlines that were most influential to the
// model's prediction.
POSITIVE = 1;
// Setting polarity to negative highlights areas that do not lead to
// the model's current prediction.
NEGATIVE = 2;
// Shows both positive and negative attributions.
BOTH = 3;
}
// The color scheme used for highlighting areas.
enum ColorMap {
// Should not be used.
COLOR_MAP_UNSPECIFIED = 0;
// Positive: green. Negative: pink.
PINK_GREEN = 1;
// Viridis color map: a perceptually uniform color mapping that is
// easier to see by those with colorblindness and progresses from yellow
// to green to blue. Positive: yellow. Negative: blue.
VIRIDIS = 2;
// Positive: red. Negative: red.
RED = 3;
// Positive: green. Negative: green.
GREEN = 4;
// Positive: green. Negative: red.
RED_GREEN = 6;
// PiYG palette.
PINK_WHITE_GREEN = 5;
}
// How the original image is displayed in the visualization.
enum OverlayType {
// Default value. This is the same as NONE.
OVERLAY_TYPE_UNSPECIFIED = 0;
// No overlay.
NONE = 1;
// The attributions are shown on top of the original image.
ORIGINAL = 2;
// The attributions are shown on top of a grayscaled version of the
// original image.
GRAYSCALE = 3;
// The attributions are used as a mask to reveal predictive parts of
// the image and hide the un-predictive parts.
MASK_BLACK = 4;
}
// Type of the image visualization. Only applicable to [Integrated
// Gradients attribution]
// [ExplanationParameters.integrated_gradients_attribution]. OUTLINES
// shows regions of attribution, while PIXELS shows per-pixel attribution.
// Defaults to OUTLINES.
Type type = 1;
// Whether to only highlight pixels with positive contributions, negative
// contributions, or both. Defaults to POSITIVE.
Polarity polarity = 2;
// The color scheme used for the highlighted areas.
//
// Defaults to PINK_GREEN for [Integrated Gradients
// attribution][ExplanationParameters.integrated_gradients_attribution],
// which shows positive attributions in green and negative in pink.
//
// Defaults to VIRIDIS for
// [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], which
// highlights the most influential regions in yellow and the least
// influential in blue.
ColorMap color_map = 3;
// Excludes attributions above the specified percentile from the
// highlighted areas. Using clip_percent_upperbound and
// clip_percent_lowerbound together can be useful for filtering out noise
// and making it easier to see areas of strong attribution. Defaults to
// 99.9.
float clip_percent_upperbound = 4;
// Excludes attributions below the specified percentile from the
// highlighted areas. Defaults to 35.
float clip_percent_lowerbound = 5;
// How the original image is displayed in the visualization.
// Adjusting the overlay can help increase visual clarity if the original
// image makes it difficult to view the visualization. Defaults to NONE.
OverlayType overlay_type = 6;
}
// Defines how the feature is encoded to [encoded_tensor][]. Defaults to
// IDENTITY.
enum Encoding {
// Default value. This is the same as IDENTITY.
ENCODING_UNSPECIFIED = 0;
// The tensor represents one feature.
IDENTITY = 1;
// The tensor represents a bag of features where each index maps to
// a feature. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided for
// this encoding. For example:
// ```
// input = [27, 6.0, 150]
// index_feature_mapping = ["age", "height", "weight"]
// ```
BAG_OF_FEATURES = 2;
// The tensor represents a bag of features where each index maps to a
// feature. Zero values in the tensor indicate that the corresponding
// feature is non-existent. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided
// for this encoding. For example:
// ```
// input = [2, 0, 5, 0, 1]
// index_feature_mapping = ["a", "b", "c", "d", "e"]
// ```
BAG_OF_FEATURES_SPARSE = 3;
// The tensor is a list of binary values representing whether a feature
// exists or not (1 indicates existence). [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
// must be provided for this encoding. For example:
// ```
// input = [1, 0, 1, 0, 1]
// index_feature_mapping = ["a", "b", "c", "d", "e"]
// ```
INDICATOR = 4;
// The tensor is encoded into a 1-dimensional array represented by an
// encoded tensor. [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided
// for this encoding. For example:
// ```
// input = ["This", "is", "a", "test", "."]
// encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
// ```
COMBINED_EMBEDDING = 5;
// Select this encoding when the input tensor is encoded into a
// 2-dimensional array represented by an encoded tensor.
// [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided for this
// encoding. The first dimension of the encoded tensor's shape is the same
// as the input tensor's shape. For example:
// ```
// input = ["This", "is", "a", "test", "."]
// encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
// [0.2, 0.1, 0.4, 0.3, 0.5],
// [0.5, 0.1, 0.3, 0.5, 0.4],
// [0.5, 0.3, 0.1, 0.2, 0.4],
// [0.4, 0.3, 0.2, 0.5, 0.1]]
// ```
CONCAT_EMBEDDING = 6;
}
// Baseline inputs for this feature.
//
// If no baseline is specified, AI Platform chooses the baseline for this
@ -36,13 +248,78 @@ message ExplanationMetadata {
// average attributions across them in
// [Attributions.baseline_attribution][].
//
// The element of the baselines must be in the same format as the feature's
// input in the [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. The schema of any
// single instance may be specified via Endpoint's DeployedModels'
// For AI Platform provided Tensorflow images (both 1.x and 2.x), the shape
// of each baseline must match the shape of the input tensor. If a scalar is
// provided, we broadcast to the same shape as the input tensor.
//
// For custom images, the element of the baselines must be in the same
// format as the feature's input in the
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. The schema of any single instance
// may be specified via Endpoint's DeployedModels'
// [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
// [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
// [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
repeated google.protobuf.Value input_baselines = 1;
// Name of the input tensor for this feature. Required and is only
// applicable to AI Platform provided images for Tensorflow.
string input_tensor_name = 2;
// Defines how the feature is encoded into the input tensor. Defaults to
// IDENTITY.
Encoding encoding = 3;
// Modality of the feature. Valid values are: numeric, image. Defaults to
// numeric.
string modality = 4;
// The domain details of the input feature value. Like min/max, original
// mean or standard deviation if normalized.
FeatureValueDomain feature_value_domain = 5;
// Specifies the index of the values of the input tensor.
// Required when the input tensor is a sparse representation. Refer to
// Tensorflow documentation for more details:
// https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
string indices_tensor_name = 6;
// Specifies the shape of the values of the input if the input is a sparse
// representation. Refer to Tensorflow documentation for more details:
// https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
string dense_shape_tensor_name = 7;
// A list of feature names for each index in the input tensor.
// Required when the input [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES,
// BAG_OF_FEATURES_SPARSE, INDICATOR.
repeated string index_feature_mapping = 8;
// Encoded tensor is a transformation of the input tensor. Must be provided
// if choosing [Integrated Gradients
// attribution][ExplanationParameters.integrated_gradients_attribution] or
// [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
// and the input tensor is not differentiable.
//
// An encoded tensor is generated if the input tensor is encoded by a lookup
// table.
string encoded_tensor_name = 9;
// A list of baselines for the encoded tensor.
//
// The shape of each baseline should match the shape of the encoded tensor.
// If a scalar is provided, AI Platform broadcasts it to the same shape as the
// encoded tensor.
repeated google.protobuf.Value encoded_baselines = 10;
// Visualization configurations for image explanation.
Visualization visualization = 11;
// Name of the group that the input belongs to. Features with the same group
// name will be treated as one feature when computing attributions. Features
// grouped together can have different shapes in value. If provided, there
// will be one single attribution generated in [
// featureAttributions][Attribution.feature_attributions], keyed by the
// group name.
string group_name = 12;
}
// Metadata of the prediction output to be explained.
@ -65,7 +342,6 @@ message ExplanationMetadata {
// number of dimensions must match that of the outputs to be explained.
// The [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] is populated by locating in the
// mapping with [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
//
google.protobuf.Value index_display_name_mapping = 1;
// Specify a field name in the prediction to look for the display name.
@ -77,6 +353,10 @@ message ExplanationMetadata {
// a specific output.
string display_name_mapping_key = 2;
}
// Name of the output tensor. Required and is only applicable to AI
// Platform provided images for Tensorflow.
string output_tensor_name = 3;
}
// Required. Map from feature names to feature input metadata. Keys are the name of the
@ -86,13 +366,24 @@ message ExplanationMetadata {
// name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline
// of the empty feature is chosen by AI Platform.
//
// For AI Platform provided Tensorflow images, the key can be any friendly
// name of the feature. Once specified, [
// featureAttributions][Attribution.feature_attributions] will be keyed by
// this key (if not grouped with another feature).
//
// For custom images, the key must match with the key in
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
map<string, InputMetadata> inputs = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Map from output names to output metadata.
//
// Keys are the name of the output field in the prediction to be explained.
// Currently only one key is allowed.
// For AI Platform provided Tensorflow images, keys can be any string user
// defines.
//
// For custom images, keys are the name of the output field in the prediction
// to be explained.
//
// Currently only one key is allowed.
map<string, OutputMetadata> outputs = 2 [(google.api.field_behavior) = REQUIRED];
// Points to a YAML file stored on Google Cloud Storage describing the format

@ -154,3 +154,14 @@ message ResourcesConsumed {
// Therefore this value is not strictly related to wall time.
double replica_hours = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Represents the spec of disk options (boot disk type and size).
message DiskSpec {
// Type of the boot disk (default is "pd-standard").
// Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
// "pd-standard" (Persistent Disk Hard Disk Drive).
string boot_disk_type = 1;
// Size in GB of the boot disk (default is 100GB).
int32 boot_disk_size_gb = 2;
}

@ -0,0 +1,147 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "MigratableResourceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option (google.api.resource_definition) = {
type: "ml.googleapis.com/Version"
pattern: "projects/{project}/models/{model}/versions/{version}"
};
option (google.api.resource_definition) = {
type: "automl.googleapis.com/Model"
pattern: "projects/{project}/locations/{location}/models/{model}"
};
option (google.api.resource_definition) = {
type: "automl.googleapis.com/Dataset"
pattern: "projects/{project}/locations/{location}/datasets/{dataset}"
};
option (google.api.resource_definition) = {
type: "datalabeling.googleapis.com/Dataset"
pattern: "projects/{project}/datasets/{dataset}"
};
option (google.api.resource_definition) = {
type: "datalabeling.googleapis.com/AnnotatedDataset"
pattern: "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}"
};
// Represents one resource that exists in automl.googleapis.com,
// datalabeling.googleapis.com or ml.googleapis.com.
// Exactly one of the `resource` oneof fields is set, identifying which
// legacy service the resource lives in.
message MigratableResource {
// Represents one model Version in ml.googleapis.com.
message MlEngineModelVersion {
// The ml.googleapis.com endpoint that this model Version currently lives
// in.
// Example values:
// * ml.googleapis.com
// * us-central1-ml.googleapis.com
// * europe-west4-ml.googleapis.com
// * asia-east1-ml.googleapis.com
string endpoint = 1;
// Full resource name of ml engine model Version.
// Format: `projects/{project}/models/{model}/versions/{version}`.
string version = 2 [(google.api.resource_reference) = {
type: "ml.googleapis.com/Version"
}];
}
// Represents one Model in automl.googleapis.com.
message AutomlModel {
// Full resource name of automl Model.
// Format:
// `projects/{project}/locations/{location}/models/{model}`.
string model = 1 [(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}];
// The Model's display name in automl.googleapis.com.
// NOTE(review): field number 2 is skipped here — confirm it was reserved
// or removed intentionally.
string model_display_name = 3;
}
// Represents one Dataset in automl.googleapis.com.
message AutomlDataset {
// Full resource name of automl Dataset.
// Format:
// `projects/{project}/locations/{location}/datasets/{dataset}`.
string dataset = 1 [(google.api.resource_reference) = {
type: "automl.googleapis.com/Dataset"
}];
// The Dataset's display name in automl.googleapis.com.
// NOTE(review): field numbers 2-3 are skipped here — confirm they were
// reserved or removed intentionally.
string dataset_display_name = 4;
}
// Represents one Dataset in datalabeling.googleapis.com.
message DataLabelingDataset {
// Represents one AnnotatedDataset in datalabeling.googleapis.com.
message DataLabelingAnnotatedDataset {
// Full resource name of data labeling AnnotatedDataset.
// Format:
//
// `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`.
string annotated_dataset = 1 [(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/AnnotatedDataset"
}];
// The AnnotatedDataset's display name in datalabeling.googleapis.com.
string annotated_dataset_display_name = 3;
}
// Full resource name of data labeling Dataset.
// Format:
// `projects/{project}/datasets/{dataset}`.
string dataset = 1 [(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/Dataset"
}];
// The Dataset's display name in datalabeling.googleapis.com.
string dataset_display_name = 4;
// The migratable AnnotatedDatasets in datalabeling.googleapis.com that
// belong to the data labeling Dataset.
repeated DataLabelingAnnotatedDataset data_labeling_annotated_datasets = 3;
}
// The legacy resource this MigratableResource represents.
oneof resource {
// Output only. Represents one Version in ml.googleapis.com.
MlEngineModelVersion ml_engine_model_version = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Model in automl.googleapis.com.
AutomlModel automl_model = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Dataset in automl.googleapis.com.
AutomlDataset automl_dataset = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Represents one Dataset in datalabeling.googleapis.com.
DataLabelingDataset data_labeling_dataset = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Output only. Timestamp when the last migrate attempt on this MigratableResource started.
// Will not be set if there's no migrate attempt on this MigratableResource.
google.protobuf.Timestamp last_migrate_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this MigratableResource was last updated.
google.protobuf.Timestamp last_update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -0,0 +1,270 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.aiplatform.v1beta1;
import "google/cloud/aiplatform/v1beta1/dataset.proto";
import "google/cloud/aiplatform/v1beta1/model.proto";
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/migratable_resource.proto";
import "google/cloud/aiplatform/v1beta1/operation.proto";
import "google/longrunning/operations.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "MigrationServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service that migrates resources from automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com to AI Platform.
service MigrationService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
// Searches all of the resources in automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com that can be migrated to
// AI Platform's given location. Results are paginated via
// `page_size`/`page_token` on the request.
rpc SearchMigratableResources(SearchMigratableResourcesRequest) returns (SearchMigratableResourcesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:search"
body: "*"
};
option (google.api.method_signature) = "parent";
}
// Batch migrates resources from ml.googleapis.com, automl.googleapis.com,
// and datalabeling.googleapis.com to AI Platform (Unified).
// Returns a long-running operation whose response is
// `BatchMigrateResourcesResponse` and whose metadata is
// `BatchMigrateResourcesOperationMetadata`.
rpc BatchMigrateResources(BatchMigrateResourcesRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:batchMigrate"
body: "*"
};
option (google.api.method_signature) = "parent,migrate_resource_requests";
option (google.longrunning.operation_info) = {
response_type: "BatchMigrateResourcesResponse"
metadata_type: "BatchMigrateResourcesOperationMetadata"
};
}
}
// Request message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
message SearchMigratableResourcesRequest {
// Required. The location that the migratable resources should be searched from.
// It's the AI Platform location that the resources can be migrated to, not
// the resources' original location.
// Format:
// `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "locations.googleapis.com/Location"
}
];
// The standard page size.
// The default and maximum value is 100.
int32 page_size = 2;
// The standard page token, obtained from a previous
// SearchMigratableResourcesResponse.next_page_token.
string page_token = 3;
}
// Response message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
message SearchMigratableResourcesResponse {
// All migratable resources that can be migrated to the
// location specified in the request.
repeated MigratableResource migratable_resources = 1;
// The standard next-page token.
// The migratable_resources may not fill page_size in
// SearchMigratableResourcesRequest even when there are subsequent pages.
string next_page_token = 2;
}
// Request message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesRequest {
// Required. The location where the migrated resources will live.
// Format: `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "locations.googleapis.com/Location"
}
];
// Required. The request messages specifying the resources to migrate.
// They must be in the same location as the destination.
// Up to 50 resources can be migrated in one batch.
repeated MigrateResourceRequest migrate_resource_requests = 2 [(google.api.field_behavior) = REQUIRED];
}
// Config of migrating one resource from automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com to AI Platform.
// Exactly one of the `request` oneof configs must be set.
message MigrateResourceRequest {
// Config for migrating version in ml.googleapis.com to AI Platform's Model.
message MigrateMlEngineModelVersionConfig {
// Required. The ml.googleapis.com endpoint that this model version should be migrated
// from.
// Example values:
//
// * ml.googleapis.com
//
// * us-central1-ml.googleapis.com
//
// * europe-west4-ml.googleapis.com
//
// * asia-east1-ml.googleapis.com
string endpoint = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Full resource name of ml engine model version.
// Format: `projects/{project}/models/{model}/versions/{version}`.
string model_version = 2 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "ml.googleapis.com/Version"
}
];
// Required. Display name of the model in AI Platform.
// System will pick a display name if unspecified.
// NOTE(review): marked REQUIRED yet described as defaulted when
// unspecified — confirm which behavior is intended.
string model_display_name = 3 [(google.api.field_behavior) = REQUIRED];
}
// Config for migrating Model in automl.googleapis.com to AI Platform's Model.
message MigrateAutomlModelConfig {
// Required. Full resource name of automl Model.
// Format:
// `projects/{project}/locations/{location}/models/{model}`.
string model = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}
];
// Optional. Display name of the model in AI Platform.
// System will pick a display name if unspecified.
string model_display_name = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Config for migrating Dataset in automl.googleapis.com to AI Platform's
// Dataset.
message MigrateAutomlDatasetConfig {
// Required. Full resource name of automl Dataset.
// Format:
// `projects/{project}/locations/{location}/datasets/{dataset}`.
string dataset = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "automl.googleapis.com/Dataset"
}
];
// Required. Display name of the Dataset in AI Platform.
// System will pick a display name if unspecified.
// NOTE(review): marked REQUIRED yet described as defaulted when
// unspecified — confirm which behavior is intended.
string dataset_display_name = 2 [(google.api.field_behavior) = REQUIRED];
}
// Config for migrating Dataset in datalabeling.googleapis.com to AI
// Platform's Dataset.
message MigrateDataLabelingDatasetConfig {
// Config for migrating AnnotatedDataset in datalabeling.googleapis.com to
// AI Platform's SavedQuery.
message MigrateDataLabelingAnnotatedDatasetConfig {
// Required. Full resource name of data labeling AnnotatedDataset.
// Format:
//
// `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`.
string annotated_dataset = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/AnnotatedDataset"
}
];
}
// Required. Full resource name of data labeling Dataset.
// Format:
// `projects/{project}/datasets/{dataset}`.
string dataset = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "datalabeling.googleapis.com/Dataset"
}
];
// Optional. Display name of the Dataset in AI Platform.
// System will pick a display name if unspecified.
string dataset_display_name = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to
// AI Platform's SavedQuery. The specified AnnotatedDatasets have to belong
// to the datalabeling Dataset.
repeated MigrateDataLabelingAnnotatedDatasetConfig migrate_data_labeling_annotated_dataset_configs = 3 [(google.api.field_behavior) = OPTIONAL];
}
// The config for the single resource being migrated.
oneof request {
// Config for migrating Version in ml.googleapis.com to AI Platform's Model.
MigrateMlEngineModelVersionConfig migrate_ml_engine_model_version_config = 1;
// Config for migrating Model in automl.googleapis.com to AI Platform's
// Model.
MigrateAutomlModelConfig migrate_automl_model_config = 2;
// Config for migrating Dataset in automl.googleapis.com to AI Platform's
// Dataset.
MigrateAutomlDatasetConfig migrate_automl_dataset_config = 3;
// Config for migrating Dataset in datalabeling.googleapis.com to
// AI Platform's Dataset.
MigrateDataLabelingDatasetConfig migrate_data_labeling_dataset_config = 4;
}
}
// Response message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesResponse {
// Successfully migrated resources, one entry per migrated resource.
repeated MigrateResourceResponse migrate_resource_responses = 1;
}
// Describes a successfully migrated resource.
message MigrateResourceResponse {
// After migration, the resource name in AI Platform; which field is set
// depends on whether a Dataset or a Model was migrated.
oneof migrated_resource {
// Migrated Dataset's resource name.
string dataset = 1 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Dataset"
}];
// Migrated Model's resource name.
string model = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
}
// Before migration, the identifier in ml.googleapis.com,
// automl.googleapis.com or datalabeling.googleapis.com.
MigratableResource migratable_resource = 3;
}
// Runtime operation information for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
message BatchMigrateResourcesOperationMetadata {
// The common part of the operation metadata (e.g. create/update times and
// partial failures).
GenericOperationMetadata generic_metadata = 1;
}

@ -47,12 +47,13 @@ message Model {
EXPORTABLE_CONTENT_UNSPECIFIED = 0;
// Model artifact and any of its supported files. Will be exported to the
// specified [ExportModelRequest.output_config.artifact_destination]
// location specified by the `artifactDestination` field of the
// [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
ARTIFACT = 1;
// The container image that is to be used when deploying this Model. Will
// be exported to the specified
// [ExportModelRequest.output_config.image_destination]
// be exported to the location specified by the `imageDestination` field
// of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
IMAGE = 2;
}
@ -131,7 +132,7 @@ message Model {
google.protobuf.Value metadata = 6 [(google.api.field_behavior) = IMMUTABLE];
// Output only. The formats in which this Model may be exported. If empty, this Model is
// not avaiable for export.
// not available for export.
repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
@ -316,57 +317,209 @@ message PredictSchemata {
string prediction_schema_uri = 3 [(google.api.field_behavior) = IMMUTABLE];
}
// Specification of the container to be deployed for this Model.
// The ModelContainerSpec is based on the Kubernetes Container
// [specification](https://tinyurl.com/k8s-io-api/v1.10/#container-v1-core).
// Specification of a container for serving predictions. This message is a
// subset of the Kubernetes Container v1 core
// [specification](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
message ModelContainerSpec {
// Required. Immutable. The URI of the Model serving container file in the Container Registry. The
// container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
// Required. Immutable. URI of the Docker image to be used as the custom container for serving
// predictions. This URI must identify an image in Artifact Registry or
// Container Registry. Learn more about the container publishing
// requirements, including permissions requirements for the AI Platform
// Service Agent,
// [here](https://tinyurl.com/cust-cont-reqs#publishing).
//
// The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
// internally, and this original path is afterwards not used.
//
// To learn about the requirements for the Docker image itself, see
// [Custom container requirements](https://tinyurl.com/cust-cont-reqs).
string image_uri = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.field_behavior) = IMMUTABLE
];
// Immutable. The command with which the container is run. Not executed within a shell.
// The Docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's
// environment. If a variable cannot be resolved, the reference in the input
// string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
// double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// More info: https://tinyurl.com/y42hmlxe
// Immutable. Specifies the command that runs when the container starts. This overrides
// the container's
// [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
// Specify this field as an array of executable and arguments, similar to a
// Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
//
// If you do not specify this field, then the container's `ENTRYPOINT` runs,
// in conjunction with the [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the
// container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
// if either exists. If this field is not specified and the container does not
// have an `ENTRYPOINT`, then refer to the Docker documentation about how
// `CMD` and `ENTRYPOINT`
// [interact](https://tinyurl.com/h3kdcgs).
//
// If you specify this field, then you can also specify the `args` field to
// provide additional arguments for this command. However, if you specify this
// field, then the container's `CMD` is ignored. See the
// [Kubernetes documentation](https://tinyurl.com/y8bvllf4) about how the
// `command` and `args` fields interact with a container's `ENTRYPOINT` and
// `CMD`.
//
// In this field, you can reference environment variables
// [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
// parentheses. If a variable cannot be resolved, the reference in the input
// string is used unchanged. To avoid variable expansion, you can escape this
// syntax with `$$`; for example:
// <code>$$(<var>VARIABLE_NAME</var>)</code>
// This field corresponds to the `command` field of the Kubernetes Containers
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated string command = 2 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. The arguments to the command.
// The Docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's
// environment. If a variable cannot be resolved, the reference in the input
// string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
// double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// More info: https://tinyurl.com/y42hmlxe
// Immutable. Specifies arguments for the command that runs when the container starts.
// This overrides the container's
// [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
// this field as an array of executable and arguments, similar to a Docker
// `CMD`'s "default parameters" form.
//
// If you don't specify this field but do specify the
// [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the
// `command` field runs without any additional arguments. See the
// [Kubernetes documentation](https://tinyurl.com/y8bvllf4) about how the
// `command` and `args` fields interact with a container's `ENTRYPOINT` and
// `CMD`.
//
// If you don't specify this field and don't specify the `command` field,
// then the container's
// [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#entrypoint) and
// `CMD` determine what runs based on their default behavior. See the Docker
// documentation about how `CMD` and `ENTRYPOINT`
// [interact](https://tinyurl.com/h3kdcgs).
//
// In this field, you can reference environment variables
// [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
// parentheses. If a variable cannot be resolved, the reference in the input
// string is used unchanged. To avoid variable expansion, you can escape this
// syntax with `$$`; for example:
// <code>$$(<var>VARIABLE_NAME</var>)</code>
// This field corresponds to the `args` field of the Kubernetes Containers
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated string args = 3 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. List of environment variables to set in the container. After the container
// starts running, code running in the container can read these environment
// variables.
//
// Additionally, the [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
// [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later
// entries in this list can also reference earlier entries. For example, the
// following example sets the variable `VAR_2` to have the value `foo bar`:
//
// ```json
// [
// {
// "name": "VAR_1",
// "value": "foo"
// },
// {
// "name": "VAR_2",
// "value": "$(VAR_1) bar"
// }
// ]
// ```
//
// If you switch the order of the variables in the example, then the expansion
// does not occur.
//
// This field corresponds to the `env` field of the Kubernetes Containers
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. List of ports to expose from the container. AI Platform sends any
// prediction requests that it receives to the first port on this list. AI
// Platform also sends
// [liveness and health checks](https://tinyurl.com/cust-cont-reqs#health)
// to this port.
//
// If you do not specify this field, it defaults to the following value:
//
// ```json
// [
// {
// "containerPort": 8080
// }
// ]
// ```
//
// AI Platform does not use ports other than the first one listed. This field
// corresponds to the `ports` field of the Kubernetes Containers
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. HTTP path on the container to send prediction requests to. AI Platform
// forwards requests sent using
// [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
// path on the container's IP address and port. AI Platform then returns the
// container's response in the API response.
//
// For example, if you set this field to `/foo`, then when AI Platform
// receives a prediction request, it forwards the request body in a POST
// request to the following URL on the container:
// <code>localhost:<var>PORT</var>/foo</code>
// <var>PORT</var> refers to the first value of this `ModelContainerSpec`'s
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
//   [Endpoint.name][] field of the Endpoint where this Model has been
// deployed. (AI Platform makes this value available to your container code
// as the
// [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (AI Platform makes this value available to your container code
// as the [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://tinyurl.com/cust-cont-reqs#aip-variables).)
string predict_route = 6 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. HTTP path on the container to send health checks to. AI Platform
// intermittently sends GET requests to this path on the container's IP
// address and port to check that the container is healthy. Read more about
// [health
// checks](https://tinyurl.com/cust-cont-reqs#checks).
//
// For example, if you set this field to `/bar`, then AI Platform
// intermittently sends a GET request to the following URL on the container:
// <code>localhost:<var>PORT</var>/bar</code>
// <var>PORT</var> refers to the first value of this `ModelContainerSpec`'s
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
//   [Endpoint.name][] field of the Endpoint where this Model has been
// deployed. (AI Platform makes this value available to your container code
// as the
// [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (AI Platform makes this value available to your container code as the
// [`AIP_DEPLOYED_MODEL_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
string health_route = 7 [(google.api.field_behavior) = IMMUTABLE];
}

@ -38,6 +38,8 @@ message GenericOperationMetadata {
google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Time when the operation was updated for the last time.
// If the operation has finished (successfully or not), this is the finish
// time.
google.protobuf.Timestamp update_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -44,10 +44,12 @@ service PredictionService {
// Perform an online explanation.
//
// If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
// the corresponding DeployedModel must have
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
// is not specified, all DeployedModels must have
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
// populated. Only deployed AutoML tabular Models have
// explanation_spec.
rpc Explain(ExplainRequest) returns (ExplainResponse) {
@ -138,8 +140,7 @@ message ExplainRequest {
// Response message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
message ExplainResponse {
// The explanations of the Model's [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
//
// It has the same number of elements as [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
// to be explained.
@ -147,4 +148,8 @@ message ExplainResponse {
// ID of the Endpoint's DeployedModel that served this explanation.
string deployed_model_id = 2;
// The predictions that are the output of the predictions call.
// Same as [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
repeated google.protobuf.Value predictions = 3;
}

@ -157,6 +157,52 @@ message StudySpec {
repeated double values = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents a parameter spec with condition from its parent parameter.
// A conditional parameter is only considered active when its parent
// parameter's value satisfies the chosen `parent_value_condition`.
message ConditionalParameterSpec {
// Represents the spec to match discrete values from parent parameter.
message DiscreteValueCondition {
// Required. Matches values of the parent parameter of 'DISCRETE' type.
// All values must exist in `discrete_value_spec` of parent parameter.
//
// The Epsilon of the value matching is 1e-10.
repeated double values = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents the spec to match integer values from parent parameter.
message IntValueCondition {
// Required. Matches values of the parent parameter of 'INTEGER' type.
// All values must lie in `integer_value_spec` of parent parameter.
repeated int64 values = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents the spec to match categorical values from parent parameter.
message CategoricalValueCondition {
// Required. Matches values of the parent parameter of 'CATEGORICAL' type.
// All values must exist in `categorical_value_spec` of parent
// parameter.
repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
}
// A set of parameter values from the parent ParameterSpec's feasible
// space. At most one condition may be set, and its kind must correspond
// to the value type of the parent parameter (DISCRETE, INTEGER, or
// CATEGORICAL).
oneof parent_value_condition {
// The spec for matching values from a parent parameter of
// `DISCRETE` type.
DiscreteValueCondition parent_discrete_values = 2;
// The spec for matching values from a parent parameter of `INTEGER`
// type.
IntValueCondition parent_int_values = 3;
// The spec for matching values from a parent parameter of
// `CATEGORICAL` type.
CategoricalValueCondition parent_categorical_values = 4;
}
// Required. The spec for a conditional parameter.
ParameterSpec parameter_spec = 1 [(google.api.field_behavior) = REQUIRED];
}
// The type of scaling that should be applied to this parameter.
enum ScaleType {
// By default, no scaling is applied.
@ -197,6 +243,13 @@ message StudySpec {
// How the parameter should be scaled.
// Leave unset for `CATEGORICAL` parameters.
ScaleType scale_type = 6;
// A conditional parameter node is active if the parameter's value matches
// the conditional node's parent_value_condition.
//
// If two items in conditional_parameter_specs have the same name, they
// must have disjoint parent_value_condition.
repeated ConditionalParameterSpec conditional_parameter_specs = 10;
}
// The available search algorithms for the Study.

@ -153,27 +153,60 @@ message InputDataConfig {
// Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
//
// The destination of the training data to be written to.
//
// Supported destination file formats:
// * For non-tabular data: "jsonl".
// * For tabular data: "csv" and "bigquery".
//
// Following AI Platform environment variables will be passed to containers
// or python modules of the training task when this field is set:
//
// * AIP_DATA_FORMAT : Exported data format.
// * AIP_TRAINING_DATA_URI : Sharded exported training data uris.
// * AIP_VALIDATION_DATA_URI : Sharded exported validation data uris.
// * AIP_TEST_DATA_URI : Sharded exported test data uris.
oneof destination {
// The Google Cloud Storage location where the training data is to be
// written to. In the given directory a new directory will be created with
// name:
// `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
// All training input data will be written into that directory.
//
// The AI Platform environment variables representing Google Cloud Storage
// data URIs will always be represented in the Google Cloud Storage wildcard
// format to support sharded data. e.g.: "gs://.../training-*.jsonl"
//
// * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
// * AIP_TRAINING_DATA_URI =
//
// "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
// * AIP_VALIDATION_DATA_URI =
//
// "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
// * AIP_TEST_DATA_URI =
//
// "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}"
GcsDestination gcs_destination = 8;
// The BigQuery project location where the training data is to be written
// to. In the given project a new dataset is created with name
// `dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`
// where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
// input data will be written into that dataset. In the dataset three
// tables will be created, `training`, `validation` and `test`.
//
// * AIP_DATA_FORMAT = "bigquery".
// * AIP_TRAINING_DATA_URI =
//
// "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
// * AIP_VALIDATION_DATA_URI =
//
// "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
// * AIP_TEST_DATA_URI =
// "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test"
BigQueryDestination bigquery_destination = 10;
}
// Required. The ID of the Dataset in the same Project and Location which data will be
