docs: correct link to fieldmask

docs: change product name to 'Vertex AI'

PiperOrigin-RevId: 374890837
pull/744/head
Authored by Google APIs; committed 4 years ago by Copybara-Service
parent 599fed858d
commit 77da865645
  1. 10
      google/cloud/aiplatform/v1beta1/aiplatform_v1beta1.yaml
  2. 4
      google/cloud/aiplatform/v1beta1/artifact.proto
  3. 4
      google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
  4. 13
      google/cloud/aiplatform/v1beta1/custom_job.proto
  5. 4
      google/cloud/aiplatform/v1beta1/execution.proto
  6. 2
      google/cloud/aiplatform/v1beta1/explanation.proto
  7. 20
      google/cloud/aiplatform/v1beta1/explanation_metadata.proto
  8. 2
      google/cloud/aiplatform/v1beta1/feature_monitoring_stats.proto
  9. 4
      google/cloud/aiplatform/v1beta1/featurestore_service.proto
  10. 2
      google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto
  11. 2
      google/cloud/aiplatform/v1beta1/index_endpoint.proto
  12. 2
      google/cloud/aiplatform/v1beta1/index_endpoint_service.proto
  13. 4
      google/cloud/aiplatform/v1beta1/index_service.proto
  14. 2
      google/cloud/aiplatform/v1beta1/job_service.proto
  15. 6
      google/cloud/aiplatform/v1beta1/machine_resources.proto
  16. 5
      google/cloud/aiplatform/v1beta1/metadata_schema.proto
  17. 159
      google/cloud/aiplatform/v1beta1/metadata_service.proto
  18. 8
      google/cloud/aiplatform/v1beta1/metadata_store.proto
  19. 38
      google/cloud/aiplatform/v1beta1/migration_service.proto
  20. 44
      google/cloud/aiplatform/v1beta1/model.proto
  21. 2
      google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto
  22. 4
      google/cloud/aiplatform/v1beta1/model_service.proto
  23. 9
      google/cloud/aiplatform/v1beta1/pipeline_job.proto
  24. 20
      google/cloud/aiplatform/v1beta1/pipeline_service.proto
  25. 4
      google/cloud/aiplatform/v1beta1/pipeline_state.proto
  26. 10
      google/cloud/aiplatform/v1beta1/study.proto
  27. 18
      google/cloud/aiplatform/v1beta1/training_pipeline.proto
  28. 4
      google/cloud/aiplatform/v1beta1/vizier_service.proto

@@ -1,7 +1,7 @@
type: google.api.Service
config_version: 3
name: aiplatform.googleapis.com
title: Cloud AI Platform API
title: Vertex AI API
apis:
- name: google.cloud.aiplatform.v1beta1.DatasetService
@@ -78,10 +78,10 @@ documentation:
Train high-quality custom machine learning models with minimal machine
learning expertise and effort.
overview: |-
AI Platform (Unified) enables data scientists, developers, and AI newcomers
to create custom machine learning models specific to their business needs
by leveraging Google's state-of-the-art transfer learning and innovative
AI research.
Vertex AI enables data scientists, developers, and AI newcomers to create
custom machine learning models specific to their business needs by
leveraging Google's state-of-the-art transfer learning and innovative AI
research.
rules:
- selector: google.cloud.location.Locations.GetLocation
description: Gets information about a location.

@@ -40,7 +40,7 @@ message Artifact {
// Unspecified state for the Artifact.
STATE_UNSPECIFIED = 0;
// A state used by systems like Managed Pipelines to indicate that the
// A state used by systems like Vertex Pipelines to indicate that the
// underlying data item represented by this Artifact is being created.
PENDING = 1;
@@ -81,7 +81,7 @@ message Artifact {
// The state of this Artifact. This is a property of the Artifact, and does
// not imply or capture any ongoing process. This property is managed by
// clients (such as AI Platform Pipelines), and the system does not prescribe
// clients (such as Vertex Pipelines), and the system does not prescribe
// or check the validity of state transitions.
State state = 13;

@@ -119,7 +119,7 @@ message BatchPredictionJob {
BigQueryDestination bigquery_destination = 3;
}
// Required. The format in which AI Platform gives the predictions, must be one of the
// Required. The format in which Vertex AI gives the predictions, must be one of the
// [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
// [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
string predictions_format = 1 [(google.api.field_behavior) = REQUIRED];
@@ -189,7 +189,7 @@ message BatchPredictionJob {
BatchDedicatedResources dedicated_resources = 7;
// Immutable. Parameters configuring the batch behavior. Currently only applicable when
// [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] are used (in other cases AI Platform does
// [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] are used (in other cases Vertex AI does
// the tuning itself).
ManualBatchTuningParameters manual_batch_tuning_parameters = 8 [(google.api.field_behavior) = IMMUTABLE];

@@ -102,8 +102,9 @@ message CustomJobSpec {
// Specifies the service account for workload run-as account.
// Users submitting jobs must have act-as permission on this run-as account.
// If unspecified, the AI Platform Custom Code Service Agent for the
// CustomJob's project is used.
// If unspecified, the [AI Platform Custom Code Service
// Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents)
// for the CustomJob's project is used.
string service_account = 4;
// The full name of the Compute Engine
@@ -127,7 +128,7 @@ message CustomJobSpec {
// [id][google.cloud.aiplatform.v1beta1.Trial.id] under its parent HyperparameterTuningJob's
// baseOutputDirectory.
//
// The following AI Platform environment variables will be passed to
// The following Vertex AI environment variables will be passed to
// containers or python modules when this field is set:
//
// For CustomJob:
@@ -143,7 +144,7 @@ message CustomJobSpec {
// * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/<trial_id>/logs/`
GcsDestination base_output_directory = 6;
// Optional. The name of an AI Platform [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob
// Optional. The name of a Vertex AI [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob
// will upload Tensorboard logs.
// Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
@@ -196,10 +197,10 @@ message ContainerSpec {
// The spec of a Python packaged code.
message PythonPackageSpec {
// Required. The URI of a container image in Artifact Registry that will run the
// provided Python package. AI Platform provides a wide range of executor
// provided Python package. Vertex AI provides a wide range of executor
// images with pre-installed packages to meet users' various use cases. See
// the list of [pre-built containers for
// training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
// training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers).
// You must use an image from this list.
string executor_image_uri = 1 [(google.api.field_behavior) = REQUIRED];

@@ -62,7 +62,7 @@ message Execution {
// The state of this Execution. This is a property of the Execution, and does
// not imply or capture any ongoing process. This property is managed by
// clients (such as AI Platform Pipelines) and the system does not prescribe
// clients (such as Vertex Pipelines) and the system does not prescribe
// or check the validity of state transitions.
State state = 6;
@@ -92,7 +92,7 @@ message Execution {
// schemas within the local metadata store.
string schema_title = 13;
// The version of the schema in schema_name to use.
// The version of the schema in `schema_title` to use.
//
// Schema title and version is expected to be registered in earlier Create
// Schema calls. And both are used together as unique identifiers to identify

@@ -152,7 +152,7 @@ message Attribution {
// increasing
// [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] might reduce the error.
//
// See [this introduction](/ai-platform-unified/docs/explainable-ai/overview)
// See [this introduction](/vertex-ai/docs/explainable-ai/overview)
// for more information.
double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

@@ -30,7 +30,7 @@ message ExplanationMetadata {
// Metadata of the input of a feature.
//
// Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
// for Models that are using AI Platform-provided images for Tensorflow.
// for Models that are using Vertex AI-provided images for Tensorflow.
message InputMetadata {
// Domain details of the input feature value. Provides numeric information
// about the feature, such as its range (min, max). If the feature has been
@@ -243,12 +243,12 @@ message ExplanationMetadata {
// Baseline inputs for this feature.
//
// If no baseline is specified, AI Platform chooses the baseline for this
// feature. If multiple baselines are specified, AI Platform returns the
// If no baseline is specified, Vertex AI chooses the baseline for this
// feature. If multiple baselines are specified, Vertex AI returns the
// average attributions across them in
// [Attributions.baseline_attribution][].
//
// For AI Platform provided Tensorflow images (both 1.x and 2.x), the shape
// For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
// of each baseline must match the shape of the input tensor. If a scalar is
// provided, we broadcast to the same shape as the input tensor.
//
@@ -262,7 +262,7 @@ message ExplanationMetadata {
repeated google.protobuf.Value input_baselines = 1;
// Name of the input tensor for this feature. Required and is only
// applicable to AI Platform provided images for Tensorflow.
// applicable to Vertex AI-provided images for Tensorflow.
string input_tensor_name = 2;
// Defines how the feature is encoded into the input tensor. Defaults to
@@ -306,7 +306,7 @@ message ExplanationMetadata {
// A list of baselines for the encoded tensor.
//
// The shape of each baseline should match the shape of the encoded tensor.
// If a scalar is provided, AI Platform broadcast to the same shape as the
// If a scalar is provided, Vertex AI broadcasts to the same shape as the
// encoded tensor.
repeated google.protobuf.Value encoded_baselines = 10;
@@ -364,9 +364,9 @@ message ExplanationMetadata {
//
// An empty InputMetadata is valid. It describes a text feature which has the
// name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline
// of the empty feature is chosen by AI Platform.
// of the empty feature is chosen by Vertex AI.
//
// For AI Platform provided Tensorflow images, the key can be any friendly
// For Vertex AI-provided Tensorflow images, the key can be any friendly
// name of the feature. Once specified,
// [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] are keyed by
// this key (if not grouped with another feature).
@@ -377,7 +377,7 @@ message ExplanationMetadata {
// Required. Map from output names to output metadata.
//
// For AI Platform provided Tensorflow images, keys can be any user defined
// For Vertex AI-provided Tensorflow images, keys can be any user defined
// string that consists of any UTF-8 characters.
//
// For custom images, keys are the name of the output field in the prediction
@@ -390,7 +390,7 @@ message ExplanationMetadata {
// of the [feature attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
// The schema is defined as an OpenAPI 3.0.2
// [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
// AutoML tabular Models always have this field populated by AI Platform.
// AutoML tabular Models always have this field populated by Vertex AI.
// Note: The URI given on output may be different, including the URI scheme,
// than the one given on input. The output URI will point to a location where
// the user only has a read access.

@@ -31,7 +31,7 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
// window, start_time = end_time. Timestamp of the stats and anomalies always
// refers to end_time. Raw stats and anomalies are stored in stats_uri or
// anomaly_uri in the tensorflow defined protos. Field data_stats contains
// almost identical information with the raw stats in AI Platform
// almost identical information with the raw stats in Vertex AI
// defined proto, for UI to display.
message FeatureStatsAnomaly {
// Feature importance score, only populated when cross-feature monitoring is

@@ -333,7 +333,6 @@ message ListFeaturestoresRequest {
// Lists the featurestores that match the filter expression. The following
// fields are supported:
//
// * `display_name`: Supports =, != comparisons.
// * `create_time`: Supports =, !=, <, >, <=, and >= comparisons. Values must
// be
// in RFC 3339 format.
@@ -371,7 +370,6 @@ message ListFeaturestoresRequest {
// Use "desc" after a field name for descending.
// Supported Fields:
//
// * `display_name`
// * `create_time`
// * `update_time`
// * `online_serving_config.fixed_node_count`
@@ -410,10 +408,8 @@ message UpdateFeaturestoreRequest {
//
// Updatable fields:
//
// * `display_name`
// * `labels`
// * `online_serving_config.fixed_node_count`
// * `retention_policy.online_storage_ttl_days`
google.protobuf.FieldMask update_mask = 2;
}

@@ -60,7 +60,7 @@ message HyperparameterTuningJob {
// The number of failed Trials that need to be seen before failing
// the HyperparameterTuningJob.
//
// If set to 0, AI Platform decides how many Trials must fail
// If set to 0, Vertex AI decides how many Trials must fail
// before the whole job fails.
int32 max_failed_trial_count = 7;

@@ -135,7 +135,7 @@ message DeployedIndex {
google.protobuf.Timestamp index_sync_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. A description of resources that the DeployedIndex uses, which to large
// degree are decided by AI Platform, and optionally allows only a modest
// degree are decided by Vertex AI, and optionally allows only a modest
// additional configuration.
// If min_replica_count is not set, the default value is 1. If
// max_replica_count is not set, the default value is min_replica_count. The

@@ -30,7 +30,7 @@ option java_multiple_files = true;
option java_outer_classname = "IndexEndpointServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service for managing AI Platform's IndexEndpoints.
// A service for managing Vertex AI's IndexEndpoints.
service IndexEndpointService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";

@@ -30,7 +30,7 @@ option java_multiple_files = true;
option java_outer_classname = "IndexServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service for creating and managing AI Platform's Index resources.
// A service for creating and managing Vertex AI's Index resources.
service IndexService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
@@ -173,7 +173,7 @@ message UpdateIndexRequest {
// The update mask applies to the resource.
// For the `FieldMask` definition, see
// [FieldMask](https://tinyurl.com/protobufs#google.protobuf.FieldMask).
// [FieldMask](https://tinyurl.com/protobufs/google.protobuf#fieldmask).
google.protobuf.FieldMask update_mask = 2;
}

@@ -36,7 +36,7 @@ option java_multiple_files = true;
option java_outer_classname = "JobServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service for creating and managing AI Platform's jobs.
// A service for creating and managing Vertex AI's jobs.
service JobService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";

@@ -99,7 +99,7 @@ message DedicatedResources {
repeated AutoscalingMetricSpec autoscaling_metric_specs = 4 [(google.api.field_behavior) = IMMUTABLE];
}
// A description of resources that to large degree are decided by AI Platform,
// A description of resources that to large degree are decided by Vertex AI,
// and require only a modest additional configuration.
// Each Model supporting these resources documents its specific guidelines.
message AutomaticResources {
@@ -117,7 +117,7 @@ message AutomaticResources {
// outages). If traffic against the DeployedModel increases beyond what its
// replicas at maximum may handle, a portion of the traffic will be dropped.
// If this value is not provided, a no upper bound for scaling under heavy
// traffic will be assume, though AI Platform may be unable to scale beyond
// traffic will be assume, though Vertex AI may be unable to scale beyond
// certain replica number.
int32 max_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE];
}
@@ -132,7 +132,7 @@ message BatchDedicatedResources {
];
// Immutable. The number of machine replicas used at the start of the batch operation.
// If not set, AI Platform decides starting number, not greater than
// If not set, Vertex AI decides starting number, not greater than
// [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count]
int32 starting_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE];

@@ -60,8 +60,9 @@ message MetadataSchema {
// of [MetadataSchema.version] and the schema name given by `title` in
// [MetadataSchema.schema] must be unique within a MetadataStore.
//
// The schema is defined as an OpenAPI 3.0.2 [MetadataSchema Object](
// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject)
// The schema is defined as an OpenAPI 3.0.2
// [MetadataSchema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject)
string schema = 3 [(google.api.field_behavior) = REQUIRED];
// The type of the MetadataSchema. This is a property that identifies which

@@ -176,7 +176,7 @@ service MetadataService {
// Adds a set of Contexts as children to a parent Context. If any of the
// child Contexts have already been added to the parent Context, they are
// simply skipped. If this call would create a cycle or cause any Context to
// have more than 10 parents, the request will fail with INVALID_ARGUMENT
// have more than 10 parents, the request will fail with an INVALID_ARGUMENT
// error.
rpc AddContextChildren(AddContextChildrenRequest) returns (AddContextChildrenResponse) {
option (google.api.http) = {
@@ -229,9 +229,10 @@ service MetadataService {
option (google.api.method_signature) = "execution,update_mask";
}
// Adds Events for denoting whether each Artifact was an input or output for a
// given Execution. If any Events already exist between the Execution and any
// of the specified Artifacts they are simply skipped.
// Adds Events to the specified Execution. An Event indicates whether an
// Artifact was used as an input or output for an Execution. If an Event
// already exists between the Execution and the Artifact, the Event is
// skipped.
rpc AddExecutionEvents(AddExecutionEventsRequest) returns (AddExecutionEventsResponse) {
option (google.api.http) = {
post: "/v1beta1/{execution=projects/*/locations/*/metadataStores/*/executions/*}:addExecutionEvents"
@@ -250,7 +251,7 @@ service MetadataService {
option (google.api.method_signature) = "execution";
}
// Creates an MetadataSchema.
// Creates a MetadataSchema.
rpc CreateMetadataSchema(CreateMetadataSchemaRequest) returns (MetadataSchema) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/metadataStores/*}/metadataSchemas"
@@ -459,28 +460,29 @@ message ListArtifactsRequest {
// order to be part of the result set.
// The syntax to define filter query is based on https://google.aip.dev/160.
// The supported set of filters include the following:
// 1. Attributes filtering
// e.g. display_name = "test"
//
// Supported fields include: name, display_name, uri, state,
// schema_title, create_time and update_time.
// Time fields, i.e. create_time and update_time, require values to
// specified in RFC-3339 format.
// e.g. create_time = "2020-11-19T11:30:00-04:00"
// 2. Metadata field
// To filter on metadata fields use traversal operation as follows:
// metadata.<field_name>.<type_value>
// e.g. metadata.field_1.number_value = 10.0
// 3. Context based filtering
// To filter Artifacts based on the contexts to which they belong use the
// function operator with the full resource name
// "in_context(<context-name>)"
// e.g.
// in_context("projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context-id>")
// * **Attribute filtering**:
// For example: `display_name = "test"`.
// Supported fields include: `name`, `display_name`, `uri`, `state`,
// `schema_title`, `create_time`, and `update_time`.
// Time fields, such as `create_time` and `update_time`, require values
// specified in RFC-3339 format.
// For example: `create_time = "2020-11-19T11:30:00-04:00"`
// * **Metadata field**:
// To filter on metadata fields use traversal operation as follows:
// `metadata.<field_name>.<type_value>`.
// For example: `metadata.field_1.number_value = 10.0`
// * **Context based filtering**:
// To filter Artifacts based on the contexts to which they belong, use the
// function operator with the full resource name
// `in_context(<context-name>)`.
// For example:
// `in_context("projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context-id>")`
//
// Each of the above supported filter types can be combined together using
// Logical operators (AND & OR).
// e.g. display_name = "test" AND metadata.field1.bool_value = true.
// logical operators (`AND` & `OR`).
//
// For example: `display_name = "test" AND metadata.field1.bool_value = true`.
string filter = 4;
}
@@ -529,7 +531,7 @@ message CreateContextRequest {
Context context = 2 [(google.api.field_behavior) = REQUIRED];
// The {context} portion of the resource name with the format:
// projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
// projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}.
// If not provided, the Context's ID will be a UUID generated by the service.
// Must be 4-128 characters in length. Valid characters are /[a-z][0-9]-/.
// Must be unique across all Contexts in the parent MetadataStore. (Otherwise
@@ -575,6 +577,37 @@ message ListContextsRequest {
// INVALID_ARGUMENT error.)
string page_token = 3;
// Filter specifying the boolean condition for the Contexts to satisfy in
// order to be part of the result set.
// The syntax to define filter query is based on https://google.aip.dev/160.
// Following are the supported set of filters:
//
// * **Attribute filtering**:
// For example: `display_name = "test"`.
// Supported fields include: `name`, `display_name`, `schema_title`,
// `create_time`, and `update_time`.
// Time fields, such as `create_time` and `update_time`, require values
// specified in RFC-3339 format.
// For example: `create_time = "2020-11-19T11:30:00-04:00"`.
// * **Metadata field**:
// To filter on metadata fields use traversal operation as follows:
// `metadata.<field_name>.<type_value>`.
// For example: `metadata.field_1.number_value = 10.0`.
// * **Parent Child filtering**:
// To filter Contexts based on parent-child relationship use the HAS
// operator as follows:
//
// ```
// parent_contexts:
// "projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context_id>"
// child_contexts:
// "projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context_id>"
// ```
//
// Each of the above supported filters can be combined together using
// logical operators (`AND` & `OR`).
//
// For example: `display_name = "test" AND metadata.field1.bool_value = true`.
string filter = 4;
}
@@ -639,12 +672,18 @@ message AddContextArtifactsAndExecutionsRequest {
];
// The resource names of the Artifacts to attribute to the Context.
//
// Format:
// projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}
repeated string artifacts = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Artifact"
}];
// The resource names of the Executions to associate with the
// Context.
//
// Format:
// projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}
repeated string executions = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Execution"
}];
@@ -658,6 +697,7 @@ message AddContextArtifactsAndExecutionsResponse {
// Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren].
message AddContextChildrenRequest {
// Required. The resource name of the parent Context.
//
// Format:
// projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
string context = 1 [
@@ -756,7 +796,7 @@ message ListExecutionsRequest {
// call. Provide this to retrieve the subsequent page.
//
// When paginating, all other provided parameters must match the call that
// provided the page token. (Otherwise the request will fail with
// provided the page token. (Otherwise the request will fail with an
// INVALID_ARGUMENT error.)
string page_token = 3;
@@ -764,28 +804,28 @@ message ListExecutionsRequest {
// order to be part of the result set.
// The syntax to define filter query is based on https://google.aip.dev/160.
// Following are the supported set of filters:
// 1. Attributes filtering
// e.g. display_name = "test"
//
// supported fields include: name, display_name, state,
// schema_title, create_time and update_time.
// Time fields, i.e. create_time and update_time, require values to
// specified in RFC-3339 format.
// e.g. create_time = "2020-11-19T11:30:00-04:00"
// 2. Metadata field
// To filter on metadata fields use traversal operation as follows:
// metadata.<field_name>.<type_value>
// e.g. metadata.field_1.number_value = 10.0
// 3. Context based filtering
// To filter Executions based on the contexts to which they belong use
// the function operator with the full resource name
// "in_context(<context-name>)"
// e.g.
// in_context("projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context-id>")
// * **Attribute filtering**:
// For example: `display_name = "test"`.
// Supported fields include: `name`, `display_name`, `state`,
// `schema_title`, `create_time`, and `update_time`.
// Time fields, such as `create_time` and `update_time`, require values
// specified in RFC-3339 format.
// For example: `create_time = "2020-11-19T11:30:00-04:00"`.
// * **Metadata field**:
// To filter on metadata fields use traversal operation as follows:
// `metadata.<field_name>.<type_value>`
// For example: `metadata.field_1.number_value = 10.0`
// * **Context based filtering**:
// To filter Executions based on the contexts to which they belong use
// the function operator with the full resource name:
// `in_context(<context-name>)`.
// For example:
// `in_context("projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context-id>")`
//
// Each of the above supported filters can be combined together using
// Logical operators (AND & OR).
// e.g. display_name = "test" AND metadata.field1.bool_value = true.
// logical operators (`AND` & `OR`).
// For example: `display_name = "test" AND metadata.field1.bool_value = true`.
string filter = 4;
}
@@ -912,7 +952,7 @@ message ListMetadataSchemasRequest {
// A page token, received from a previous
// [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] call. Provide this to retrieve the
// subsequent page.
// next page.
//
// When paginating, all other provided parameters must match the call that
// provided the page token. (Otherwise the request will fail with
@@ -962,21 +1002,22 @@ message QueryArtifactLineageSubgraphRequest {
// order to be part of the Lineage Subgraph.
// The syntax to define filter query is based on https://google.aip.dev/160.
// The supported set of filters include the following:
// 1. Attributes filtering
// e.g. display_name = "test"
//
// supported fields include: name, display_name, uri, state,
// schema_title, create_time and update_time.
// Time fields, i.e. create_time and update_time, require values to
// specified in RFC-3339 format.
// e.g. create_time = "2020-11-19T11:30:00-04:00"
// 2. Metadata field
// To filter on metadata fields use traversal operation as follows:
// metadata.<field_name>.<type_value>
// e.g. metadata.field_1.number_value = 10.0
// * **Attribute filtering**:
// For example: `display_name = "test"`
// Supported fields include: `name`, `display_name`, `uri`, `state`,
// `schema_title`, `create_time`, and `update_time`.
// Time fields, such as `create_time` and `update_time`, require values
// specified in RFC-3339 format.
// For example: `create_time = "2020-11-19T11:30:00-04:00"`
// * **Metadata field**:
// To filter on metadata fields use traversal operation as follows:
// `metadata.<field_name>.<type_value>`.
// For example: `metadata.field_1.number_value = 10.0`
//
// Each of the above supported filter types can be combined together using
// Logical operators (AND & OR).
// e.g. display_name = "test" AND metadata.field1.bool_value = true.
// logical operators (`AND` & `OR`).
//
// For example: `display_name = "test" AND metadata.field1.bool_value = true`.
string filter = 3;
}

@@ -35,7 +35,7 @@ message MetadataStore {
pattern: "projects/{project}/locations/{location}/metadataStores/{metadata_store}"
};
// Represent state information for a MetadataStore.
// Represents state information for a MetadataStore.
message MetadataStoreState {
// The disk utilization of the MetadataStore in bytes.
int64 disk_utilization_bytes = 1;
@@ -50,9 +50,9 @@ message MetadataStore {
// Output only. Timestamp when this MetadataStore was last updated.
google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Customer-managed encryption key spec for an Metadata Store. If set, this
// Metadata Store and all sub-resources of this Metadata Store will be secured
// by this key.
// Customer-managed encryption key spec for a Metadata Store. If set, this
// Metadata Store and all sub-resources of this Metadata Store are secured
// using this key.
EncryptionSpec encryption_spec = 5;
// Description of the MetadataStore.

@@ -33,14 +33,14 @@ option java_outer_classname = "MigrationServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service that migrates resources from automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com to AI Platform.
// datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
service MigrationService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
// Searches all of the resources in automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com that can be migrated to
// AI Platform's given location.
// Vertex AI's given location.
rpc SearchMigratableResources(SearchMigratableResourcesRequest) returns (SearchMigratableResourcesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:search"
@@ -50,7 +50,7 @@ service MigrationService {
}
// Batch migrates resources from ml.googleapis.com, automl.googleapis.com,
// and datalabeling.googleapis.com to AI Platform (Unified).
// and datalabeling.googleapis.com to Vertex AI.
rpc BatchMigrateResources(BatchMigrateResourcesRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:batchMigrate"
@ -67,7 +67,7 @@ service MigrationService {
// Request message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources].
message SearchMigratableResourcesRequest {
// Required. The location that the migratable resources should be searched from.
// It's the AI Platform location that the resources can be migrated to, not
// It's the Vertex AI location that the resources can be migrated to, not
// the resources' original location.
// Format:
// `projects/{project}/locations/{location}`
@ -130,9 +130,9 @@ message BatchMigrateResourcesRequest {
}
// Config of migrating one resource from automl.googleapis.com,
// datalabeling.googleapis.com and ml.googleapis.com to AI Platform.
// datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
message MigrateResourceRequest {
// Config for migrating version in ml.googleapis.com to AI Platform's Model.
// Config for migrating version in ml.googleapis.com to Vertex AI's Model.
message MigrateMlEngineModelVersionConfig {
// Required. The ml.googleapis.com endpoint that this model version should be migrated
// from.
@ -156,12 +156,12 @@ message MigrateResourceRequest {
}
];
// Required. Display name of the model in AI Platform.
// Required. Display name of the model in Vertex AI.
// System will pick a display name if unspecified.
string model_display_name = 3 [(google.api.field_behavior) = REQUIRED];
}
// Config for migrating Model in automl.googleapis.com to AI Platform's Model.
// Config for migrating Model in automl.googleapis.com to Vertex AI's Model.
message MigrateAutomlModelConfig {
// Required. Full resource name of automl Model.
// Format:
@ -173,12 +173,12 @@ message MigrateResourceRequest {
}
];
// Optional. Display name of the model in AI Platform.
// Optional. Display name of the model in Vertex AI.
// System will pick a display name if unspecified.
string model_display_name = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Config for migrating Dataset in automl.googleapis.com to AI Platform's
// Config for migrating Dataset in automl.googleapis.com to Vertex AI's
// Dataset.
message MigrateAutomlDatasetConfig {
// Required. Full resource name of automl Dataset.
@ -191,7 +191,7 @@ message MigrateResourceRequest {
}
];
// Required. Display name of the Dataset in AI Platform.
// Required. Display name of the Dataset in Vertex AI.
// System will pick a display name if unspecified.
string dataset_display_name = 2 [(google.api.field_behavior) = REQUIRED];
}
@ -200,7 +200,7 @@ message MigrateResourceRequest {
// Platform's Dataset.
message MigrateDataLabelingDatasetConfig {
// Config for migrating AnnotatedDataset in datalabeling.googleapis.com to
// AI Platform's SavedQuery.
// Vertex AI's SavedQuery.
message MigrateDataLabelingAnnotatedDatasetConfig {
// Required. Full resource name of data labeling AnnotatedDataset.
// Format:
@ -223,30 +223,30 @@ message MigrateResourceRequest {
}
];
// Optional. Display name of the Dataset in AI Platform.
// Optional. Display name of the Dataset in Vertex AI.
// System will pick a display name if unspecified.
string dataset_display_name = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to
// AI Platform's SavedQuery. The specified AnnotatedDatasets have to belong
// Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong
// to the datalabeling Dataset.
repeated MigrateDataLabelingAnnotatedDatasetConfig migrate_data_labeling_annotated_dataset_configs = 3 [(google.api.field_behavior) = OPTIONAL];
}
oneof request {
// Config for migrating Version in ml.googleapis.com to AI Platform's Model.
// Config for migrating Version in ml.googleapis.com to Vertex AI's Model.
MigrateMlEngineModelVersionConfig migrate_ml_engine_model_version_config = 1;
// Config for migrating Model in automl.googleapis.com to AI Platform's
// Config for migrating Model in automl.googleapis.com to Vertex AI's
// Model.
MigrateAutomlModelConfig migrate_automl_model_config = 2;
// Config for migrating Dataset in automl.googleapis.com to AI Platform's
// Config for migrating Dataset in automl.googleapis.com to Vertex AI's
// Dataset.
MigrateAutomlDatasetConfig migrate_automl_dataset_config = 3;
// Config for migrating Dataset in datalabeling.googleapis.com to
// AI Platform's Dataset.
// Vertex AI's Dataset.
MigrateDataLabelingDatasetConfig migrate_data_labeling_dataset_config = 4;
}
}
@ -259,7 +259,7 @@ message BatchMigrateResourcesResponse {
// Describes a successfully migrated resource.
message MigrateResourceResponse {
// After migration, the resource name in AI Platform.
// After migration, the resource name in Vertex AI.
oneof migrated_resource {
// Migrated Dataset's resource name.
string dataset = 1 [(google.api.resource_reference) = {

@ -94,7 +94,7 @@ message Model {
// higher degree of manual configuration.
DEDICATED_RESOURCES = 1;
// Resources that to large degree are decided by AI Platform, and require
// Resources that to large degree are decided by Vertex AI, and require
// only a modest additional configuration.
AUTOMATIC_RESOURCES = 2;
}
@ -120,7 +120,7 @@ message Model {
// does not have any additional information.
// The schema is defined as an OpenAPI 3.0.2
// [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
// AutoML Models always have this field populated by AI Platform, if no
// AutoML Models always have this field populated by Vertex AI. If no
// additional metadata is needed, this field is set to an empty string.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
@ -147,7 +147,7 @@ message Model {
// Input only. The specification of the container that is to be used when deploying
// this Model. The specification is ingested upon
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied
// and stored internally by AI Platform.
// and stored internally by Vertex AI.
// Not present for AutoML Models.
ModelContainerSpec container_spec = 9 [(google.api.field_behavior) = INPUT_ONLY];
@ -243,7 +243,7 @@ message Model {
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
repeated string supported_output_storage_formats = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Model was uploaded into AI Platform.
// Output only. Timestamp when this Model was uploaded into Vertex AI.
google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Timestamp when this Model was most recently updated.
@ -305,7 +305,7 @@ message PredictSchemata {
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
// The schema is defined as an OpenAPI 3.0.2
// [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
// AutoML Models always have this field populated by AI Platform.
// AutoML Models always have this field populated by Vertex AI.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
// point to a location where the user only has a read access.
@ -317,7 +317,7 @@ message PredictSchemata {
// [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters].
// The schema is defined as an OpenAPI 3.0.2
// [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
// AutoML Models always have this field populated by AI Platform, if no
// AutoML Models always have this field populated by Vertex AI. If no
// parameters are supported, then it is set to an empty string.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
@ -330,7 +330,7 @@ message PredictSchemata {
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
// The schema is defined as an OpenAPI 3.0.2
// [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
// AutoML Models always have this field populated by AI Platform.
// AutoML Models always have this field populated by Vertex AI.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, than the one given on input. The output URI will
// point to a location where the user only has a read access.
@ -354,8 +354,8 @@ message ModelContainerSpec {
// To learn about the requirements for the Docker image itself, see
// [Custom container requirements](https://tinyurl.com/cust-cont-reqs).
//
// You can use the URI to one of AI Platform's [pre-built container images for
// prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers)
// You can use the URI to one of Vertex AI's [pre-built container images for
// prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
// in this field.
string image_uri = 1 [
(google.api.field_behavior) = REQUIRED,
@ -384,7 +384,7 @@ message ModelContainerSpec {
// `CMD`.
//
// In this field, you can reference environment variables
// [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables)
// [set by Vertex AI](https://tinyurl.com/cust-cont-reqs#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
@ -420,7 +420,7 @@ message ModelContainerSpec {
// [interact](https://tinyurl.com/h3kdcgs).
//
// In this field, you can reference environment variables
// [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables)
// [set by Vertex AI](https://tinyurl.com/cust-cont-reqs#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
@ -464,7 +464,7 @@ message ModelContainerSpec {
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. List of ports to expose from the container. AI Platform sends any
// Immutable. List of ports to expose from the container. Vertex AI sends any
// prediction requests that it receives to the first port on this list. Vertex
// AI also sends
// [liveness and health checks](https://tinyurl.com/cust-cont-reqs#health)
@ -480,18 +480,18 @@ message ModelContainerSpec {
// ]
// ```
//
// AI Platform does not use ports other than the first one listed. This field
// Vertex AI does not use ports other than the first one listed. This field
// corresponds to the `ports` field of the Kubernetes Containers
// [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. HTTP path on the container to send prediction requests to. AI Platform
// Immutable. HTTP path on the container to send prediction requests to. Vertex AI
// forwards requests sent using
// [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
// path on the container's IP address and port. AI Platform then returns the
// path on the container's IP address and port. Vertex AI then returns the
// container's response in the API response.
//
// For example, if you set this field to `/foo`, then when AI Platform
// For example, if you set this field to `/foo`, then when Vertex AI
// receives a prediction request, it forwards the request body in a POST
// request to the `/foo` path on the port of your container specified by the
// first value of this `ModelContainerSpec`'s
@ -504,24 +504,24 @@ message ModelContainerSpec {
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
// [Endpoint.name][] field of the Endpoint where this Model has been
// deployed. (AI Platform makes this value available to your container code
// deployed. (Vertex AI makes this value available to your container code
// as the
// [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (AI Platform makes this value available to your container code
// (Vertex AI makes this value available to your container code
// as the [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://tinyurl.com/cust-cont-reqs#aip-variables).)
string predict_route = 6 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. HTTP path on the container to send health checks to. AI Platform
// Immutable. HTTP path on the container to send health checks to. Vertex AI
// intermittently sends GET requests to this path on the container's IP
// address and port to check that the container is healthy. Read more about
// [health
// checks](https://tinyurl.com/cust-cont-reqs#checks).
//
// For example, if you set this field to `/bar`, then AI Platform
// For example, if you set this field to `/bar`, then Vertex AI
// intermittently sends a GET request to the `/bar` path on the port of your
// container specified by the first value of this `ModelContainerSpec`'s
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
@ -533,13 +533,13 @@ message ModelContainerSpec {
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
// [Endpoint.name][] field of the Endpoint where this Model has been
// deployed. (AI Platform makes this value available to your container code
// deployed. (Vertex AI makes this value available to your container code
// as the
// [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (AI Platform makes this value available to your container code as the
// (Vertex AI makes this value available to your container code as the
// [`AIP_DEPLOYED_MODEL_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
// environment variable.)
string health_route = 7 [(google.api.field_behavior) = IMMUTABLE];

@ -140,7 +140,7 @@ message ModelDeploymentMonitoringJob {
// prediction request/response.
// If there are any data type differences between predict instance and TFDV
// instance, this field can be used to override the schema.
// For models trained with AI Platform, this field must be set as all the
// For models trained with Vertex AI, this field must be set as all the
// fields in predict instance formatted as string.
string analysis_instance_schema_uri = 16;

@ -33,12 +33,12 @@ option java_multiple_files = true;
option java_outer_classname = "ModelServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service for managing AI Platform's machine learning Models.
// A service for managing Vertex AI's machine learning Models.
service ModelService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
// Uploads a Model artifact into AI Platform.
// Uploads a Model artifact into Vertex AI.
rpc UploadModel(UploadModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*}/models:upload"

@ -83,8 +83,6 @@ message PipelineJob {
google.protobuf.Timestamp update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
// Required. The spec of the pipeline.
// The spec contains a `schema_version` field which indicates the Kubeflow
// Pipeline schema version to decode the struct.
google.protobuf.Struct pipeline_spec = 7 [(google.api.field_behavior) = REQUIRED];
// Output only. The detailed state of the job.
@ -134,7 +132,7 @@ message PipelineJob {
//
// Private services access must already be configured for the network.
// Pipeline job will apply the network configuration to the GCP resources
// being launched, if applied, such as Cloud AI Platform
// being launched, if applied, such as Vertex AI
// Training or Dataflow job. If left unspecified, the workload is not peered
// with any network.
string network = 18 [(google.api.resource_reference) = {
@ -191,8 +189,9 @@ message PipelineTaskDetail {
// Specifies task was skipped due to cache hit.
SKIPPED = 8;
// Specifies task was not triggered because the trigger policy of the task
// in the [PipelineJob.pipeline_spec.condition][] is not satisfied.
// Specifies that the task was not triggered because the task's trigger
// policy is not satisfied. The trigger policy is specified in the
// `condition` field of [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec].
NOT_TRIGGERED = 9;
}

@ -31,7 +31,9 @@ option java_multiple_files = true;
option java_outer_classname = "PipelineServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// A service for creating and managing AI Platform's pipelines.
// A service for creating and managing Vertex AI's pipelines. This includes both
// `TrainingPipeline` resources (used for AutoML and custom training) and
// `PipelineJob` resources (used for Vertex Pipelines).
service PipelineService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
@ -303,14 +305,16 @@ message ListPipelineJobsRequest {
// The standard list filter.
// Supported fields:
// * `display_name` supports = and !=.
// * `state` supports = and !=.
//
// Some examples of using the filter are:
// * `state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`
// * `state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`
// * `NOT display_name="my_pipeline"`
// * `state="PIPELINE_STATE_FAILED"`
// * `display_name` supports `=` and `!=`.
// * `state` supports `=` and `!=`.
//
// The following examples demonstrate how to filter the list of PipelineJobs:
//
// * `state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`
// * `state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`
// * `NOT display_name="my_pipeline"`
// * `state="PIPELINE_STATE_FAILED"`
string filter = 2;
// The standard list page size.

@ -28,7 +28,7 @@ enum PipelineState {
// The pipeline state is unspecified.
PIPELINE_STATE_UNSPECIFIED = 0;
// The pipeline has been just created or resumed and processing has not yet
// The pipeline has been created or resumed, and processing has not yet
// begun.
PIPELINE_STATE_QUEUED = 1;
@ -44,7 +44,7 @@ enum PipelineState {
// The pipeline failed.
PIPELINE_STATE_FAILED = 5;
// The pipeline is being cancelled. From this state the pipeline may only go
// The pipeline is being cancelled. From this state, the pipeline may only go
// to either PIPELINE_STATE_SUCCEEDED, PIPELINE_STATE_FAILED or
// PIPELINE_STATE_CANCELLED.
PIPELINE_STATE_CANCELLING = 6;

@ -388,7 +388,9 @@ message StudySpec {
// The available search algorithms for the Study.
enum Algorithm {
// The default algorithm used by AI Platform Optimization service.
// The default algorithm used by Vertex AI for [hyperparameter
// tuning](https://cloud.google.com/vertex-ai/docs/training/hyperparameter-tuning-overview)
// and [Vertex Vizier](https://cloud.google.com/vertex-ai/docs/vizier).
ALGORITHM_UNSPECIFIED = 0;
// Simple grid search within the feasible space. To use grid search,
@ -404,15 +406,15 @@ message StudySpec {
// "Noisy" means that the repeated observations with the same Trial parameters
// may lead to different metric evaluations.
enum ObservationNoise {
// The default noise level chosen by the AI Platform service.
// The default noise level chosen by Vertex AI.
OBSERVATION_NOISE_UNSPECIFIED = 0;
// AI Platform Vizier assumes that the objective function is (nearly)
// Vertex AI assumes that the objective function is (nearly)
// perfectly reproducible, and will never repeat the same Trial
// parameters.
LOW = 1;
// AI Platform Vizier will estimate the amount of noise in metric
// Vertex AI will estimate the amount of noise in metric
// evaluations; it may repeat the same Trial parameters more than once.
HIGH = 2;
}

@ -36,8 +36,8 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
// The TrainingPipeline orchestrates tasks associated with training a Model. It
// always executes the training task, and optionally may also
// export data from AI Platform's Dataset which becomes the training input,
// [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the
// export data from Vertex AI's Dataset which becomes the training input,
// [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to Vertex AI, and evaluate the
// Model.
message TrainingPipeline {
option (google.api.resource) = {
@ -51,7 +51,7 @@ message TrainingPipeline {
// Required. The user-defined name of this TrainingPipeline.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Specifies AI Platform owned input data that may be used for training the
// Specifies Vertex AI owned input data that may be used for training the
// Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make
// clear whether this config is used and if there are any special requirements
// on how it should be filled. If nothing about this config is mentioned in
@ -90,7 +90,7 @@ message TrainingPipeline {
// a need of this information, or that training task does not support
// uploading a Model as part of the pipeline.
// When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and
// the trained Model had been uploaded into AI Platform, then the
// the trained Model had been uploaded into Vertex AI, then the
// model_to_upload's resource [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model
// is always uploaded into the Project and Location in which this pipeline
// is.
@ -135,7 +135,7 @@ message TrainingPipeline {
EncryptionSpec encryption_spec = 18;
}
// Specifies AI Platform owned input data to be used for training, and
// Specifies Vertex AI owned input data to be used for training, and
// possibly evaluating, the Model.
message InputDataConfig {
// The instructions how the input data should be split between the
@ -167,7 +167,7 @@ message InputDataConfig {
// * For non-tabular data: "jsonl".
// * For tabular data: "csv" and "bigquery".
//
// The following AI Platform environment variables are passed to containers
// The following Vertex AI environment variables are passed to containers
// or python modules of the training task when this field is set:
//
// * AIP_DATA_FORMAT : Exported data format.
@ -182,7 +182,7 @@ message InputDataConfig {
// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
// All training input data is written into that directory.
//
// The AI Platform environment variables representing Cloud Storage
// The Vertex AI environment variables representing Cloud Storage
// data URIs are represented in the Cloud Storage wildcard
// format to support sharded data. e.g.: "gs://.../training-*.jsonl"
//
@ -234,7 +234,7 @@ message InputDataConfig {
// match this filter and belong to DataItems not ignored by the split method
// are used in respectively training, validation or test role, depending on
// the role of the DataItem they are on (for the auto-assigned that role is
// decided by AI Platform). A filter with same syntax as the one used in
// decided by Vertex AI). A filter with same syntax as the one used in
// [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] may be used, but note
// here it filters across all Annotations of the Dataset, and not just within
// a single DataItem.
@ -266,7 +266,7 @@ message InputDataConfig {
// given fractions. Any of `training_fraction`, `validation_fraction` and
// `test_fraction` may optionally be provided, they must sum to up to 1. If the
// provided ones sum to less than 1, the remainder is assigned to sets as
// decided by AI Platform. If none of the fractions are set, by default roughly
// decided by Vertex AI. If none of the fractions are set, by default roughly
// 80% of data is used for training, 10% for validation, and 10% for test.
message FractionSplit {
// The fraction of the input data that is to be used to train the Model.

@ -31,7 +31,7 @@ option java_multiple_files = true;
option java_outer_classname = "VizierServiceProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
// Cloud AI Platform Vizier API.
// Vertex Vizier API.
//
// Vizier service is a GCP service to solve blackbox optimization problems,
// such as tuning machine learning hyperparameters and searching over deep
@ -85,7 +85,7 @@ service VizierService {
}
// Adds one or more Trials to a Study, with parameter values
// suggested by AI Platform Vizier. Returns a long-running
// suggested by Vertex Vizier. Returns a long-running
// operation associated with the generation of Trial suggestions.
// When this long-running operation succeeds, it will contain
// a [SuggestTrialsResponse][google.cloud.aiplatform.v1beta1.SuggestTrialsResponse].

Loading…
Cancel
Save