feat: add enable_private_service_connect field to Endpoint

feat: add id field to DeployedModel
feat: add service_attachment field to PrivateEndpoints
feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint
feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature
feat: add network and enable_private_service_connect to IndexEndpoint
feat: add service_attachment to IndexPrivateEndpoints
feat: add stratified_split field to training_pipeline InputDataConfig
fix: remove invalid resource annotations in LineageSubgraph

PiperOrigin-RevId: 413686247
pull/690/head
Google APIs 3 years ago committed by Copybara-Service
parent eecbbee1d2
commit 244a89dbd9
  1. 1
      google/cloud/aiplatform/v1beta1/BUILD.bazel
  2. 44
      google/cloud/aiplatform/v1beta1/aiplatform_v1beta1.yaml
  3. 4
      google/cloud/aiplatform/v1beta1/artifact.proto
  4. 18
      google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
  5. 7
      google/cloud/aiplatform/v1beta1/custom_job.proto
  6. 38
      google/cloud/aiplatform/v1beta1/endpoint.proto
  7. 11
      google/cloud/aiplatform/v1beta1/endpoint_service.proto
  8. 2
      google/cloud/aiplatform/v1beta1/execution.proto
  9. 30
      google/cloud/aiplatform/v1beta1/explanation.proto
  10. 2
      google/cloud/aiplatform/v1beta1/feature.proto
  11. 4
      google/cloud/aiplatform/v1beta1/featurestore.proto
  12. 4
      google/cloud/aiplatform/v1beta1/featurestore_online_service.proto
  13. 40
      google/cloud/aiplatform/v1beta1/featurestore_service.proto
  14. 36
      google/cloud/aiplatform/v1beta1/index_endpoint.proto
  15. 47
      google/cloud/aiplatform/v1beta1/index_endpoint_service.proto
  16. 2
      google/cloud/aiplatform/v1beta1/job_service.proto
  17. 9
      google/cloud/aiplatform/v1beta1/lineage_subgraph.proto
  18. 2
      google/cloud/aiplatform/v1beta1/metadata_schema.proto
  19. 4
      google/cloud/aiplatform/v1beta1/model.proto
  20. 7
      google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto
  21. 19
      google/cloud/aiplatform/v1beta1/model_monitoring.proto
  22. 7
      google/cloud/aiplatform/v1beta1/model_service.proto
  23. 9
      google/cloud/aiplatform/v1beta1/pipeline_job.proto
  24. 4
      google/cloud/aiplatform/v1beta1/pipeline_service.proto
  25. 18
      google/cloud/aiplatform/v1beta1/prediction_service.proto
  26. 19
      google/cloud/aiplatform/v1beta1/study.proto
  27. 37
      google/cloud/aiplatform/v1beta1/training_pipeline.proto
  28. 44
      google/cloud/aiplatform/v1beta1/unmanaged_container_model.proto
  29. 6
      google/cloud/aiplatform/v1beta1/vizier_service.proto

@ -89,6 +89,7 @@ proto_library(
"tensorboard_time_series.proto",
"training_pipeline.proto",
"types.proto",
"unmanaged_container_model.proto",
"user_action_reference.proto",
"value.proto",
"vizier_service.proto",

@ -55,6 +55,8 @@ types:
- name: google.cloud.aiplatform.v1beta1.ImportDataResponse
- name: google.cloud.aiplatform.v1beta1.ImportFeatureValuesOperationMetadata
- name: google.cloud.aiplatform.v1beta1.ImportFeatureValuesResponse
- name: google.cloud.aiplatform.v1beta1.MutateDeployedIndexOperationMetadata
- name: google.cloud.aiplatform.v1beta1.MutateDeployedIndexResponse
- name: google.cloud.aiplatform.v1beta1.PurgeArtifactsMetadata
- name: google.cloud.aiplatform.v1beta1.PurgeArtifactsResponse
- name: google.cloud.aiplatform.v1beta1.PurgeContextsMetadata
@ -83,11 +85,6 @@ documentation:
summary: |-
Train high-quality custom machine learning models with minimal machine
learning expertise and effort.
overview: |-
Vertex AI enables data scientists, developers, and AI newcomers to create
custom machine learning models specific to their business needs by
leveraging Google's state-of-the-art transfer learning and innovative AI
research.
rules:
- selector: google.cloud.location.Locations.GetLocation
description: Gets information about a location.
@ -95,6 +92,29 @@ documentation:
- selector: google.cloud.location.Locations.ListLocations
description: Lists information about the supported locations for this service.
- selector: google.iam.v1.IAMPolicy.GetIamPolicy
description: |-
Gets the access control policy for a resource. Returns an empty policy
if the resource exists and does not have a policy set.
- selector: google.iam.v1.IAMPolicy.SetIamPolicy
description: |-
Sets the access control policy on the specified resource. Replaces
any existing policy.
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED`
errors.
- selector: google.iam.v1.IAMPolicy.TestIamPermissions
description: |-
Returns permissions that a caller has on the specified resource. If the
resource does not exist, this will return an empty set of
permissions, not a `NOT_FOUND` error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for authorization
checking. This operation may "fail open" without warning.
backend:
rules:
- selector: 'google.cloud.aiplatform.v1beta1.DatasetService.*'
@ -135,6 +155,8 @@ backend:
deadline: 30.0
- selector: google.cloud.location.Locations.ListLocations
deadline: 30.0
- selector: 'google.iam.v1.IAMPolicy.*'
deadline: 60.0
- selector: 'google.longrunning.Operations.*'
deadline: 60.0
@ -148,6 +170,14 @@ http:
get: '/ui/{name=projects/*}/locations'
additional_bindings:
- get: '/v1beta1/{name=projects/*}/locations'
- selector: google.iam.v1.IAMPolicy.GetIamPolicy
get: '/v1beta1/{resource=projects/*/locations/*/**}:getIamPolicy'
- selector: google.iam.v1.IAMPolicy.SetIamPolicy
post: '/v1beta1/{resource=projects/*/locations/*/**}:setIamPolicy'
body: '*'
- selector: google.iam.v1.IAMPolicy.TestIamPermissions
post: '/v1beta1/{resource=projects/*/locations/*/**}:testIamPermissions'
body: '*'
- selector: google.longrunning.Operations.CancelOperation
post: '/ui/{name=projects/*/locations/*/operations/*}:cancel'
additional_bindings:
@ -528,6 +558,10 @@ authentication:
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: 'google.iam.v1.IAMPolicy.*'
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: 'google.longrunning.Operations.*'
oauth:
canonical_scopes: |-

@ -43,7 +43,7 @@ message Artifact {
// Unspecified state for the Artifact.
STATE_UNSPECIFIED = 0;
// A state used by systems like Vertex Pipelines to indicate that the
// A state used by systems like Vertex AI Pipelines to indicate that the
// underlying data item represented by this Artifact is being created.
PENDING = 1;
@ -84,7 +84,7 @@ message Artifact {
// The state of this Artifact. This is a property of the Artifact, and does
// not imply or capture any ongoing process. This property is managed by
// clients (such as Vertex Pipelines), and the system does not prescribe
// clients (such as Vertex AI Pipelines), and the system does not prescribe
// or check the validity of state transitions.
State state = 13;

@ -26,6 +26,7 @@ import "google/cloud/aiplatform/v1beta1/job_state.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
import "google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto";
import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
import "google/cloud/aiplatform/v1beta1/unmanaged_container_model.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
@ -155,16 +156,19 @@ message BatchPredictionJob {
// Required. The user-defined name of this BatchPredictionJob.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The name of the Model that produces the predictions via this job,
// The name of the Model resource that produces the predictions via this job,
// must share the same ancestor Location.
// Starting this job has no impact on any existing deployments of the Model
// and their resources.
string model = 3 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}
];
// Exactly one of model and unmanaged_container_model must be set.
string model = 3 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Model"
}];
// Contains model information necessary to perform batch prediction without
// requiring uploading to model registry.
// Exactly one of model and unmanaged_container_model must be set.
UnmanagedContainerModel unmanaged_container_model = 28;
// Required. Input configuration of the instances on which predictions are performed.
// The schema of any single instance may be specified via

@ -131,8 +131,11 @@ message CustomJobSpec {
// Where {project} is a project number, as in `12345`, and {network} is a
// network name.
//
// Private services access must already be configured for the network. If left
// unspecified, the job is not peered with any network.
// To specify this field, you must have already [configured VPC Network
// Peering for Vertex
// AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering).
//
// If this field is left unspecified, the job is not peered with any network.
string network = 5 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];

@ -20,6 +20,7 @@ import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
import "google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto";
import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
@ -93,20 +94,31 @@ message Endpoint {
EncryptionSpec encryption_spec = 10;
// The full name of the Google Compute Engine
// [network](/compute/docs/networks-and-firewalls#networks) to which the
// Endpoint should be peered.
// [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks)
// to which the Endpoint should be peered.
//
// Private services access must already be configured for the network. If left
// unspecified, the Endpoint is not peered with any network.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
//
// [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert):
// projects/{project}/global/networks/{network}.
// Where {project} is a project number, as in '12345', and {network} is
// `projects/{project}/global/networks/{network}`.
// Where `{project}` is a project number, as in `12345`, and `{network}` is
// network name.
string network = 13 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];
// If true, expose the Endpoint via private service connect.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
bool enable_private_service_connect = 17;
// Output only. Resource name of the Model Monitoring job associated with this Endpoint
// if monitoring is enabled by [CreateModelDeploymentMonitoringJob][].
// Format:
@ -136,8 +148,11 @@ message DeployedModel {
AutomaticResources automatic_resources = 8;
}
// Output only. The ID of the DeployedModel.
string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI
// will generate a value for this ID.
//
// This value should be 1-10 characters, and valid characters are /[0-9]/.
string id = 1 [(google.api.field_behavior) = IMMUTABLE];
// Required. The name of the Model that this is the deployment of. Note that the Model
// may be in a different location than the DeployedModel's Endpoint.
@ -195,8 +210,11 @@ message DeployedModel {
PrivateEndpoints private_endpoints = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// PrivateEndpoints is used to provide paths for users to send
// requests via private services access.
// PrivateEndpoints proto is used to provide paths for users to send
// requests privately.
// To send a request via private service access, use predict_http_uri,
// explain_http_uri or health_http_uri. To send a request via private service
// connect, use service_attachment.
message PrivateEndpoints {
// Output only. Http(s) path to send prediction requests.
string predict_http_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
@ -206,4 +224,8 @@ message PrivateEndpoints {
// Output only. Http(s) path to send health check requests.
string health_http_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The name of the service attachment resource. Populated if private service
// connect is enabled.
string service_attachment = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -45,6 +45,7 @@ service EndpointService {
body: "endpoint"
};
option (google.api.method_signature) = "parent,endpoint";
option (google.api.method_signature) = "parent,endpoint,endpoint_id";
option (google.longrunning.operation_info) = {
response_type: "Endpoint"
metadata_type: "CreateEndpointOperationMetadata"
@ -129,6 +130,16 @@ message CreateEndpointRequest {
// Required. The Endpoint to create.
Endpoint endpoint = 2 [(google.api.field_behavior) = REQUIRED];
// Immutable. The ID to use for endpoint, which will become the final
// component of the endpoint resource name.
// If not provided, Vertex AI will generate a value for this ID.
//
// This value should be 1-10 characters, and valid characters are /[0-9]/.
// When using HTTP/JSON, this field is populated based on a query string
// argument, such as `?endpoint_id=12345`. This is the fallback for fields
// that are not included in either the URI or the body.
string endpoint_id = 4 [(google.api.field_behavior) = IMMUTABLE];
}
// Runtime operation information for [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].

@ -71,7 +71,7 @@ message Execution {
// The state of this Execution. This is a property of the Execution, and does
// not imply or capture any ongoing process. This property is managed by
// clients (such as Vertex Pipelines) and the system does not prescribe
// clients (such as Vertex AI Pipelines) and the system does not prescribe
// or check the validity of state transitions.
State state = 6;

@ -252,6 +252,14 @@ message IntegratedGradientsAttribution {
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;
// Config for IG with blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
BlurBaselineConfig blur_baseline_config = 3;
}
// An explanation method that redistributes Integrated Gradients
@ -275,6 +283,14 @@ message XraiAttribution {
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;
// Config for XRAI with blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
BlurBaselineConfig blur_baseline_config = 3;
}
// Config for SmoothGrad approximation of gradients.
@ -341,6 +357,20 @@ message FeatureNoiseSigma {
repeated NoiseSigmaForFeature noise_sigma = 1;
}
// Config for blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
message BlurBaselineConfig {
// The standard deviation of the blur kernel for the blurred baseline. The
// same blurring parameter is used for both the height and the width
// dimension. If not set, the method defaults to the zero (i.e. black for
// images) baseline.
float max_blur_sigma = 1;
}
// Similarity explainability that returns the nearest neighbors from the
// provided dataset.
message Similarity {

@ -129,7 +129,7 @@ message Feature {
FeaturestoreMonitoringConfig monitoring_config = 9 [(google.api.field_behavior) = OPTIONAL];
// Output only. A list of historical [Snapshot
// Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis]
// Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis]
// stats requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
// descending.
repeated FeatureStatsAnomaly monitoring_stats = 10 [(google.api.field_behavior) = OUTPUT_ONLY];

@ -30,7 +30,9 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Featurestore configuration information on how the Featurestore is configured.
// Vertex AI Feature Store provides a centralized repository for organizing,
// storing, and serving ML features. The Featurestore is a top-level container
// for your features and their values.
message Featurestore {
option (google.api.resource) = {
type: "aiplatform.googleapis.com/Featurestore"

@ -178,7 +178,9 @@ message FeatureValue {
// Feature generation timestamp. Typically, it is provided by user at
// feature ingestion time. If not, feature store
// will use the system timestamp when the data is ingested into feature
// store.
// store. For streaming ingestion, the time, aligned by days, must be no
// older than five years (1825 days) and no later than one year (366 days)
// in the future.
google.protobuf.Timestamp generate_time = 1;
}

@ -51,6 +51,7 @@ service FeaturestoreService {
body: "featurestore"
};
option (google.api.method_signature) = "parent,featurestore";
option (google.api.method_signature) = "parent,featurestore,featurestore_id";
option (google.longrunning.operation_info) = {
response_type: "Featurestore"
metadata_type: "CreateFeaturestoreOperationMetadata"
@ -107,6 +108,7 @@ service FeaturestoreService {
body: "entity_type"
};
option (google.api.method_signature) = "parent,entity_type";
option (google.api.method_signature) = "parent,entity_type,entity_type_id";
option (google.longrunning.operation_info) = {
response_type: "EntityType"
metadata_type: "CreateEntityTypeOperationMetadata"
@ -159,6 +161,7 @@ service FeaturestoreService {
body: "feature"
};
option (google.api.method_signature) = "parent,feature";
option (google.api.method_signature) = "parent,feature,feature_id";
option (google.longrunning.operation_info) = {
response_type: "Feature"
metadata_type: "CreateFeatureOperationMetadata"
@ -608,18 +611,43 @@ message BatchReadFeatureValuesRequest {
// Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
message ExportFeatureValuesRequest {
// Describes exporting Feature values as of the snapshot timestamp.
// Describes exporting the latest Feature values of all entities of the
// EntityType between [start_time, snapshot_time].
message SnapshotExport {
// Exports Feature values as of this timestamp. If not set,
// retrieve values as of now. Timestamp, if present, must not have higher
// than millisecond precision.
google.protobuf.Timestamp snapshot_time = 1;
// Excludes Feature values with feature generation timestamp before this
// timestamp. If not set, retrieve oldest values kept in Feature Store.
// Timestamp, if present, must not have higher than millisecond precision.
google.protobuf.Timestamp start_time = 2;
}
// Describes exporting all historical Feature values of all entities of the
// EntityType between [start_time, end_time].
message FullExport {
// Excludes Feature values with feature generation timestamp before this
// timestamp. If not set, retrieve oldest values kept in Feature Store.
// Timestamp, if present, must not have higher than millisecond precision.
google.protobuf.Timestamp start_time = 2;
// Exports Feature values as of this timestamp. If not set,
// retrieve values as of now. Timestamp, if present, must not have higher
// than millisecond precision.
google.protobuf.Timestamp end_time = 1;
}
// Required. The mode in which Feature values are exported.
oneof mode {
// Exports Feature values of all entities of the EntityType as of a snapshot
// time.
// Exports the latest Feature values of all entities of the EntityType
// within a time range.
SnapshotExport snapshot_export = 3;
// Exports all historical values of all entities of the EntityType within a
// time range.
FullExport full_export = 7;
}
// Required. The resource name of the EntityType from which to export Feature values.
@ -1141,15 +1169,15 @@ message UpdateFeaturestoreOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}
// Details of operations that perform import feature values.
// Details of operations that perform import Feature values.
message ImportFeatureValuesOperationMetadata {
// Operation metadata for Featurestore import feature values.
// Operation metadata for Featurestore import Feature values.
GenericOperationMetadata generic_metadata = 1;
// Number of entities that have been imported by the operation.
int64 imported_entity_count = 2;
// Number of feature values that have been imported by the operation.
// Number of Feature values that have been imported by the operation.
int64 imported_feature_value_count = 3;
// The number of rows in input source that weren't imported due to either

@ -75,21 +75,29 @@ message IndexEndpoint {
// deployments of.
google.protobuf.Timestamp update_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
// Required. Immutable. The full name of the Google Compute Engine
// Optional. The full name of the Google Compute Engine
// [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
// to which the IndexEndpoint should be peered.
//
// Private services access must already be configured for the network. If left
// unspecified, the Endpoint is not peered with any network.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect],
// can be set.
//
// [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert):
// projects/{project}/global/networks/{network}.
// Where {project} is a project number, as in '12345', and {network} is
// network name.
string network = 9 [
(google.api.field_behavior) = REQUIRED,
(google.api.field_behavior) = IMMUTABLE
];
string network = 9 [(google.api.field_behavior) = OPTIONAL];
// Optional. If true, expose the IndexEndpoint via private service connect.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect],
// can be set.
bool enable_private_service_connect = 10 [(google.api.field_behavior) = OPTIONAL];
}
// A deployment of an Index. IndexEndpoints contain one or more DeployedIndexes.
@ -140,11 +148,10 @@ message DeployedIndex {
// Optional. A description of resources that the DeployedIndex uses, which to large
// degree are decided by Vertex AI, and optionally allows only a modest
// additional configuration.
// If min_replica_count is not set, the default value is 1. If
// max_replica_count is not set, the default value is min_replica_count. The
// max allowed replica count is 1000.
// The user is billed for the resources (at least their minimal amount)
// even if the DeployedIndex receives no traffic.
// If min_replica_count is not set, the default value is 2 (we don't provide
// SLA when min_replica_count=1). If max_replica_count is not set, the
// default value is min_replica_count. The max allowed replica count is
// 1000.
AutomaticResources automatic_resources = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. If true, private endpoint's access logs are sent to StackDriver Logging.
@ -211,8 +218,15 @@ message DeployedIndexAuthConfig {
}
// IndexPrivateEndpoints proto is used to provide paths for users to send
// requests via private services access.
// requests via private endpoints (e.g. private service access, private service
// connect).
// To send a request via private service access, use match_grpc_address.
// To send a request via private service connect, use service_attachment.
message IndexPrivateEndpoints {
// Output only. The IP address used to send match gRPC requests.
string match_grpc_address = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The name of the service attachment resource. Populated if private service
// connect is enabled.
string service_attachment = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -116,6 +116,19 @@ service IndexEndpointService {
metadata_type: "UndeployIndexOperationMetadata"
};
}
// Update an existing DeployedIndex under an IndexEndpoint.
rpc MutateDeployedIndex(MutateDeployedIndexRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{index_endpoint=projects/*/locations/*/indexEndpoints/*}:mutateDeployedIndex"
body: "deployed_index"
};
option (google.api.method_signature) = "index_endpoint,deployed_index";
option (google.longrunning.operation_info) = {
response_type: "MutateDeployedIndexResponse"
metadata_type: "MutateDeployedIndexOperationMetadata"
};
}
}
// Request message for [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint].
@ -286,3 +299,37 @@ message UndeployIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
}
// Request message for [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
message MutateDeployedIndexRequest {
// Required. The name of the IndexEndpoint resource into which to deploy an Index.
// Format:
// `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`
string index_endpoint = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/IndexEndpoint"
}
];
// Required. The DeployedIndex to be updated within the IndexEndpoint.
// Currently, the updatable fields are `automatic_resources`
// and `dedicated_resources`.
DeployedIndex deployed_index = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
message MutateDeployedIndexResponse {
// The DeployedIndex that had been updated in the IndexEndpoint.
DeployedIndex deployed_index = 1;
}
// Runtime operation information for
// [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex].
message MutateDeployedIndexOperationMetadata {
// The operation generic information.
GenericOperationMetadata generic_metadata = 1;
// The unique index ID specified by the user.
string deployed_index_id = 2;
}

@ -822,7 +822,7 @@ message SearchModelDeploymentMonitoringStatsAnomaliesRequest {
];
// Required. The DeployedModel ID of the
// [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
// [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
string deployed_model_id = 2 [(google.api.field_behavior) = REQUIRED];
// The feature display name. If specified, only return the stats belonging to

@ -16,7 +16,6 @@ syntax = "proto3";
package google.cloud.aiplatform.v1beta1;
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/artifact.proto";
import "google/cloud/aiplatform/v1beta1/event.proto";
import "google/cloud/aiplatform/v1beta1/execution.proto";
@ -34,14 +33,10 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Execution nodes.
message LineageSubgraph {
// The Artifact nodes in the subgraph.
repeated Artifact artifacts = 1 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Artifact"
}];
repeated Artifact artifacts = 1;
// The Execution nodes in the subgraph.
repeated Execution executions = 2 [(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Execution"
}];
repeated Execution executions = 2;
// The Event edges between Artifacts and Executions in the subgraph.
repeated Event events = 3;

@ -56,7 +56,7 @@ message MetadataSchema {
// The version of the MetadataSchema. The version's format must match
// the following regular expression: `^[0-9]+[.][0-9]+[.][0-9]+$`, which would
// allow to order/compare different versions.Example: 1.0.0, 1.0.1, etc.
// allow to order/compare different versions. Example: 1.0.0, 1.0.1, etc.
string schema_version = 2;
// Required. The raw YAML string representation of the MetadataSchema. The combination

@ -23,6 +23,7 @@ import "google/cloud/aiplatform/v1beta1/deployed_model_ref.proto";
import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/env_var.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
@ -139,7 +140,8 @@ message Model {
// not available for export.
repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
// Output only. The resource name of the TrainingPipeline that uploaded this Model, if
// any.
string training_pipeline = 7 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {

@ -190,9 +190,10 @@ message ModelDeploymentMonitoringJob {
// ModelDeploymentMonitoringJob will be secured by this key.
EncryptionSpec encryption_spec = 21;
// If true, the scheduled monitoring pipeline status logs are sent to
// Google Cloud Logging. Please note the logs incur cost, which are subject to
// [Cloud Logging pricing](https://cloud.google.com/logging#pricing).
// If true, the scheduled monitoring pipeline logs are sent to
// Google Cloud Logging, including pipeline status and anomalies detected.
// Please note the logs incur cost, which are subject to [Cloud Logging
// pricing](https://cloud.google.com/logging#pricing).
bool enable_monitoring_pipeline_logs = 22;
// Output only. Only populated when the job's state is `JOB_STATE_FAILED` or

@ -96,8 +96,8 @@ message ModelMonitoringObjectiveConfig {
map<string, ThresholdConfig> attribution_score_drift_thresholds = 2;
}
// The config for integrated with Explainable AI. Only applicable if the Model
// has explanation_spec populated.
// The config for integrating with Vertex Explainable AI. Only applicable if
// the Model has explanation_spec populated.
message ExplanationConfig {
// Output from [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] for Model Monitoring baseline dataset,
// which can be used to generate baseline attribution scores.
@ -128,8 +128,8 @@ message ModelMonitoringObjectiveConfig {
PredictionFormat prediction_format = 1;
}
// If want to analyze the Explainable AI feature attribute scores or not.
// If set to true, Vertex AI will log the feature attributions from
// If want to analyze the Vertex Explainable AI feature attribute scores or
// not. If set to true, Vertex AI will log the feature attributions from
// explain response and do the skew/drift detection for them.
bool enable_feature_attributes = 1;
@ -147,11 +147,11 @@ message ModelMonitoringObjectiveConfig {
// The config for drift of prediction data.
PredictionDriftDetectionConfig prediction_drift_detection_config = 3;
// The config for integrated with Explainable AI.
// The config for integrating with Vertex Explainable AI.
ExplanationConfig explanation_config = 5;
}
// Next ID: 2
// Next ID: 3
message ModelMonitoringAlertConfig {
// The config for email alert.
message EmailAlertConfig {
@ -163,6 +163,13 @@ message ModelMonitoringAlertConfig {
// Email alert config.
EmailAlertConfig email_alert_config = 1;
}
// Dump the anomalies to Cloud Logging. The anomalies will be put to json
// payload encoded from proto
// [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][].
// This can be further routed to Pub/Sub or any other services supported
// by Cloud Logging.
bool enable_logging = 2;
}
// The config for feature monitoring threshold.

@ -81,8 +81,9 @@ service ModelService {
// Deletes a Model.
//
// Model can only be deleted if there are no [DeployedModels][] created
// from it.
// A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
// [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
// [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/models/*}"
@ -94,7 +95,7 @@ service ModelService {
};
}
// Exports a trained, exportable, Model to a location specified by the
// Exports a trained, exportable Model to a location specified by the
// user. A Model is considered to be exportable if it has at least one
// [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
rpc ExportModel(ExportModelRequest) returns (google.longrunning.Operation) {

@ -50,9 +50,12 @@ message PipelineJob {
// The runtime config of a PipelineJob.
message RuntimeConfig {
// Deprecated. Use [RuntimeConfig.parameter_values] instead. The runtime
// Deprecated. Use [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] instead. The runtime
// parameters of the PipelineJob. The parameters will be passed into
// [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] to replace the placeholders at runtime.
// This field is used by pipelines built using
// `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as
// pipelines built using Kubeflow Pipelines SDK 1.8 or lower.
map<string, Value> parameters = 1 [deprecated = true];
// Required. A path in a Cloud Storage bucket, which will be treated as the root
@ -66,7 +69,9 @@ message PipelineJob {
// The runtime parameters of the PipelineJob. The parameters will be
// passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] to replace the placeholders
// at runtime.
// at runtime. This field is used by pipelines built using
// `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built
// using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL.
map<string, google.protobuf.Value> parameter_values = 3;
}

@ -36,7 +36,7 @@ option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for creating and managing Vertex AI's pipelines. This includes both
// `TrainingPipeline` resources (used for AutoML and custom training) and
// `PipelineJob` resources (used for Vertex Pipelines).
// `PipelineJob` resources (used for Vertex AI Pipelines).
service PipelineService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
@ -314,6 +314,7 @@ message ListPipelineJobsRequest {
// * `pipeline_job_user_id`: Supports `=`, `!=` comparisons, and `:` wildcard.
// For example, you can check whether a pipeline's display_name contains *step* by doing
// display_name:\"*step*\"
// * `state`: Supports `=` and `!=` comparisons.
// * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `update_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
@ -358,6 +359,7 @@ message ListPipelineJobsRequest {
// * `create_time`
// * `update_time`
// * `end_time`
// * `start_time`
string order_by = 6;
}

@ -46,7 +46,15 @@ service PredictionService {
option (google.api.method_signature) = "endpoint,instances,parameters";
}
// Perform an online prediction with arbitrary http payload.
// Perform an online prediction with an arbitrary HTTP payload.
//
// The response includes the following HTTP headers:
//
// * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
// prediction.
//
// * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
// that served this prediction.
rpc RawPredict(RawPredictRequest) returns (google.api.HttpBody) {
option (google.api.http) = {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:rawPredict"
@ -116,8 +124,8 @@ message PredictResponse {
// ID of the Endpoint's DeployedModel that served this prediction.
string deployed_model_id = 2;
// Output only. The name of the Model this DeployedModel, that served this prediction, was
// created from.
// Output only. The resource name of the Model which is deployed as the DeployedModel that
// this prediction hits.
string model = 3 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
@ -125,8 +133,8 @@ message PredictResponse {
}
];
// Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of the Model this DeployedModel,
// that served this prediction, was created from.
// Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of the Model which is deployed as
// the DeployedModel that this prediction hits.
string model_display_name = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}

@ -31,6 +31,7 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// LINT.IfChange
// A message representing a Study.
message Study {
option (google.api.resource) = {
@ -150,9 +151,9 @@ message Trial {
// Output only. The identifier of the client that originally requested this Trial.
// Each client is identified by a unique client_id. When a client
// asks for a suggestion, Vizier will assign it a Trial. The client should
// evaluate the Trial, complete it, and report back to Vizier.
// If suggestion is asked again by same client_id before the Trial is
// asks for a suggestion, Vertex AI Vizier will assign it a Trial. The client
// should evaluate the Trial, complete it, and report back to Vertex AI
// Vizier. If suggestion is asked again by same client_id before the Trial is
// completed, the same Trial will be returned. Multiple clients with
// different client_ids can ask for suggestions simultaneously, each of them
// will get their own Trial.
@ -225,8 +226,8 @@ message StudySpec {
// relatively good starting point. Unset value signals that there is no
// offered starting point.
//
// Currently only supported by the Vizier service. Not supported by
// HyperparamterTuningJob or TrainingPipeline.
// Currently only supported by the Vertex AI Vizier service. Not supported
// by HyperparameterTuningJob or TrainingPipeline.
optional double default_value = 4;
}
@ -242,8 +243,8 @@ message StudySpec {
// relatively good starting point. Unset value signals that there is no
// offered starting point.
//
// Currently only supported by the Vizier service. Not supported by
// HyperparamterTuningJob or TrainingPipeline.
// Currently only supported by the Vertex AI Vizier service. Not supported
// by HyperparameterTuningJob or TrainingPipeline.
optional int64 default_value = 4;
}
@ -442,7 +443,7 @@ message StudySpec {
enum Algorithm {
// The default algorithm used by Vertex AI for [hyperparameter
// tuning](https://cloud.google.com/vertex-ai/docs/training/hyperparameter-tuning-overview)
// and [Vertex Vizier](https://cloud.google.com/vertex-ai/docs/vizier).
// and [Vertex AI Vizier](https://cloud.google.com/vertex-ai/docs/vizier).
ALGORITHM_UNSPECIFIED = 0;
// Simple grid search within the feasible space. To use grid search,
@ -517,7 +518,7 @@ message StudySpec {
Algorithm algorithm = 3;
// The observation noise level of the study.
// Currently only supported by the Vizier service. Not supported by
// Currently only supported by the Vertex AI Vizier service. Not supported by
// HyperparameterTuningJob or TrainingPipeline.
ObservationNoise observation_noise = 6;

@ -160,6 +160,11 @@ message InputDataConfig {
//
// Split based on the timestamp of the input data pieces.
TimestampSplit timestamp_split = 5;
// Supported only for tabular Datasets.
//
// Split based on the distribution of the specified column.
StratifiedSplit stratified_split = 12;
}
// Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
@ -352,3 +357,35 @@ message TimestampSplit {
// present or has an invalid value, that piece is ignored by the pipeline.
string key = 4 [(google.api.field_behavior) = REQUIRED];
}
// Assigns input data to the training, validation, and test sets so that the
// distribution of values found in the categorical column (as specified by the
// `key` field) is mirrored within each split. The fraction values determine
// the relative sizes of the splits.
//
// For example, if the specified column has three values, with 50% of the rows
// having value "A", 25% value "B", and 25% value "C", and the split fractions
// are specified as 80/10/10, then the training set will constitute 80% of the
// training data, with about 50% of the training set rows having the value "A"
// for the specified column, about 25% having the value "B", and about 25%
// having the value "C".
//
// Only the top 500 occurring values are used; any values not in the top
// 500 values are randomly assigned to a split. If less than three rows contain
// a specific value, those rows are randomly assigned.
//
// Supported only for tabular Datasets.
message StratifiedSplit {
  // The fraction of the input data that is to be used to train the Model.
  // NOTE(review): the three fractions are presumably expected to be in
  // [0, 1] and sum to at most 1.0 — confirm against server-side validation.
  double training_fraction = 1;
  // The fraction of the input data that is to be used to validate the Model.
  double validation_fraction = 2;
  // The fraction of the input data that is to be used to evaluate the Model.
  double test_fraction = 3;
  // Required. The key is a name of one of the Dataset's data columns.
  // The key provided must be for a categorical column.
  // Only the top 500 occurring values of this column are stratified; rows
  // with other values (or columns with fewer than three rows per value) are
  // assigned to a split at random (see the message-level comment above).
  string key = 4 [(google.api.field_behavior) = REQUIRED];
}

@ -0,0 +1,44 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/cloud/aiplatform/v1beta1/model.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "UnmanagedContainerModelProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Contains model information necessary to perform batch prediction without
// requiring a full model import.
// Contains model information necessary to perform batch prediction without
// requiring a full model import.
message UnmanagedContainerModel {
  // The path to the directory containing the Model artifact and any of its
  // supporting files.
  // NOTE(review): presumably a Cloud Storage URI (`gs://...`) — confirm the
  // accepted URI schemes with the BatchPredictionJob service.
  string artifact_uri = 1;
  // Contains the schemata used in the Model's predictions and explanations.
  PredictSchemata predict_schemata = 2;
  // Input only. The specification of the container that is to be used when
  // deploying this Model.
  ModelContainerSpec container_spec = 3 [(google.api.field_behavior) = INPUT_ONLY];
}

@ -34,9 +34,9 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// Vertex Vizier API.
// Vertex AI Vizier API.
//
// Vizier service is a GCP service to solve blackbox optimization problems,
// Vertex AI Vizier is a service to solve blackbox optimization problems,
// such as tuning machine learning hyperparameters and searching over deep
// learning architectures.
service VizierService {
@ -88,7 +88,7 @@ service VizierService {
}
// Adds one or more Trials to a Study, with parameter values
// suggested by Vertex Vizier. Returns a long-running
// suggested by Vertex AI Vizier. Returns a long-running
// operation associated with the generation of Trial suggestions.
// When this long-running operation succeeds, it will contain
// a [SuggestTrialsResponse][google.cloud.aiplatform.v1beta1.SuggestTrialsResponse].

Loading…
Cancel
Save