feat: add prediction service RPC RawPredict to aiplatform_v1beta1

feat: add tensorboard service RPCs to aiplatform_v1beta1: BatchCreateTensorboardRuns, BatchCreateTensorboardTimeSeries, WriteTensorboardExperimentData
feat: add model_deployment_monitoring_job to Endpoint in aiplatform_v1beta1
feat: add deployment_group to DeployedIndex in aiplatform_v1beta1
feat: add ModelEvaluationExplanationSpec in aiplatform_v1beta1

Committer: @dizcology
PiperOrigin-RevId: 393890669
commit 321abab214 (parent 298a980ac0, pull/671/head^2)
Authored by Google APIs; committed by Copybara-Service
30 files changed (lines changed in parentheses):

  google/cloud/aiplatform/v1beta1/BUILD.bazel (2)
  google/cloud/aiplatform/v1beta1/aiplatform_v1beta1.yaml (27)
  google/cloud/aiplatform/v1beta1/annotation.proto (1)
  google/cloud/aiplatform/v1beta1/artifact.proto (1)
  google/cloud/aiplatform/v1beta1/context.proto (1)
  google/cloud/aiplatform/v1beta1/custom_job.proto (25)
  google/cloud/aiplatform/v1beta1/data_item.proto (1)
  google/cloud/aiplatform/v1beta1/dataset.proto (3)
  google/cloud/aiplatform/v1beta1/dataset_service.proto (2)
  google/cloud/aiplatform/v1beta1/endpoint.proto (16)
  google/cloud/aiplatform/v1beta1/endpoint_service.proto (1)
  google/cloud/aiplatform/v1beta1/execution.proto (1)
  google/cloud/aiplatform/v1beta1/explanation.proto (4)
  google/cloud/aiplatform/v1beta1/explanation_metadata.proto (41)
  google/cloud/aiplatform/v1beta1/featurestore.proto (4)
  google/cloud/aiplatform/v1beta1/featurestore_monitoring.proto (6)
  google/cloud/aiplatform/v1beta1/index.proto (1)
  google/cloud/aiplatform/v1beta1/index_endpoint.proto (14)
  google/cloud/aiplatform/v1beta1/index_service.proto (3)
  google/cloud/aiplatform/v1beta1/metadata_service.proto (22)
  google/cloud/aiplatform/v1beta1/migration_service.proto (4)
  google/cloud/aiplatform/v1beta1/model.proto (6)
  google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto (11)
  google/cloud/aiplatform/v1beta1/model_monitoring.proto (3)
  google/cloud/aiplatform/v1beta1/pipeline_service.proto (64)
  google/cloud/aiplatform/v1beta1/prediction_service.proto (37)
  google/cloud/aiplatform/v1beta1/specialist_pool.proto (14)
  google/cloud/aiplatform/v1beta1/study.proto (21)
  google/cloud/aiplatform/v1beta1/tensorboard_run.proto (18)
  google/cloud/aiplatform/v1beta1/tensorboard_service.proto (110)

@@ -164,7 +164,7 @@ java_gapic_library(
":aiplatform_java_grpc",
] + _JAVA_GRPC_SUBPACKAGE_DEPS,
deps = [
":aiplatform_java_proto",
":aiplatform_java_proto","//google/api:api_java_proto",
] + _JAVA_PROTO_SUBPACKAGE_DEPS,
)

@@ -55,6 +55,12 @@ types:
- name: google.cloud.aiplatform.v1beta1.ImportDataResponse
- name: google.cloud.aiplatform.v1beta1.ImportFeatureValuesOperationMetadata
- name: google.cloud.aiplatform.v1beta1.ImportFeatureValuesResponse
- name: google.cloud.aiplatform.v1beta1.PurgeArtifactsMetadata
- name: google.cloud.aiplatform.v1beta1.PurgeArtifactsResponse
- name: google.cloud.aiplatform.v1beta1.PurgeContextsMetadata
- name: google.cloud.aiplatform.v1beta1.PurgeContextsResponse
- name: google.cloud.aiplatform.v1beta1.PurgeExecutionsMetadata
- name: google.cloud.aiplatform.v1beta1.PurgeExecutionsResponse
- name: google.cloud.aiplatform.v1beta1.SpecialistPool
- name: google.cloud.aiplatform.v1beta1.SuggestTrialsMetadata
- name: google.cloud.aiplatform.v1beta1.SuggestTrialsResponse
@@ -117,9 +123,7 @@ backend:
deadline: 60.0
- selector: 'google.cloud.aiplatform.v1beta1.PipelineService.*'
deadline: 60.0
- selector: google.cloud.aiplatform.v1beta1.PredictionService.Explain
deadline: 600.0
- selector: google.cloud.aiplatform.v1beta1.PredictionService.Predict
- selector: 'google.cloud.aiplatform.v1beta1.PredictionService.*'
deadline: 600.0
- selector: 'google.cloud.aiplatform.v1beta1.SpecialistPoolService.*'
deadline: 60.0
@@ -152,6 +156,7 @@ http:
- post: '/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel'
@@ -180,6 +185,7 @@ http:
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel'
- post: '/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel'
@@ -210,6 +216,7 @@ http:
- delete: '/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/endpoints/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/featurestores/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}'
@@ -238,6 +245,7 @@ http:
- delete: '/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}'
- delete: '/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}'
@@ -268,6 +276,8 @@ http:
- get: '/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/endpoints/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/featurestores/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}'
@@ -296,6 +306,7 @@ http:
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}'
- get: '/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}'
@@ -326,6 +337,7 @@ http:
- get: '/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations'
- get: '/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations'
- get: '/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations'
- get: '/ui/{name=projects/*/locations/*/edgeDevices/*}/operations'
- get: '/ui/{name=projects/*/locations/*/endpoints/*}/operations'
- get: '/ui/{name=projects/*/locations/*/featurestores/*}/operations'
- get: '/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations'
@@ -354,6 +366,7 @@ http:
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations'
- get: '/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations'
@@ -384,6 +397,7 @@ http:
- post: '/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait'
@@ -412,6 +426,7 @@ http:
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait'
- post: '/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait'
@@ -489,11 +504,7 @@ authentication:
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: google.cloud.aiplatform.v1beta1.PredictionService.Explain
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform
- selector: google.cloud.aiplatform.v1beta1.PredictionService.Predict
- selector: 'google.cloud.aiplatform.v1beta1.PredictionService.*'
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/cloud-platform

@@ -18,6 +18,7 @@ package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/user_action_reference.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";

@@ -103,6 +103,7 @@ message Artifact {
string schema_version = 15;
// Properties of the Artifact.
// The size of this field should not exceed 200KB.
google.protobuf.Struct metadata = 16;
// Description of the Artifact

@@ -88,6 +88,7 @@ message Context {
string schema_version = 14;
// Properties of the Context.
// The size of this field should not exceed 200KB.
google.protobuf.Struct metadata = 15;
// Description of the Context

@@ -92,9 +92,17 @@ message CustomJob {
// provided encryption key.
EncryptionSpec encryption_spec = 12;
// Output only. The web access URIs for the training job.
// The keys are the node names in the training jobs, e.g. workerpool0-0.
// The values are the URIs for each node's web portal in the job.
// Output only. URIs for accessing [interactive
// shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell)
// (one URI for each training node). Only available if
// [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] is `true`.
//
// The keys are names of each node in the training job; for example,
// `workerpool0-0` for the primary node, `workerpool1-0` for the first node in
// the second worker pool, and `workerpool1-1` for the second node in the
// second worker pool.
//
// The values are the URIs for each node's interactive shell.
map<string, string> web_access_uris = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@@ -110,7 +118,7 @@ message CustomJobSpec {
// Specifies the service account for workload run-as account.
// Users submitting jobs must have act-as permission on this run-as account.
// If unspecified, the [AI Platform Custom Code Service
// If unspecified, the [Vertex AI Custom Code Service
// Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents)
// for the CustomJob's project is used.
string service_account = 4;
@@ -163,8 +171,13 @@ message CustomJobSpec {
}
];
// Optional. Vertex AI will enable web portal access to the containers. The portals
// can be accessed on web via the URLs given by [web_access_uris][].
// Optional. Whether you want Vertex AI to enable [interactive shell
// access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell)
// to training containers.
//
// If set to `true`, you can access interactive shells at the URIs given
// by [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris] or [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris] (within
// [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]).
bool enable_web_access = 10 [(google.api.field_behavior) = OPTIONAL];
}
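
A minimal sketch of the new `enable_web_access` flag and the `web_access_uris` map it populates, assuming the Python `google-cloud-aiplatform` v1beta1 GAPIC client; the project, region, and container image below are placeholders:

```python
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

job = aiplatform_v1beta1.CustomJob(
    display_name="debug-job",
    job_spec=aiplatform_v1beta1.CustomJobSpec(
        enable_web_access=True,  # the new OPTIONAL field from this change
        worker_pool_specs=[
            {
                "machine_spec": {"machine_type": "n1-standard-4"},
                "replica_count": 1,
                "container_spec": {"image_uri": "gcr.io/my-project/trainer:latest"},
            }
        ],
    ),
)

created = client.create_custom_job(
    parent="projects/my-project/locations/us-central1", custom_job=job
)

# web_access_uris is OUTPUT_ONLY: keys are node names such as "workerpool0-0",
# values are the interactive shell URIs.
refreshed = client.get_custom_job(name=created.name)
for node, uri in refreshed.web_access_uris.items():
    print(node, uri)
```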

@@ -18,6 +18,7 @@ package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";

@@ -47,6 +47,9 @@ message Dataset {
// characters.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Optional. The description of the Dataset.
string description = 16 [(google.api.field_behavior) = OPTIONAL];
// Required. Points to a YAML file stored on Google Cloud Storage describing additional
// information about the Dataset.
// The schema is defined as an OpenAPI 3.0.2 Schema Object.

@@ -37,6 +37,8 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// The service that handles the CRUD of Vertex AI Dataset and its child
// resources.
service DatasetService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";

@@ -22,6 +22,7 @@ import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
import "google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto";
import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
@@ -105,6 +106,17 @@ message Endpoint {
string network = 13 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];
// Output only. Resource name of the Model Monitoring job associated with this Endpoint
// if monitoring is enabled by [CreateModelDeploymentMonitoringJob][].
// Format:
// `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
string model_deployment_monitoring_job = 14 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/ModelDeploymentMonitoringJob"
}
];
}
// A deployment of a Model. Endpoints contain one or more DeployedModels.
@@ -119,8 +131,8 @@ message DeployedModel {
// that need a higher degree of manual configuration.
DedicatedResources dedicated_resources = 7;
// A description of resources that to large degree are decided by AI
// Platform, and require only a modest additional configuration.
// A description of resources that to large degree are decided by Vertex
// AI, and require only a modest additional configuration.
AutomaticResources automatic_resources = 8;
}
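
A small sketch of reading the new output-only `model_deployment_monitoring_job` reference from an Endpoint, assuming the same Python v1beta1 client; the endpoint resource name is a placeholder:

```python
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.EndpointServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

endpoint = client.get_endpoint(
    name="projects/my-project/locations/us-central1/endpoints/1234567890"
)
# Empty unless monitoring was enabled via CreateModelDeploymentMonitoringJob.
print(endpoint.model_deployment_monitoring_job)
```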

@@ -33,6 +33,7 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// A service for managing Vertex AI's Endpoints.
service EndpointService {
option (google.api.default_host) = "aiplatform.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";

@@ -109,6 +109,7 @@ message Execution {
string schema_version = 14;
// Properties of the Execution.
// The size of this field should not exceed 200KB.
google.protobuf.Struct metadata = 15;
// Description of the Execution

@@ -337,8 +337,8 @@ message FeatureNoiseSigma {
repeated NoiseSigmaForFeature noise_sigma = 1;
}
// The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] entries that can be overridden at [online
// explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
// The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] entries that can be overridden at
// [online explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
message ExplanationSpecOverride {
// The parameters to be overridden. Note that the
// [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] cannot be changed. If not specified,

@@ -63,9 +63,8 @@ message ExplanationMetadata {
// Visualization configurations for image explanation.
message Visualization {
// Type of the image visualization. Only applicable to [Integrated
// Gradients attribution]
// [ExplanationParameters.integrated_gradients_attribution].
// Type of the image visualization. Only applicable to
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
enum Type {
// Should not be used.
TYPE_UNSPECIFIED = 0;
@@ -142,11 +141,10 @@ message ExplanationMetadata {
MASK_BLACK = 4;
}
// Type of the image visualization. Only applicable to [Integrated
// Gradients attribution]
// [ExplanationParameters.integrated_gradients_attribution]. OUTLINES
// shows regions of attribution, while PIXELS shows per-pixel attribution.
// Defaults to OUTLINES.
// Type of the image visualization. Only applicable to
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution].
// OUTLINES shows regions of attribution, while PIXELS shows per-pixel
// attribution. Defaults to OUTLINES.
Type type = 1;
// Whether to only highlight pixels with positive contributions, negative
@@ -155,8 +153,8 @@ message ExplanationMetadata {
// The color scheme used for the highlighted areas.
//
// Defaults to PINK_GREEN for [Integrated Gradients
// attribution][ExplanationParameters.integrated_gradients_attribution],
// Defaults to PINK_GREEN for
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
// which shows positive attributions in green and negative in pink.
//
// Defaults to VIRIDIS for
@@ -182,8 +180,7 @@ message ExplanationMetadata {
OverlayType overlay_type = 6;
}
// Defines how the feature is encoded to [encoded_tensor][]. Defaults to
// IDENTITY.
// Defines how a feature is encoded. Defaults to IDENTITY.
enum Encoding {
// Default value. This is the same as IDENTITY.
ENCODING_UNSPECIFIED = 0;
@@ -248,8 +245,7 @@ message ExplanationMetadata {
//
// If no baseline is specified, Vertex AI chooses the baseline for this
// feature. If multiple baselines are specified, Vertex AI returns the
// average attributions across them in
// [Attributions.baseline_attribution][].
// average attributions across them in [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
//
// For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
// of each baseline must match the shape of the input tensor. If a scalar is
@@ -297,10 +293,10 @@ message ExplanationMetadata {
repeated string index_feature_mapping = 8;
// Encoded tensor is a transformation of the input tensor. Must be provided
// if choosing [Integrated Gradients
// attribution][ExplanationParameters.integrated_gradients_attribution] or
// [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
// and the input tensor is not differentiable.
// if choosing
// [Integrated Gradients attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]
// or [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] and the
// input tensor is not differentiable.
//
// An encoded tensor is generated if the input tensor is encoded by a lookup
// table.
@@ -319,9 +315,8 @@ message ExplanationMetadata {
// Name of the group that the input belongs to. Features with the same group
// name will be treated as one feature when computing attributions. Features
// grouped together can have different shapes in value. If provided, there
// will be one single attribution generated in [
// featureAttributions][Attribution.feature_attributions], keyed by the
// group name.
// will be one single attribution generated in
// [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions], keyed by the group name.
string group_name = 12;
}
@@ -357,8 +352,8 @@ message ExplanationMetadata {
string display_name_mapping_key = 2;
}
// Name of the output tensor. Required and is only applicable to AI
// Platform provided images for Tensorflow.
// Name of the output tensor. Required and is only applicable to Vertex
// AI provided images for Tensorflow.
string output_tensor_name = 3;
}

@@ -40,10 +40,10 @@ message Featurestore {
// OnlineServingConfig specifies the details for provisioning online serving
// resources.
message OnlineServingConfig {
// Required. The number of nodes for each cluster. The number of nodes will not
// The number of nodes for each cluster. The number of nodes will not
// scale automatically but can be scaled manually by providing different
// values when updating.
int32 fixed_node_count = 2 [(google.api.field_behavior) = REQUIRED];
int32 fixed_node_count = 2;
}
// Possible states a Featurestore can have.

@@ -47,9 +47,9 @@ message FeaturestoreMonitoringConfig {
// Explicitly Disable the snapshot analysis based monitoring.
bool disabled = 1;
// Configuration of the snapshot analysis based monitoring pipeline
// running interval. The value is rolled up to full day.
google.protobuf.Duration monitoring_interval = 2;
// Configuration of the snapshot analysis based monitoring pipeline running
// interval. The value is rolled up to full day.
google.protobuf.Duration monitoring_interval = 2 [deprecated = true];
// Configuration of the snapshot analysis based monitoring pipeline
// running interval. The value indicates number of days.

@@ -19,6 +19,7 @@ package google.cloud.aiplatform.v1beta1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/deployed_index_ref.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";

@@ -172,6 +172,20 @@ message DeployedIndex {
// (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
// Example: 'vertex-ai-ip-range'.
repeated string reserved_ip_ranges = 10 [(google.api.field_behavior) = OPTIONAL];
// Optional. The deployment group can be no longer than 64 characters (e.g.,
// 'test', 'prod'). If not set, we will use the 'default' deployment group.
//
// Creating `deployment_groups` with `reserved_ip_ranges` is a recommended
// practice when the peered network has multiple peering ranges. This creates
// your deployments from predictable IP spaces for easier traffic
// administration. Also, one deployment_group (except 'default') can only be
// used with the same reserved_ip_ranges, which means that if the deployment_group
// has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or
// [d, e] is disallowed.
//
// Note: we only support up to 5 deployment groups (not including 'default').
string deployment_group = 11 [(google.api.field_behavior) = OPTIONAL];
}
// Used to set up the auth on the DeployedIndex's private endpoint.
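
A hedged sketch of deploying an index into a named deployment group with pinned reserved IP ranges, per the new `deployment_group` field; all resource and range names below are placeholders:

```python
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.IndexEndpointServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

deployed_index = aiplatform_v1beta1.DeployedIndex(
    id="my_deployed_index",
    index="projects/my-project/locations/us-central1/indexes/987",
    reserved_ip_ranges=["vertex-ai-ip-range-a", "vertex-ai-ip-range-b"],
    deployment_group="prod",  # new OPTIONAL field; defaults to 'default'
)

operation = client.deploy_index(
    index_endpoint="projects/my-project/locations/us-central1/indexEndpoints/555",
    deployed_index=deployed_index,
)
print(operation.result().deployed_index.id)
```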

@@ -271,4 +271,7 @@ message NearestNeighborSearchOperationMetadata {
// that, currently, for files that are broken or have an unsupported file
// format, we will not have stats for those files.
repeated ContentValidationStats content_validation_stats = 1;
// The ingested data size in bytes.
int64 data_bytes_count = 2;
}

@@ -73,7 +73,8 @@ service MetadataService {
option (google.api.method_signature) = "parent";
}
// Deletes a single MetadataStore.
// Deletes a single MetadataStore and all its child resources (Artifacts,
// Executions, and Contexts).
rpc DeleteMetadataStore(DeleteMetadataStoreRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/locations/*/metadataStores/*}"
@@ -447,10 +448,8 @@ message DeleteMetadataStoreRequest {
}
];
// If set to true, any child resources of this MetadataStore will be deleted.
// (Otherwise, the request will fail with a FAILED_PRECONDITION error if the
// MetadataStore has any child resources.)
bool force = 2;
// Deprecated: Field is no longer supported.
bool force = 2 [deprecated = true];
}
// Details of operations that perform [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore].
@@ -573,10 +572,11 @@ message UpdateArtifactRequest {
Artifact artifact = 1 [(google.api.field_behavior) = REQUIRED];
// Required. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
// If set to true, and the [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not found, a new [Artifact][google.cloud.aiplatform.v1beta1.Artifact] will
// be created. In this situation, `update_mask` is ignored.
// If set to true, and the [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not found, a new [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is
// created.
bool allow_missing = 3;
}
@@ -758,10 +758,11 @@ message UpdateContextRequest {
Context context = 1 [(google.api.field_behavior) = REQUIRED];
// Required. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
// If set to true, and the [Context][google.cloud.aiplatform.v1beta1.Context] is not found, a new [Context][google.cloud.aiplatform.v1beta1.Context] will be
// created. In this situation, `update_mask` is ignored.
// If set to true, and the [Context][google.cloud.aiplatform.v1beta1.Context] is not found, a new [Context][google.cloud.aiplatform.v1beta1.Context] is
// created.
bool allow_missing = 3;
}
@@ -1021,10 +1022,11 @@ message UpdateExecutionRequest {
Execution execution = 1 [(google.api.field_behavior) = REQUIRED];
// Required. A FieldMask indicating which fields should be updated.
// Functionality of this field is not yet supported.
google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
// If set to true, and the [Execution][google.cloud.aiplatform.v1beta1.Execution] is not found, a new [Execution][google.cloud.aiplatform.v1beta1.Execution]
// will be created. In this situation, `update_mask` is ignored.
// is created.
bool allow_missing = 3;
}
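
A sketch of the clarified `allow_missing` upsert semantics on UpdateArtifact (the same pattern applies to UpdateContext and UpdateExecution). The store and artifact names are placeholders, and `allow_missing` is passed through the request message because it is not part of the flattened method signature:

```python
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.MetadataServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

artifact = aiplatform_v1beta1.Artifact(
    name=(
        "projects/my-project/locations/us-central1"
        "/metadataStores/default/artifacts/my-artifact"
    ),
    display_name="my-artifact",
)

# With allow_missing=True, a missing Artifact is created instead of erroring.
updated = client.update_artifact(
    request={
        "artifact": artifact,
        "update_mask": field_mask_pb2.FieldMask(paths=["display_name"]),
        "allow_missing": True,
    }
)
print(updated.name)
```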

@@ -199,8 +199,8 @@ message MigrateResourceRequest {
string dataset_display_name = 2 [(google.api.field_behavior) = REQUIRED];
}
// Config for migrating Dataset in datalabeling.googleapis.com to AI
// Platform's Dataset.
// Config for migrating Dataset in datalabeling.googleapis.com to Vertex
// AI's Dataset.
message MigrateDataLabelingDatasetConfig {
// Config for migrating AnnotatedDataset in datalabeling.googleapis.com to
// Vertex AI's SavedQuery.

@@ -348,7 +348,7 @@ message ModelContainerSpec {
// predictions. This URI must identify an image in Artifact Registry or
// Container Registry. Learn more about the [container publishing
// requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
// including permissions requirements for the AI Platform Service Agent.
// including permissions requirements for the Vertex AI Service Agent.
//
// The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
// internally, and this original path is afterwards not used.
@@ -472,8 +472,8 @@ message ModelContainerSpec {
repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];
// Immutable. List of ports to expose from the container. Vertex AI sends any
// prediction requests that it receives to the first port on this list. AI
// Platform also sends
// prediction requests that it receives to the first port on this list. Vertex
// AI also sends
// [liveness and health
// checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
// to this port.

@@ -26,6 +26,8 @@ import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
@@ -182,6 +184,15 @@ message ModelDeploymentMonitoringJob {
// Stats anomalies base folder path.
GcsDestination stats_anomalies_base_directory = 20;
// Customer-managed encryption key spec for a ModelDeploymentMonitoringJob. If
// set, this ModelDeploymentMonitoringJob and all sub-resources of this
// ModelDeploymentMonitoringJob will be secured by this key.
EncryptionSpec encryption_spec = 21;
// Output only. Only populated when the job's state is `JOB_STATE_FAILED` or
// `JOB_STATE_CANCELLED`.
google.rpc.Status error = 23 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// ModelDeploymentMonitoringBigQueryTable specifies the BigQuery table name

@@ -107,8 +107,7 @@ message ModelMonitoringObjectiveConfig {
// Should not be set.
PREDICTION_FORMAT_UNSPECIFIED = 0;
// Predictions are in JSONL files, consistent from the definition here
// (http://shortn/_4bS0hL7ofb).
// Predictions are in JSONL files.
JSONL = 2;
// Predictions are in BigQuery.

@@ -192,32 +192,22 @@ message ListTrainingPipelinesRequest {
}
];
// Lists the PipelineJobs that match the filter expression. The following
// fields are supported:
// The standard list filter.
// Supported fields:
//
// * `pipeline_name`: Supports `=` and `!=` comparisons.
// * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `update_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `end_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `labels`: Supports key-value equality and key presence.
// * `display_name` supports = and !=.
//
// Filter expressions can be combined together using logical operators
// (`AND` & `OR`).
// For example: `pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"`.
// * `state` supports = and !=.
//
// The syntax to define filter expression is based on
// https://google.aip.dev/160.
// Some examples of using the filter are:
//
// Examples:
// * `state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`
//
// * `create_time>"2021-05-18T00:00:00Z" OR
// update_time>"2020-05-18T00:00:00Z"` PipelineJobs created or updated
// after 2020-05-18 00:00:00 UTC.
// * `labels.env = "prod"`
// PipelineJobs with label "env" set to "prod".
// * `state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`
//
// * `NOT display_name="my_pipeline"`
//
// * `state="PIPELINE_STATE_FAILED"`
string filter = 2;
// The standard list page size.
@@ -316,18 +306,32 @@ message ListPipelineJobsRequest {
}
];
// The standard list filter.
// Supported fields:
// Lists the PipelineJobs that match the filter expression. The following
// fields are supported:
//
// * `display_name` supports `=` and `!=`.
// * `state` supports `=` and `!=`.
// * `pipeline_name`: Supports `=` and `!=` comparisons.
// * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `update_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `end_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons.
// Values must be in RFC 3339 format.
// * `labels`: Supports key-value equality and key presence.
//
// The following examples demonstrate how to filter the list of PipelineJobs:
// Filter expressions can be combined together using logical operators
// (`AND` & `OR`).
// For example: `pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"`.
//
// The syntax to define filter expression is based on
// https://google.aip.dev/160.
//
// Examples:
//
// * `state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`
// * `state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`
// * `NOT display_name="my_pipeline"`
// * `state="PIPELINE_STATE_FAILED"`
// * `create_time>"2021-05-18T00:00:00Z" OR
// update_time>"2020-05-18T00:00:00Z"` PipelineJobs created or updated
// after 2020-05-18 00:00:00 UTC.
// * `labels.env = "prod"`
// PipelineJobs with label "env" set to "prod".
string filter = 2;
// The standard list page size.
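
A sketch of the filter grammar documented above, applied through ListPipelineJobs; the parent and filter values are placeholders:

```python
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PipelineServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

jobs = client.list_pipeline_jobs(
    request={
        "parent": "projects/my-project/locations/us-central1",
        "filter": 'state="PIPELINE_STATE_SUCCEEDED" AND labels.env="prod"',
    }
)
for job in jobs:
    print(job.name, job.state)
```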

@@ -46,6 +46,15 @@ service PredictionService {
option (google.api.method_signature) = "endpoint,instances,parameters";
}
// Perform an online prediction with arbitrary http payload.
rpc RawPredict(RawPredictRequest) returns (google.api.HttpBody) {
option (google.api.http) = {
post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:rawPredict"
body: "*"
};
option (google.api.method_signature) = "endpoint,http_body";
}
// Perform an online explanation.
//
// If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
@@ -108,6 +117,34 @@ message PredictResponse {
string deployed_model_id = 2;
}
// Request message for [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict].
message RawPredictRequest {
// Required. The name of the Endpoint requested to serve the prediction.
// Format:
// `projects/{project}/locations/{location}/endpoints/{endpoint}`
string endpoint = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/Endpoint"
}
];
// The prediction input. Supports HTTP headers and arbitrary data payload.
//
// A [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] may have an upper limit on the number of instances it
// supports per request. When this limit is exceeded for an AutoML model,
// the [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] method returns an error.
// When this limit is exceeded for a custom-trained model, the behavior varies
// depending on the model.
//
// You can specify the schema for each instance in the
// [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
// field when you create a [Model][google.cloud.aiplatform.v1beta1.Model]. This schema applies when you deploy the
// `Model` as a `DeployedModel` to an [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and use the `RawPredict`
// method.
google.api.HttpBody http_body = 2;
}
// Request message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
message ExplainRequest {
// Required. The name of the Endpoint requested to serve the explanation.
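
A sketch of calling the new RawPredict RPC with an arbitrary JSON payload wrapped in a `google.api.HttpBody`; the endpoint name and instance payload are placeholders:

```python
import json

from google.api import httpbody_pb2
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

http_body = httpbody_pb2.HttpBody(
    content_type="application/json",
    data=json.dumps({"instances": [[1.0, 2.0, 3.0]]}).encode("utf-8"),
)

response = client.raw_predict(
    endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
    http_body=http_body,
)
# The response is itself an HttpBody; the payload format is model-defined.
print(response.content_type, response.data)
```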

@@ -29,11 +29,11 @@ option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
// SpecialistPool represents customers' own workforce to work on their data
// labeling jobs. It includes a group of specialist managers who are responsible
// for managing the labelers in this pool as well as customers' data labeling
// jobs associated with this pool.
// Customers create specialist pool as well as start data labeling jobs on
// Cloud, managers and labelers work with the jobs using CrowdCompute console.
// labeling jobs. It includes a group of specialist managers and workers.
// Managers are responsible for managing the workers in this pool as well as
// customers' data labeling jobs associated with this pool. Customers create a
// specialist pool as well as start data labeling jobs on Cloud; managers and
// workers handle the jobs using the CrowdCompute console.
message SpecialistPool {
option (google.api.resource) = {
type: "aiplatform.googleapis.com/SpecialistPool"
@@ -49,10 +49,10 @@ message SpecialistPool {
// This field should be unique on project-level.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];
// Output only. The number of Specialists in this SpecialistPool.
// Output only. The number of managers in this SpecialistPool.
int32 specialist_managers_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// The email addresses of the specialists in the SpecialistPool.
// The email addresses of the managers in the SpecialistPool.
repeated string specialist_manager_emails = 4;
// Output only. The resource name of the pending data labeling jobs.

@@ -171,9 +171,19 @@ message Trial {
}
];
// Output only. The web access URIs for the training job.
// The keys are the node names in the training jobs, e.g. workerpool0-0.
// The values are the URIs for each node's web portal in the job.
// Output only. URIs for accessing [interactive
// shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell)
// (one URI for each training node). Only available if this trial is part of
// a [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob] and the job's
// [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] field
// is `true`.
//
// The keys are names of each node used for the trial; for example,
// `workerpool0-0` for the primary node, `workerpool1-0` for the first node in
// the second worker pool, and `workerpool1-1` for the second node in the
// second worker pool.
//
// The values are the URIs for each node's interactive shell.
map<string, string> web_access_uris = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@@ -391,6 +401,8 @@ message StudySpec {
// Configuration for ConvexStopPolicy.
message ConvexStopConfig {
option deprecated = true;
// Steps used in predicting the final objective for early stopped trials. In
// general, it's set to be the same as the defined steps in training /
// tuning. When use_steps is false, this field is set to the maximum elapsed
@@ -490,8 +502,9 @@ message StudySpec {
// The automated early stopping spec using median rule.
MedianAutomatedStoppingSpec median_automated_stopping_spec = 5;
// Deprecated.
// The automated early stopping using convex stopping rule.
ConvexStopConfig convex_stop_config = 8;
ConvexStopConfig convex_stop_config = 8 [deprecated = true];
}
// Required. Metric specs for the Study.

@@ -56,6 +56,24 @@ message TensorboardRun {
// Output only. Timestamp when this TensorboardRun was last updated.
google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
// The labels with user-defined metadata to organize your TensorboardRuns.
//
// This field will be used to filter and visualize Runs in the Tensorboard UI.
// For example, a Vertex AI training job can set a label
// aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created
// within that job. An end user can set a label experiment_id=xxxxx for all
// the runs produced in a Jupyter notebook. These runs can be grouped by a
// label value and visualized together in the Tensorboard UI.
//
// Label keys and values can be no longer than 64 characters
// (Unicode codepoints), can only contain lowercase letters, numeric
// characters, underscores and dashes. International characters are allowed.
// No more than 64 user labels can be associated with one TensorboardRun
// (System labels are excluded).
//
// See https://goo.gl/xmQnxf for more information and examples of labels.
// System reserved label keys are prefixed with "aiplatform.googleapis.com/"
// and are immutable.
map<string, string> labels = 8;
// Used to perform a consistent read-modify-write update. If not set, a blind

@@ -151,6 +151,15 @@ service TensorboardService {
option (google.api.method_signature) = "parent,tensorboard_run,tensorboard_run_id";
}
// Batch create TensorboardRuns.
rpc BatchCreateTensorboardRuns(BatchCreateTensorboardRunsRequest) returns (BatchCreateTensorboardRunsResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs:batchCreate"
body: "*"
};
option (google.api.method_signature) = "parent,requests";
}
// Gets a TensorboardRun.
rpc GetTensorboardRun(GetTensorboardRunRequest) returns (TensorboardRun) {
option (google.api.http) = {
@@ -188,6 +197,15 @@ service TensorboardService {
};
}
// Batch create TensorboardTimeSeries that belong to a TensorboardExperiment.
rpc BatchCreateTensorboardTimeSeries(BatchCreateTensorboardTimeSeriesRequest) returns (BatchCreateTensorboardTimeSeriesResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/locations/*/tensorboards/*/experiments/*}/runs/*/timeSeries:batchCreate"
body: "*"
};
option (google.api.method_signature) = "parent,requests";
}
// Creates a TensorboardTimeSeries.
rpc CreateTensorboardTimeSeries(CreateTensorboardTimeSeriesRequest) returns (TensorboardTimeSeries) {
option (google.api.http) = {
@@ -257,6 +275,17 @@ service TensorboardService {
option (google.api.method_signature) = "time_series";
}
// Write time series data points of multiple TensorboardTimeSeries in multiple
// TensorboardRuns. If any data fail to be ingested, an error will be
// returned.
rpc WriteTensorboardExperimentData(WriteTensorboardExperimentDataRequest) returns (WriteTensorboardExperimentDataResponse) {
option (google.api.http) = {
post: "/v1beta1/{tensorboard_experiment=projects/*/locations/*/tensorboards/*/experiments/*}:write"
body: "*"
};
option (google.api.method_signature) = "tensorboard_experiment,write_run_data_requests";
}
// Write time series data points into multiple TensorboardTimeSeries under
// a TensorboardRun. If any data fail to be ingested, an error will be
// returned.
@@ -311,7 +340,7 @@ message GetTensorboardRequest {
message ListTensorboardsRequest {
// Required. The resource name of the Location to list Tensorboards.
// Format:
// 'projects/{project}/locations/{location}'
// `projects/{project}/locations/{location}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
@@ -497,10 +526,35 @@ message DeleteTensorboardExperimentRequest {
];
}
// Request message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
message BatchCreateTensorboardRunsRequest {
// Required. The resource name of the TensorboardExperiment to create the
// TensorboardRuns in. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
// The parent field in the CreateTensorboardRunRequest messages must match
// this field.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/TensorboardExperiment"
}
];
// Required. The request message specifying the TensorboardRuns to create.
// A maximum of 1000 TensorboardRuns can be created in a batch.
repeated CreateTensorboardRunRequest requests = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns].
message BatchCreateTensorboardRunsResponse {
// The created TensorboardRuns.
repeated TensorboardRun tensorboard_runs = 1;
}
// Request message for [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun].
message CreateTensorboardRunRequest {
// Required. The resource name of the Tensorboard to create the TensorboardRun in.
// Format:
// Required. The resource name of the TensorboardExperiment to create the TensorboardRun
// in. Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
@@ -557,7 +611,7 @@ message ReadTensorboardBlobDataResponse {
// Request message for [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
message ListTensorboardRunsRequest {
// Required. The resource name of the Tensorboard to list TensorboardRuns.
// Required. The resource name of the TensorboardExperiment to list TensorboardRuns.
// Format:
// 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}'
string parent = 1 [
@@ -632,6 +686,33 @@ message DeleteTensorboardRunRequest {
];
}
// Request message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
message BatchCreateTensorboardTimeSeriesRequest {
// Required. The resource name of the TensorboardExperiment to create the
// TensorboardTimeSeries in.
// Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
// The TensorboardRuns referenced by the parent fields in the
// CreateTensorboardTimeSeriesRequest messages must be sub resources of this
// TensorboardExperiment.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/TensorboardExperiment"
}
];
// Required. The request message specifying the TensorboardTimeSeries to create.
// A maximum of 1000 TensorboardTimeSeries can be created in a batch.
repeated CreateTensorboardTimeSeriesRequest requests = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
message BatchCreateTensorboardTimeSeriesResponse {
// The created TensorboardTimeSeries.
repeated TensorboardTimeSeries tensorboard_time_series = 1;
}
// Request message for [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
message CreateTensorboardTimeSeriesRequest {
// Required. The resource name of the TensorboardRun to create the
@@ -775,6 +856,27 @@ message ReadTensorboardTimeSeriesDataResponse {
TimeSeriesData time_series_data = 1;
}
// Request message for [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
message WriteTensorboardExperimentDataRequest {
// Required. The resource name of the TensorboardExperiment to write data to.
// Format:
// `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
string tensorboard_experiment = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "aiplatform.googleapis.com/TensorboardExperiment"
}
];
// Required. Requests containing per-run TensorboardTimeSeries data to write.
repeated WriteTensorboardRunDataRequest write_run_data_requests = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData].
message WriteTensorboardExperimentDataResponse {
}
// Request message for [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
message WriteTensorboardRunDataRequest {
// Required. The resource name of the TensorboardRun to write data to.
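
A sketch of the new WriteTensorboardExperimentData RPC, batching one scalar point into a single run's time series; the resource names, time series ID, and values are placeholders:

```python
from google.cloud import aiplatform_v1beta1
from google.protobuf import timestamp_pb2

client = aiplatform_v1beta1.TensorboardServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)

experiment = (
    "projects/my-project/locations/us-central1/tensorboards/111/experiments/exp1"
)
now = timestamp_pb2.Timestamp()
now.GetCurrentTime()

run_data = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
    tensorboard_run=f"{experiment}/runs/run1",
    time_series_data=[
        aiplatform_v1beta1.TimeSeriesData(
            tensorboard_time_series_id="loss",
            value_type=aiplatform_v1beta1.TensorboardTimeSeries.ValueType.SCALAR,
            values=[
                aiplatform_v1beta1.TimeSeriesDataPoint(
                    step=1,
                    wall_time=now,
                    scalar=aiplatform_v1beta1.Scalar(value=0.42),
                )
            ],
        )
    ],
)

client.write_tensorboard_experiment_data(
    tensorboard_experiment=experiment, write_run_data_requests=[run_data]
)
```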
