Update Cloud ML protos

pull/175/head
Jacob Geiger 9 years ago
parent b29baf4de9
commit b56d92d6b7
  1. google/cloud/ml/v1beta1/job_service.proto (211)
  2. google/cloud/ml/v1beta1/model_service.proto (205)
  3. google/cloud/ml/v1beta1/operation_metadata.proto (37)
  4. google/cloud/ml/v1beta1/prediction_service.proto (226)
  5. google/cloud/ml/v1beta1/project_service.proto (12)

@ -31,24 +31,24 @@ option java_package = "com.google.cloud.ml.api.v1beta1";
// Allows creating and managing training and prediction jobs.
// Service to create and manage training and batch prediction jobs.
service JobService {
// Create a training or a prediction job.
// Creates a training or a batch prediction job.
rpc CreateJob(CreateJobRequest) returns (Job) {
option (google.api.http) = { post: "/v1beta1/{parent=projects/*}/jobs" body: "job" };
}
// List jobs in the project.
// Lists the jobs in the project.
rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) {
option (google.api.http) = { get: "/v1beta1/{parent=projects/*}/jobs" };
}
// Describe a job.
// Describes a job.
rpc GetJob(GetJobRequest) returns (Job) {
option (google.api.http) = { get: "/v1beta1/{name=projects/*/jobs/*}" };
}
// Cancel a running job.
// Cancels a running job.
rpc CancelJob(CancelJobRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1beta1/{name=projects/*/jobs/*}:cancel" body: "*" };
}
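
The four JobService methods above map one-to-one onto the REST paths in their `google.api.http` options. The following is a minimal sketch, not part of the proto, of calling two of those bindings with the Python standard library; the `ml.googleapis.com` host, the bearer-token handling, and the `my-project`/`my_job` names are assumptions, and later sketches in this commit reuse this `_call` helper.

# Minimal sketch, not part of the proto: calling the JobService REST bindings.
# Assumptions: the API is served at ml.googleapis.com and an OAuth2 access
# token is available in the ACCESS_TOKEN environment variable.
import json
import os
import urllib.request

API_ROOT = "https://ml.googleapis.com/v1beta1"  # assumed host
TOKEN = os.environ["ACCESS_TOKEN"]              # assumed auth setup


def _call(method, path, body=None):
    """Issue an authenticated JSON request against the Cloud ML REST API."""
    data = json.dumps(body).encode("utf-8") if body is not None else None
    request = urllib.request.Request(
        API_ROOT + path,
        data=data,
        method=method,
        headers={"Authorization": "Bearer " + TOKEN,
                 "Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read().decode("utf-8"))


# ListJobs: GET /v1beta1/{parent=projects/*}/jobs
jobs = _call("GET", "/projects/my-project/jobs")

# CancelJob: POST /v1beta1/{name=projects/*/jobs/*}:cancel
_call("POST", "/projects/my-project/jobs/my_job:cancel", body={})
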
@ -56,75 +56,110 @@ service JobService {
// Represents input parameters for a training job.
message TrainingInput {
// Scale tiers.
// A scale tier is an abstract representation of the resources Cloud ML
// will allocate to a training job. When selecting a scale tier for your
// training job, you should consider the size of your training dataset and
// the complexity of your model. As the tiers increase, virtual machines are
// added to handle your job, and the individual machines in the cluster
// generally have more memory and greater processing power than they do at
// lower tiers. The number of training units charged per hour of processing
// increases as tiers get more advanced. Refer to the
// [pricing guide](/ml/pricing) for more details.
enum ScaleTier {
// A single worker instance and no parameter servers.
// A single worker instance. This tier is suitable for learning how to use
// Cloud ML, and for experimenting with new models using small datasets.
BASIC = 0;
// A few workers and one parameter server.
// Many workers and a few parameter servers.
STANDARD_1 = 1;
// A medium number of workers and a few parameter servers.
STANDARD_2 = 2;
// A large amount of worker with more parameter servers.
// A large number of workers with many parameter servers.
PREMIUM_1 = 3;
// A very large number of workers with even more parameter servers.
PREMIUM_2 = 4;
// Specify your own amounts of replicas in the `worker_count` and
// `parameter_server_count` fields, as well as machine types for the master,
// the workers and the parameter servers.
// The CUSTOM tier is not a set tier, but rather enables you to use your
// own cluster specification. When you use this tier, you must also set
// valid values for `worker_count` and `parameter_server_count`, and you can
// specify the type of virtual machines to use for the different types of
// workers by setting `master_type`, `worker_type`, and
// `parameter_server_type`.
CUSTOM = 5;
}
// Required. Specifies the machine types, the amounts of replicas for workers
// Required. Specifies the machine types, the number of replicas for workers
// and parameter servers.
ScaleTier scale_tier = 1;
// Optional. Specifies the master machine type.
// Optional. Specifies the type of virtual machine to use for your training
// job's master worker.
//
// The following types are supported:
//
// - `standard`
// - `large_model`
// - `complex_model_s`
// - `complex_model_m`
// - `complex_model_l`
// <dl>
// <dt>standard</dt>
// <dd>
// A basic machine configuration suitable for training simple models with
// small to moderate datasets.
// </dd>
// <dt>large_model</dt>
// <dd>
// A machine with a lot of memory, specially suited for parameter servers
// when your model is large (having many hidden layers or layers with very
// large numbers of nodes).
// </dd>
// <dt>complex_model_s</dt>
// <dd>
// A machine suitable for the master and workers of the cluster when your
// model requires more computation than the standard machine can handle
// satisfactorily.
// </dd>
// <dt>complex_model_m</dt>
// <dd>
// A machine with roughly twice the number of cores and roughly double the
// memory of `complex_model_s`.
// </dd>
// <dt>complex_model_l</dt>
// <dd>
// A machine with roughly twice the number of cores and roughly double the
// memory of `complex_model_m`.
// </dd>
// </dl>
//
// Cannot be used in combination with a standard scale tier.
// This value can only be used when `ScaleTier` is set to `CUSTOM`.
string master_type = 2;
// Optional. Specifies the worker machine type.
// The following types are supported:
// Optional. Specifies the type of virtual machine to use for your training
// job's worker nodes.
//
// - `standard`
// - `large_model`
// - `complex_model_s`
// - `complex_model_m`
// - `complex_model_l`
// The supported values are the same as those described in the entry for
// `master_type`.
//
// Cannot be used in combination with a standard scale tier.
// This value must be present when `scale_tier` is set to `CUSTOM` and
// `worker_count` is greater than zero.
string worker_type = 3;
// Optional. Specifies the parameter server machine type.
// The following types are supported:
// Optional. Specifies the type of virtual machine to use for your training
// job's parameter server.
//
// - `standard`
// - `large_model`
// - `complex_model_s`
// - `complex_model_m`
// - `complex_model_l`
// The supported values are the same as those described in the entry for
// `master_type`.
//
// Cannot be used in combination with a standard scale tier.
// This value must be present when `scale_tier` is set to `CUSTOM` and
// `parameter_server_count` is greater than zero.
string parameter_server_type = 4;
// Optional. Specifies the required amount of worker replicas.
// Cannot be used in combination with a standard scale tier.
// Optional. The number of worker replicas to use for the training job. Each
// replica in the cluster will be of the type specified in `worker_type`.
//
// This value can only be used when `scale_tier` is set to `CUSTOM`. If you
// set this value, you must also set `worker_type`.
int64 worker_count = 5;
// Optional. Specifies the required amount of parameter server replicas.
// Cannot be used in combination with a standard scale tier.
// Optional. The number of parameter server replicas to use for the training
// job. Each replica in the cluster will be of the type specified in
// `parameter_server_type`.
//
// This value can only be used when `scale_tier` is set to `CUSTOM`. If you
// set this value, you must also set `parameter_server_type`.
int64 parameter_server_count = 6;
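
As a concrete illustration of the CUSTOM-tier rules above, here is a hedged sketch of a TrainingInput payload expressed as JSON-style Python data. The camelCase key names assume the standard proto3 JSON mapping, and the machine-type choices are only examples.

# Hypothetical TrainingInput for the CUSTOM scale tier, as it might appear in
# the JSON body of a CreateJob call. Key casing assumes the proto3 JSON mapping.
training_input = {
    "scaleTier": "CUSTOM",
    "masterType": "complex_model_m",
    "workerType": "complex_model_m",
    "parameterServerType": "large_model",
    "workerCount": 8,           # workerType must be set when this is > 0
    "parameterServerCount": 3,  # parameterServerType must be set when this is > 0
}
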
// Required. The Google Cloud Storage location of the packages with
@ -146,7 +181,7 @@ message TrainingInput {
// Represents a set of hyperparameters to optimize.
message HyperparameterSpec {
// The optimization goal of the objective value.
// The available types of optimization goals.
enum GoalType {
// Goal Type will default to maximize.
GOAL_TYPE_UNSPECIFIED = 0;
@ -158,20 +193,30 @@ message HyperparameterSpec {
MINIMIZE = 2;
}
// Required. Should the evaluation metric be maximized or minimized?
// Required. The type of goal to use for tuning. Available types are
// `MAXIMIZE` and `MINIMIZE`.
//
// Defaults to `MAXIMIZE`.
GoalType goal = 1;
// Required. The set of parameters to tune.
repeated ParameterSpec params = 2;
// Optional. How many training trials should be attempted to optimize.
// Optional. How many training trials should be attempted to optimize
// the specified hyperparameters.
//
// Defaults to one.
int32 max_trials = 3;
// Optional. How many training trials should be run in parallel.
// More parallelization will be faster, but parallel trials only benefit
// from the information gained by previous trials.
// Optional. The number of training trials to run concurrently.
// You can reduce the time it takes to perform hyperparameter tuning by adding
// trials in parallel. However, each trial only benefits from the information
// gained in completed trials. That means that a trial does not get access to
// the results of trials running at the same time, which could reduce the
// quality of the overall optimization.
//
// Each trial will use the same scale tier and machine types.
//
// Defaults to one.
int32 max_parallel_trials = 4;
}
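
The tuning fields above can be pulled together into a small spec. The sketch below is illustrative only: apart from `discrete_values` and `scale_type`, which appear later in this diff, the per-parameter keys and enum values are assumed from the usual ParameterSpec JSON mapping rather than taken from this excerpt.

# Illustrative hyperparameter spec. "parameterName" and "type" are assumed
# ParameterSpec fields; only discrete_values and scale_type appear in the
# excerpt of this diff.
hyperparameters = {
    "goal": "MAXIMIZE",
    "maxTrials": 10,          # defaults to one if omitted
    "maxParallelTrials": 2,   # parallel trials cannot learn from one another
    "params": [
        {
            "parameterName": "hidden_units",    # assumed field name
            "type": "DISCRETE",                 # assumed enum value
            "discreteValues": [64, 128, 256],   # increasing, <= 1,000 values
        }
    ],
}
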
@ -180,8 +225,8 @@ message HyperparameterSpec {
message ParameterSpec {
// The type of the parameter.
enum ParameterType {
// Parameter type must be specified. Unspecified values will be treated
// as an error.
// You must specify a valid type. Using this unspecified type will result in
// an error.
PARAMETER_TYPE_UNSPECIFIED = 0;
// Type for real-valued parameters.
@ -243,7 +288,7 @@ message ParameterSpec {
// A list of feasible points.
// The list should be in strictly increasing order. For instance, this
// parameter might have possible settings of 1.5, 2.5, and 4.0. This list
// shouldn't be too large - probably not more than 1,000 points.
// should not contain more than 1,000 values.
repeated double discrete_values = 6;
// Optional. How the parameter should be scaled to the hypercube.
@ -253,7 +298,10 @@ message ParameterSpec {
ScaleType scale_type = 7;
}
// Represents the result of a hyperparameter tuning trial from a training job.
// Represents the result of a single hyperparameter tuning trial from a
// training job. The TrainingOutput object that is returned on successful
// completion of a training job with hyperparameter tuning includes a list
// of HyperparameterOutput objects, one for each successful trial.
message HyperparameterOutput {
// An observed value of a metric.
message HyperparameterMetric {
@ -279,7 +327,7 @@ message HyperparameterOutput {
// Represents results of a training job.
message TrainingOutput {
// The number of tuning trials completed successfully.
// The number of hyperparameter tuning trials that completed successfully.
int64 completed_trial_count = 1;
// Results for individual Hyperparameter trials.
@ -299,16 +347,24 @@ message PredictionInput {
// The source file is a TFRecord file.
TF_RECORD = 2;
// The source file is a GZIP-compressed TFRecord file.
TF_RECORD_GZIP = 3;
}
// Required. The model or the version to use for prediction.
oneof model_version {
// The name of the model. The default version will be used.
// E.g "project/your_project/models/your_model"
// Use this field if you want to use the default version for the specified
// model. The string must use the following format:
//
// `"project/<var>[YOUR_PROJECT]</var>/models/<var>[YOUR_MODEL]</var>"`
string model_name = 1;
// The version to be used.
// E.g "project/your_project/models/your_model/versions/your_version"
// Use this field if you want to specify a version of the model to use. The
// string is formatted the same way as `model_name`, with the addition
// of the version information:
//
// `"projects/<var>[YOUR_PROJECT]</var>/models/<var>[YOUR_MODEL]</var>/versions/<var>[YOUR_VERSION]</var>"`
string version_name = 2;
}
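
The oneof above means a batch prediction request names either a model (its default version is used) or one specific version, never both. A small hedged illustration, with camelCase keys assumed from the proto3 JSON mapping:

# Hypothetical PredictionInput fragments showing the mutually exclusive
# model_version oneof; a request sets exactly one of these keys.
use_default_version = {
    "modelName": "projects/my-project/models/my_model",
}
use_specific_version = {
    "versionName": "projects/my-project/models/my_model/versions/v2",
}
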
@ -349,13 +405,13 @@ message Job {
// The job state is unspecified.
STATE_UNSPECIFIED = 0;
// The job has been just created and is awaiting to be processed.
// The job has just been created and processing has not yet begun.
QUEUED = 1;
// The job is being prepared to run.
// The service is preparing to run the job.
PREPARING = 2;
// Training or prediction is in progress.
// The job is in progress.
RUNNING = 3;
// The job completed successfully.
@ -414,6 +470,7 @@ message Job {
// Request message for the CreateJob method.
message CreateJobRequest {
// Required. The project name.
//
// Authorization: requires `Editor` role on the specified project.
string parent = 1;
@ -423,20 +480,25 @@ message CreateJobRequest {
// Request message for the ListJobs method.
message ListJobsRequest {
// Required. The name of the project whose jobs are to be listed.
// Required. The name of the project for which to list jobs.
//
// Authorization: requires `Viewer` role on the specified project.
string parent = 1;
// Optional. Specifies the subset of jobs to retrieve.
string filter = 2;
// Optional. Specifies the ordering of the jobs.
string order_by = 3;
// Optional. A token for continuing the enumeration.
// Optional. A page token to request the next page of results.
//
// You get the token from the `next_page_token` field of the response from
// the previous call.
string page_token = 4;
// Optional. The page size.
// Optional. The number of jobs to retrieve per "page" of results. If there
// are more remaining results than this number, the response message will
// contain a valid value in the `next_page_token` field.
//
// The default value is 20, and the maximum page size is 100.
int32 page_size = 5;
}
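
The `page_token`/`page_size` contract described above (and repeated for models and versions later in this commit) suggests a simple pagination loop. A sketch, reusing the assumed `_call` helper from the JobService example earlier; mapping the fields to `pageSize`/`pageToken` query parameters is an assumption.

# Hypothetical pagination over ListJobs, reusing the assumed _call() helper
# from the JobService sketch above. pageSize/pageToken query parameters are
# assumed mappings of the page_size/page_token fields.
def list_all_jobs(project_id):
    jobs, page_token = [], None
    while True:
        path = "/projects/{}/jobs?pageSize=100".format(project_id)
        if page_token:
            path += "&pageToken=" + page_token
        response = _call("GET", path)
        jobs.extend(response.get("jobs", []))
        page_token = response.get("nextPageToken")
        if not page_token:  # no token means this was the last page
            return jobs
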
@ -445,20 +507,23 @@ message ListJobsResponse {
// The list of jobs.
repeated Job jobs = 1;
// Optional pagination token to use for retrieving the next page of results.
// Optional. Pass this token as the `page_token` field of the request for a
// subsequent call.
string next_page_token = 2;
}
// Request message for the GetJob method.
message GetJobRequest {
// Required. The name of the job.
// Required. The name of the job to get the description of.
//
// Authorization: requires `Viewer` role on the parent project.
string name = 1;
}
// Request message for the CancelJob method.
message CancelJobRequest {
// Required. The name of the job.
// Required. The name of the job to cancel.
//
// Authorization: requires `Editor` role on the parent project.
string name = 1;
}

@ -33,94 +33,188 @@ option java_package = "com.google.cloud.ml.api.v1beta1";
// Allows managing the set of machine learning models and model versions
// in the project.
// Provides methods that create and manage machine learning models and their
// versions.
//
// A model in this context is a container for versions. The model can't provide
// predictions without first having a version created for it.
//
// Each version is a trained machine learning model, and each is assumed to be
// an iteration of the same machine learning problem as the other versions of
// the same model.
//
// Your project can define multiple models, each with multiple versions.
//
// The basic life cycle of a model is:
//
// * Create and train the machine learning model and save it to a
// Google Cloud Storage location.
// * Use
// [projects.models.create](/ml/reference/rest/v1beta1/projects.models/create)
// to make a new model in your project.
// * Use
// [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)
// to deploy your saved model.
// * Use [projects.predict](/ml/reference/rest/v1beta1/projects/predict) to
// request predictions of a version of your model, or use
// [projects.jobs.create](/ml/reference/rest/v1beta1/projects.jobs/create)
// to start a batch prediction job.
service ModelService {
// Create a model which will later contain a set of model versions.
// Creates a model which will later contain one or more versions.
//
// You must add at least one version before you can request predictions from
// the model. Add versions by calling
// [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create).
rpc CreateModel(CreateModelRequest) returns (Model) {
option (google.api.http) = { post: "/v1beta1/{parent=projects/*}/models" body: "model" };
}
// List models in the project.
// Lists the models in a project.
//
// Each project can contain multiple models, and each model can have multiple
// versions.
rpc ListModels(ListModelsRequest) returns (ListModelsResponse) {
option (google.api.http) = { get: "/v1beta1/{parent=projects/*}/models" };
}
// Describe a model and versions in it.
// Gets information about a model, including its name, the description (if
// set), and the default version (if at least one version of the model has
// been deployed).
rpc GetModel(GetModelRequest) returns (Model) {
option (google.api.http) = { get: "/v1beta1/{name=projects/*/models/*}" };
}
// Delete the model and all versions in it.
rpc DeleteModel(DeleteModelRequest) returns (google.protobuf.Empty) {
// Deletes a model.
//
// You can only delete a model if there are no versions in it. You can delete
// versions by calling
// [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete).
rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { delete: "/v1beta1/{name=projects/*/models/*}" };
}
// Upload a trained TensorFlow model version. The result of the operation
// is a Version.
// Creates a new version of a model from a trained TensorFlow model.
//
// If the version created in the cloud by this call is the first deployed
// version of the specified model, it will be made the default version of the
// model. When you add a version to a model that already has one or more
// versions, the default version does not automatically change. If you want a
// new version to be the default, you must call
// [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
rpc CreateVersion(CreateVersionRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { post: "/v1beta1/{parent=projects/*/models/*}/versions" body: "version" };
}
// List versions in the model.
// Gets basic information about all the versions of a model.
//
// If you expect that a model has a lot of versions, or if you need to handle
// only a limited number of results at a time, you can request that the list
// be retrieved in batches (called pages).
rpc ListVersions(ListVersionsRequest) returns (ListVersionsResponse) {
option (google.api.http) = { get: "/v1beta1/{parent=projects/*/models/*}/versions" };
}
// Get version metadata.
// Gets information about a model version.
//
// Models can have multiple versions. You can call
// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list)
// to get the same information that this method returns for all of the
// versions of a model.
rpc GetVersion(GetVersionRequest) returns (Version) {
option (google.api.http) = { get: "/v1beta1/{name=projects/*/models/*/versions/*}" };
}
// Delete a version.
// Deletes a model version.
//
// Each model can have multiple versions deployed and in use at any given
// time. Use this method to remove a single version.
//
// Note: You cannot delete the version that is set as the default version
// of the model unless it is the only remaining version.
rpc DeleteVersion(DeleteVersionRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { delete: "/v1beta1/{name=projects/*/models/*/versions/*}" };
}
// Mark the version as default within the model.
// Designates a version to be the default for the model.
//
// The default version is used for prediction requests made against the model
// that don't specify a version.
//
// The first version to be created for a model is automatically set as the
// default. You must make any subsequent changes to the default version
// setting manually using this method.
rpc SetDefaultVersion(SetDefaultVersionRequest) returns (Version) {
option (google.api.http) = { post: "/v1beta1/{name=projects/*/models/*/versions/*}:setDefault" body: "*" };
}
}
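
The model life cycle described in the ModelService comment (create a model, create a version from a saved model in Cloud Storage, then set a default or request predictions) can be sketched against the REST bindings above. Everything beyond the URL paths shown in the proto, including the request-body field names and the `gs://` location, is an assumption.

# Hypothetical walk through the model life cycle, reusing the assumed _call()
# helper. Body field names assume the proto3 JSON mapping; the bucket path is
# illustrative only.
project_path = "/projects/my-project"

# CreateModel: POST /v1beta1/{parent=projects/*}/models
model = _call("POST", project_path + "/models",
              body={"name": "my_model", "description": "demo model"})

# CreateVersion: POST /v1beta1/{parent=projects/*/models/*}/versions
# Returns a long-running Operation; deploymentUri points at the saved model.
operation = _call("POST", project_path + "/models/my_model/versions",
                  body={"name": "v1",
                        "deploymentUri": "gs://my-bucket/saved_model/"})

# SetDefaultVersion: POST .../versions/*:setDefault  (only needed after the
# first version, which becomes the default automatically).
_call("POST", project_path + "/models/my_model/versions/v1:setDefault",
      body={})
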
// Represents a machine learning model resource that can be used to perform
// prediction.
// Represents a machine learning solution.
//
// A model can have multiple versions, each of which is a deployed, trained
// model ready to receive prediction requests. The model itself is just a
// container.
message Model {
// Required. The user-specified name of the model.
// Required. The name specified for the model when it was created.
//
// The model name must be unique within the project it is created in.
string name = 1;
// Optional. The description of the model.
// Optional. The description specified for the model when it was created.
string description = 2;
// Output only. The default version of the model.
// Output only. The default version of the model. This version will be used to
// handle prediction requests that do not specify a version.
//
// You can change the default version by calling
// [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
Version default_version = 3;
}
// Represents a version of the model.
//
// Each version is a trained model deployed in the cloud, ready to handle
// prediction requests. A model can have multiple versions. You can get
// information about all of the versions of a given model by calling
// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
message Version {
// Required.The user-specified name of the model version.
// Required. The name specified for the version when it was created.
//
// The version name must be unique within the model it is created in.
string name = 1;
// Optional. The description of the model version.
// Optional. The description specified for the version when it was created.
string description = 2;
// Output only. Whether the version is default within the model.
// Output only. If true, this version will be used to handle prediction
// requests that do not specify a version.
//
// You can change the default version by calling
// [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault).
bool is_default = 3;
// Required. Google Cloud Storage object containing the model graph, weights
// and additional metadata at the moment when the version is created.
// Required. The Google Cloud Storage location of the trained model used to
// create the version. See the
// [overview of model deployment](/ml/docs/concepts/deployment-overview) for
// more information.
//
// When passing Version to
// [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create)
// the model service uses the specified location as the source of the model.
// Once deployed, the model version is hosted by the prediction service, so
// this location is useful only as a historical record.
string deployment_uri = 4;
// Output only. The creation time of the version.
// Output only. The time the version was created.
google.protobuf.Timestamp create_time = 5;
// Output only. The last usage time of the version.
// Output only. The time the version was last used for prediction.
google.protobuf.Timestamp last_use_time = 6;
}
// Request message for the CreateModel method.
message CreateModelRequest {
// Required. The project name.
//
// Authorization: requires `Editor` role on the specified project.
string parent = 1;
@ -131,19 +225,21 @@ message CreateModelRequest {
// Request message for the ListModels method.
message ListModelsRequest {
// Required. The name of the project whose models are to be listed.
//
// Authorization: requires `Viewer` role on the specified project.
string parent = 1;
// Optional. Specifies the subset of models to retrieve.
string filter = 2;
// Optional. Specifies the ordering of the models.
string order_by = 3;
// Optional. A token for for continuing the enumeration.
// Optional. A page token to request the next page of results.
//
// You get the token from the `next_page_token` field of the response from
// the previous call.
string page_token = 4;
// Optional. The page size.
// Optional. The number of models to retrieve per "page" of results. If there
// are more remaining results than this number, the response message will
// contain a valid value in the `next_page_token` field.
//
// The default value is 20, and the maximum page size is 100.
int32 page_size = 5;
}
@ -152,13 +248,15 @@ message ListModelsResponse {
// The list of models.
repeated Model models = 1;
// Optional pagination token to use for retrieving the next page of results.
// Optional. Pass this token as the `page_token` field of the request for a
// subsequent call.
string next_page_token = 2;
}
// Request message for the GetModel method.
message GetModelRequest {
// Required. The name of the model.
//
// Authorization: requires `Viewer` role on the parent project.
string name = 1;
}
@ -166,6 +264,7 @@ message GetModelRequest {
// Request message for the DeleteModel method.
message DeleteModelRequest {
// Required. The name of the model.
//
// Authorization: requires `Editor` role on the parent project.
string name = 1;
}
@ -173,6 +272,7 @@ message DeleteModelRequest {
// Uploads the provided trained model version to Cloud Machine Learning.
message CreateVersionRequest {
// Required. The name of the model.
//
// Authorization: requires `Editor` role on the parent project.
string parent = 1;
@ -182,20 +282,22 @@ message CreateVersionRequest {
// Request message for the ListVersions method.
message ListVersionsRequest {
// Required. The name of the model whose versions are to be listed.
// Required. The name of the model for which to list the versions.
//
// Authorization: requires `Viewer` role on the parent project.
string parent = 1;
// Optional. Specifies the subset of versions to retrieve.
string filter = 2;
// Optional. Specifies the ordering of the versions.
string order_by = 3;
// Optional. A token for continuing the enumeration.
// Optional. A page token to request the next page of results.
//
// You get the token from the `next_page_token` field of the response from
// the previous call.
string page_token = 4;
// Optional. The page size.
// Optional. The number of versions to retrieve per "page" of results. If
// there are more remaining results than this number, the response message
// will contain a valid value in the `next_page_token` field.
//
// The default value is 20, and the maximum page size is 100.
int32 page_size = 5;
}
@ -204,26 +306,35 @@ message ListVersionsResponse {
// The list of versions.
repeated Version versions = 1;
// Optional pagination token to use for retrieving the next page of results.
// Optional. Pass this token as the `page_token` field of the request for a
// subsequent call.
string next_page_token = 2;
}
// Request message for the GetVersion method.
message GetVersionRequest {
// Required. The name of the version.
//
// Authorization: requires `Viewer` role on the parent project.
string name = 1;
}
// Request message for the DeleteVersion method.
// Request message for the DeleteVersion method.
message DeleteVersionRequest {
// Required. The name of the version.
// Required. The name of the version. You can get the names of all the
// versions of a model by calling
// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
//
// Authorization: requires `Editor` role on the parent project.
string name = 1;
}
// Request message for the SetDefaultVersion request.
message SetDefaultVersionRequest {
// Required. The version name which is being made default within the model.
// Required. The name of the version to make the default for the model. You
// can get the names of all the versions of a model by calling
// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list).
//
// Authorization: requires `Editor` role on the parent project.
string name = 1;
}

@ -17,6 +17,7 @@ syntax = "proto3";
package google.cloud.ml.v1beta1;
import "google/api/annotations.proto";
import "google/cloud/ml/v1beta1/model_service.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
@ -30,17 +31,41 @@ option java_package = "com.google.cloud.ml.api.v1beta1";
// Represents the metadata of the longrunning.Operation.
// Represents the metadata of the long-running operation.
message OperationMetadata {
// When the operation was submitted.
// The operation type.
enum OperationType {
// Unspecified operation type.
OPERATION_TYPE_UNSPECIFIED = 0;
// An operation to create a new version.
CREATE_VERSION = 1;
// An operation to delete an existing version.
DELETE_VERSION = 2;
// An operation to delete an existing model.
DELETE_MODEL = 3;
}
// The time the operation was submitted.
google.protobuf.Timestamp create_time = 1;
// When the operation processing was started.
// The time operation processing started.
google.protobuf.Timestamp start_time = 2;
// When the operation processing was completed.
// The time operation processing completed.
google.protobuf.Timestamp end_time = 3;
// Whether the cancellation of this operation has been requested.
bool is_cancellation_requested = 6;
// Indicates whether a request to cancel this operation has been made.
bool is_cancellation_requested = 4;
// The operation type.
OperationType operation_type = 5;
// Contains the name of the model associated with the operation.
string model_name = 6;
// Contains the version associated with the operation.
Version version = 7;
}
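
Because CreateVersion, DeleteVersion, and DeleteModel return a google.longrunning.Operation, this metadata arrives under the operation's `metadata` key. A hedged sketch of reading it, assuming the standard Operation JSON shape and camelCase field names:

# Hedged sketch: reading OperationMetadata from an Operation returned by, for
# example, CreateVersion. Assumes the standard google.longrunning.Operation
# JSON shape and camelCase field names.
def describe_operation(operation):
    metadata = operation.get("metadata", {})
    print("type:", metadata.get("operationType"))       # e.g. CREATE_VERSION
    print("model:", metadata.get("modelName"))
    print("submitted:", metadata.get("createTime"))
    if metadata.get("isCancellationRequested"):
        print("cancellation has been requested")
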

@ -17,6 +17,7 @@ syntax = "proto3";
package google.cloud.ml.v1beta1;
import "google/api/annotations.proto";
import "google/api/httpbody.proto";
option java_multiple_files = true;
option java_outer_classname = "PredictionServiceProto";
@ -36,41 +37,47 @@ service OnlinePredictionService {
// Responses are very similar to requests. There are two top-level fields,
// each of which is a JSON list:
//
// * `predictions`: The list of predictions for each of the inputs
// in the request.
// * `error`: An error message if any instance produced an error.
// <dl>
// <dt>predictions</dt>
// <dd>The list of predictions, one per instance in the request.</dd>
// <dt>error</dt>
// <dd>An error message returned instead of a prediction list if any
// instance produced an error.</dd>
// </dl>
//
// There is a one-to-one correspondence between the predictions and the
// instances in the request. Each individual prediction takes the same form
// as an instance in the request, namely JSON strings, numbers, booleans,
// or lists thereof. If your model has more than one output tensor, each
// prediction will be a JSON object with the keys being the output aliases
// in the graph.
// If the call is successful, the response body will contain one prediction
// entry per instance in the request body. If prediction fails for any
// instance, the response body will contain no predictions and will contain
// a single error entry instead.
//
// If there is an error processing any single instance, no predictions
// are returned and the `error` field is populated with the error message.
// Even though there is one prediction per instance, the format of a
// prediction is not directly related to the format of an instance.
// Predictions take whatever format is specified in the outputs collection
// defined in the model. The collection of predictions is returned in a JSON
// list. Each member of the list can be a simple value, a list, or a JSON
// object of any complexity. If your model has more than one output tensor,
// each prediction will be a JSON object containing a name/value pair for each
// output. The names identify the output aliases in the graph.
//
// Examples:
// The following examples show some possible responses:
//
// A simple set of predictions for three input instances, where each
// prediction is an integer value:
// <pre>
// # Predictions for three input instances, predictions are an integer label,
//
// # e.g., a digit in digit recognition
//
// {"predictions": [5, 4, 3]}
//
// # Predictions for two input instances in a two-class classification
//
// # problem. The labels are strings and scores are the probability of
//
// # "car" and "beach".
//
// </pre>
// A more complex set of predictions, each containing two named values that
// correspond to output tensors, named **label** and **scores** respectively.
// The value of **label** is the predicted category ("car" or "beach") and
// **scores** contains a list of probabilities for that instance across the
// possible categories.
// <pre>
// {"predictions": [{"label": "beach", "scores": [0.1, 0.9]},
// {"label": "car", "scores": [0.75, 0.25]}]}
//
// # An error:
//
// {"error": "Divide by zero"}
// </pre>
// A response when there is an error processing an input instance:
// <pre>
// {"error": "Divide by zero"}
// </pre>
rpc Predict(PredictRequest) returns (google.api.HttpBody) {
option (google.api.http) = { post: "/v1beta1/{name=projects/**}:predict" body: "*" };
@ -79,128 +86,99 @@ service OnlinePredictionService {
// Request for predictions to be issued against a trained model.
//
// The body of the request consists of a single JSON object with a single
// top-level field:
// The body of the request is a single JSON object with a single top-level
// field:
//
// * `instances`: a list of JSON values representing the instances to use for
// prediction.
// <dl>
// <dt>instances</dt>
// <dd>A JSON array containing values representing the instances to use for
// prediction.</dd>
// </dl>
//
// The structure of each element of the instances list is the type of data
// your model expects to work on. There are two types of instances: those
// that include named inputs and those that do not.
// The structure of each element of the instances list is determined by your
// model's input definition. Instances can include named inputs or can contain
// only unlabeled values.
//
// Most data does not include named inputs. In this case, each instance will
// be a JSON boolean, number, string, or (possibly deeply nested) list of
// any of the above. For instance, if your model accepts rows of CSV data,
// then each element is a string; if each data instance is a vector of ints
// or floats, use a JSON list of numbers, etc. More examples are as follows:
// Most data does not include named inputs. Some instances will be simple
// JSON values (boolean, number, or string). However, instances are often lists
// of simple values, or complex nested lists. Here are some examples of request
// bodies:
//
// CSV data with each row encoded as a string value:
// <pre>
// # CSV data
//
// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
//
// # Text
//
// </pre>
// Plain text:
// <pre>
// {"instances": ["the quick brown fox", "la bruja le dio"]}
//
// # Sentences, each a list of words (vectors of strings).
//
// </pre>
// Sentences encoded as lists of words (vectors of strings):
// <pre>
// {"instances": [["the","quick","brown"], ["la","bruja","le"]]}
//
// # Three instances, each a floating point scalar, e.g., to compute f(x).
//
// {"instances": [0.0, 1.1, 2.2]} # 3 instances (integer scalars)
//
// # Two instances, each a 3 element vecor of ints.
//
// {"instances": [[0,1,2], [3,4,5],...]}
//
// # A single instance, which is 2x3 matrix of ints.
//
// {"instances": [[[0,1,2], [3,4,5]], ...]}
//
// # A single image represented as a 3-dimensional list with dimesions:
//
// # height, width, and channels (3).
//
// {"instances": [[[[0,1,2], [3,4,5], ]]]]}
// </pre>
//
// Importantly, if your data is not UTF-8 (the only currently supported
// character set), you will need to base64 encode the data and mark it as
// binary. The latter is accomplished by using a JSON object of the form:
// Floating point scalar values:
// <pre>
// {"instances": [0.0, 1.1, 2.2]}
// </pre>
// Vectors of integers:
// <pre>
// {"instances": [[0, 1, 2], [3, 4, 5],...]}
// </pre>
// Tensors (in this case, two-dimensional tensors):
// <pre>
// {"instances": [[[0, 1, 2], [3, 4, 5]], ...]}
// </pre>
// Images represented as a three-dimensional list. In this encoding scheme the
// first two dimensions represent the rows and columns of the image, and the
// third contains the R, G, and B values for each pixel.
// <pre>
// {"instances": [[[[138, 30, 66], [130, 20, 56], ...]]]]}
// </pre>
// Data must be encoded as UTF-8. If your data uses another character encoding,
// you must base64 encode the data and mark it as binary. To mark a JSON string
// as binary, replace it with an object with a single attribute named `b64`:
// <pre>{"b64": "..."} </pre>
// in place of any JSON string that is base64 encoded. For example:
// For example:
//
// Two Serialized tf.Examples (fake data, for illustrative purposes only):
// <pre>
// # Two Serialized tf.Examples (fake data, for illustrative purposes only)
//
// {"instances": [{"b": "X5ad6u"}, {"b": "IA9j4nx"}]}
//
// # Two JPEG image byte strings (fake data, for illustrative purposes only)
//
// {"instances": [{"b": "ASa8asdf"}, {"b": "JLK7ljk3"}]}
// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
// </pre>
//
// In the case that your data includes named references, you will send a
// JSON object with the named references as the keys. For instance, if
// you used Cloud ML's preprocessing library and used the JSON key-value
// pair data format, you would send instances as follows:
//
// Two JPEG image byte strings (fake data, for illustrative purposes only):
// <pre>
// # JSON input data to be preprocessed.
// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
// </pre>
// If your data includes named references, format each instance as a JSON object
// with the named references as the keys:
//
// JSON input data to be preprocessed:
// <pre>
// {"instances": [{"a": 1.0, "b": true, "c": "x"},
// {"a": -2.0, "b": false, "c": "y"}]}
// </pre>
// Some models have an underlying TensorFlow graph that accepts multiple input
// tensors. In this case, you should use the names of JSON name/value pairs to
// identify the input tensors, as shown in the following examples:
//
// Another use case is if your underlying TensorFlow graph contains multiple
// input tensors, then the keys would be the aliases to the input tensors, e.g.,
//
// For a graph with input tensor aliases "tag" (string) and "image"
// (base64-encoded string):
// <pre>
// # Graph with input tensor aliases "tag" (string) and "image" (base64
//
// # encoded string).
//
// {"instances": [{"tag": "beach", "image": {"b": "ASa8asdf"}},
// {"tag": "car", "image": {"b": "JLK7ljk3"}}]}
//
// # Graph with input tensor aliases "tag" (string) and "image"
//
// # (3-dimensional array of 8-bit ints).
//
// {"instances": [{"tag": "beach", "image": [[[263,1,10], [262,2,11], ...]]},
// {"tag": "car", "image": [[[10,11,24], [23,10,15], ...]]}]}
// {"instances": [{"tag": "beach", "image": {"b64": "ASa8asdf"}},
// {"tag": "car", "image": {"b64": "JLK7ljk3"}}]}
// </pre>
//
// There is a one-to-one correspondence between the predictions and the
// instances in the request. Each individual prediction takes the same form
// as an instance in the request, namely JSON strings, numbers, booleans, or
// lists thereof. If your model has more than one output tensor, each
// prediction will be a JSON object with the keys being the output aliases
// in the graph.
//
// Examples:
//
// For a graph with input tensor aliases "tag" (string) and "image"
// (3-dimensional array of 8-bit ints):
// <pre>
// # Predictions for three input instances, predictions are an integer label,
//
// # e.g., a digit in digit recognition
//
// {"predictions": [5, 4, 3]}
//
// # Predictions for two input instances in a two-class classification
//
// # problem. The labels are strings and scores are the probability of "car"
//
// # and "beach".
//
// {"predictions": [{"label": "beach", "scores": [0.1, 0.9]},
// {"label": "car", "scores": [0.75, 0.25]}]}
// {"instances": [{"tag": "beach", "image": [[[263, 1, 10], [262, 2, 11], ...]]},
// {"tag": "car", "image": [[[10, 11, 24], [23, 10, 15], ...]]}]}
// </pre>
// If the call is successful, the response body will contain one prediction
// entry per instance in the request body. If prediction fails for any
// instance, the response body will contain no predictions and will contain
// a single error entry instead.
message PredictRequest {
// Required. The resource name of a model or a version.
//
// Authorization: requires `Viewer` role on the parent project.
string name = 1;
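
Assembling the `instances` list is plain JSON construction; the one special case described above is binary data, which must be base64 encoded and wrapped in the single-attribute object form. A hedged Python sketch of preparing such a request body (the tensor aliases and the image file are illustrative only):

# Hedged sketch: building a request body with one binary input, following the
# base64 convention described above. The "tag"/"image" aliases and the JPEG
# file are illustrative only.
import base64
import json

with open("image.jpg", "rb") as image_file:
    image_bytes = image_file.read()

instance = {
    "tag": "beach",                                            # plain string input
    "image": {"b64": base64.b64encode(image_bytes).decode()},  # binary input
}
request_body = json.dumps({"instances": [instance]})
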

@ -25,24 +25,28 @@ option java_package = "com.google.cloud.ml.api.v1beta1";
// Allows retrieving project related information.
service ProjectManagementService {
// Get the service config associated with a given project.
// Get the service account information associated with your project. You need
// this information in order to grant the service account permissions for
// the Google Cloud Storage location where you put your model training code
// for training the model with Google Cloud Machine Learning.
rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
option (google.api.http) = { get: "/v1beta1/{name=projects/*}:getConfig" };
}
}
// Service configuration request associated with a given project.
// Requests service account information associated with a project.
message GetConfigRequest {
// Required. The project name.
//
// Authorization: requires `Viewer` role on the specified project.
string name = 1;
}
// Returns service configuration associated with a given project.
// Returns service account information associated with a project.
message GetConfigResponse {
// The service account Cloud ML uses to access resources in the project.
string service_account = 1;
// Project number associated with 'service_account'.
// The project number for `service_account`.
int64 service_account_project = 2;
}
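
Finally, a short sketch of retrieving the service account through the `getConfig` binding above, again reusing the assumed `_call` helper and camelCase JSON field names:

# Hypothetical GetConfig call, reusing the assumed _call() helper and camelCase
# JSON field names.
config = _call("GET", "/projects/my-project:getConfig")
print(config["serviceAccount"])         # grant this account access to the
print(config["serviceAccountProject"])  # Cloud Storage locations you use
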
