grpc third-party dependencies, i.e. the contents of grpc's third_party folder

// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.automl.v1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/annotation_payload.proto";
import "google/cloud/automl/v1/data_items.proto";
import "google/cloud/automl/v1/io.proto";
import "google/cloud/automl/v1/operations.proto";
import "google/longrunning/operations.proto";
option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option java_multiple_files = true;
option java_outer_classname = "PredictionServiceProto";
option java_package = "com.google.cloud.automl.v1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";
// AutoML Prediction API.
//
// On any input that is documented to expect a string parameter in
// snake_case or kebab-case, either of those cases is accepted.
service PredictionService {
option (google.api.default_host) = "automl.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
// Perform an online prediction. The prediction result is directly
// returned in the response.
// Available for the following ML scenarios, and their expected request payloads:
//
// AutoML Vision Classification
//
// * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
//
// AutoML Vision Object Detection
//
// * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
//
// AutoML Natural Language Classification
//
// * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
// .PDF, .TIF or .TIFF format with size up to 2MB.
//
// AutoML Natural Language Entity Extraction
//
// * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
// in .PDF, .TIF or .TIFF format with size up to 20MB.
//
// AutoML Natural Language Sentiment Analysis
//
// * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
// .PDF, .TIF or .TIFF format with size up to 2MB.
//
// AutoML Translation
//
// * A TextSnippet up to 25,000 characters, UTF-8 encoded.
//
// AutoML Tables
//
// * A row with column values matching
// the columns of the model, up to 5MB. Not available for FORECASTING
// `prediction_type`.
rpc Predict(PredictRequest) returns (PredictResponse) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/models/*}:predict"
body: "*"
};
option (google.api.method_signature) = "name,payload,params";
}
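// Illustrative client-side sketch (not part of this proto): assuming the
// published Python client library for this API (`google.cloud.automl_v1`,
// generated from these definitions), an online prediction on an image could
// look roughly as follows. The project, location, model ID and file name are
// placeholders, and exact method signatures may vary between client versions.
//
//   from google.cloud import automl_v1
//
//   client = automl_v1.PredictionServiceClient()
//   model_name = client.model_path("my-project", "us-central1", "my-model-id")
//
//   with open("image.jpg", "rb") as f:  # AutoML Vision input, up to 30MB
//       image_bytes = f.read()
//
//   response = client.predict(
//       request=automl_v1.PredictRequest(
//           name=model_name,
//           payload=automl_v1.ExamplePayload(
//               image=automl_v1.Image(image_bytes=image_bytes)),
//           params={"score_threshold": "0.6"},  # see PredictRequest.params
//       )
//   )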
// Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
// prediction result won't be immediately available in the response. Instead,
// a long-running operation object is returned. Users can poll the operation
// result via the [GetOperation][google.longrunning.Operations.GetOperation]
// method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
// the [response][google.longrunning.Operation.response] field.
// Available for the following ML scenarios:
//
// * AutoML Vision Classification
// * AutoML Vision Object Detection
// * AutoML Video Intelligence Classification
// * AutoML Video Intelligence Object Tracking
// * AutoML Natural Language Classification
// * AutoML Natural Language Entity Extraction
// * AutoML Natural Language Sentiment Analysis
// * AutoML Tables
rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/models/*}:batchPredict"
body: "*"
};
option (google.api.method_signature) = "name,input_config,output_config,params";
option (google.longrunning.operation_info) = {
response_type: "BatchPredictResult"
metadata_type: "OperationMetadata"
};
}
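// Illustrative sketch of the long-running flow described above, again assuming
// the Python client library `google.cloud.automl_v1`, with `client` as in the
// previous sketch and `batch_request` a BatchPredictRequest (its construction
// is sketched after the BatchPredictRequest message below). The client wraps
// the returned google.longrunning.Operation in a future, so polling via
// GetOperation is handled by `.result()`.
//
//   operation = client.batch_predict(request=batch_request)
//   result = operation.result(timeout=3600)  # a BatchPredictResult message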
}
// Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictRequest {
// Required. Name of the model requested to serve the prediction.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}
];
// Required. Payload to perform a prediction on. The payload must match the
// problem type that the model was trained to solve.
ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];
// Additional domain-specific parameters; any string must be up to 25000
// characters long.
//
// AutoML Vision Classification
//
// `score_threshold`
// : (float) A value from 0.0 to 1.0. When the model
// makes predictions for an image, it will only produce results that have
// at least this confidence score. The default is 0.5.
//
// AutoML Vision Object Detection
//
// `score_threshold`
// : (float) When the model detects objects in the image,
// it will only produce bounding boxes that have at least this
// confidence score. Value in 0 to 1 range, default is 0.5.
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding
// boxes returned. The default is 100. The
// number of returned bounding boxes might be limited by the server.
//
// AutoML Tables
//
// `feature_importance`
// : (boolean) Whether
//
// [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
// is populated in the returned list of
// [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
// objects. The default is false.
map<string, string> params = 3;
}
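// A short sketch of how the documented `params` keys map onto this message in
// Python (`google.cloud.automl_v1`); all values are strings because the field
// is a map<string, string>. The resource name is a placeholder and `payload`
// is assumed to be an ExamplePayload matching the model's problem type.
//
//   from google.cloud import automl_v1
//
//   request = automl_v1.PredictRequest(
//       name="projects/my-project/locations/us-central1/models/my-model-id",
//       payload=payload,
//       params={
//           "score_threshold": "0.5",        # Vision Object Detection
//           "max_bounding_box_count": "50",  # Vision Object Detection
//       },
//   )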
// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictResponse {
// Prediction result.
// AutoML Translation and AutoML Natural Language Sentiment Analysis
// return precisely one payload.
repeated AnnotationPayload payload = 1;
// The preprocessed example that AutoML actually makes the prediction on.
// Empty if AutoML does not preprocess the input example.
//
// For AutoML Natural Language (Classification, Entity Extraction, and
// Sentiment Analysis), if the input is a document, the recognized text is
// returned in the
// [document_text][google.cloud.automl.v1.Document.document_text]
// property.
ExamplePayload preprocessed_input = 3;
// Additional domain-specific prediction response metadata.
//
// AutoML Vision Object Detection
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding boxes to return per image.
//
// AutoML Natural Language Sentiment Analysis
//
// `sentiment_score`
// : (float, deprecated) A value between -1 and 1; -1 maps to the least
// positive sentiment and 1 to the most positive, and the higher the score,
// the more positive the sentiment in the document. These values are
// relative to the training data, so, for example, if all training data was
// positive, then -1 is still the least positive value.
// `sentiment_score` is not the same as "score" and "magnitude"
// from Sentiment Analysis in the Natural Language API.
map<string, string> metadata = 2;
}
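// A sketch of reading this response in Python, assuming a classification model
// so that each AnnotationPayload carries a `classification` detail
// (see annotation_payload.proto); `response` is the PredictResponse returned
// by the Predict call sketched above.
//
//   for annotation in response.payload:
//       print(annotation.display_name, annotation.classification.score)
//
//   # Domain-specific extras, e.g. for Vision Object Detection models:
//   print(response.metadata.get("max_bounding_box_count"))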
// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictRequest {
// Required. Name of the model requested to serve the batch prediction.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "automl.googleapis.com/Model"
}
];
// Required. The input configuration for batch prediction.
BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The configuration specifying where output predictions should
// be written.
BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];
// Additional domain-specific parameters for the predictions; any string must
// be up to 25000 characters long.
//
// AutoML Natural Language Classification
//
// `score_threshold`
// : (float) A value from 0.0 to 1.0. When the model
// makes predictions for a text snippet, it will only produce results
// that have at least this confidence score. The default is 0.5.
//
//
// AutoML Vision Classification
//
// `score_threshold`
// : (float) A value from 0.0 to 1.0. When the model
// makes predictions for an image, it will only produce results that
// have at least this confidence score. The default is 0.5.
//
// AutoML Vision Object Detection
//
// `score_threshold`
// : (float) When the model detects objects in the image,
// it will only produce bounding boxes that have at least this
// confidence score. Value in 0 to 1 range, default is 0.5.
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding
// boxes returned per image. The default is 100. The
// number of bounding boxes returned might be limited by the server.
//
// AutoML Video Intelligence Classification
//
// `score_threshold`
// : (float) A value from 0.0 to 1.0. When the model
// makes predictions for a video, it will only produce results that
// have at least this confidence score. The default is 0.5.
//
// `segment_classification`
// : (boolean) Set to true to request
// segment-level classification. AutoML Video Intelligence returns
// labels and their confidence scores for the entire segment of the
// video that the user specified in the request configuration.
// The default is true.
//
// `shot_classification`
// : (boolean) Set to true to request shot-level
// classification. AutoML Video Intelligence determines the boundaries
// for each camera shot in the entire segment of the video that the user
// specified in the request configuration. AutoML Video Intelligence
// then returns labels and their confidence scores for each detected
// shot, along with the start and end time of the shot.
// The default is false.
//
// WARNING: Model evaluation is not done for this classification type;
// its quality depends on the training data, but there are no metrics
// provided to describe that quality.
//
// `1s_interval_classification`
// : (boolean) Set to true to request
// classification for a video at one-second intervals. AutoML Video
// Intelligence returns labels and their confidence scores for each
// second of the entire segment of the video that the user specified in the
// request configuration. The default is false.
//
// WARNING: Model evaluation is not done for this classification
// type; its quality depends on the training data, but there are no
// metrics provided to describe that quality.
//
// AutoML Video Intelligence Object Tracking
//
// `score_threshold`
// : (float) When the model detects objects in video frames,
// it will only produce bounding boxes that have at least this
// confidence score. Value in 0 to 1 range, default is 0.5.
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding
// boxes returned per image. The default is 100. The
// number of bounding boxes returned might be limited by the server.
//
// `min_bounding_box_size`
// : (float) Only bounding boxes whose shortest edge is at least this
// long, measured as a fraction of the video frame size, are
// returned. Value in 0 to 1 range. Default is 0.
//
map<string, string> params = 5;
}
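// A sketch of building this request in Python (`google.cloud.automl_v1`),
// completing the `batch_predict` call sketched inside the service definition
// above. GcsSource and GcsDestination come from io.proto; the model name and
// Cloud Storage paths are placeholders.
//
//   from google.cloud import automl_v1
//
//   batch_request = automl_v1.BatchPredictRequest(
//       name="projects/my-project/locations/us-central1/models/my-model-id",
//       input_config=automl_v1.BatchPredictInputConfig(
//           gcs_source=automl_v1.GcsSource(
//               input_uris=["gs://my-bucket/batch-inputs.csv"])),
//       output_config=automl_v1.BatchPredictOutputConfig(
//           gcs_destination=automl_v1.GcsDestination(
//               output_uri_prefix="gs://my-bucket/batch-results/")),
//       params={"score_threshold": "0.8"},  # e.g. Vision/NL Classification
//   )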
// Result of the Batch Predict. This message is returned in
// [response][google.longrunning.Operation.response] of the operation returned
// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictResult {
// Additional domain-specific prediction response metadata.
//
// AutoML Vision Object Detection
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding boxes returned per image.
//
// AutoML Video Intelligence Object Tracking
//
// `max_bounding_box_count`
// : (int64) The maximum number of bounding boxes returned per frame.
map<string, string> metadata = 1;
}
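// Once the long-running operation sketched above completes, the unpacked
// BatchPredictResult exposes this metadata map; a Python sketch, with
// `operation` as in the earlier BatchPredict sketch (keys are present only
// for the relevant model types):
//
//   result = operation.result()
//   print(result.metadata.get("max_bounding_box_count"))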