Provides defined Message types for formatting data assigned to protobuf.Value fields in AI Platform.

PiperOrigin-RevId: 342967619

parent 5fdb685a68
commit e3e7e7ddb0
53 changed files with 4482 additions and 18 deletions
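The commit message above is about giving a typed shape to data that is ultimately carried in protobuf.Value fields (see the PredictionResult message in io_format.proto below). As a rough sketch with made-up label IDs and scores, a ClassificationPredictionResult packed into such a Value renders as ordinary JSON under the proto3 JSON mapping (lowerCamelCase field names, int64 values as strings):

# Hypothetical ClassificationPredictionResult as it would appear inside a
# google.protobuf.Value once converted to JSON.
classification_prediction = {
    "ids": ["123", "456"],              # int64 is serialized as a string in JSON
    "displayNames": ["daisy", "rose"],  # order matches ids
    "confidences": [0.92, 0.07],
}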
@@ -0,0 +1,179 @@
# This file was automatically generated by BuildFileGenerator

# This is an API workspace, having public visibility by default makes perfect sense.
package(default_visibility = ["//visibility:public"])

##############################################################################
# Common
##############################################################################
load("@rules_proto//proto:defs.bzl", "proto_library")

proto_library(
    name = "schema_proto",
    srcs = [
        "annotation_payload.proto",
        "annotation_spec_color.proto",
        "data_item_payload.proto",
        "dataset_metadata.proto",
        "geometry.proto",
        "io_format.proto",
        "saved_query_metadata.proto",
    ],
    deps = [
        "//google/api:annotations_proto",
        "//google/api:field_behavior_proto",
        "//google/type:color_proto",
        "@com_google_protobuf//:duration_proto",
        "@com_google_protobuf//:struct_proto",
        "@com_google_protobuf//:wrappers_proto",
    ],
)

##############################################################################
# Java
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "java_grpc_library",
    "java_proto_library",
)

java_proto_library(
    name = "schema_java_proto",
    deps = [":schema_proto"],
)

java_grpc_library(
    name = "schema_java_grpc",
    srcs = [":schema_proto"],
    deps = [":schema_java_proto"],
)

##############################################################################
# Go
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "go_proto_library",
)

go_proto_library(
    name = "schema_go_proto",
    compilers = ["@io_bazel_rules_go//proto:go_grpc"],
    importpath = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema",
    protos = [":schema_proto"],
    deps = [
        "//google/api:annotations_go_proto",
        "//google/type:color_go_proto",
    ],
)

##############################################################################
# Python
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "moved_proto_library",
    "py_grpc_library",
    "py_proto_library",
)

moved_proto_library(
    name = "schema_moved_proto",
    srcs = [":schema_proto"],
    deps = [
        "//google/api:annotations_proto",
        "//google/api:field_behavior_proto",
        "//google/type:color_proto",
        "@com_google_protobuf//:duration_proto",
        "@com_google_protobuf//:struct_proto",
        "@com_google_protobuf//:wrappers_proto",
    ],
)

py_proto_library(
    name = "schema_py_proto",
    plugin = "@protoc_docs_plugin//:docs_plugin",
    deps = [":schema_moved_proto"],
)

py_grpc_library(
    name = "schema_py_grpc",
    srcs = [":schema_moved_proto"],
    deps = [":schema_py_proto"],
)

##############################################################################
# PHP
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "php_grpc_library",
    "php_proto_library",
)

php_proto_library(
    name = "schema_php_proto",
    deps = [":schema_proto"],
)

php_grpc_library(
    name = "schema_php_grpc",
    srcs = [":schema_proto"],
    deps = [":schema_php_proto"],
)

##############################################################################
# Node.js
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "nodejs_gapic_assembly_pkg",
    "nodejs_gapic_library",
)


##############################################################################
# Ruby
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "ruby_grpc_library",
    "ruby_proto_library",
)

ruby_proto_library(
    name = "schema_ruby_proto",
    deps = [":schema_proto"],
)

ruby_grpc_library(
    name = "schema_ruby_grpc",
    srcs = [":schema_proto"],
    deps = [":schema_ruby_proto"],
)

##############################################################################
# C#
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "csharp_grpc_library",
    "csharp_proto_library",
)

csharp_proto_library(
    name = "schema_csharp_proto",
    deps = [":schema_proto"],
)

csharp_grpc_library(
    name = "schema_csharp_grpc",
    srcs = [":schema_proto"],
    deps = [":schema_csharp_proto"],
)

##############################################################################
# C++
##############################################################################
# Put your C++ code here
@@ -0,0 +1,124 @@
type: google.api.Service
config_version: 3
name: aiplatform.googleapis.com
title: Cloud AI Platform API

types:
- name: google.cloud.aiplatform.v1beta1.schema.ClassificationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.ImageBoundingBoxAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.ImageDataItem
- name: google.cloud.aiplatform.v1beta1.schema.ImageDatasetMetadata
- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.PredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.TablesDatasetMetadata
- name: google.cloud.aiplatform.v1beta1.schema.TextClassificationAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.TextClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.TextDataItem
- name: google.cloud.aiplatform.v1beta1.schema.TextDatasetMetadata
- name: google.cloud.aiplatform.v1beta1.schema.TextExtractionAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.TextExtractionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.TextExtractionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentSavedQueryMetadata
- name: google.cloud.aiplatform.v1beta1.schema.TimeSeriesDatasetMetadata
- name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.VideoDataItem
- name: google.cloud.aiplatform.v1beta1.schema.VideoDatasetMetadata
- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingAnnotation
- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.VisualInspectionClassificationLabelSavedQueryMetadata
- name: google.cloud.aiplatform.v1beta1.schema.VisualInspectionMaskSavedQueryMetadata
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.ImageClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.ImageObjectDetectionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.ImageSegmentationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.TextClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.TextExtractionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.TextSentimentPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.VideoActionRecognitionPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.VideoClassificationPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.instance.VideoObjectTrackingPredictionInstance
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.ImageClassificationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.ImageObjectDetectionPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.ImageSegmentationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.VideoActionRecognitionPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.VideoClassificationPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.params.VideoObjectTrackingPredictionParams
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.ClassificationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.ImageObjectDetectionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.ImageSegmentationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.TabularClassificationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.TabularRegressionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.TextExtractionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.TextSentimentPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.TimeSeriesForecastingPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.VideoActionRecognitionPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.VideoClassificationPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.predict.prediction.VideoObjectTrackingPredictionResult
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlForecasting
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlForecastingInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlForecastingMetadata
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageClassification
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageClassificationInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageClassificationMetadata
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageObjectDetection
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageObjectDetectionInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageObjectDetectionMetadata
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageSegmentation
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageSegmentationInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlImageSegmentationMetadata
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTables
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTablesInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTablesMetadata
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextClassification
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextClassificationInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextExtraction
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextExtractionInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextSentiment
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlTextSentimentInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoActionRecognition
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoActionRecognitionInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoClassification
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoClassificationInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoObjectTracking
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.AutoMlVideoObjectTrackingInputs
- name: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition.ExportEvaluatedDataItemsConfig

documentation:
  summary: |-
    Train high-quality custom machine learning models with minimum effort and
    machine learning expertise.
  overview: |-
    AI Platform (Unified) enables data scientists, developers, and AI newcomers
    to create custom machine learning models specific to their business needs
    by leveraging Google's state-of-the-art transfer learning and innovative
    AI research.

backend:
  rules:
  - selector: 'google.longrunning.Operations.*'
    deadline: 60.0

authentication:
  rules:
  - selector: 'google.longrunning.Operations.*'
    oauth:
      canonical_scopes: |-
        https://www.googleapis.com/auth/cloud-platform
@@ -0,0 +1,228 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/cloud/aiplatform/v1beta1/schema/annotation_spec_color.proto";
import "google/cloud/aiplatform/v1beta1/schema/geometry.proto";
import "google/protobuf/duration.proto";
import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "AnnotationPayloadProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// Annotation details specific to image classification.
message ImageClassificationAnnotation {
  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 1;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 2;
}

// Annotation details specific to image object detection.
message ImageBoundingBoxAnnotation {
  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 1;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 2;

  // The leftmost coordinate of the bounding box.
  double x_min = 3;

  // The rightmost coordinate of the bounding box.
  double x_max = 4;

  // The topmost coordinate of the bounding box.
  double y_min = 5;

  // The bottommost coordinate of the bounding box.
  double y_max = 6;
}

// Annotation details specific to image segmentation.
message ImageSegmentationAnnotation {
  // The mask based segmentation annotation.
  message MaskAnnotation {
    // Google Cloud Storage URI that points to the mask image. The image must be
    // in PNG format. It must have the same size as the DataItem's image. Each
    // pixel in the image mask represents the AnnotationSpec which the pixel in
    // the image DataItem belongs to. Each color is mapped to one AnnotationSpec
    // based on annotation_spec_colors.
    string mask_gcs_uri = 1;

    // The mapping between color and AnnotationSpec for this Annotation.
    repeated AnnotationSpecColor annotation_spec_colors = 2;
  }

  // Represents a polygon in the image.
  message PolygonAnnotation {
    // The vertexes are connected one by one and the last vertex is connected to
    // the first one to represent a polygon.
    repeated Vertex vertexes = 1;

    // The resource Id of the AnnotationSpec that this Annotation pertains to.
    string annotation_spec_id = 2;

    // The display name of the AnnotationSpec that this Annotation pertains to.
    string display_name = 3;
  }

  // Represents a polyline in the image.
  message PolylineAnnotation {
    // The vertexes are connected one by one and the last vertex is not
    // connected to the first one.
    repeated Vertex vertexes = 1;

    // The resource Id of the AnnotationSpec that this Annotation pertains to.
    string annotation_spec_id = 2;

    // The display name of the AnnotationSpec that this Annotation pertains to.
    string display_name = 3;
  }

  oneof annotation {
    // Mask based segmentation annotation. Only one mask annotation can exist
    // for one image.
    MaskAnnotation mask_annotation = 3;

    // Polygon annotation.
    PolygonAnnotation polygon_annotation = 4;

    // Polyline annotation.
    PolylineAnnotation polyline_annotation = 5;
  }
}

// Annotation details specific to text classification.
message TextClassificationAnnotation {
  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 1;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 2;
}

// Annotation details specific to text extraction.
message TextExtractionAnnotation {
  // The segment of the text content.
  TextSegment text_segment = 1;

  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 2;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 3;
}

// The text segment inside of DataItem.
message TextSegment {
  // Zero-based character index of the first character of the text
  // segment (counting characters from the beginning of the text).
  uint64 start_offset = 1;

  // Zero-based character index of the first character past the end of
  // the text segment (counting characters from the beginning of the text).
  // The character at the end_offset is NOT included in the text segment.
  uint64 end_offset = 2;

  // The text content in the segment for output only.
  string content = 3;
}

// Annotation details specific to text sentiment.
message TextSentimentAnnotation {
  // The sentiment score for text.
  int32 sentiment = 1;

  // The sentiment max score for text.
  int32 sentiment_max = 2;

  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 3;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 4;
}

// Annotation details specific to video classification.
message VideoClassificationAnnotation {
  // This Annotation applies to the time period represented by the TimeSegment.
  // If it's not set, the Annotation applies to the whole video.
  TimeSegment time_segment = 1;

  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 2;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 3;
}

// A time period inside of a DataItem that has a time dimension (e.g. video).
message TimeSegment {
  // Start of the time segment (inclusive), represented as the duration since
  // the start of the DataItem.
  google.protobuf.Duration start_time_offset = 1;

  // End of the time segment (exclusive), represented as the duration since the
  // start of the DataItem.
  google.protobuf.Duration end_time_offset = 2;
}

// Annotation details specific to video object tracking.
message VideoObjectTrackingAnnotation {
  // A time (frame) of a video to which this annotation pertains.
  // Represented as the duration since the video's start.
  google.protobuf.Duration time_offset = 1;

  // The leftmost coordinate of the bounding box.
  double x_min = 2;

  // The rightmost coordinate of the bounding box.
  double x_max = 3;

  // The topmost coordinate of the bounding box.
  double y_min = 4;

  // The bottommost coordinate of the bounding box.
  double y_max = 5;

  // The instance of the object, expressed as a positive integer. Used to track
  // the same object across different frames.
  int64 instance_id = 6;

  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 7;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 8;
}

// Annotation details specific to video action recognition.
message VideoActionRecognitionAnnotation {
  // This Annotation applies to the time period represented by the TimeSegment.
  // If it's not set, the Annotation applies to the whole video.
  TimeSegment time_segment = 1;

  // The resource Id of the AnnotationSpec that this Annotation pertains to.
  string annotation_spec_id = 2;

  // The display name of the AnnotationSpec that this Annotation pertains to.
  string display_name = 3;
}
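The TextSegment message above uses half-open offsets: the character at end_offset is excluded. A minimal sketch of how those offsets line up with a Python slice, using a hypothetical snippet and illustrative AnnotationSpec values:

text = "AI Platform trains custom models."
segment = {"start_offset": 19, "end_offset": 25}  # half-open: end_offset is excluded
content = text[segment["start_offset"]:segment["end_offset"]]
assert content == "custom"

# A TextExtractionAnnotation built from that segment (ID and name are made up).
annotation = {
    "text_segment": {**segment, "content": content},
    "annotation_spec_id": "12345",
    "display_name": "product_feature",
}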
@@ -0,0 +1,40 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/type/color.proto";
import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "AnnotationSpecColorProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// An entry of mapping between color and AnnotationSpec. The mapping is used in
// segmentation mask.
message AnnotationSpecColor {
  // The color of the AnnotationSpec in a segmentation mask.
  google.type.Color color = 1;

  // The display name of the AnnotationSpec represented by the color in the
  // segmentation mask.
  string display_name = 2;

  // The ID of the AnnotationSpec represented by the color in the segmentation
  // mask.
  string id = 3;
}
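AnnotationSpecColor pairs a google.type.Color with an AnnotationSpec; the Color channels are floats in [0, 1]. A small sketch of one legend entry for a segmentation mask, with hypothetical values:

# Pure red pixels in the mask PNG map to the "scratch" AnnotationSpec.
legend_entry = {
    "color": {"red": 1.0, "green": 0.0, "blue": 0.0},  # google.type.Color channels, 0..1 floats
    "display_name": "scratch",
    "id": "6789",
}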
@@ -0,0 +1,66 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/api/field_behavior.proto";
import "google/protobuf/duration.proto";
import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "DataItemPayloadProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// Payload of Image DataItem.
message ImageDataItem {
  // Required. Google Cloud Storage URI that points to the original image in the
  // user's bucket. The image is up to 30MB in size.
  string gcs_uri = 1 [(google.api.field_behavior) = REQUIRED];

  // Output only. The mime type of the content of the image. Only the images in below listed
  // mime types are supported.
  // - image/jpeg
  // - image/gif
  // - image/png
  // - image/webp
  // - image/bmp
  // - image/tiff
  // - image/vnd.microsoft.icon
  string mime_type = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Payload of Video DataItem.
message VideoDataItem {
  // Required. Google Cloud Storage URI that points to the original video in the
  // user's bucket. The video is up to 50 GB in size and up to 3 hours in duration.
  string gcs_uri = 1 [(google.api.field_behavior) = REQUIRED];

  // Output only. The mime type of the content of the video. Only the videos in below listed
  // mime types are supported.
  // Supported mime_type:
  // - video/mp4
  // - video/avi
  // - video/quicktime
  string mime_type = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Payload of Text DataItem.
message TextDataItem {
  // Output only. Google Cloud Storage URI that points to the original text in the
  // user's bucket. The text file is up to 10MB in size.
  string gcs_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@@ -0,0 +1,115 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "DatasetMetadataProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// The metadata of Datasets that contain Image DataItems.
message ImageDatasetMetadata {
  // Points to a YAML file stored on Google Cloud Storage describing payload of
  // the Image DataItems that belong to this Dataset.
  string data_item_schema_uri = 1;

  // Google Cloud Storage Bucket name that contains the blob data of this
  // Dataset.
  string gcs_bucket = 2;
}

// The metadata of Datasets that contain Text DataItems.
message TextDatasetMetadata {
  // Points to a YAML file stored on Google Cloud Storage describing payload of
  // the Text DataItems that belong to this Dataset.
  string data_item_schema_uri = 1;

  // Google Cloud Storage Bucket name that contains the blob data of this
  // Dataset.
  string gcs_bucket = 2;
}

// The metadata of Datasets that contain Video DataItems.
message VideoDatasetMetadata {
  // Points to a YAML file stored on Google Cloud Storage describing payload of
  // the Video DataItems that belong to this Dataset.
  string data_item_schema_uri = 1;

  // Google Cloud Storage Bucket name that contains the blob data of this
  // Dataset.
  string gcs_bucket = 2;
}

// The metadata of Datasets that contain tables data.
message TablesDatasetMetadata {
  // The tables Dataset's data source. The Dataset doesn't store the data
  // directly, but only pointer(s) to its data.
  message InputConfig {
    oneof source {
      GcsSource gcs_source = 1;

      BigQuerySource bigquery_source = 2;
    }
  }

  message GcsSource {
    // Google Cloud Storage URI to an input file; only .csv files are supported.
    repeated string uri = 1;
  }

  message BigQuerySource {
    // The URI of a BigQuery table.
    string uri = 1;
  }

  InputConfig input_config = 1;
}

// The metadata of Datasets that contain time series data.
message TimeSeriesDatasetMetadata {
  // The time series Dataset's data source. The Dataset doesn't store the data
  // directly, but only pointer(s) to its data.
  message InputConfig {
    oneof source {
      GcsSource gcs_source = 1;

      BigQuerySource bigquery_source = 2;
    }
  }

  message GcsSource {
    // Google Cloud Storage URI to an input file; only .csv files are supported.
    repeated string uri = 1;
  }

  message BigQuerySource {
    // The URI of a BigQuery table.
    string uri = 1;
  }

  InputConfig input_config = 1;

  // The column name of the time series identifier column that identifies the
  // time series.
  string time_series_identifier_column = 2;

  // The column name of the time column that identifies time order in the time
  // series.
  string time_column = 3;
}
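For TablesDatasetMetadata and TimeSeriesDatasetMetadata, the InputConfig oneof means exactly one of gcs_source or bigquery_source may be set. A sketch of the two mutually exclusive shapes, with hypothetical URIs:

# Cloud Storage source: a repeated list of CSV URIs.
tables_metadata_gcs = {
    "input_config": {
        "gcs_source": {"uri": ["gs://my-bucket/train.csv", "gs://my-bucket/eval.csv"]}
    }
}

# BigQuery source: a single table URI (only one branch of the oneof may be set).
tables_metadata_bq = {
    "input_config": {
        "bigquery_source": {"uri": "bq://my-project.my_dataset.my_table"}
    }
}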
@@ -0,0 +1,35 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "GeometryProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// A vertex represents a 2D point in the image.
// NOTE: the normalized vertex coordinates are relative to the original image
// and range from 0 to 1.
message Vertex {
  // X coordinate.
  double x = 1;

  // Y coordinate.
  double y = 2;
}
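Since Vertex coordinates are normalized to the original image, pixel coordinates have to be divided by the image dimensions. A tiny sketch, assuming a hypothetical 640x480 image:

width, height = 640, 480
px, py = 320, 120                              # pixel coordinates
vertex = {"x": px / width, "y": py / height}   # {"x": 0.5, "y": 0.25}, both in [0, 1]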
@@ -0,0 +1,480 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1.schema;

import "google/cloud/aiplatform/v1beta1/schema/annotation_spec_color.proto";
import "google/cloud/aiplatform/v1beta1/schema/geometry.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
import "google/api/annotations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
option java_multiple_files = true;
option java_outer_classname = "IoFormatProto";
option java_package = "com.google.cloud.aiplatform.v1beta1.schema";

// Prediction input format for Image Classification.
message ImageClassificationPredictionInstance {
  // The image bytes or GCS URI to make the prediction on.
  string content = 1;

  // The MIME type of the content of the image. Only the images in below listed
  // MIME types are supported.
  // - image/jpeg
  // - image/gif
  // - image/png
  // - image/webp
  // - image/bmp
  // - image/tiff
  // - image/vnd.microsoft.icon
  string mime_type = 2;
}

// Prediction input format for Image Object Detection.
message ImageObjectDetectionPredictionInstance {
  // The image bytes or GCS URI to make the prediction on.
  string content = 1;

  // The MIME type of the content of the image. Only the images in below listed
  // MIME types are supported.
  // - image/jpeg
  // - image/gif
  // - image/png
  // - image/webp
  // - image/bmp
  // - image/tiff
  // - image/vnd.microsoft.icon
  string mime_type = 2;
}

// Prediction input format for Image Segmentation.
message ImageSegmentationPredictionInstance {
  // The image bytes to make the predictions on.
  string content = 1;

  // The MIME type of the content of the image. Only the images in below listed
  // MIME types are supported.
  // - image/jpeg
  // - image/png
  string mime_type = 2;
}

// Prediction input format for Video Classification.
message VideoClassificationPredictionInstance {
  // The Google Cloud Storage location of the video on which to perform the
  // prediction.
  string content = 1;

  // The MIME type of the content of the video. Only the following are
  // supported: video/mp4 video/avi video/quicktime
  string mime_type = 2;

  // The beginning, inclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision.
  string time_segment_start = 3;

  // The end, exclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision, and "Infinity" is allowed, which means the
  // end of the video.
  string time_segment_end = 4;
}
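The video prediction instances in this file encode time offsets as strings of seconds with an "s" suffix. A sketch of a VideoClassificationPredictionInstance in its JSON form (lowerCamelCase field names), with a hypothetical Cloud Storage path:

video_instance = {
    "content": "gs://my-bucket/videos/demo.mp4",
    "mimeType": "video/mp4",
    "timeSegmentStart": "0.0s",
    "timeSegmentEnd": "90.5s",   # fractions allowed; "Infinity" means "until the end of the video"
}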
// Prediction input format for Video Object Tracking.
message VideoObjectTrackingPredictionInstance {
  // The Google Cloud Storage location of the video on which to perform the
  // prediction.
  string content = 1;

  // The MIME type of the content of the video. Only the following are
  // supported: video/mp4 video/avi video/quicktime
  string mime_type = 2;

  // The beginning, inclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision.
  string time_segment_start = 3;

  // The end, exclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision, and "Infinity" is allowed, which means the
  // end of the video.
  string time_segment_end = 4;
}

// Prediction input format for Video Action Recognition.
message VideoActionRecognitionPredictionInstance {
  // The Google Cloud Storage location of the video on which to perform the
  // prediction.
  string content = 1;

  // The MIME type of the content of the video. Only the following are
  // supported: video/mp4 video/avi video/quicktime
  string mime_type = 2;

  // The beginning, inclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision.
  string time_segment_start = 3;

  // The end, exclusive, of the video's time segment on which to perform
  // the prediction. Expressed as a number of seconds as measured from the
  // start of the video, with "s" appended at the end. Fractions are allowed,
  // up to a microsecond precision, and "Infinity" is allowed, which means the
  // end of the video.
  string time_segment_end = 4;
}

// Prediction input format for Text Classification.
message TextClassificationPredictionInstance {
  // The text snippet to make the predictions on.
  string content = 1;

  // The MIME type of the text snippet. The supported MIME types are listed
  // below.
  // - text/plain
  string mime_type = 2;
}

// Prediction input format for Text Sentiment.
message TextSentimentPredictionInstance {
  // The text snippet to make the predictions on.
  string content = 1;

  // The MIME type of the text snippet. The supported MIME types are listed
  // below.
  // - text/plain
  string mime_type = 2;
}

// Prediction input format for Text Extraction.
message TextExtractionPredictionInstance {
  // The text snippet to make the predictions on.
  string content = 1;

  // The MIME type of the text snippet. The supported MIME types are listed
  // below.
  // - text/plain
  string mime_type = 2;

  // This field is only used for batch prediction. If a key is provided, the
  // batch prediction result will be mapped to this key. If omitted, then the
  // batch prediction result will contain the entire input instance. AI Platform
  // will not check if keys in the request are duplicates, so it is up to the
  // caller to ensure the keys are unique.
  string key = 3;
}

// Prediction model parameters for Image Classification.
message ImageClassificationPredictionParams {
  // The Model only returns predictions with at least this confidence score.
  // Default value is 0.0
  float confidence_threshold = 1;

  // The Model only returns up to that many top, by confidence score,
  // predictions per instance. If this number is very high, the Model may return
  // fewer predictions. Default value is 10.
  int32 max_predictions = 2;
}

// Prediction model parameters for Image Object Detection.
message ImageObjectDetectionPredictionParams {
  // The Model only returns predictions with at least this confidence score.
  // Default value is 0.0
  float confidence_threshold = 1;

  // The Model only returns up to that many top, by confidence score,
  // predictions per instance. Note that number of returned predictions is also
  // limited by metadata's predictionsLimit. Default value is 10.
  int32 max_predictions = 2;
}

// Prediction model parameters for Image Segmentation.
message ImageSegmentationPredictionParams {
  // When the model predicts category of pixels of the image, it will only
  // provide predictions for pixels that it is at least this much confident
  // about. All other pixels will be classified as background. Default value is
  // 0.5.
  float confidence_threshold = 1;
}

// Prediction model parameters for Video Classification.
message VideoClassificationPredictionParams {
  // The Model only returns predictions with at least this confidence score.
  // Default value is 0.0
  float confidence_threshold = 1;

  // The Model only returns up to that many top, by confidence score,
  // predictions per instance. If this number is very high, the Model may return
  // fewer predictions. Default value is 10,000.
  int32 max_predictions = 2;

  // Set to true to request segment-level classification. AI Platform returns
  // labels and their confidence scores for the entire time segment of the
  // video that user specified in the input instance.
  // Default value is true
  bool segment_classification = 3;

  // Set to true to request shot-level classification. AI Platform determines
  // the boundaries for each camera shot in the entire time segment of the
  // video that user specified in the input instance. AI Platform then
  // returns labels and their confidence scores for each detected shot, along
  // with the start and end time of the shot.
  // WARNING: Model evaluation is not done for this classification type,
  // the quality of it depends on the training data, but there are no metrics
  // provided to describe that quality.
  // Default value is false
  bool shot_classification = 4;

  // Set to true to request classification for a video at one-second intervals.
  // AI Platform returns labels and their confidence scores for each second of
  // the entire time segment of the video that user specified in the input
  // instance.
  // WARNING: Model evaluation is not done for this classification type, the
  // quality of it depends on the training data, but there are no metrics
  // provided to describe that quality. Default value is false
  bool one_sec_interval_classification = 5;
}

// Prediction model parameters for Video Object Tracking.
message VideoObjectTrackingPredictionParams {
  // The Model only returns predictions with at least this confidence score.
  // Default value is 0.0
  float confidence_threshold = 1;

  // The model only returns up to that many top, by confidence score,
  // predictions per frame of the video. If this number is very high, the
  // Model may return fewer predictions per frame. Default value is 50.
  int32 max_predictions = 2;

  // Only bounding boxes with shortest edge at least that long as a relative
  // value of video frame size are returned. Default value is 0.0.
  float min_bounding_box_size = 3;
}

// Prediction model parameters for Video Action Recognition.
message VideoActionRecognitionPredictionParams {
  // The Model only returns predictions with at least this confidence score.
  // Default value is 0.0
  float confidence_threshold = 1;

  // The model only returns up to that many top, by confidence score,
  // predictions per frame of the video. If this number is very high, the
  // Model may return fewer predictions per frame. Default value is 50.
  int32 max_predictions = 2;
}

// Represents a line of JSONL in the batch prediction output file.
message PredictionResult {
  // Some identifier from the input so that the prediction can be mapped back to
  // the input instance.
  oneof input {
    // User's input instance.
    // Struct is used here instead of Any so that JsonFormat does not append an
    // extra "@type" field when we convert the proto to JSON.
    google.protobuf.Struct instance = 1;

    // Optional user-provided key from the input instance.
    string key = 2;
  }

  // The prediction result.
  // Value is used here instead of Any so that JsonFormat does not append an
  // extra "@type" field when we convert the proto to JSON and so we can
  // represent array of objects.
  google.protobuf.Value prediction = 3;
}
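Each line of a batch prediction output file is one PredictionResult; because of the input oneof, a line carries either the echoed instance (a Struct) or just the caller-supplied key, never both. A sketch of two such JSONL lines with made-up values:

import json

line_with_instance = {
    "instance": {"content": "The battery died after two days.", "mimeType": "text/plain"},
    "prediction": 1,           # prediction is a protobuf.Value, so any JSON value fits here
}
line_with_key = {
    "key": "review-00042",     # returned instead of the full instance when a key was provided
    "prediction": 1,
}
print(json.dumps(line_with_instance))
print(json.dumps(line_with_key))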
// Represents a line of JSONL in the text sentiment batch prediction output |
||||
// file. This is a hack to allow printing of integer values. |
||||
message TextSentimentPredictionResult { |
||||
// Prediction output format for Text Sentiment. |
||||
message Prediction { |
||||
// The integer sentiment labels between 0 (inclusive) and sentimentMax label |
||||
// (inclusive), while 0 maps to the least positive sentiment and |
||||
// sentimentMax maps to the most positive one. The higher the score is, the |
||||
// more positive the sentiment in the text snippet is. Note: sentimentMax is |
||||
// an integer value between 1 (inclusive) and 10 (inclusive). |
||||
int32 sentiment = 1; |
||||
} |
||||
|
||||
// User's input instance. |
||||
TextSentimentPredictionInstance instance = 1; |
||||
|
||||
// The prediction result. |
||||
Prediction prediction = 2; |
||||
} |
||||
|
||||
// Prediction output format for Image Classification. |
||||
message ClassificationPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, ordered |
||||
// by the confidence score descendingly. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, order |
||||
// matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The Model's confidences in correctness of the predicted IDs, higher value |
||||
// means higher confidence. Order matches the Ids. |
||||
repeated float confidences = 3; |
||||
} |
||||
|
||||
// Prediction output format for Image Object Detection. |
||||
message ImageObjectDetectionPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, ordered |
||||
// by the confidence score descendingly. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, order |
||||
// matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The Model's confidences in correctness of the predicted IDs, higher value |
||||
// means higher confidence. Order matches the Ids. |
||||
repeated float confidences = 3; |
||||
|
||||
// Bounding boxes, i.e. the rectangles over the image, that pinpoint |
||||
// the found AnnotationSpecs. Given in order that matches the IDs. Each |
||||
// bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and |
||||
// `yMax`, which represent the extremal coordinates of the box. They are |
||||
// relative to the image size, and the point 0,0 is in the top left |
||||
// of the image. |
||||
repeated google.protobuf.ListValue bboxes = 4; |
||||
} |
||||
|
||||
// Prediction output format for Video Classification. |
||||
message VideoClassificationPredictionResult { |
||||
// The resource ID of the AnnotationSpec that had been identified. |
||||
string id = 1; |
||||
|
||||
// The display name of the AnnotationSpec that had been identified. |
||||
string display_name = 2; |
||||
|
||||
// The type of the prediction. The requested types can be configured |
||||
// via parameters. This will be one of |
||||
// - segment-classification |
||||
// - shot-classification |
||||
// - one-sec-interval-classification |
||||
string type = 3; |
||||
|
||||
// The beginning, inclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. Note that for |
||||
// 'segment-classification' prediction type, this equals the original |
||||
// 'timeSegmentStart' from the input instance, for other types it is the |
||||
// start of a shot or a 1 second interval respectively. |
||||
google.protobuf.Duration time_segment_start = 4; |
||||
|
||||
// The end, exclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. Note that for |
||||
// 'segment-classification' prediction type, this equals the original |
||||
// 'timeSegmentEnd' from the input instance, for other types it is the end |
||||
// of a shot or a 1 second interval respectively. |
||||
google.protobuf.Duration time_segment_end = 5; |
||||
|
||||
// The Model's confidence in correction of this prediction, higher |
||||
// value means higher confidence. |
||||
google.protobuf.FloatValue confidence = 6; |
||||
} |
||||
|
||||
// Prediction output format for Video Object Tracking. |
||||
message VideoObjectTrackingPredictionResult { |
||||
// The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box, |
||||
// i.e. the rectangle over the video frame pinpointing the found |
||||
// AnnotationSpec. The coordinates are relative to the frame size, and the |
||||
// point 0,0 is in the top left of the frame. |
||||
message Frame { |
||||
// A time (frame) of a video in which the object has been detected. |
||||
// Expressed as a number of seconds as measured from the |
||||
// start of the video, with fractions up to a microsecond precision, and |
||||
// with "s" appended at the end. |
||||
google.protobuf.Duration time_offset = 1; |
||||
|
||||
// The leftmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue x_min = 2; |
||||
|
||||
// The rightmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue x_max = 3; |
||||
|
||||
// The topmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue y_min = 4; |
||||
|
||||
// The bottommost coordinate of the bounding box. |
||||
google.protobuf.FloatValue y_max = 5; |
||||
} |
||||
|
||||
// The resource ID of the AnnotationSpec that had been identified. |
||||
string id = 1; |
||||
|
||||
// The display name of the AnnotationSpec that had been identified. |
||||
string display_name = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment in which the |
||||
// object instance has been detected. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_start = 3; |
||||
|
||||
// The end, inclusive, of the video's time segment in which the |
||||
// object instance has been detected. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_end = 4; |
||||
|
||||
// The Model's confidence in correction of this prediction, higher |
||||
// value means higher confidence. |
||||
google.protobuf.FloatValue confidence = 5; |
||||
|
||||
// All of the frames of the video in which a single object instance has been |
||||
// detected. The bounding boxes in the frames identify the same object. |
||||
repeated Frame frames = 6; |
||||
} |
||||
|
||||
// Prediction output format for Text Extraction. |
||||
message TextExtractionPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, |
||||
// ordered by the confidence score descendingly. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, |
||||
// order matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The start offsets, inclusive, of the text segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a zero-based number |
||||
// of characters as measured from the start of the text snippet. |
||||
repeated int64 text_segment_start_offsets = 3; |
||||
|
||||
// The end offsets, inclusive, of the text segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a zero-based number |
||||
// of characters as measured from the start of the text snippet. |
||||
repeated int64 text_segment_end_offsets = 4; |
||||
|
||||
// The Model's confidences in the correctness of the predicted IDs; a higher |
||||
// value means higher confidence. Order matches the IDs. |
||||
repeated float confidences = 5; |
||||
} |
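The five repeated fields above are parallel arrays: the i-th ID, display name, offset pair, and confidence all describe the same extracted entity, and the end offsets are inclusive. A small sketch of reading such a result back against its text snippet (all values are made up for illustration):

snippet = "Acme Corp was founded in 1999."
result = {  # hypothetical TextExtractionPredictionResult as a dict
    "ids": ["123", "456"],
    "displayNames": ["organization", "year"],
    "textSegmentStartOffsets": [0, 25],
    "textSegmentEndOffsets": [8, 28],
    "confidences": [0.98, 0.91],
}

for i, name in enumerate(result["displayNames"]):
    start = result["textSegmentStartOffsets"][i]
    end = result["textSegmentEndOffsets"][i]
    # End offsets are inclusive, so add 1 when slicing the snippet.
    print(name, repr(snippet[start:end + 1]), result["confidences"][i])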
@ -0,0 +1,189 @@ |
||||
# This file was automatically generated by BuildFileGenerator |
||||
|
||||
# This is an API workspace, having public visibility by default makes perfect sense. |
||||
package(default_visibility = ["//visibility:public"]) |
||||
|
||||
############################################################################## |
||||
# Common |
||||
############################################################################## |
||||
load("@rules_proto//proto:defs.bzl", "proto_library") |
||||
|
||||
proto_library( |
||||
name = "instance_proto", |
||||
srcs = [ |
||||
"image_classification.proto", |
||||
"image_object_detection.proto", |
||||
"image_segmentation.proto", |
||||
"text_classification.proto", |
||||
"text_extraction.proto", |
||||
"text_sentiment.proto", |
||||
"video_action_recognition.proto", |
||||
"video_classification.proto", |
||||
"video_object_tracking.proto", |
||||
], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Java |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"java_grpc_library", |
||||
"java_proto_library", |
||||
) |
||||
|
||||
java_proto_library( |
||||
name = "instance_java_proto", |
||||
deps = [":instance_proto"], |
||||
) |
||||
|
||||
java_grpc_library( |
||||
name = "instance_java_grpc", |
||||
srcs = [":instance_proto"], |
||||
deps = [":instance_java_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Go |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"go_proto_library", |
||||
) |
||||
|
||||
go_proto_library( |
||||
name = "instance_go_proto", |
||||
compilers = ["@io_bazel_rules_go//proto:go_grpc"], |
||||
importpath = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance", |
||||
protos = [":instance_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_go_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Python |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"moved_proto_library", |
||||
"py_grpc_library", |
||||
"py_proto_library", |
||||
py_gapic_assembly_pkg = "py_gapic_assembly_pkg2", |
||||
py_gapic_library = "py_gapic_library2", |
||||
) |
||||
|
||||
moved_proto_library( |
||||
name = "instance_moved_proto", |
||||
srcs = [":instance_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
py_proto_library( |
||||
name = "instance_py_proto", |
||||
plugin = "@protoc_docs_plugin//:docs_plugin", |
||||
deps = [":instance_moved_proto"], |
||||
) |
||||
|
||||
py_grpc_library( |
||||
name = "instance_py_grpc", |
||||
srcs = [":instance_moved_proto"], |
||||
deps = [":instance_py_proto"], |
||||
) |
||||
|
||||
py_gapic_library( |
||||
name = "instance_py_gapic", |
||||
opt_args = [ |
||||
"python-gapic-namespace=google.cloud.aiplatform.v1beta1.schema.predict", |
||||
"python-gapic-name=instance", |
||||
], |
||||
srcs = [":instance_proto"], |
||||
) |
||||
|
||||
# Open Source Packages |
||||
py_gapic_assembly_pkg( |
||||
name = "instance-py", |
||||
deps = [ |
||||
":instance_py_gapic", |
||||
] |
||||
) |
||||
|
||||
############################################################################## |
||||
# PHP |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"php_grpc_library", |
||||
"php_proto_library", |
||||
) |
||||
|
||||
php_proto_library( |
||||
name = "instance_php_proto", |
||||
deps = [":instance_proto"], |
||||
) |
||||
|
||||
php_grpc_library( |
||||
name = "instance_php_grpc", |
||||
srcs = [":instance_proto"], |
||||
deps = [":instance_php_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Node.js |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"nodejs_gapic_assembly_pkg", |
||||
"nodejs_gapic_library", |
||||
) |
||||
|
||||
|
||||
############################################################################## |
||||
# Ruby |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"ruby_grpc_library", |
||||
"ruby_proto_library", |
||||
) |
||||
|
||||
ruby_proto_library( |
||||
name = "instance_ruby_proto", |
||||
deps = [":instance_proto"], |
||||
) |
||||
|
||||
ruby_grpc_library( |
||||
name = "instance_ruby_grpc", |
||||
srcs = [":instance_proto"], |
||||
deps = [":instance_ruby_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C# |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"csharp_grpc_library", |
||||
"csharp_proto_library", |
||||
) |
||||
|
||||
csharp_proto_library( |
||||
name = "instance_csharp_proto", |
||||
deps = [":instance_proto"], |
||||
) |
||||
|
||||
csharp_grpc_library( |
||||
name = "instance_csharp_grpc", |
||||
srcs = [":instance_proto"], |
||||
deps = [":instance_csharp_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C++ |
||||
############################################################################## |
||||
# Put your C++ code here |
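As a usage note: each of the language-specific targets above can be built straight from a googleapis checkout with Bazel, e.g. something along the lines of "bazel build //google/cloud/aiplatform/v1beta1/schema/predict/instance:instance_java_proto" for the Java bindings; the package path here is inferred from the Go importpath above and may differ in other workspace layouts.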
@ -0,0 +1,41 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageClassificationPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Image Classification. |
||||
message ImageClassificationPredictionInstance { |
||||
// The image bytes or GCS URI to make the prediction on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the image. Only images of the MIME types |
||||
// listed below are supported. |
||||
// - image/jpeg |
||||
// - image/gif |
||||
// - image/png |
||||
// - image/webp |
||||
// - image/bmp |
||||
// - image/tiff |
||||
// - image/vnd.microsoft.icon |
||||
string mime_type = 2; |
||||
} |
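A minimal sketch of building such an instance in Python and wrapping it in the google.protobuf.Value that AI Platform predict requests carry; the local file name is a placeholder and the snippet assumes only the standard protobuf runtime:

import base64

from google.protobuf import json_format, struct_pb2

# "cat.jpg" is a placeholder path; the content field carries base64 image bytes.
with open("cat.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

instance_dict = {
    "content": encoded,
    "mimeType": "image/jpeg",  # must be one of the MIME types listed above
}

# Convert the plain dict into a google.protobuf.Value.
instance_value = json_format.ParseDict(instance_dict, struct_pb2.Value())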
@ -0,0 +1,41 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageObjectDetectionPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Image Object Detection. |
||||
message ImageObjectDetectionPredictionInstance { |
||||
// The image bytes or GCS URI to make the prediction on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the image. Only images of the MIME types |
||||
// listed below are supported. |
||||
// - image/jpeg |
||||
// - image/gif |
||||
// - image/png |
||||
// - image/webp |
||||
// - image/bmp |
||||
// - image/tiff |
||||
// - image/vnd.microsoft.icon |
||||
string mime_type = 2; |
||||
} |
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageSegmentationPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Image Segmentation. |
||||
message ImageSegmentationPredictionInstance { |
||||
// The image bytes to make the predictions on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the image. Only images of the MIME types |
||||
// listed below are supported. |
||||
// - image/jpeg |
||||
// - image/png |
||||
string mime_type = 2; |
||||
} |
@ -0,0 +1,35 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TextClassificationPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Text Classification. |
||||
message TextClassificationPredictionInstance { |
||||
// The text snippet to make the predictions on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the text snippet. The supported MIME types are listed |
||||
// below. |
||||
// - text/plain |
||||
string mime_type = 2; |
||||
} |
@ -0,0 +1,42 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TextExtractionPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Text Extraction. |
||||
message TextExtractionPredictionInstance { |
||||
// The text snippet to make the predictions on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the text snippet. The supported MIME types are listed |
||||
// below. |
||||
// - text/plain |
||||
string mime_type = 2; |
||||
|
||||
// This field is only used for batch prediction. If a key is provided, the |
||||
// batch prediction result will be mapped to this key. If omitted, then the |
||||
// batch prediction result will contain the entire input instance. AI Platform |
||||
// will not check if keys in the request are duplicates, so it is up to the |
||||
// caller to ensure the keys are unique. |
||||
string key = 3; |
||||
} |
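For batch prediction the instances are commonly written as JSON Lines, one instance per line; a sketch of producing keyed instances (file name, keys, and texts are illustrative assumptions):

import json

# Caller-chosen keys mapping to the snippets to run extraction on.
snippets = {
    "doc-001": "Acme Corp was founded in 1999.",
    "doc-002": "Contact support@example.com for help.",
}

with open("instances.jsonl", "w") as f:
    for key, text in snippets.items():
        instance = {
            "content": text,
            "mimeType": "text/plain",
            # With a key present, batch results are mapped back to this key
            # rather than echoing the whole input instance.
            "key": key,
        }
        f.write(json.dumps(instance) + "\n")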
@ -0,0 +1,35 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TextSentimentPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Text Sentiment. |
||||
message TextSentimentPredictionInstance { |
||||
// The text snippet to make the predictions on. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the text snippet. The supported MIME types are listed |
||||
// below. |
||||
// - text/plain |
||||
string mime_type = 2; |
||||
} |
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoActionRecognitionPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Video Action Recognition. |
||||
message VideoActionRecognitionPredictionInstance { |
||||
// The Google Cloud Storage location of the video on which to perform the |
||||
// prediction. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the video. Only the following are |
||||
// supported: video/mp4, video/avi, video/quicktime. |
||||
string mime_type = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision. |
||||
string time_segment_start = 3; |
||||
|
||||
// The end, exclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision, and "Infinity" is allowed, which means the |
||||
// end of the video. |
||||
string time_segment_end = 4; |
||||
} |
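One such instance as a plain dict, to illustrate the time-segment fields: offsets are strings with an "s" suffix and "Infinity" stands for the end of the video. The Cloud Storage URI is a placeholder.

instance = {
    "content": "gs://my-bucket/videos/match.mp4",  # placeholder GCS URI
    "mimeType": "video/mp4",
    "timeSegmentStart": "10.500s",  # inclusive start, 10.5 seconds in
    "timeSegmentEnd": "Infinity",   # exclusive end, i.e. run to the end of the video
}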
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoClassificationPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Video Classification. |
||||
message VideoClassificationPredictionInstance { |
||||
// The Google Cloud Storage location of the video on which to perform the |
||||
// prediction. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the video. Only the following are |
||||
// supported: video/mp4, video/avi, video/quicktime. |
||||
string mime_type = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision. |
||||
string time_segment_start = 3; |
||||
|
||||
// The end, exclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision, and "Infinity" is allowed, which means the |
||||
// end of the video. |
||||
string time_segment_end = 4; |
||||
} |
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.instance; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/instance;instance"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoObjectTrackingPredictionInstanceProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.instance"; |
||||
|
||||
// Prediction input format for Video Object Tracking. |
||||
message VideoObjectTrackingPredictionInstance { |
||||
// The Google Cloud Storage location of the video on which to perform the |
||||
// prediction. |
||||
string content = 1; |
||||
|
||||
// The MIME type of the content of the video. Only the following are |
||||
// supported: video/mp4, video/avi, video/quicktime. |
||||
string mime_type = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision. |
||||
string time_segment_start = 3; |
||||
|
||||
// The end, exclusive, of the video's time segment on which to perform |
||||
// the prediction. Expressed as a number of seconds as measured from the |
||||
// start of the video, with "s" appended at the end. Fractions are allowed, |
||||
// up to a microsecond precision, and "Infinity" is allowed, which means the |
||||
// end of the video. |
||||
string time_segment_end = 4; |
||||
} |
@ -0,0 +1,186 @@ |
||||
# This file was automatically generated by BuildFileGenerator |
||||
|
||||
# This is an API workspace, having public visibility by default makes perfect sense. |
||||
package(default_visibility = ["//visibility:public"]) |
||||
|
||||
############################################################################## |
||||
# Common |
||||
############################################################################## |
||||
load("@rules_proto//proto:defs.bzl", "proto_library") |
||||
|
||||
proto_library( |
||||
name = "params_proto", |
||||
srcs = [ |
||||
"image_classification.proto", |
||||
"image_object_detection.proto", |
||||
"image_segmentation.proto", |
||||
"video_action_recognition.proto", |
||||
"video_classification.proto", |
||||
"video_object_tracking.proto", |
||||
], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Java |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"java_grpc_library", |
||||
"java_proto_library", |
||||
) |
||||
|
||||
java_proto_library( |
||||
name = "params_java_proto", |
||||
deps = [":params_proto"], |
||||
) |
||||
|
||||
java_grpc_library( |
||||
name = "params_java_grpc", |
||||
srcs = [":params_proto"], |
||||
deps = [":params_java_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Go |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"go_proto_library", |
||||
) |
||||
|
||||
go_proto_library( |
||||
name = "params_go_proto", |
||||
compilers = ["@io_bazel_rules_go//proto:go_grpc"], |
||||
importpath = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params", |
||||
protos = [":params_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_go_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Python |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"moved_proto_library", |
||||
"py_grpc_library", |
||||
"py_proto_library", |
||||
py_gapic_assembly_pkg = "py_gapic_assembly_pkg2", |
||||
py_gapic_library = "py_gapic_library2", |
||||
) |
||||
|
||||
moved_proto_library( |
||||
name = "params_moved_proto", |
||||
srcs = [":params_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
py_proto_library( |
||||
name = "params_py_proto", |
||||
plugin = "@protoc_docs_plugin//:docs_plugin", |
||||
deps = [":params_moved_proto"], |
||||
) |
||||
|
||||
py_grpc_library( |
||||
name = "params_py_grpc", |
||||
srcs = [":params_moved_proto"], |
||||
deps = [":params_py_proto"], |
||||
) |
||||
|
||||
py_gapic_library( |
||||
name = "params_py_gapic", |
||||
opt_args = [ |
||||
"python-gapic-namespace=google.cloud.aiplatform.v1beta1.schema.predict", |
||||
"python-gapic-name=params", |
||||
], |
||||
srcs = [":params_proto"], |
||||
) |
||||
|
||||
# Open Source Packages |
||||
py_gapic_assembly_pkg( |
||||
name = "params-py", |
||||
deps = [ |
||||
":params_py_gapic", |
||||
] |
||||
) |
||||
|
||||
############################################################################## |
||||
# PHP |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"php_grpc_library", |
||||
"php_proto_library", |
||||
) |
||||
|
||||
php_proto_library( |
||||
name = "params_php_proto", |
||||
deps = [":params_proto"], |
||||
) |
||||
|
||||
php_grpc_library( |
||||
name = "params_php_grpc", |
||||
srcs = [":params_proto"], |
||||
deps = [":params_php_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Node.js |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"nodejs_gapic_assembly_pkg", |
||||
"nodejs_gapic_library", |
||||
) |
||||
|
||||
|
||||
############################################################################## |
||||
# Ruby |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"ruby_grpc_library", |
||||
"ruby_proto_library", |
||||
) |
||||
|
||||
ruby_proto_library( |
||||
name = "params_ruby_proto", |
||||
deps = [":params_proto"], |
||||
) |
||||
|
||||
ruby_grpc_library( |
||||
name = "params_ruby_grpc", |
||||
srcs = [":params_proto"], |
||||
deps = [":params_ruby_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C# |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"csharp_grpc_library", |
||||
"csharp_proto_library", |
||||
) |
||||
|
||||
csharp_proto_library( |
||||
name = "params_csharp_proto", |
||||
deps = [":params_proto"], |
||||
) |
||||
|
||||
csharp_grpc_library( |
||||
name = "params_csharp_grpc", |
||||
srcs = [":params_proto"], |
||||
deps = [":params_csharp_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C++ |
||||
############################################################################## |
||||
# Put your C++ code here |
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageClassificationPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Image Classification. |
||||
message ImageClassificationPredictionParams { |
||||
// The Model only returns predictions with at least this confidence score. |
||||
// Default value is 0.0 |
||||
float confidence_threshold = 1; |
||||
|
||||
// The Model returns up to this many of its top predictions per instance, |
||||
// ranked by confidence score. If this number is very high, the Model may |
||||
// return fewer predictions. Default value is 10. |
||||
int32 max_predictions = 2; |
||||
} |
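Parameters travel alongside the instances in a predict request, again as a google.protobuf.Value; a minimal sketch mirroring the instance example earlier, with illustrative values:

from google.protobuf import json_format, struct_pb2

params_dict = {
    "confidenceThreshold": 0.5,  # drop predictions scoring below 0.5
    "maxPredictions": 3,         # return at most the top 3 labels
}
params_value = json_format.ParseDict(params_dict, struct_pb2.Value())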
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageObjectDetectionPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Image Object Detection. |
||||
message ImageObjectDetectionPredictionParams { |
||||
// The Model only returns predictions with at least this confidence score. |
||||
// Default value is 0.0 |
||||
float confidence_threshold = 1; |
||||
|
||||
// The Model returns up to this many of its top predictions per instance, |
||||
// ranked by confidence score. Note that the number of returned predictions |
||||
// is also limited by the metadata's predictionsLimit. Default value is 10. |
||||
int32 max_predictions = 2; |
||||
} |
@ -0,0 +1,33 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageSegmentationPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Image Segmentation. |
||||
message ImageSegmentationPredictionParams { |
||||
// When the model predicts the category of each pixel in the image, it only |
||||
// provides predictions for pixels about which it is at least this confident. |
||||
// All other pixels will be classified as background. Default value is |
||||
// 0.5. |
||||
float confidence_threshold = 1; |
||||
} |
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoActionRecognitionPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Video Action Recognition. |
||||
message VideoActionRecognitionPredictionParams { |
||||
// The Model only returns predictions with at least this confidence score. |
||||
// Default value is 0.0 |
||||
float confidence_threshold = 1; |
||||
|
||||
// The model returns up to this many of its top predictions, ranked by |
||||
// confidence score, per frame of the video. If this number is very high, |
||||
// the Model may return fewer predictions per frame. Default value is 50. |
||||
int32 max_predictions = 2; |
||||
} |
@ -0,0 +1,61 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoClassificationPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Video Classification. |
||||
message VideoClassificationPredictionParams { |
||||
// The Model only returns predictions with at least this confidence score. |
||||
// Default value is 0.0 |
||||
float confidence_threshold = 1; |
||||
|
||||
// The Model returns up to this many of its top predictions per instance, |
||||
// ranked by confidence score. If this number is very high, the Model may |
||||
// return fewer predictions. Default value is 10,000. |
||||
int32 max_predictions = 2; |
||||
|
||||
// Set to true to request segment-level classification. AI Platform returns |
||||
// labels and their confidence scores for the entire time segment of the |
||||
// video that the user specified in the input instance. |
||||
// Default value is true. |
||||
bool segment_classification = 3; |
||||
|
||||
// Set to true to request shot-level classification. AI Platform determines |
||||
// the boundaries for each camera shot in the entire time segment of the |
||||
// video that the user specified in the input instance. AI Platform then |
||||
// returns labels and their confidence scores for each detected shot, along |
||||
// with the start and end time of the shot. |
||||
// WARNING: Model evaluation is not done for this classification type; |
||||
// its quality depends on the training data, but there are no metrics |
||||
// provided to describe that quality. |
||||
// Default value is false. |
||||
bool shot_classification = 4; |
||||
|
||||
// Set to true to request classification for a video at one-second intervals. |
||||
// AI Platform returns labels and their confidence scores for each second of |
||||
// the entire time segment of the video specified in the input instance. |
||||
// WARNING: Model evaluation is not done for this classification type; its |
||||
// quality depends on the training data, but there are no metrics provided |
||||
// to describe that quality. Default value is false. |
||||
bool one_sec_interval_classification = 5; |
||||
} |
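The three boolean flags select which granularities of classification come back; a sketch of a parameters dict requesting shot-level labels in addition to the default segment-level ones (values are illustrative):

params = {
    "confidenceThreshold": 0.3,
    "maxPredictions": 100,
    "segmentClassification": True,          # one label set for the whole segment
    "shotClassification": True,             # labels per detected camera shot
    "oneSecIntervalClassification": False,  # no per-second labels
}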
@ -0,0 +1,40 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.params; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/params;params"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoObjectTrackingPredictionParamsProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.params"; |
||||
|
||||
// Prediction model parameters for Video Object Tracking. |
||||
message VideoObjectTrackingPredictionParams { |
||||
// The Model only returns predictions with at least this confidence score. |
||||
// Default value is 0.0 |
||||
float confidence_threshold = 1; |
||||
|
||||
// The model returns up to this many of its top predictions, ranked by |
||||
// confidence score, per frame of the video. If this number is very high, |
||||
// the Model may return fewer predictions per frame. Default value is 50. |
||||
int32 max_predictions = 2; |
||||
|
||||
// Only bounding boxes whose shortest edge is at least this long, as a |
||||
// relative value of the video frame size, are returned. Default value is 0.0. |
||||
float min_bounding_box_size = 3; |
||||
} |
@ -0,0 +1,200 @@ |
||||
# This file was automatically generated by BuildFileGenerator |
||||
|
||||
# This is an API workspace, having public visibility by default makes perfect sense. |
||||
package(default_visibility = ["//visibility:public"]) |
||||
|
||||
############################################################################## |
||||
# Common |
||||
############################################################################## |
||||
load("@rules_proto//proto:defs.bzl", "proto_library") |
||||
|
||||
proto_library( |
||||
name = "prediction_proto", |
||||
srcs = [ |
||||
"classification.proto", |
||||
"image_object_detection.proto", |
||||
"image_segmentation.proto", |
||||
"tabular_classification.proto", |
||||
"tabular_regression.proto", |
||||
"text_extraction.proto", |
||||
"text_sentiment.proto", |
||||
"time_series_forecasting.proto", |
||||
"video_action_recognition.proto", |
||||
"video_classification.proto", |
||||
"video_object_tracking.proto", |
||||
], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
"//google/cloud/aiplatform/v1beta1/schema/predict/instance:instance_proto", |
||||
"@com_google_protobuf//:duration_proto", |
||||
"@com_google_protobuf//:struct_proto", |
||||
"@com_google_protobuf//:wrappers_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Java |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"java_grpc_library", |
||||
"java_proto_library", |
||||
) |
||||
|
||||
java_proto_library( |
||||
name = "prediction_java_proto", |
||||
deps = [":prediction_proto"], |
||||
) |
||||
|
||||
java_grpc_library( |
||||
name = "prediction_java_grpc", |
||||
srcs = [":prediction_proto"], |
||||
deps = [":prediction_java_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Go |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"go_proto_library", |
||||
) |
||||
|
||||
go_proto_library( |
||||
name = "prediction_go_proto", |
||||
compilers = ["@io_bazel_rules_go//proto:go_grpc"], |
||||
importpath = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction", |
||||
protos = [":prediction_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_go_proto", |
||||
"//google/cloud/aiplatform/v1beta1/schema/predict/instance:instance_go_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Python |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"moved_proto_library", |
||||
"py_grpc_library", |
||||
"py_proto_library", |
||||
py_gapic_assembly_pkg = "py_gapic_assembly_pkg2", |
||||
py_gapic_library = "py_gapic_library2", |
||||
) |
||||
|
||||
moved_proto_library( |
||||
name = "prediction_moved_proto", |
||||
srcs = [":prediction_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
"//google/cloud/aiplatform/v1beta1/schema/predict/instance:instance_proto", |
||||
"@com_google_protobuf//:duration_proto", |
||||
"@com_google_protobuf//:struct_proto", |
||||
"@com_google_protobuf//:wrappers_proto", |
||||
], |
||||
) |
||||
|
||||
py_proto_library( |
||||
name = "prediction_py_proto", |
||||
plugin = "@protoc_docs_plugin//:docs_plugin", |
||||
deps = [":prediction_moved_proto"], |
||||
) |
||||
|
||||
py_grpc_library( |
||||
name = "prediction_py_grpc", |
||||
srcs = [":prediction_moved_proto"], |
||||
deps = [":prediction_py_proto"], |
||||
) |
||||
|
||||
py_gapic_library( |
||||
name = "prediction_py_gapic", |
||||
opt_args = [ |
||||
"python-gapic-namespace=google.cloud.aiplatform.v1beta1.schema.predict", |
||||
"python-gapic-name=prediction", |
||||
], |
||||
srcs = [":prediction_proto"], |
||||
) |
||||
|
||||
# Open Source Packages |
||||
py_gapic_assembly_pkg( |
||||
name = "prediction-py", |
||||
deps = [ |
||||
":prediction_py_gapic", |
||||
] |
||||
) |
||||
|
||||
############################################################################## |
||||
# PHP |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"php_grpc_library", |
||||
"php_proto_library", |
||||
) |
||||
|
||||
php_proto_library( |
||||
name = "prediction_php_proto", |
||||
deps = [":prediction_proto"], |
||||
) |
||||
|
||||
php_grpc_library( |
||||
name = "prediction_php_grpc", |
||||
srcs = [":prediction_proto"], |
||||
deps = [":prediction_php_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Node.js |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"nodejs_gapic_assembly_pkg", |
||||
"nodejs_gapic_library", |
||||
) |
||||
|
||||
|
||||
############################################################################## |
||||
# Ruby |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"ruby_grpc_library", |
||||
"ruby_proto_library", |
||||
) |
||||
|
||||
ruby_proto_library( |
||||
name = "prediction_ruby_proto", |
||||
deps = [":prediction_proto"], |
||||
) |
||||
|
||||
ruby_grpc_library( |
||||
name = "prediction_ruby_grpc", |
||||
srcs = [":prediction_proto"], |
||||
deps = [":prediction_ruby_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C# |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"csharp_grpc_library", |
||||
"csharp_proto_library", |
||||
) |
||||
|
||||
csharp_proto_library( |
||||
name = "prediction_csharp_proto", |
||||
deps = [":prediction_proto"], |
||||
) |
||||
|
||||
csharp_grpc_library( |
||||
name = "prediction_csharp_grpc", |
||||
srcs = [":prediction_proto"], |
||||
deps = [":prediction_csharp_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C++ |
||||
############################################################################## |
||||
# Put your C++ code here |
@ -0,0 +1,39 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ClassificationPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Image and Text Classification. |
||||
message ClassificationPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, ordered |
||||
// by confidence score in descending order. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, order |
||||
// matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The Model's confidences in the correctness of the predicted IDs; a higher |
||||
// value means higher confidence. Order matches the IDs. |
||||
repeated float confidences = 3; |
||||
} |
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/protobuf/struct.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageObjectDetectionPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Image Object Detection. |
||||
message ImageObjectDetectionPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, ordered |
||||
// by confidence score in descending order. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, order |
||||
// matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The Model's confidences in the correctness of the predicted IDs; a higher |
||||
// value means higher confidence. Order matches the IDs. |
||||
repeated float confidences = 3; |
||||
|
||||
// Bounding boxes, i.e. the rectangles over the image, that pinpoint |
||||
// the found AnnotationSpecs, given in the order that matches the IDs. Each |
||||
// bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and |
||||
// `yMax`, which represent the extremal coordinates of the box. They are |
||||
// relative to the image size, and the point 0,0 is in the top left |
||||
// of the image. |
||||
repeated google.protobuf.ListValue bboxes = 4; |
||||
} |
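Each entry of bboxes is a 4-number list in the order xMin, xMax, yMin, yMax, relative to the image size; a sketch that pairs boxes with their labels and scales them to pixels (the image size and result values are assumptions for illustration):

result = {  # hypothetical ImageObjectDetectionPredictionResult as a dict
    "ids": ["42", "43"],
    "displayNames": ["dog", "ball"],
    "confidences": [0.93, 0.71],
    "bboxes": [
        [0.05, 0.60, 0.10, 0.90],  # xMin, xMax, yMin, yMax
        [0.62, 0.80, 0.55, 0.78],
    ],
}

width, height = 640, 480  # assumed image size in pixels
for name, conf, (x_min, x_max, y_min, y_max) in zip(
        result["displayNames"], result["confidences"], result["bboxes"]):
    box_px = (x_min * width, y_min * height, x_max * width, y_max * height)
    print(f"{name} ({conf:.2f}): {box_px}")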
@ -0,0 +1,42 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ImageSegmentationPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Image Segmentation. |
||||
message ImageSegmentationPredictionResult { |
||||
// A PNG image where each pixel in the mask represents the category in which |
||||
// the pixel in the original image was predicted to belong to. The size of |
||||
// this image will be the same as the original image. The mapping between the |
||||
// AnntoationSpec and the color can be found in model's metadata. The model |
||||
// will choose the most likely category and if none of the categories reach |
||||
// the confidence threshold, the pixel will be marked as background. |
||||
bytes category_mask = 1; |
||||
|
||||
// A one channel image which is encoded as an 8bit lossless PNG. The size of |
||||
// the image will be the same as the original image. For a specific pixel, |
||||
// darker color means less confidence in correctness of the cateogry in the |
||||
// categoryMask for the corresponding pixel. Black means no confidence and |
||||
// white means complete confidence. |
||||
bytes confidence_mask = 2; |
||||
} |
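Both masks are PNGs with the same dimensions as the input image; a sketch of decoding them once the two byte fields have been pulled out of a response. Pillow and the helper name are assumptions, not part of the API:

import io

from PIL import Image  # third-party Pillow package, assumed available

def decode_masks(category_mask_bytes, confidence_mask_bytes):
    # Pixel colors in the category mask map to AnnotationSpecs via the
    # model's metadata; low-confidence pixels are marked as background.
    category_mask = Image.open(io.BytesIO(category_mask_bytes))
    # The confidence mask is a single-channel 8-bit PNG: black means no
    # confidence, white means complete confidence in the category mask.
    confidence_mask = Image.open(io.BytesIO(confidence_mask_bytes))
    return category_mask, confidence_mask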
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TabularClassificationPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Tabular Classification. |
||||
message TabularClassificationPredictionResult { |
||||
// The names of the classes being classified; contains all possible values of |
||||
// the target column. |
||||
repeated string classes = 1; |
||||
|
||||
// The model's confidence in each class being correct; a higher |
||||
// value means higher confidence. The N-th score corresponds to |
||||
// the N-th class in classes. |
||||
repeated float scores = 2; |
||||
} |
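classes and scores are parallel, with the N-th score belonging to the N-th class; reading the most likely class back is a one-liner (values are illustrative):

result = {"classes": ["churn", "no_churn"], "scores": [0.18, 0.82]}

# Pair each class with its score and pick the most likely one.
best_class, best_score = max(zip(result["classes"], result["scores"]),
                             key=lambda pair: pair[1])
print(best_class, best_score)  # -> no_churn 0.82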
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TabularRegressionPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Tabular Regression. |
||||
message TabularRegressionPredictionResult { |
||||
// The regression value. |
||||
float value = 1; |
||||
|
||||
// The lower bound of the prediction interval. |
||||
float lower_bound = 2; |
||||
|
||||
// The upper bound of the prediction interval. |
||||
float upper_bound = 3; |
||||
} |
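Purely for illustration, the point forecast plus its prediction interval can be checked against a later observation; the helper below is a sketch, not part of the schema:

def outside_interval(observed, lower_bound, upper_bound):
    # True when the observed value is not covered by the prediction interval.
    return observed < lower_bound or observed > upper_bound

print(outside_interval(11.2, lower_bound=8.0, upper_bound=10.5))  # True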
@ -0,0 +1,49 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TextExtractionPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Text Extraction. |
||||
message TextExtractionPredictionResult { |
||||
// The resource IDs of the AnnotationSpecs that had been identified, |
||||
// ordered by the confidence score descendingly. |
||||
repeated int64 ids = 1; |
||||
|
||||
// The display names of the AnnotationSpecs that had been identified, |
||||
// order matches the IDs. |
||||
repeated string display_names = 2; |
||||
|
||||
// The start offsets, inclusive, of the text segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a zero-based number |
||||
// of characters as measured from the start of the text snippet. |
||||
repeated int64 text_segment_start_offsets = 3; |
||||
|
||||
// The end offsets, inclusive, of the text segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a zero-based number |
||||
// of characters as measured from the start of the text snippet. |
||||
repeated int64 text_segment_end_offsets = 4; |
||||
|
||||
// The Model's confidences in correctness of the predicted IDs, higher |
||||
// value means higher confidence. Order matches the Ids. |
||||
repeated float confidences = 5; |
||||
} |
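Since the five fields above are parallel arrays (the N-th entry of each refers to the same identified entity), consumers typically zip them back into per-entity records. A sketch, assuming the values have already been read into plain Python lists:

def group_entities(ids, display_names, starts, ends, confidences):
    # Position i across all five lists describes one identified entity.
    return [
        {
            "id": ids[i],
            "display_name": display_names[i],
            "text_segment": (starts[i], ends[i]),  # inclusive character offsets
            "confidence": confidences[i],
        }
        for i in range(len(ids))
    ]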
@ -0,0 +1,45 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/cloud/aiplatform/v1beta1/schema/predict/instance/text_sentiment.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TextSentimentPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Represents a line of JSONL in the text sentiment batch prediction output |
||||
// file. This is a hack to allow printing of integer values. |
||||
message TextSentimentPredictionResult { |
||||
// Prediction output format for Text Sentiment. |
||||
message Prediction { |
||||
// The integer sentiment label, between 0 (inclusive) and sentimentMax |
||||
// (inclusive), where 0 maps to the least positive sentiment and |
||||
// sentimentMax maps to the most positive one. The higher the score is, the |
||||
// more positive the sentiment in the text snippet is. Note: sentimentMax is |
||||
// an integer value between 1 (inclusive) and 10 (inclusive). |
||||
int32 sentiment = 1; |
||||
} |
||||
|
||||
// User's input instance. |
||||
google.cloud.aiplatform.v1beta1.schema.predict.instance.TextSentimentPredictionInstance instance = 1; |
||||
|
||||
// The prediction result. |
||||
Prediction prediction = 2; |
||||
} |
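Because each result is one line of the batch prediction JSONL output, reading it back is a single json.loads call. The line below is illustrative only; the exact shape of the `instance` payload should be verified against real output:

import json

line = '{"instance": {"content": "great movie"}, "prediction": {"sentiment": 4}}'
record = json.loads(line)
# sentiment is an integer in [0, sentimentMax]; higher means more positive.
print(record["prediction"]["sentiment"])  # 4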
@ -0,0 +1,36 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "TimeSeriesForecastingPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Time Series Forecasting. |
||||
message TimeSeriesForecastingPredictionResult { |
||||
// The regression value. |
||||
float value = 1; |
||||
|
||||
// The lower bound of the prediction interval. |
||||
float lower_bound = 2; |
||||
|
||||
// The upper bound of the prediction interval. |
||||
float upper_bound = 3; |
||||
} |
@ -0,0 +1,51 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/protobuf/duration.proto"; |
||||
import "google/protobuf/wrappers.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoActionRecognitionPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Video Action Recognition. |
||||
message VideoActionRecognitionPredictionResult { |
||||
// The resource ID of the AnnotationSpec that had been identified. |
||||
string id = 1; |
||||
|
||||
// The display name of the AnnotationSpec that had been identified. |
||||
string display_name = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_start = 4; |
||||
|
||||
// The end, exclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_end = 5; |
||||
|
||||
// The Model's confidence in the correctness of this prediction; a higher |
||||
// value means higher confidence. |
||||
google.protobuf.FloatValue confidence = 6; |
||||
} |
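The two Duration fields serialize in JSON as a decimal number of seconds with a trailing "s" (for example "12.345s"), so a small parser is often all a client needs. A sketch, assuming you are working with the JSON form of the prediction rather than protobuf objects:

def parse_duration_seconds(value: str) -> float:
    # "12.345s" -> 12.345; rejects strings without the trailing unit.
    if not value.endswith("s"):
        raise ValueError(f"expected a duration like '12.345s', got {value!r}")
    return float(value[:-1])

print(parse_duration_seconds("12.345s"))  # 12.345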
@ -0,0 +1,64 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/protobuf/duration.proto"; |
||||
import "google/protobuf/wrappers.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoClassificationPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Video Classification. |
||||
message VideoClassificationPredictionResult { |
||||
// The resource ID of the AnnotationSpec that had been identified. |
||||
string id = 1; |
||||
|
||||
// The display name of the AnnotationSpec that had been identified. |
||||
string display_name = 2; |
||||
|
||||
// The type of the prediction. The requested types can be configured |
||||
// via parameters. This will be one of |
||||
// - segment-classification |
||||
// - shot-classification |
||||
// - one-sec-interval-classification |
||||
string type = 3; |
||||
|
||||
// The beginning, inclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. Note that for |
||||
// 'segment-classification' prediction type, this equals the original |
||||
// 'timeSegmentStart' from the input instance, for other types it is the |
||||
// start of a shot or a 1 second interval respectively. |
||||
google.protobuf.Duration time_segment_start = 4; |
||||
|
||||
// The end, exclusive, of the video's time segment in which the |
||||
// AnnotationSpec has been identified. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. Note that for |
||||
// 'segment-classification' prediction type, this equals the original |
||||
// 'timeSegmentEnd' from the input instance, for other types it is the end |
||||
// of a shot or a 1 second interval respectively. |
||||
google.protobuf.Duration time_segment_end = 5; |
||||
|
||||
// The Model's confidence in the correctness of this prediction; a higher |
||||
// value means higher confidence. |
||||
google.protobuf.FloatValue confidence = 6; |
||||
} |
@ -0,0 +1,79 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.predict.prediction; |
||||
|
||||
import "google/protobuf/duration.proto"; |
||||
import "google/protobuf/wrappers.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "VideoObjectTrackingPredictionResultProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction"; |
||||
|
||||
// Prediction output format for Video Object Tracking. |
||||
message VideoObjectTrackingPredictionResult { |
||||
// The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box, |
||||
// i.e. the rectangle over the video frame pinpointing the found |
||||
// AnnotationSpec. The coordinates are relative to the frame size, and the |
||||
// point 0,0 is in the top left of the frame. |
||||
message Frame { |
||||
// A time (frame) of a video in which the object has been detected. |
||||
// Expressed as a number of seconds as measured from the |
||||
// start of the video, with fractions up to a microsecond precision, and |
||||
// with "s" appended at the end. |
||||
google.protobuf.Duration time_offset = 1; |
||||
|
||||
// The leftmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue x_min = 2; |
||||
|
||||
// The rightmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue x_max = 3; |
||||
|
||||
// The topmost coordinate of the bounding box. |
||||
google.protobuf.FloatValue y_min = 4; |
||||
|
||||
// The bottommost coordinate of the bounding box. |
||||
google.protobuf.FloatValue y_max = 5; |
||||
} |
||||
|
||||
// The resource ID of the AnnotationSpec that had been identified. |
||||
string id = 1; |
||||
|
||||
// The display name of the AnnotationSpec that had been identified. |
||||
string display_name = 2; |
||||
|
||||
// The beginning, inclusive, of the video's time segment in which the |
||||
// object instance has been detected. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_start = 3; |
||||
|
||||
// The end, inclusive, of the video's time segment in which the |
||||
// object instance has been detected. Expressed as a number of seconds as |
||||
// measured from the start of the video, with fractions up to a microsecond |
||||
// precision, and with "s" appended at the end. |
||||
google.protobuf.Duration time_segment_end = 4; |
||||
|
||||
// The Model's confidence in the correctness of this prediction; a higher |
||||
// value means higher confidence. |
||||
google.protobuf.FloatValue confidence = 5; |
||||
|
||||
// All of the frames of the video in which a single object instance has been |
||||
// detected. The bounding boxes in the frames identify the same object. |
||||
repeated Frame frames = 6; |
||||
} |
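Because Frame coordinates are relative to the frame size with the origin at the top left, converting a frame to pixel coordinates only requires the frame's width and height. A sketch under that assumption, treating the wrapper values as plain floats:

def to_pixel_box(x_min, x_max, y_min, y_max, frame_width, frame_height):
    # Relative coordinates lie in [0, 1]; (0, 0) is the top-left corner.
    return (
        int(x_min * frame_width),
        int(y_min * frame_height),
        int(x_max * frame_width),
        int(y_max * frame_height),
    )

print(to_pixel_box(0.25, 0.75, 0.1, 0.9, 1920, 1080))  # (480, 108, 1440, 972)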
@ -0,0 +1,41 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema; |
||||
|
||||
import "google/cloud/aiplatform/v1beta1/schema/annotation_spec_color.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "SavedQueryMetadataProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema"; |
||||
|
||||
// The metadata of a SavedQuery that contains TextSentiment Annotations. |
||||
message TextSentimentSavedQueryMetadata { |
||||
// The maximum sentiment of the sentiment Annotations in this SavedQuery. |
||||
int32 sentiment_max = 1; |
||||
} |
||||
|
||||
message VisualInspectionClassificationLabelSavedQueryMetadata { |
||||
// Whether or not the classification label is multi_label. |
||||
bool multi_label = 1; |
||||
} |
||||
|
||||
message VisualInspectionMaskSavedQueryMetadata { |
||||
// The mapping between color and AnnotationSpec for this SavedQuery. |
||||
repeated AnnotationSpecColor color_map = 2; |
||||
} |
@ -0,0 +1,192 @@ |
||||
# This file was automatically generated by BuildFileGenerator |
||||
|
||||
# This is an API workspace, having public visibility by default makes perfect sense. |
||||
package(default_visibility = ["//visibility:public"]) |
||||
|
||||
############################################################################## |
||||
# Common |
||||
############################################################################## |
||||
load("@rules_proto//proto:defs.bzl", "proto_library") |
||||
|
||||
proto_library( |
||||
name = "definition_proto", |
||||
srcs = [ |
||||
"automl_forecasting.proto", |
||||
"automl_image_classification.proto", |
||||
"automl_image_object_detection.proto", |
||||
"automl_image_segmentation.proto", |
||||
"automl_tables.proto", |
||||
"automl_text_classification.proto", |
||||
"automl_text_extraction.proto", |
||||
"automl_text_sentiment.proto", |
||||
"automl_video_action_recognition.proto", |
||||
"automl_video_classification.proto", |
||||
"automl_video_object_tracking.proto", |
||||
"export_evaluated_data_items_config.proto", |
||||
], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Java |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"java_grpc_library", |
||||
"java_proto_library", |
||||
) |
||||
|
||||
java_proto_library( |
||||
name = "definition_java_proto", |
||||
deps = [":definition_proto"], |
||||
) |
||||
|
||||
java_grpc_library( |
||||
name = "definition_java_grpc", |
||||
srcs = [":definition_proto"], |
||||
deps = [":definition_java_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Go |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"go_proto_library", |
||||
) |
||||
|
||||
go_proto_library( |
||||
name = "definition_go_proto", |
||||
compilers = ["@io_bazel_rules_go//proto:go_grpc"], |
||||
importpath = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition", |
||||
protos = [":definition_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_go_proto", |
||||
], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Python |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"moved_proto_library", |
||||
"py_grpc_library", |
||||
"py_proto_library", |
||||
py_gapic_assembly_pkg = "py_gapic_assembly_pkg2", |
||||
py_gapic_library = "py_gapic_library2", |
||||
) |
||||
|
||||
moved_proto_library( |
||||
name = "definition_moved_proto", |
||||
srcs = [":definition_proto"], |
||||
deps = [ |
||||
"//google/api:annotations_proto", |
||||
], |
||||
) |
||||
|
||||
py_proto_library( |
||||
name = "definition_py_proto", |
||||
plugin = "@protoc_docs_plugin//:docs_plugin", |
||||
deps = [":definition_moved_proto"], |
||||
) |
||||
|
||||
py_grpc_library( |
||||
name = "definition_py_grpc", |
||||
srcs = [":definition_moved_proto"], |
||||
deps = [":definition_py_proto"], |
||||
) |
||||
|
||||
py_gapic_library( |
||||
name = "definition_py_gapic", |
||||
opt_args = [ |
||||
"python-gapic-namespace=google.cloud.aiplatform.v1beta1.schema.trainingjob", |
||||
"python-gapic-name=definition", |
||||
], |
||||
srcs = [":definition_proto"], |
||||
) |
||||
|
||||
# Open Source Packages |
||||
py_gapic_assembly_pkg( |
||||
name = "definition_py", |
||||
deps = [ |
||||
":definition_py_gapic", |
||||
] |
||||
) |
||||
|
||||
############################################################################## |
||||
# PHP |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"php_grpc_library", |
||||
"php_proto_library", |
||||
) |
||||
|
||||
php_proto_library( |
||||
name = "definition_php_proto", |
||||
deps = [":definition_proto"], |
||||
) |
||||
|
||||
php_grpc_library( |
||||
name = "definition_php_grpc", |
||||
srcs = [":definition_proto"], |
||||
deps = [":definition_php_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# Node.js |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"nodejs_gapic_assembly_pkg", |
||||
"nodejs_gapic_library", |
||||
) |
||||
|
||||
|
||||
############################################################################## |
||||
# Ruby |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"ruby_grpc_library", |
||||
"ruby_proto_library", |
||||
) |
||||
|
||||
ruby_proto_library( |
||||
name = "definition_ruby_proto", |
||||
deps = [":definition_proto"], |
||||
) |
||||
|
||||
ruby_grpc_library( |
||||
name = "definition_ruby_grpc", |
||||
srcs = [":definition_proto"], |
||||
deps = [":definition_ruby_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C# |
||||
############################################################################## |
||||
load( |
||||
"@com_google_googleapis_imports//:imports.bzl", |
||||
"csharp_grpc_library", |
||||
"csharp_proto_library", |
||||
) |
||||
|
||||
csharp_proto_library( |
||||
name = "definition_csharp_proto", |
||||
deps = [":definition_proto"], |
||||
) |
||||
|
||||
csharp_grpc_library( |
||||
name = "definition_csharp_grpc", |
||||
srcs = [":definition_proto"], |
||||
deps = [":definition_csharp_proto"], |
||||
) |
||||
|
||||
############################################################################## |
||||
# C++ |
||||
############################################################################## |
||||
# Put your C++ code here |
@ -0,0 +1,5 @@ |
||||
type: com.google.api.codegen.ConfigProto |
||||
config_schema_version: 2.0.0 |
||||
language_settings: |
||||
python: |
||||
package_name: google.cloud.aiplatform_v1beta1.schema.trainingjob.definition.gapic |
@ -0,0 +1,296 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLForecastingProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Forecasting Model. |
||||
message AutoMlForecasting { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlForecastingInputs inputs = 1; |
||||
|
||||
// The metadata information. |
||||
AutoMlForecastingMetadata metadata = 2; |
||||
} |
||||
|
||||
message AutoMlForecastingInputs { |
||||
message Transformation { |
||||
// The training pipeline will infer the proper transformation based on the |
||||
// statistics of the dataset. |
||||
message AutoTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The value converted to float32. |
||||
// * The z_score of the value. |
||||
// * log(value+1) when the value is greater than or equal to 0. Otherwise, |
||||
// this transformation is not applied and the value is considered a |
||||
// missing value. |
||||
// * z_score of log(value+1) when the value is greater than or equal to 0. |
||||
// Otherwise, this transformation is not applied and the value is |
||||
// considered a missing value. |
||||
// * A boolean value that indicates whether the value is valid. |
||||
message NumericTransformation { |
||||
string column_name = 1; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 2; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The categorical string as is--no change to case, punctuation, |
||||
// spelling, tense, and so on. |
||||
// * Convert the category name to a dictionary lookup index and generate an |
||||
// embedding for each index. |
||||
// * Categories that appear less than 5 times in the training dataset are |
||||
// treated as the "unknown" category. The "unknown" category gets its own |
||||
// special lookup index and resulting embedding. |
||||
message CategoricalTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * Apply the transformation functions for Numerical columns. |
||||
// * Determine the year, month, day, and weekday. Treat each value from the |
||||
// timestamp as a Categorical column. |
||||
// * Invalid numerical values (for example, values that fall outside of a |
||||
// typical timestamp range, or are extreme values) receive no special |
||||
// treatment and are not removed. |
||||
message TimestampTransformation { |
||||
string column_name = 1; |
||||
|
||||
// The format in which that time field is expressed. The time_format must |
||||
// either be one of: |
||||
// * `unix-seconds` |
||||
// * `unix-milliseconds` |
||||
// * `unix-microseconds` |
||||
// * `unix-nanoseconds` |
||||
// (for respectively number of seconds, milliseconds, microseconds and |
||||
// nanoseconds since start of the Unix epoch); |
||||
// or be written in `strftime` syntax. If time_format is not set, then the |
||||
// default format is RFC 3339 `date-time` format, where |
||||
// `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) |
||||
string time_format = 2; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 3; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The text as is--no change to case, punctuation, spelling, tense, and |
||||
// so on. |
||||
// * Tokenize text to words. Convert each word to a dictionary lookup index |
||||
// and generate an embedding for each index. Combine the embedding of all |
||||
// elements into a single embedding using the mean. |
||||
// * Tokenization is based on unicode script boundaries. |
||||
// * Missing values get their own lookup index and resulting embedding. |
||||
// * Stop-words receive no special treatment and are not removed. |
||||
message TextTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// Treats the column as a numerical array and performs the following |
||||
// transformation functions. |
||||
// * All transformations for Numerical types applied to the average of |
||||
// all elements. |
||||
// * The average of empty arrays is treated as zero. |
||||
message NumericArrayTransformation { |
||||
string column_name = 1; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 2; |
||||
} |
||||
|
||||
// Treats the column as a categorical array and performs the following |
||||
// transformation functions. |
||||
// * For each element in the array, convert the category name to a |
||||
// dictionary lookup index and generate an embedding for each index. |
||||
// Combine the embedding of all elements into a single embedding using |
||||
// the mean. |
||||
// * Empty arrays are treated as an embedding of zeroes. |
||||
message CategoricalArrayTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// Treats the column as a text array and performs the following transformation |
||||
// functions. |
||||
// * Concatenate all text values in the array into a single text value |
||||
// using a space (" ") as a delimiter, and then treat the result as a single |
||||
// text value. Apply the transformations for Text columns. |
||||
// * Empty arrays are treated as empty text. |
||||
message TextArrayTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The transformation that the training pipeline will apply to the input |
||||
// columns. |
||||
oneof transformation_detail { |
||||
AutoTransformation auto = 1; |
||||
|
||||
NumericTransformation numeric = 2; |
||||
|
||||
CategoricalTransformation categorical = 3; |
||||
|
||||
TimestampTransformation timestamp = 4; |
||||
|
||||
TextTransformation text = 5; |
||||
|
||||
NumericArrayTransformation repeated_numeric = 6; |
||||
|
||||
CategoricalArrayTransformation repeated_categorical = 7; |
||||
|
||||
TextArrayTransformation repeated_text = 8; |
||||
} |
||||
} |
||||
|
||||
// A duration of time expressed in time granularity units. |
||||
message Period { |
||||
// The time granularity unit of this time period. |
||||
// The supported units are: |
||||
// "hour" |
||||
// "day" |
||||
// "week" |
||||
// "month" |
||||
// "year" |
||||
string unit = 1; |
||||
|
||||
// The number of units per period, e.g. 3 weeks or 2 months. |
||||
int64 quantity = 2; |
||||
} |
||||
|
||||
// The name of the column that the model is to predict. |
||||
string target_column = 1; |
||||
|
||||
// The name of the column that identifies the time series. |
||||
string time_series_identifier_column = 2; |
||||
|
||||
// The name of the column that identifies time order in the time series. |
||||
string time_column = 3; |
||||
|
||||
// Each transformation will apply a transform function to the given input column. |
||||
// And the result will be used for training. |
||||
// When creating transformation for BigQuery Struct column, the column should |
||||
// be flattened using "." as the delimiter. |
||||
repeated Transformation transformations = 4; |
||||
|
||||
// Objective function the model is optimizing towards. The training process |
||||
// creates a model that optimizes the value of the objective |
||||
// function over the validation set. |
||||
// |
||||
// The supported optimization objectives: |
||||
// "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). |
||||
// "minimize-mae" - Minimize mean-absolute error (MAE). |
||||
// "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). |
||||
// "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). |
||||
// "minimize-wape-mae" - Minimize the combination of weighted absolute |
||||
// percentage error (WAPE) and mean-absolute-error (MAE). |
||||
string optimization_objective = 5; |
||||
|
||||
// Required. The train budget of creating this model, expressed in milli node |
||||
// hours i.e. 1,000 value in this field means 1 node hour. |
||||
// |
||||
// The training cost of the model will not exceed this budget. The final cost |
||||
// will be attempted to be close to the budget, though it may end up being (even) |
||||
// noticeably smaller - at the backend's discretion. This especially may |
||||
// happen when further model training ceases to provide any improvements. |
||||
// |
||||
// If the budget is set to a value known to be insufficient to train a |
||||
// model for the given dataset, the training won't be attempted and |
||||
// will error. |
||||
// |
||||
// The train budget must be between 1,000 and 72,000 milli node hours, |
||||
// inclusive. |
||||
int64 train_budget_milli_node_hours = 6; |
||||
|
||||
// Column name that should be used as the weight column. |
||||
// Higher values in this column give more importance to the row |
||||
// during model training. The column must have numeric values between 0 and |
||||
// 10000, inclusive; 0 means the row is ignored for training. If weight |
||||
// column field is not set, then all rows are assumed to have equal weight |
||||
// of 1. |
||||
string weight_column = 7; |
||||
|
||||
// Column names that should be used as static columns. |
||||
// The value of these columns are static per time series. |
||||
repeated string static_columns = 8; |
||||
|
||||
// Column names that should be used as time variant past only columns. |
||||
// These columns contain information for the given entity (identified by the |
||||
// time_series_identifier_column) that is known for the past but not the |
||||
// future (e.g. population of a city in a given year, or weather on a given |
||||
// day). |
||||
repeated string time_variant_past_only_columns = 9; |
||||
|
||||
// Column names that should be used as time variant past and future columns. |
||||
// These columns contain information for the given entity (identified by the |
||||
// key column) that is known for the past and the future. |
||||
repeated string time_variant_past_and_future_columns = 10; |
||||
|
||||
// Expected difference in time granularity between rows in the data. If it is |
||||
// not set, the period is inferred from data. |
||||
Period period = 11; |
||||
|
||||
// The number of periods offset into the future as the start of the forecast |
||||
// window (the window of future values to predict, relative to the present), |
||||
// where each period is one unit of granularity as defined by the `period` |
||||
// field above. Defaults to 0. Inclusive. |
||||
int64 forecast_window_start = 12; |
||||
|
||||
// The number of periods offset into the future as the end of the forecast |
||||
// window (the window of future values to predict, relative to the present), |
||||
// where each period is one unit of granularity as defined by the `period` |
||||
// field above. Inclusive. |
||||
int64 forecast_window_end = 13; |
||||
|
||||
// The number of periods offset into the past to restrict past sequence, where |
||||
// each period is one unit of granularity as defined by the `period`. Default |
||||
// value 0 means the algorithm is left to define the value. Inclusive. |
||||
int64 past_horizon = 14; |
||||
|
||||
// Configuration for exporting test set predictions to a BigQuery table. If |
||||
// this configuration is absent, then the export is not performed. |
||||
ExportEvaluatedDataItemsConfig export_evaluated_data_items_config = 15; |
||||
} |
||||
|
||||
// Model metadata specific to AutoML Forecasting. |
||||
message AutoMlForecastingMetadata { |
||||
// Output only. The actual training cost of the model, expressed in milli |
||||
// node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed |
||||
// to not exceed the train budget. |
||||
int64 train_cost_milli_node_hours = 1; |
||||
} |
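To make the interaction of `period`, `forecast_window_start`, and `forecast_window_end` concrete, here is a small sketch (the helper name and the days-only simplification are illustrative, not part of the schema) that materializes the inclusive forecast window as timestamps:

from datetime import datetime, timedelta

def forecast_window(now, days_per_unit, quantity, window_start, window_end):
    # One period is `quantity` units of granularity; here one unit is a day.
    step = timedelta(days=days_per_unit * quantity)
    # Both window offsets are inclusive and are measured in periods from `now`.
    return [now + step * k for k in range(window_start, window_end + 1)]

# Period of one week (7 x "day"), forecasting periods 1 through 4 ahead.
print(forecast_window(datetime(2020, 11, 18), 1, 7, 1, 4))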
@ -0,0 +1,125 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLImageClassificationProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Image Classification Model. |
||||
message AutoMlImageClassification { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlImageClassificationInputs inputs = 1; |
||||
|
||||
// The metadata information. |
||||
AutoMlImageClassificationMetadata metadata = 2; |
||||
} |
||||
|
||||
message AutoMlImageClassificationInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A Model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. |
||||
// Default. |
||||
CLOUD = 1; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud, can also be exported (see ModelService.ExportModel) as TensorFlow |
||||
// or Core ML model and used on a mobile or edge device afterwards. |
||||
// Expected to have low latency, but may have lower prediction |
||||
// quality than other mobile models. |
||||
MOBILE_TF_LOW_LATENCY_1 = 2; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud, can also be exported (see ModelService.ExportModel) as TensorFlow |
||||
// or Core ML model and used on a mobile or edge device afterwards. |
||||
MOBILE_TF_VERSATILE_1 = 3; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud, can also be exported (see ModelService.ExportModel) as TensorFlow |
||||
// or Core ML model and used on a mobile or edge device afterwards. |
||||
// Expected to have a higher latency, but should also have a higher |
||||
// prediction quality than other mobile models. |
||||
MOBILE_TF_HIGH_ACCURACY_1 = 4; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
|
||||
// The ID of the `base` model. If it is specified, the new model will be |
||||
// trained based on the `base` model. Otherwise, the new model will be |
||||
// trained from scratch. The `base` model must be in the same |
||||
// Project and Location as the new Model to train, and have the same |
||||
// modelType. |
||||
string base_model_id = 2; |
||||
|
||||
// The training budget of creating this model, expressed in milli node |
||||
// hours i.e. 1,000 value in this field means 1 node hour. The actual |
||||
// metadata.costMilliNodeHours will be equal or less than this value. |
||||
// If further model training ceases to provide any improvements, it will |
||||
// stop without using the full budget and the metadata.successfulStopReason |
||||
// will be `model-converged`. |
||||
// Note, node_hour = actual_hour * number_of_nodes_involved. |
||||
// For modelType `cloud`(default), the budget must be between 8,000 |
||||
// and 800,000 milli node hours, inclusive. The default value is 192,000 |
||||
// which represents one day in wall time, considering 8 nodes are used. |
||||
// For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, |
||||
// `mobile-tf-high-accuracy-1`, the training budget must be between |
||||
// 1,000 and 100,000 milli node hours, inclusive. |
||||
// The default value is 24,000 which represents one day in wall time on a |
||||
// single node that is used. |
||||
int64 budget_milli_node_hours = 3; |
||||
|
||||
// Use the entire training budget. This disables the early stopping feature. |
||||
// When false the early stopping feature is enabled, which means that |
||||
// AutoML Image Classification might stop training before the entire |
||||
// training budget has been used. |
||||
bool disable_early_stopping = 4; |
||||
|
||||
// If false, a single-label (multi-class) Model will be trained (i.e. |
||||
// assuming that for each image just up to one annotation may be |
||||
// applicable). If true, a multi-label Model will be trained (i.e. |
||||
// assuming that for each image multiple annotations may be applicable). |
||||
bool multi_label = 5; |
||||
} |
||||
|
||||
message AutoMlImageClassificationMetadata { |
||||
enum SuccessfulStopReason { |
||||
// Should not be set. |
||||
SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0; |
||||
|
||||
// The inputs.budgetMilliNodeHours had been reached. |
||||
BUDGET_REACHED = 1; |
||||
|
||||
// Further training of the Model ceased to increase its quality, since it |
||||
// already has converged. |
||||
MODEL_CONVERGED = 2; |
||||
} |
||||
|
||||
// The actual training cost of creating this model, expressed in |
||||
// milli node hours, i.e. 1,000 value in this field means 1 node hour. |
||||
// Guaranteed to not exceed inputs.budgetMilliNodeHours. |
||||
int64 cost_milli_node_hours = 1; |
||||
|
||||
// For successful job completions, this is the reason why the job has |
||||
// finished. |
||||
SuccessfulStopReason successful_stop_reason = 2; |
||||
} |
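The budget arithmetic above is easy to misread, so a tiny worked example (assuming 8 parallel nodes, as in the default `cloud` budget described above):

def wall_clock_hours(budget_milli_node_hours, num_nodes):
    # 1,000 milli node hours equal 1 node hour; dividing by the node count
    # gives the approximate wall-clock time the budget buys.
    return budget_milli_node_hours / 1000 / num_nodes

print(wall_clock_hours(192_000, 8))  # 24.0 hours, i.e. one day of wall time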
@ -0,0 +1,117 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLImageObjectDetectionProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Image Object Detection Model. |
||||
message AutoMlImageObjectDetection { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlImageObjectDetectionInputs inputs = 1; |
||||
|
||||
// The metadata information |
||||
AutoMlImageObjectDetectionMetadata metadata = 2; |
||||
} |
||||
|
||||
message AutoMlImageObjectDetectionInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. Expected to have a higher latency, but should also have a |
||||
// higher prediction quality than other cloud models. |
||||
CLOUD_HIGH_ACCURACY_1 = 1; |
||||
|
||||
// A model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. Expected to have a low latency, but may have lower |
||||
// prediction quality than other cloud models. |
||||
CLOUD_LOW_LATENCY_1 = 2; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud can also be exported (see ModelService.ExportModel) and |
||||
// used on a mobile or edge device with TensorFlow afterwards. |
||||
// Expected to have low latency, but may have lower prediction |
||||
// quality than other mobile models. |
||||
MOBILE_TF_LOW_LATENCY_1 = 3; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud can also be exported (see ModelService.ExportModel) and |
||||
// used on a mobile or edge device with TensorFlow afterwards. |
||||
MOBILE_TF_VERSATILE_1 = 4; |
||||
|
||||
// A model that, in addition to being available within Google |
||||
// Cloud, can also be exported (see ModelService.ExportModel) and |
||||
// used on a mobile or edge device with TensorFlow afterwards. |
||||
// Expected to have a higher latency, but should also have a higher |
||||
// prediction quality than other mobile models. |
||||
MOBILE_TF_HIGH_ACCURACY_1 = 5; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
|
||||
// The training budget of creating this model, expressed in milli node |
||||
// hours i.e. 1,000 value in this field means 1 node hour. The actual |
||||
// metadata.costMilliNodeHours will be equal or less than this value. |
||||
// If further model training ceases to provide any improvements, it will |
||||
// stop without using the full budget and the metadata.successfulStopReason |
||||
// will be `model-converged`. |
||||
// Note, node_hour = actual_hour * number_of_nodes_involved. |
||||
// For modelType `cloud`(default), the budget must be between 20,000 |
||||
// and 900,000 milli node hours, inclusive. The default value is 216,000 |
||||
// which represents one day in wall time, considering 9 nodes are used. |
||||
// For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, |
||||
// `mobile-tf-high-accuracy-1` |
||||
// the training budget must be between 1,000 and 100,000 milli node hours, |
||||
// inclusive. The default value is 24,000 which represents one day in |
||||
// wall time on a single node that is used. |
||||
int64 budget_milli_node_hours = 2; |
||||
|
||||
// Use the entire training budget. This disables the early stopping feature. |
||||
// When false the early stopping feature is enabled, which means that AutoML |
||||
// Image Object Detection might stop training before the entire training |
||||
// budget has been used. |
||||
bool disable_early_stopping = 3; |
||||
} |
||||
|
||||
message AutoMlImageObjectDetectionMetadata { |
||||
enum SuccessfulStopReason { |
||||
// Should not be set. |
||||
SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0; |
||||
|
||||
// The inputs.budgetMilliNodeHours had been reached. |
||||
BUDGET_REACHED = 1; |
||||
|
||||
// Further training of the Model ceased to increase its quality, since it |
||||
// already has converged. |
||||
MODEL_CONVERGED = 2; |
||||
} |
||||
|
||||
// The actual training cost of creating this model, expressed in |
||||
// milli node hours, i.e. 1,000 value in this field means 1 node hour. |
||||
// Guaranteed to not exceed inputs.budgetMilliNodeHours. |
||||
int64 cost_milli_node_hours = 1; |
||||
|
||||
// For successful job completions, this is the reason why the job has |
||||
// finished. |
||||
SuccessfulStopReason successful_stop_reason = 2; |
||||
} |
@ -0,0 +1,96 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLImageSegmentationProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Image Segmentation Model. |
||||
message AutoMlImageSegmentation { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlImageSegmentationInputs inputs = 1; |
||||
|
||||
// The metadata information. |
||||
AutoMlImageSegmentationMetadata metadata = 2; |
||||
} |
||||
|
||||
message AutoMlImageSegmentationInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A model to be used via prediction calls to uCAIP API. Expected |
||||
// to have a higher latency, but should also have a higher prediction |
||||
// quality than other models. |
||||
CLOUD_HIGH_ACCURACY_1 = 1; |
||||
|
||||
// A model to be used via prediction calls to uCAIP API. Expected |
||||
// to have a lower latency but relatively lower prediction quality. |
||||
CLOUD_LOW_ACCURACY_1 = 2; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
|
||||
// The training budget of creating this model, expressed in milli node |
||||
// hours i.e. 1,000 value in this field means 1 node hour. The actual |
||||
// metadata.costMilliNodeHours will be equal or less than this value. |
||||
// If further model training ceases to provide any improvements, it will |
||||
// stop without using the full budget and the metadata.successfulStopReason |
||||
// will be `model-converged`. |
||||
// Note, node_hour = actual_hour * number_of_nodes_involved. Or |
||||
// actaul_wall_clock_hours = train_budget_milli_node_hours / |
||||
// (number_of_nodes_involved * 1000) |
||||
// For modelType `cloud-high-accuracy-1`(default), the budget must be between |
||||
// 20,000 and 2,000,000 milli node hours, inclusive. The default value is |
||||
// 192,000 which represents one day in wall time |
||||
// (1000 milli * 24 hours * 8 nodes). |
||||
int64 budget_milli_node_hours = 2; |
||||
|
||||
// The ID of the `base` model. If it is specified, the new model will be |
||||
// trained based on the `base` model. Otherwise, the new model will be |
||||
// trained from scratch. The `base` model must be in the same |
||||
// Project and Location as the new Model to train, and have the same |
||||
// modelType. |
||||
string base_model_id = 3; |
||||
} |
||||
|
||||
message AutoMlImageSegmentationMetadata { |
||||
enum SuccessfulStopReason { |
||||
// Should not be set. |
||||
SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0; |
||||
|
||||
// The inputs.budgetMilliNodeHours had been reached. |
||||
BUDGET_REACHED = 1; |
||||
|
||||
// Further training of the Model ceased to increase its quality, since it |
||||
// already has converged. |
||||
MODEL_CONVERGED = 2; |
||||
} |
||||
|
||||
// The actual training cost of creating this model, expressed in |
||||
// milli node hours, i.e. 1,000 value in this field means 1 node hour. |
||||
// Guaranteed to not exceed inputs.budgetMilliNodeHours. |
||||
int64 cost_milli_node_hours = 1; |
||||
|
||||
// For successful job completions, this is the reason why the job has |
||||
// finished. |
||||
SuccessfulStopReason successful_stop_reason = 2; |
||||
} |
@ -0,0 +1,278 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto"; |
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLTablesProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Tables Model. |
||||
message AutoMlTables { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlTablesInputs inputs = 1; |
||||
|
||||
// The metadata information. |
||||
AutoMlTablesMetadata metadata = 2; |
||||
} |
||||
|
||||
message AutoMlTablesInputs { |
||||
message Transformation { |
||||
// The training pipeline will infer the proper transformation based on the |
||||
// statistics of the dataset. |
||||
message AutoTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The value converted to float32. |
||||
// * The z_score of the value. |
||||
// * log(value+1) when the value is greater than or equal to 0. Otherwise, |
||||
// this transformation is not applied and the value is considered a |
||||
// missing value. |
||||
// * z_score of log(value+1) when the value is greater than or equal to 0. |
||||
// Otherwise, this transformation is not applied and the value is |
||||
// considered a missing value. |
||||
// * A boolean value that indicates whether the value is valid. |
||||
message NumericTransformation { |
||||
string column_name = 1; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 2; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The categorical string as is--no change to case, punctuation, |
||||
// spelling, tense, and so on. |
||||
// * Convert the category name to a dictionary lookup index and generate an |
||||
// embedding for each index. |
||||
// * Categories that appear less than 5 times in the training dataset are |
||||
// treated as the "unknown" category. The "unknown" category gets its own |
||||
// special lookup index and resulting embedding. |
||||
message CategoricalTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * Apply the transformation functions for Numerical columns. |
||||
// * Determine the year, month, day, and weekday. Treat each value from the |
||||
// timestamp as a Categorical column. |
||||
// * Invalid numerical values (for example, values that fall outside of a |
||||
// typical timestamp range, or are extreme values) receive no special |
||||
// treatment and are not removed. |
||||
message TimestampTransformation { |
||||
string column_name = 1; |
||||
|
||||
// The format in which that time field is expressed. The time_format must |
||||
// either be one of: |
||||
// * `unix-seconds` |
||||
// * `unix-milliseconds` |
||||
// * `unix-microseconds` |
||||
// * `unix-nanoseconds` |
||||
// (for respectively number of seconds, milliseconds, microseconds and |
||||
// nanoseconds since start of the Unix epoch); |
||||
// or be written in `strftime` syntax. If time_format is not set, then the |
||||
// default format is RFC 3339 `date-time` format, where |
||||
// `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z) |
||||
string time_format = 2; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 3; |
||||
} |
||||
|
||||
// The training pipeline will perform the following transformation functions. |
||||
// * The text as is--no change to case, punctuation, spelling, tense, and |
||||
// so on. |
||||
// * Tokenize text to words. Convert each word to a dictionary lookup |
||||
// index and generate an embedding for each index. Combine the embedding of all |
||||
// elements into a single embedding using the mean. |
||||
// * Tokenization is based on unicode script boundaries. |
||||
// * Missing values get their own lookup index and resulting embedding. |
||||
// * Stop-words receive no special treatment and are not removed. |
||||
message TextTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// Treats the column as a numerical array and performs the following |
||||
// transformation functions. |
||||
// * All transformations for Numerical types applied to the average of |
||||
// all elements. |
||||
// * The average of empty arrays is treated as zero. |
||||
message NumericArrayTransformation { |
||||
string column_name = 1; |
||||
|
||||
// If invalid values are allowed, the training pipeline will create a |
||||
// boolean feature that indicates whether the value is valid. |
||||
// Otherwise, the training pipeline will discard the input row from |
||||
// the training data. |
||||
bool invalid_values_allowed = 2; |
||||
} |
||||
|
||||
// Treats the column as a categorical array and performs the following |
||||
// transformation functions. |
||||
// * For each element in the array, convert the category name to a |
||||
// dictionary lookup index and generate an embedding for each index. |
||||
// Combine the embedding of all elements into a single embedding using |
||||
// the mean. |
||||
// * Empty arrays are treated as an embedding of zeroes. |
||||
message CategoricalArrayTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// Treats the column as a text array and performs the following transformation |
||||
// functions. |
||||
// * Concatenate all text values in the array into a single text value |
||||
// using a space (" ") as a delimiter, and then treat the result as a single |
||||
// text value. Apply the transformations for Text columns. |
||||
// * Empty arrays are treated as empty text. |
||||
message TextArrayTransformation { |
||||
string column_name = 1; |
||||
} |
||||
|
||||
// The transformation that the training pipeline will apply to the input |
||||
// columns. |
||||
oneof transformation_detail { |
||||
AutoTransformation auto = 1; |
||||
|
||||
NumericTransformation numeric = 2; |
||||
|
||||
CategoricalTransformation categorical = 3; |
||||
|
||||
TimestampTransformation timestamp = 4; |
||||
|
||||
TextTransformation text = 5; |
||||
|
||||
NumericArrayTransformation repeated_numeric = 6; |
||||
|
||||
CategoricalArrayTransformation repeated_categorical = 7; |
||||
|
||||
TextArrayTransformation repeated_text = 8; |
||||
} |
||||
} |
||||
|
||||
// Additional optimization objective configuration. Required for |
||||
// `maximize-precision-at-recall` and `maximize-recall-at-precision`, |
||||
// otherwise unused. |
||||
oneof additional_optimization_objective_config { |
||||
// Required when optimization_objective is "maximize-precision-at-recall". |
||||
// Must be between 0 and 1, inclusive. |
||||
float optimization_objective_recall_value = 5; |
||||
|
||||
// Required when optimization_objective is "maximize-recall-at-precision". |
||||
// Must be between 0 and 1, inclusive. |
||||
float optimization_objective_precision_value = 6; |
||||
} |
||||
|
||||
// The type of prediction the Model is to produce. |
||||
// "classification" - Predict one out of multiple target values is |
||||
// picked for each row. |
||||
// "regression" - Predict a value based on its relation to other values. |
||||
// This type is available only to columns that contain |
||||
// semantically numeric values, i.e. integers or floating |
||||
// point numbers, even if stored as e.g. strings. |
||||
string prediction_type = 1; |
||||
|
||||
// The column name of the target column that the model is to predict. |
||||
string target_column = 2; |
||||
|
||||
// Each transformation will apply a transform function to a given input column, |
||||
// and the result will be used for training; see the sketch after this message. |
||||
// When creating a transformation for a BigQuery Struct column, the column should |
||||
// be flattened using "." as the delimiter. |
||||
repeated Transformation transformations = 3; |
||||
|
||||
// Objective function the model is optimizing towards. The training process |
||||
// creates a model that maximizes/minimizes the value of the objective |
||||
// function over the validation set. |
||||
// |
||||
// The supported optimization objectives depend on the prediction type. |
||||
// If the field is not set, a default objective function is used. |
||||
// |
||||
// classification (binary): |
||||
// "maximize-au-roc" (default) - Maximize the area under the receiver |
||||
// operating characteristic (ROC) curve. |
||||
// "minimize-log-loss" - Minimize log loss. |
||||
// "maximize-au-prc" - Maximize the area under the precision-recall curve. |
||||
// "maximize-precision-at-recall" - Maximize precision for a specified |
||||
// recall value. |
||||
// "maximize-recall-at-precision" - Maximize recall for a specified |
||||
// precision value. |
||||
// |
||||
// classification (multi-class): |
||||
// "minimize-log-loss" (default) - Minimize log loss. |
||||
// |
||||
// regression: |
||||
// "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). |
||||
// "minimize-mae" - Minimize mean-absolute error (MAE). |
||||
// "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). |
||||
string optimization_objective = 4; |
||||
|
||||
// Required. The train budget of creating this model, expressed in milli node |
||||
// hours, i.e. a value of 1,000 in this field means 1 node hour. |
||||
// |
||||
// The training cost of the model will not exceed this budget. The final cost |
||||
// will be attempted to be close to the budget, though it may end up being |
||||
// noticeably smaller - at the backend's discretion. This especially may |
||||
// happen when further model training ceases to provide any improvements. |
||||
// |
||||
// If the budget is set to a value known to be insufficient to train a |
||||
// model for the given dataset, the training won't be attempted and |
||||
// will error. |
||||
// |
||||
// The train budget must be between 1,000 and 72,000 milli node hours, |
||||
// inclusive. |
||||
int64 train_budget_milli_node_hours = 7; |
||||
|
||||
// Use the entire training budget. This disables the early stopping feature. |
||||
// By default, the early stopping feature is enabled, which means that AutoML |
||||
// Tables might stop training before the entire training budget has been used. |
||||
bool disable_early_stopping = 8; |
||||
|
||||
// Column name that should be used as the weight column. |
||||
// Higher values in this column give more importance to the row |
||||
// during model training. The column must have numeric values between 0 and |
||||
// 10000, inclusive; 0 means the row is ignored for training. If the weight |
||||
// column field is not set, then all rows are assumed to have equal weight |
||||
// of 1. |
||||
string weight_column_name = 9; |
||||
|
||||
// Configuration for exporting test set predictions to a BigQuery table. If |
||||
// this configuration is absent, then the export is not performed. |
||||
ExportEvaluatedDataItemsConfig export_evaluated_data_items_config = 10; |
||||
} |
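For orientation, a minimal sketch of an AutoMlTablesInputs payload written as a Python dict in proto3 JSON form; the column names and values below are illustrative placeholders, not part of the schema:

auto_ml_tables_inputs = {
    "predictionType": "classification",
    "targetColumn": "churned",
    "transformations": [
        {"auto": {"columnName": "signup_date"}},
        {"numeric": {"columnName": "monthly_spend", "invalidValuesAllowed": True}},
        {"timestamp": {"columnName": "last_login", "timeFormat": "unix-seconds"}},
    ],
    # "maximize-precision-at-recall" requires the recall value from the
    # additional_optimization_objective_config oneof.
    "optimizationObjective": "maximize-precision-at-recall",
    "optimizationObjectiveRecallValue": 0.8,
    "trainBudgetMilliNodeHours": 1000,  # documented minimum: 1 node hour
    "disableEarlyStopping": False,
}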
||||
|
||||
// Model metadata specific to AutoML Tables. |
||||
message AutoMlTablesMetadata { |
||||
// Output only. The actual training cost of the model, expressed in milli |
||||
// node hours, i.e. a value of 1,000 in this field means 1 node hour. Guaranteed |
||||
// to not exceed the train budget. |
||||
int64 train_cost_milli_node_hours = 1; |
||||
} |
@ -0,0 +1,34 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLTextClassificationProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Text Classification Model. |
||||
message AutoMlTextClassification { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlTextClassificationInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlTextClassificationInputs { |
||||
bool multi_label = 1; |
||||
} |
@ -0,0 +1,34 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLTextExtractionProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Text Extraction Model. |
||||
message AutoMlTextExtraction { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlTextExtractionInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlTextExtractionInputs { |
||||
|
||||
} |
@ -0,0 +1,41 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLTextSentimentProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Text Sentiment Model. |
||||
message AutoMlTextSentiment { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlTextSentimentInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlTextSentimentInputs { |
||||
// A sentiment is expressed as an integer ordinal, where higher value |
||||
// means a more positive sentiment. The range of sentiments that will be used |
||||
// is between 0 and sentimentMax (inclusive on both ends), and all the values |
||||
// in the range must be represented in the dataset before a model can be |
||||
// created. |
||||
// Only the Annotations with this sentimentMax will be used for training. |
||||
// sentimentMax value must be between 1 and 10 (inclusive). |
||||
int32 sentiment_max = 1; |
||||
} |
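As an illustrative reading of the sentimentMax constraint above (a sketch, not part of the API):

sentiment_max = 4  # must be between 1 and 10, inclusive
assert 1 <= sentiment_max <= 10
# Every ordinal in 0..sentiment_max must be represented in the dataset:
valid_ordinals = set(range(sentiment_max + 1))  # {0, 1, 2, 3, 4}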
@ -0,0 +1,49 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLVideoActionRecognitionProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Video Action Recognition |
||||
// Model. |
||||
message AutoMlVideoActionRecognition { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlVideoActionRecognitionInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlVideoActionRecognitionInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. Default. |
||||
CLOUD = 1; |
||||
|
||||
// A model that, in addition to being available within Google Cloud, can |
||||
// also be exported (see ModelService.ExportModel) as a TensorFlow or |
||||
// TensorFlow Lite model and used on a mobile or edge device afterwards. |
||||
MOBILE_VERSATILE_1 = 2; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
} |
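A minimal sketch of the corresponding inputs payload as a Python dict in proto3 JSON form; CLOUD is the documented default model type:

auto_ml_video_action_recognition_inputs = {"modelType": "CLOUD"}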
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLVideoClassificationProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Video Classification Model. |
||||
message AutoMlVideoClassification { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlVideoClassificationInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlVideoClassificationInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. Default. |
||||
CLOUD = 1; |
||||
|
||||
// A model that, in addition to being available within Google Cloud, can |
||||
// also be exported (see ModelService.ExportModel) as a TensorFlow or |
||||
// TensorFlow Lite model and used on a mobile or edge device afterwards. |
||||
MOBILE_VERSATILE_1 = 2; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
} |
@ -0,0 +1,64 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "AutoMLVideoObjectTrackingProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// A TrainingJob that trains and uploads an AutoML Video ObjectTracking Model. |
||||
message AutoMlVideoObjectTracking { |
||||
// The input parameters of this TrainingJob. |
||||
AutoMlVideoObjectTrackingInputs inputs = 1; |
||||
} |
||||
|
||||
message AutoMlVideoObjectTrackingInputs { |
||||
enum ModelType { |
||||
// Should not be set. |
||||
MODEL_TYPE_UNSPECIFIED = 0; |
||||
|
||||
// A model best tailored to be used within Google Cloud, and which cannot |
||||
// be exported. Default. |
||||
CLOUD = 1; |
||||
|
||||
// A model that, in addition to being available within Google Cloud, can |
||||
// also be exported (see ModelService.ExportModel) as a TensorFlow or |
||||
// TensorFlow Lite model and used on a mobile or edge device afterwards. |
||||
MOBILE_VERSATILE_1 = 2; |
||||
|
||||
// A versatile model that is meant to be exported (see |
||||
// ModelService.ExportModel) and used on a Google Coral device. |
||||
MOBILE_CORAL_VERSATILE_1 = 3; |
||||
|
||||
// A model that trades off quality for low latency, to be exported (see |
||||
// ModelService.ExportModel) and used on a Google Coral device. |
||||
MOBILE_CORAL_LOW_LATENCY_1 = 4; |
||||
|
||||
// A versatile model that is meant to be exported (see |
||||
// ModelService.ExportModel) and used on an NVIDIA Jetson device. |
||||
MOBILE_JETSON_VERSATILE_1 = 5; |
||||
|
||||
// A model that trades off quality for low latency, to be exported (see |
||||
// ModelService.ExportModel) and used on an NVIDIA Jetson device. |
||||
MOBILE_JETSON_LOW_LATENCY_1 = 6; |
||||
} |
||||
|
||||
ModelType model_type = 1; |
||||
} |
@ -0,0 +1,39 @@ |
||||
// Copyright 2020 Google LLC |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.cloud.aiplatform.v1beta1.schema.trainingjob.definition; |
||||
|
||||
import "google/api/annotations.proto"; |
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/trainingjob/definition;definition"; |
||||
option java_multiple_files = true; |
||||
option java_outer_classname = "ExportEvaluatedDataItemsConfigProto"; |
||||
option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.definition"; |
||||
|
||||
// Configuration for exporting test set predictions to a BigQuery table. |
||||
message ExportEvaluatedDataItemsConfig { |
||||
// URI of desired destination BigQuery table. If not specified, then results |
||||
// are exported to the following auto-created BigQuery table: |
||||
// |
||||
// <project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples |
||||
string destination_bigquery_uri = 1; |
||||
|
||||
// If true and an export destination is specified, then the contents of the |
||||
// destination will be overwritten. Otherwise, if the export destination |
||||
// already exists, then the export operation will not trigger and a failure |
||||
// response is returned. |
||||
bool override_existing_table = 2; |
||||
} |
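A minimal sketch of this config as a Python dict in proto3 JSON form; the table URI below is only a placeholder, not a prescribed format:

export_evaluated_data_items_config = {
    "destinationBigqueryUri": "my-project:my_dataset.evaluated_examples",  # placeholder
    "overrideExistingTable": False,
}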