Update cloud vision protos.

pull/57/head
Ethan Bao 9 years ago
parent 37987cddc8
commit 3748d006c4
  1. 53
      google/cloud/vision/v1/geometry.proto
  2. 10
      google/cloud/vision/v1/image_annotator.proto

@@ -0,0 +1,53 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";

package google.cloud.vision.v1;

option cc_enable_arenas = true;
option java_multiple_files = true;
option java_outer_classname = "GeometryProto";
option java_package = "com.google.cloud.vision.v1";
// A vertex represents a 2D point in the image.
// NOTE: vertex coordinates use the same scale as the original image.
message Vertex {
  // X coordinate.
  int32 x = 1;

  // Y coordinate.
  int32 y = 2;
}
// A bounding polygon for the detected image annotation.
message BoundingPoly {
  // The vertices that make up the bounding polygon.
  repeated Vertex vertices = 1;
}
// A 3D position in the image, used primarily for Face detection landmarks.
// A valid Position must have both x and y coordinates set.
// The position coordinates are in the same scale as the original image.
message Position {
  // X coordinate.
  float x = 1;

  // Y coordinate.
  float y = 2;

  // Z coordinate (or depth).
  float z = 3;
}

@@ -18,8 +18,6 @@ package google.cloud.vision.v1;
import "google/api/annotations.proto";
import "google/cloud/vision/v1/geometry.proto";
import "google/cloud/vision/v1/image_context_search_extension.proto";
import "google/cloud/vision/v1/query_annotation.proto";
import "google/rpc/status.proto";
import "google/type/color.proto";
import "google/type/latlng.proto";
@@ -223,7 +221,7 @@ message FaceAnnotation
Type type = 3;
// Face landmark position.
google.cloud.vision.v1.Position position = 4;
Position position = 4;
}
// The bounding polygon around the face. The coordinates of the bounding box
@@ -233,7 +231,7 @@ message FaceAnnotation
// Note that one or more x and/or y coordinates may not be generated in the
// BoundingPoly (the polygon will be unbounded) if only a partial face appears in
// the image to be annotated.
google.cloud.vision.v1.BoundingPoly bounding_poly = 1;
BoundingPoly bounding_poly = 1;
// This bounding polygon is tighter than the previous
// <code>boundingPoly</code>, and
@@ -242,7 +240,7 @@ message FaceAnnotation
// "amount of skin" visible in an image. It is not based on the
// landmarker results, only on the initial face detection, hence
// the <code>fd</code> (face detection) prefix.
google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;
BoundingPoly fd_bounding_poly = 2;
// Detected face landmarks.
repeated Landmark landmarks = 3;
@@ -339,7 +337,7 @@ message EntityAnnotation
// for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
// are produced for the entire text detected in an image region, followed by
// `boundingPoly`s for each word within the detected text.
google.cloud.vision.v1.BoundingPoly bounding_poly = 7;
BoundingPoly bounding_poly = 7;
// The location information for the detected entity. Multiple
// <code>LocationInfo</code> elements can be present since one location may

Loading…
Cancel
Save