// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.dialogflow.cx.v3;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3/audio_config.proto";
import "google/cloud/dialogflow/cx/v3/intent.proto";
import "google/cloud/dialogflow/cx/v3/page.proto";
import "google/cloud/dialogflow/cx/v3/response_message.proto";
import "google/cloud/dialogflow/cx/v3/session_entity_type.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "cloud.google.com/go/dialogflow/cx/apiv3/cxpb;cxpb";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";
option (google.api.resource_definition) = {
type: "dialogflow.googleapis.com/Session"
pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
// A session represents an interaction with a user. You retrieve user input
// and pass it to the
// [DetectIntent][google.cloud.dialogflow.cx.v3.Sessions.DetectIntent] method to
// determine user intent and respond.
service Sessions {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/dialogflow";
// Processes a natural language query and returns structured, actionable data
// as a result. This method is not idempotent, because it may cause session
// entity types to be updated, which in turn might affect results of future
// queries.
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
option (google.api.http) = {
post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
body: "*"
additional_bindings {
post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
body: "*"
}
};
}
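// Illustrative sketch (not part of the API definition): calling DetectIntent
// with a text query, assuming the published Python client library
// (google-cloud-dialogflow-cx). All resource IDs are placeholders.
//
// ```
// from google.cloud import dialogflowcx_v3 as cx
//
// client = cx.SessionsClient()
// session = client.session_path("my-project", "global", "my-agent", "session-1")
// response = client.detect_intent(
//     request=cx.DetectIntentRequest(
//         session=session,
//         query_input=cx.QueryInput(
//             text=cx.TextInput(text="I want a large pizza"),
//             language_code="en",
//         ),
//     )
// )
// print(response.query_result.response_messages)
// ```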
// Processes a natural language query in audio format in a streaming fashion
// and returns structured, actionable data as a result. This method is only
// available via the gRPC API (not REST).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
returns (stream StreamingDetectIntentResponse) {}
// Returns preliminary intent match results, without changing the session
// status.
rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
option (google.api.http) = {
post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
body: "*"
additional_bindings {
post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
body: "*"
}
};
}
// Fulfills a matched intent returned by
// [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent]. Must be
// called after
// [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent], with
// input from
// [MatchIntentResponse][google.cloud.dialogflow.cx.v3.MatchIntentResponse].
// Otherwise, the behavior is undefined.
rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
option (google.api.http) = {
post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
body: "*"
additional_bindings {
post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
body: "*"
}
};
}
}
// The request to detect user's intent.
message DetectIntentRequest {
// Required. The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
// ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, the default 'draft' environment is
// assumed.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}
];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
// Instructs the speech synthesizer how to generate the output audio.
OutputAudioConfig output_audio_config = 4;
}
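// Illustrative sketch: building the `session` resource names described above,
// assuming the Python client. The environment-scoped form is assembled by
// hand; a `uuid4` string is exactly 36 characters, the maximum allowed.
//
// ```
// from google.cloud import dialogflowcx_v3 as cx
// import uuid
//
// # Draft environment (no environment segment in the path).
// session = cx.SessionsClient.session_path(
//     "my-project", "global", "my-agent", str(uuid.uuid4())
// )
//
// # Environment-scoped session name.
// env_session = (
//     "projects/my-project/locations/global/agents/my-agent"
//     "/environments/my-env/sessions/" + str(uuid.uuid4())
// )
// ```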
// The message returned from the DetectIntent method.
message DetectIntentResponse {
// Represents different DetectIntentResponse types.
enum ResponseType {
// Not specified. This should never happen.
RESPONSE_TYPE_UNSPECIFIED = 0;
// Partial response. For example, aggregated responses in a Fulfillment that
// enables `return_partial_response` can be returned as partial responses.
// WARNING: partial response is not eligible for barge-in.
PARTIAL = 1;
// Final response.
FINAL = 2;
}
// Output only. The unique identifier of the response. It can be used to
// locate a response in the training example set or for reporting issues.
string response_id = 1;
// The result of the conversational query.
QueryResult query_result = 2;
// The audio data bytes encoded as specified in the request.
// Note: The output audio is generated based on the values of default platform
// text responses found in the
// [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages]
// field. If multiple default text responses exist, they will be concatenated
// when generating audio. If no default platform text responses exist, the
// generated audio content will be empty.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
bytes output_audio = 4;
// The config used by the speech synthesizer to generate the output audio.
OutputAudioConfig output_audio_config = 5;
// Response type.
ResponseType response_type = 6;
// Indicates whether the partial response can be cancelled when a later
// response arrives. For example, if the agent specified some music as a
// partial response, it can be cancelled.
bool allow_cancellation = 7;
}
// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
// [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input]
// plus optionally
// [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params].
// If the client wants to receive an audio response, it should also contain
// [output_audio_config][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.output_audio_config].
//
// 2. If
// [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input]
// was set to
// [query_input.audio.config][google.cloud.dialogflow.cx.v3.AudioInput.config],
// all subsequent messages must contain
// [query_input.audio.audio][google.cloud.dialogflow.cx.v3.AudioInput.audio]
// to continue with Speech recognition. If you decide instead to detect an
// intent from text input after you have already started Speech recognition,
// please send a message with
// [query_input.text][google.cloud.dialogflow.cx.v3.QueryInput.text].
//
// However, note that:
//
// * Dialogflow will bill you for the audio duration so far.
// * Dialogflow discards all Speech recognition results in favor of the
// input text.
// * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request
// stream.
message StreamingDetectIntentRequest {
// The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
// ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, the default 'draft' environment is
// assumed.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
// Note: session must be set in the first request.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
string session = 1 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
// Instructs the speech synthesizer how to generate the output audio.
OutputAudioConfig output_audio_config = 4;
// Enable partial detect intent response. If this flag is not enabled, the
// response stream still contains only one final `DetectIntentResponse` even
// if some `Fulfillment`s in the agent have been configured to return partial
// responses.
bool enable_partial_response = 5;
// If true, `StreamingDetectIntentResponse.debugging_info` will get populated.
bool enable_debugging_info = 8;
}
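// Illustrative sketch of the request ordering described above, assuming the
// Python client: the first message carries `session` and the audio config,
// later messages carry only audio bytes, and exhausting the generator
// half-closes the stream. `audio_chunks` is a placeholder iterable of bytes.
//
// ```
// from google.cloud import dialogflowcx_v3 as cx
//
// def request_stream(session, audio_chunks):
//     # First message: session plus query_input with config only, no audio.
//     yield cx.StreamingDetectIntentRequest(
//         session=session,
//         query_input=cx.QueryInput(
//             audio=cx.AudioInput(
//                 config=cx.InputAudioConfig(
//                     audio_encoding=cx.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
//                     sample_rate_hertz=16000,
//                 )
//             ),
//             language_code="en",
//         ),
//         enable_partial_response=True,
//     )
//     # Subsequent messages: audio data only.
//     for chunk in audio_chunks:
//         yield cx.StreamingDetectIntentRequest(
//             query_input=cx.QueryInput(audio=cx.AudioInput(audio=chunk))
//         )
// ```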
// Cloud conversation info for easier debugging.
// It will get populated in `StreamingDetectIntentResponse` or
// `StreamingAnalyzeContentResponse` when the flag `enable_debugging_info` is
// set to true in the corresponding request.
message CloudConversationDebuggingInfo {
// Number of input audio data chunks in streaming requests.
int32 audio_data_chunks = 1;
// Time offset of the end of speech utterance relative to the
// beginning of the first audio chunk.
google.protobuf.Duration result_end_time_offset = 2;
// Duration of first audio chunk.
google.protobuf.Duration first_audio_duration = 3;
// Whether client used single utterance mode.
bool single_utterance = 5;
// Time offsets of the speech partial results relative to the beginning of
// the stream.
repeated google.protobuf.Duration speech_partial_results_end_times = 6;
// Time offsets of the speech final results (is_final=true) relative to the
// beginning of the stream.
repeated google.protobuf.Duration speech_final_results_end_times = 7;
// Total number of partial responses.
int32 partial_responses = 8;
// Time offset of Speaker ID stream close time relative to the Speech stream
// close time in milliseconds. Only meaningful for conversations involving
// passive verification.
int32 speaker_id_passive_latency_ms_offset = 9;
// Whether a barge-in event is triggered in this request.
bool bargein_event_triggered = 10;
// Whether speech uses single utterance mode.
bool speech_single_utterance = 11;
// Time offsets of the DTMF partial results relative to the beginning of
// the stream.
repeated google.protobuf.Duration dtmf_partial_results_times = 12;
// Time offsets of the DTMF final results relative to the beginning of
// the stream.
repeated google.protobuf.Duration dtmf_final_results_times = 13;
// Time offset of the end-of-single-utterance signal relative to the
// beginning of the stream.
google.protobuf.Duration single_utterance_end_time_offset = 14;
// No speech timeout settings observed at runtime.
google.protobuf.Duration no_speech_timeout = 15;
// Whether the streaming terminates with an injected text query.
bool is_input_text = 16;
// Client half close time in terms of input audio duration.
google.protobuf.Duration client_half_close_time_offset = 17;
// Client half close time in terms of API streaming duration.
google.protobuf.Duration client_half_close_streaming_time_offset = 18;
}
// The top-level message returned from the
// [StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent]
// method.
//
// Multiple response messages (N) can be returned in order.
//
// The first (N-1) responses set either the `recognition_result` or
// `detect_intent_response` field, depending on the request:
//
// * If the `StreamingDetectIntentRequest.query_input.audio` field was
// set, and the `StreamingDetectIntentRequest.enable_partial_response`
// field was false, the `recognition_result` field is populated for each
// of the (N-1) responses.
// See the
// [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult]
// message for details about the result message sequence.
//
// * If the `StreamingDetectIntentRequest.enable_partial_response` field was
// true, the `detect_intent_response` field is populated for each
// of the (N-1) responses, where 1 <= N <= 4.
// These responses set the
// [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type]
// field to `PARTIAL`.
//
// For the final Nth response message, the `detect_intent_response` is fully
// populated, and
// [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type]
// is set to `FINAL`.
message StreamingDetectIntentResponse {
// The output response.
oneof response {
// The result of speech recognition.
StreamingRecognitionResult recognition_result = 1;
// The response from detect intent.
DetectIntentResponse detect_intent_response = 2;
}
// Debugging info that would get populated when
// `StreamingDetectIntentRequest.enable_debugging_info` is set to true.
CloudConversationDebuggingInfo debugging_info = 4;
}
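// Illustrative sketch of consuming the response sequence described above with
// the Python client, dispatching on the `response` oneof. `request_stream` is
// the hypothetical generator from the request sketch earlier in this file.
//
// ```
// client = cx.SessionsClient()
// responses = client.streaming_detect_intent(
//     requests=request_stream(session, audio_chunks)
// )
// for response in responses:
//     which = cx.StreamingDetectIntentResponse.pb(response).WhichOneof("response")
//     if which == "recognition_result":
//         print("interim:", response.recognition_result.transcript)
//     elif which == "detect_intent_response":
//         dr = response.detect_intent_response
//         if dr.response_type == cx.DetectIntentResponse.ResponseType.FINAL:
//             print("final:", dr.query_result.response_messages)
// ```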
// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// While end-user audio is being processed, Dialogflow sends a series of
// results. Each result may contain a `transcript` value. A transcript
// represents a portion of the utterance. While the recognizer is processing
// audio, transcript values may be interim values or finalized values.
// Once a transcript is finalized, the `is_final` value is set to true and
// processing continues for the next transcript.
//
// If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
// was true, and the recognizer has completed processing audio,
// the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
// following (last) result contains the last finalized transcript.
//
// The complete end-user utterance is determined by concatenating the
// finalized transcript values received for the series of results.
//
// In the following example, single utterance is enabled. In the case where
// single utterance is not enabled, result 7 would not occur.
//
// ```
// Num | transcript              | message_type            | is_final
// --- | ----------------------- | ----------------------- | --------
// 1   | "tube"                  | TRANSCRIPT              | false
// 2   | "to be a"               | TRANSCRIPT              | false
// 3   | "to be"                 | TRANSCRIPT              | false
// 4   | "to be or not to be"    | TRANSCRIPT              | true
// 5   | "that's"                | TRANSCRIPT              | false
// 6   | "that is"               | TRANSCRIPT              | false
// 7   | unset                   | END_OF_SINGLE_UTTERANCE | unset
// 8   | " that is the question" | TRANSCRIPT              | true
// ```
//
// Concatenating the finalized transcripts with `is_final` set to true,
// the complete utterance becomes "to be or not to be that is the question".
message StreamingRecognitionResult {
// Type of the response message.
enum MessageType {
// Not specified. Should never be used.
MESSAGE_TYPE_UNSPECIFIED = 0;
// Message contains a (possibly partial) transcript.
TRANSCRIPT = 1;
// Event indicates that the server has detected the end of the user's speech
// utterance and expects no additional speech. Therefore, the server will
// not process additional audio (although it may subsequently return
// additional results). The client should stop sending additional audio
// data, half-close the gRPC connection, and wait for any additional results
// until the server closes the gRPC connection. This message is only sent if
// [`single_utterance`][google.cloud.dialogflow.cx.v3.InputAudioConfig.single_utterance]
// was set to `true`, and is not used otherwise.
END_OF_SINGLE_UTTERANCE = 2;
}
// Type of the result message.
MessageType message_type = 1;
// Transcript text representing the words that the user spoke.
// Populated if and only if `message_type` = `TRANSCRIPT`.
string transcript = 2;
// If `false`, the `StreamingRecognitionResult` represents an
// interim result that may change. If `true`, the recognizer will not return
// any further hypotheses about this piece of the audio. May only be populated
// for `message_type` = `TRANSCRIPT`.
bool is_final = 3;
// The Speech confidence between 0.0 and 1.0 for the current portion of audio.
// A higher number indicates an estimated greater likelihood that the
// recognized words are correct. The default of 0.0 is a sentinel value
// indicating that confidence was not set.
//
// This field is typically only provided if `is_final` is true and you should
// not rely on it being accurate or even set.
float confidence = 4;
// An estimate of the likelihood that the speech recognizer will
// not change its guess about this interim recognition result:
// * If the value is unspecified or 0.0, Dialogflow didn't compute the
// stability. In particular, Dialogflow will only provide stability for
// `TRANSCRIPT` results with `is_final = false`.
// * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
// unstable and 1.0 means completely stable.
float stability = 6;
// Word-specific information for the words recognized by Speech in
// [transcript][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult.transcript].
// Populated if and only if `message_type` = `TRANSCRIPT` and
// [InputAudioConfig.enable_word_info] is set.
repeated SpeechWordInfo speech_word_info = 7;
// Time offset of the end of this Speech recognition result relative to the
// beginning of the audio. Only populated for `message_type` =
// `TRANSCRIPT`.
google.protobuf.Duration speech_end_offset = 8;
// Detected language code for the transcript.
string language_code = 10;
}
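// Illustrative sketch: per the table above, the complete utterance is the
// concatenation of the `TRANSCRIPT` results whose `is_final` is true.
// `recognition_results` is a placeholder list of StreamingRecognitionResult
// messages collected from the stream.
//
// ```
// finals = [
//     r.transcript
//     for r in recognition_results
//     if r.message_type == cx.StreamingRecognitionResult.MessageType.TRANSCRIPT
//     and r.is_final
// ]
// utterance = "".join(finals)  # "to be or not to be that is the question"
// ```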
// Represents the parameters of a conversational query.
message QueryParameters {
// The time zone of this conversational query from the [time zone
// database](https://www.iana.org/time-zones), e.g., America/New_York,
// Europe/Paris. If not provided, the time zone specified in the agent is
// used.
string time_zone = 1;
// The geo location of this conversational query.
google.type.LatLng geo_location = 2;
// Additional session entity types to replace or extend developer entity types
// with. The entity synonyms apply to all languages and persist for the
// session of this query.
repeated SessionEntityType session_entity_types = 3;
// This field can be used to pass custom data into the webhook associated with
// the agent. Arbitrary JSON objects are supported.
// Some integrations that query a Dialogflow agent may provide additional
// information in the payload.
// In particular, for the Dialogflow Phone Gateway integration, this field has
// the form:
// ```
// {
// "telephony": {
// "caller_id": "+18558363987"
// }
// }
// ```
google.protobuf.Struct payload = 4;
// Additional parameters to be put into [session
// parameters][SessionInfo.parameters]. To remove a
// parameter from the session, clients should explicitly set the parameter
// value to null.
//
// You can reference the session parameters in the agent with the following
// format: $session.params.parameter-id.
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// * MapKey type: string
// * MapKey value: parameter name
// * MapValue type: If the parameter's entity type is a composite entity, use
// map; otherwise, depending on the parameter value type, it could be one of
// string, number, boolean, null, list, or map.
// * MapValue value: If the parameter's entity type is a composite entity, use
// a map from composite entity property names to property values; otherwise,
// use the parameter value.
google.protobuf.Struct parameters = 5;
// The unique identifier of the [page][google.cloud.dialogflow.cx.v3.Page] to
// override the [current page][QueryResult.current_page] in the session.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/flows/<Flow ID>/pages/<Page ID>`.
//
// If `current_page` is specified, the previous state of the session will be
// ignored by Dialogflow, including the [previous
// page][QueryResult.current_page] and the [previous session
// parameters][QueryResult.parameters].
// In most cases,
// [current_page][google.cloud.dialogflow.cx.v3.QueryParameters.current_page]
// and [parameters][google.cloud.dialogflow.cx.v3.QueryParameters.parameters]
// should be configured together to direct a session to a specific state.
string current_page = 6 [
(google.api.resource_reference) = { type: "dialogflow.googleapis.com/Page" }
];
// Whether to disable webhook calls for this request.
bool disable_webhook = 7;
// Configures whether sentiment analysis should be performed. If not
// provided, sentiment analysis is not performed.
bool analyze_query_text_sentiment = 8;
// This field can be used to pass HTTP headers for a webhook
// call. These headers will be sent to the webhook along with the headers that
// have been configured through the Dialogflow web console. The headers defined
// within this field will overwrite the headers configured through the
// Dialogflow console if there is a conflict. Header names are
// case-insensitive. Google's specified headers are not allowed, including
// "Host", "Content-Length", "Connection", "From", "User-Agent",
// "Accept-Encoding", "If-Modified-Since", "If-None-Match", "X-Forwarded-For",
// etc.
map<string, string> webhook_headers = 10;
// A list of flow versions to override for the request.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/flows/<Flow ID>/versions/<Version ID>`.
//
// If version 1 of flow X is included in this list, the traffic of
// flow X will go through version 1 regardless of the version configuration in
// the environment. Each flow can have at most one version specified in this
// list.
repeated string flow_versions = 14 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Version"
}];
// The channel which this query is for.
//
// If specified, only the
// [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] associated
// with the channel will be returned. If no
// [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] is
// associated with the channel, it falls back to the
// [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] with
// unspecified channel.
//
// If unspecified, the
// [ResponseMessage][google.cloud.dialogflow.cx.v3.ResponseMessage] with
// unspecified channel will be returned.
string channel = 15;
// Optional. Sets the Dialogflow session lifetime.
// By default, a Dialogflow session remains active and its data is stored for
// 30 minutes after the last request is sent for the session.
// This value should be no longer than 1 day.
google.protobuf.Duration session_ttl = 16
[(google.api.field_behavior) = OPTIONAL];
}
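// Illustrative sketch: populating QueryParameters with the Python client.
// This assumes proto-plus coerces plain dicts into `google.protobuf.Struct`
// fields; parameter names and values are placeholders.
//
// ```
// params = cx.QueryParameters(
//     time_zone="America/New_York",
//     # Setting a parameter to None (null) removes it from the session.
//     parameters={"size": "large", "topping": None},
//     payload={"telephony": {"caller_id": "+18558363987"}},
//     analyze_query_text_sentiment=True,
// )
// request = cx.DetectIntentRequest(
//     session=session, query_params=params, query_input=query_input
// )
// ```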
// Represents the query input. It can contain one of:
//
// 1. A conversational query in the form of text.
//
// 2. An intent query that specifies which intent to trigger.
//
// 3. Natural language speech audio to be processed.
//
// 4. An event to be triggered.
//
// 5. DTMF digits to invoke an intent and fill in parameter values.
message QueryInput {
// Required. The input specification.
oneof input {
// The natural language text to be processed.
TextInput text = 2;
// The intent to be triggered.
IntentInput intent = 3;
// The natural language speech audio to be processed.
AudioInput audio = 5;
// The event to be triggered.
EventInput event = 6;
// The DTMF event to be handled.
DtmfInput dtmf = 7;
}
// Required. The language of the input. See [Language
// Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
// for a list of the currently supported language codes. Note that queries in
// the same session do not necessarily need to specify the same language.
string language_code = 4 [(google.api.field_behavior) = REQUIRED];
}
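// Illustrative sketch: the `input` oneof above accepts exactly one input type
// per QueryInput. A few constructions with placeholder values, assuming the
// Python client:
//
// ```
// text_q = cx.QueryInput(text=cx.TextInput(text="hi"), language_code="en")
// event_q = cx.QueryInput(event=cx.EventInput(event="welcome"),
//                         language_code="en")
// dtmf_q = cx.QueryInput(dtmf=cx.DtmfInput(digits="123", finish_digit="#"),
//                        language_code="en")
// ```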
// Represents the result of a conversational query.
message QueryResult {
// The original conversational query.
oneof query {
// If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was
// provided as input, this field will contain a copy of the text.
string text = 1;
// If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as
// input, this field will contain a copy of the intent identifier. Format:
// `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/intents/<Intent ID>`.
string trigger_intent = 11 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}];
// If [natural language speech
// audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
// this field will contain the transcript for the audio.
string transcript = 12;
// If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as
// input, this field will contain the name of the event.
string trigger_event = 14;
// If a [DTMF][google.cloud.dialogflow.cx.v3.DtmfInput] was provided as
// input, this field will contain a copy of the
// [DtmfInput][google.cloud.dialogflow.cx.v3.DtmfInput].
DtmfInput dtmf = 23;
}
// The language that was triggered during intent detection.
// See [Language
// Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
// for a list of the currently supported language codes.
string language_code = 2;
// The collected [session
// parameters][google.cloud.dialogflow.cx.v3.SessionInfo.parameters].
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// * MapKey type: string
// * MapKey value: parameter name
// * MapValue type: If the parameter's entity type is a composite entity, use
// map; otherwise, depending on the parameter value type, it could be one of
// string, number, boolean, null, list, or map.
// * MapValue value: If the parameter's entity type is a composite entity, use
// a map from composite entity property names to property values; otherwise,
// use the parameter value.
google.protobuf.Struct parameters = 3;
// The list of rich messages returned to the client. Responses vary from
// simple text messages to more sophisticated, structured payloads used
// to drive complex logic.
repeated ResponseMessage response_messages = 4;
// The list of webhook call statuses, in the order of the call sequence.
repeated google.rpc.Status webhook_statuses = 13;
// The list of webhook payloads in
// [WebhookResponse.payload][google.cloud.dialogflow.cx.v3.WebhookResponse.payload],
// in the order of the call sequence. If a webhook call fails or doesn't return
// any payload, an empty `Struct` is used instead.
repeated google.protobuf.Struct webhook_payloads = 6;
// The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, but not all,
// fields are filled in this message, including but not limited to `name` and
// `display_name`.
Page current_page = 7;
// The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the
// conversational query. Some, but not all, fields are filled in this message,
// including but not limited to: `name` and `display_name`. This field is
// deprecated; please use
// [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match]
// instead.
Intent intent = 8 [deprecated = true];
// The intent detection confidence. Values range from 0.0 (completely
// uncertain) to 1.0 (completely certain).
// This value is for informational purpose only and is only used to
// help match the best intent within the classification threshold.
// This value may change for the same end-user expression at any time due to a
// model retraining or change in implementation.
// This field is deprecated; please use
// [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match]
// instead.
float intent_detection_confidence = 9 [deprecated = true];
// Intent match result; it could be an intent or an event.
Match match = 15;
// The free-form diagnostic info. For example, this field could contain
// webhook call latency. The fields of this data can change without notice,
// so you should not write code that depends on its structure.
//
// One of the fields is called "Alternative Matched Intents", which may
// aid with debugging. The following describes these intent results:
//
// - The list is empty if no intent was matched to end-user input.
// - Only intents that are referenced in the currently active flow are
// included.
// - The matched intent is included.
// - Other intents that could have matched end-user input, but did not match
// because they are referenced by intent routes that are out of
// [scope](https://cloud.google.com/dialogflow/cx/docs/concept/handler#scope),
// are included.
// - Other intents referenced by intent routes in scope that matched end-user
// input, but had a lower confidence score, are included.
google.protobuf.Struct diagnostic_info = 10;
// The sentiment analysis result, which depends on
// [`analyze_query_text_sentiment`]
// [google.cloud.dialogflow.cx.v3.QueryParameters.analyze_query_text_sentiment],
// specified in the request.
SentimentAnalysisResult sentiment_analysis_result = 17;
}
// Represents the natural language text to be processed.
message TextInput {
// Required. The UTF-8 encoded natural language text to be processed. Text
// length must not exceed 256 characters.
string text = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents the intent to trigger programmatically rather than as a result of
// natural language processing.
message IntentInput {
// Required. The unique identifier of the intent.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/intents/<Intent ID>`.
string intent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}
];
}
// Represents the natural speech audio to be processed.
message AudioInput {
// Required. Instructs the speech recognizer how to process the speech audio.
InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];
// The natural language speech audio to be processed.
// A single request can contain up to 2 minutes of speech audio data.
// The [transcribed
// text][google.cloud.dialogflow.cx.v3.QueryResult.transcript] cannot contain
// more than 256 bytes.
//
// For non-streaming audio detect intent, both `config` and `audio` must be
// provided.
// For streaming audio detect intent, `config` must be provided in
// the first request and `audio` must be provided in all following requests.
bytes audio = 2;
}
// Represents the event to trigger.
message EventInput {
// Name of the event.
string event = 1;
}
// Represents the input for a DTMF event.
message DtmfInput {
// The DTMF digits.
string digits = 1;
// The finish digit (if any).
string finish_digit = 2;
}
// Represents one match result of [MatchIntent][].
message Match {
// Type of a Match.
enum MatchType {
// Not specified. Should never be used.
MATCH_TYPE_UNSPECIFIED = 0;
// The query was matched to an intent.
INTENT = 1;
// The query directly triggered an intent.
DIRECT_INTENT = 2;
// The query was used for parameter filling.
PARAMETER_FILLING = 3;
// No match was found for the query.
NO_MATCH = 4;
// Indicates an empty query.
NO_INPUT = 5;
// The query directly triggered an event.
EVENT = 6;
}
// The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the query.
// Some, but not all, fields are filled in this message, including but not
// limited to: `name` and `display_name`. Only filled for
// [`INTENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
Intent intent = 1;
// The event that matched the query. Filled for
// [`EVENT`][google.cloud.dialogflow.cx.v3.Match.MatchType],
// [`NO_MATCH`][google.cloud.dialogflow.cx.v3.Match.MatchType] and
// [`NO_INPUT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match types.
string event = 6;
// The collection of parameters extracted from the query.
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// * MapKey type: string
// * MapKey value: parameter name
// * MapValue type: If the parameter's entity type is a composite entity, use
// map; otherwise, depending on the parameter value type, it could be one of
// string, number, boolean, null, list, or map.
// * MapValue value: If the parameter's entity type is a composite entity, use
// a map from composite entity property names to property values; otherwise,
// use the parameter value.
google.protobuf.Struct parameters = 2;
// Final text input which was matched during MatchIntent. This value can be
// different from the original input sent in the request because of spelling
// correction or other processing.
string resolved_input = 3;
// Type of this [Match][google.cloud.dialogflow.cx.v3.Match].
MatchType match_type = 4;
// The confidence of this match. Values range from 0.0 (completely uncertain)
// to 1.0 (completely certain).
// This value is for informational purpose only and is only used to help match
// the best intent within the classification threshold. This value may change
// for the same end-user expression at any time due to a model retraining or
// change in implementation.
float confidence = 5;
}
// Request of [MatchIntent][].
message MatchIntentRequest {
// Required. The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
// ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, the default 'draft' environment is
// assumed.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}
];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
// Persist session parameter changes from `query_params`.
bool persist_parameter_changes = 5;
}
// Response of [MatchIntent][].
message MatchIntentResponse {
// The original conversational query.
oneof query {
// If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was
// provided as input, this field will contain a copy of the text.
string text = 1;
// If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as
// input, this field will contain a copy of the intent identifier. Format:
// `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/intents/<Intent ID>`.
string trigger_intent = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}];
// If [natural language speech
// audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
// this field will contain the transcript for the audio.
string transcript = 3;
// If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as
// input, this field will contain a copy of the event name.
string trigger_event = 6;
}
// Match results, if more than one, ordered by descending confidence that the
// particular intent matches the query.
repeated Match matches = 4;
// The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, but not all,
// fields are filled in this message, including but not limited to `name` and
// `display_name`.
Page current_page = 5;
}
// Request of [FulfillIntent][]
message FulfillIntentRequest {
// Must be the same as the corresponding MatchIntent request; otherwise the
// behavior is undefined.
MatchIntentRequest match_intent_request = 1;
// The matched intent/event to fulfill.
Match match = 2;
// Instructs the speech synthesizer how to generate output audio.
OutputAudioConfig output_audio_config = 3;
}
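// Illustrative sketch of the match-then-fulfill flow: FulfillIntentRequest
// must embed the same MatchIntentRequest that produced the match. Assumes the
// Python client; `session` and the query text are placeholders.
//
// ```
// match_req = cx.MatchIntentRequest(
//     session=session,
//     query_input=cx.QueryInput(
//         text=cx.TextInput(text="I want a large pizza"), language_code="en"
//     ),
// )
// match_resp = client.match_intent(request=match_req)
// if match_resp.matches:
//     fulfill_resp = client.fulfill_intent(
//         request=cx.FulfillIntentRequest(
//             match_intent_request=match_req,
//             match=match_resp.matches[0],  # highest-confidence match first
//         )
//     )
//     print(fulfill_resp.query_result.response_messages)
// ```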
// Response of [FulfillIntent][]
message FulfillIntentResponse {
// Output only. The unique identifier of the response. It can be used to
// locate a response in the training example set or for reporting issues.
string response_id = 1;
// The result of the conversational query.
QueryResult query_result = 2;
// The audio data bytes encoded as specified in the request.
// Note: The output audio is generated based on the values of default platform
// text responses found in the
// [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages]
// field. If multiple default text responses exist, they will be concatenated
// when generating audio. If no default platform text responses exist, the
// generated audio content will be empty.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
bytes output_audio = 3;
// The config used by the speech synthesizer to generate the output audio.
OutputAudioConfig output_audio_config = 4;
}
// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
message SentimentAnalysisResult {
// Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
// sentiment).
float score = 1;
// A non-negative number in the [0, +inf) range, which represents the absolute
// magnitude of sentiment, regardless of score (positive or negative).
float magnitude = 2;
}