// session.proto
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3/advanced_settings.proto";
import "google/cloud/dialogflow/cx/v3/audio_config.proto";
import "google/cloud/dialogflow/cx/v3/flow.proto";
import "google/cloud/dialogflow/cx/v3/intent.proto";
import "google/cloud/dialogflow/cx/v3/page.proto";
import "google/cloud/dialogflow/cx/v3/response_message.proto";
import "google/cloud/dialogflow/cx/v3/session_entity_type.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/cx/v3;cx";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Session"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
  43. // A session represents an interaction with a user. You retrieve user input
  44. // and pass it to the [DetectIntent][google.cloud.dialogflow.cx.v3.Sessions.DetectIntent] method to determine
  45. // user intent and respond.
  46. service Sessions {
  47. option (google.api.default_host) = "dialogflow.googleapis.com";
  48. option (google.api.oauth_scopes) =
  49. "https://www.googleapis.com/auth/cloud-platform,"
  50. "https://www.googleapis.com/auth/dialogflow";
  51. // Processes a natural language query and returns structured, actionable data
  52. // as a result. This method is not idempotent, because it may cause session
  53. // entity types to be updated, which in turn might affect results of future
  54. // queries.
  55. //
  56. // Note: Always use agent versions for production traffic.
  57. // See [Versions and
  58. // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  59. rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
  60. option (google.api.http) = {
  61. post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
  62. body: "*"
  63. additional_bindings {
  64. post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
  65. body: "*"
  66. }
  67. };
  68. }
  69. // Processes a natural language query in audio format in a streaming fashion
  70. // and returns structured, actionable data as a result. This method is only
  71. // available via the gRPC API (not REST).
  72. //
  73. // Note: Always use agent versions for production traffic.
  74. // See [Versions and
  75. // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  76. rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
  77. }
  78. // Returns preliminary intent match results, doesn't change the session
  79. // status.
  80. rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
  81. option (google.api.http) = {
  82. post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
  83. body: "*"
  84. additional_bindings {
  85. post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
  86. body: "*"
  87. }
  88. };
  89. }
  90. // Fulfills a matched intent returned by [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent].
  91. // Must be called after [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent], with input from
  92. // [MatchIntentResponse][google.cloud.dialogflow.cx.v3.MatchIntentResponse]. Otherwise, the behavior is undefined.
  93. rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
  94. option (google.api.http) = {
  95. post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
  96. body: "*"
  97. additional_bindings {
  98. post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
  99. body: "*"
  100. }
  101. };
  102. }
  103. }
  104. // The request to detect user's intent.
  105. message DetectIntentRequest {
  106. // Required. The name of the session this query is sent to.
  107. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  108. // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  109. // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  110. // If `Environment ID` is not specified, we assume default 'draft'
  111. // environment.
  112. // It's up to the API caller to choose an appropriate `Session ID`. It can be
  113. // a random number or some type of session identifiers (preferably hashed).
  114. // The length of the `Session ID` must not exceed 36 characters.
  115. //
  116. // For more information, see the [sessions
  117. // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  118. //
  119. // Note: Always use agent versions for production traffic.
  120. // See [Versions and
  121. // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  122. string session = 1 [
  123. (google.api.field_behavior) = REQUIRED,
  124. (google.api.resource_reference) = {
  125. type: "dialogflow.googleapis.com/Session"
  126. }
  127. ];
  128. // The parameters of this query.
  129. QueryParameters query_params = 2;
  130. // Required. The input specification.
  131. QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
  132. // Instructs the speech synthesizer how to generate the output audio.
  133. OutputAudioConfig output_audio_config = 4;
  134. }
  135. // The message returned from the DetectIntent method.
  136. message DetectIntentResponse {
  137. // Represents different DetectIntentResponse types.
  138. enum ResponseType {
  139. // Not specified. This should never happen.
  140. RESPONSE_TYPE_UNSPECIFIED = 0;
  141. // Partial response. e.g. Aggregated responses in a Fulfillment that enables
  142. // `return_partial_response` can be returned as partial response.
  143. // WARNING: partial response is not eligible for barge-in.
  144. PARTIAL = 1;
  145. // Final response.
  146. FINAL = 2;
  147. }
  148. // Output only. The unique identifier of the response. It can be used to
  149. // locate a response in the training example set or for reporting issues.
  150. string response_id = 1;
  151. // The result of the conversational query.
  152. QueryResult query_result = 2;
  153. // The audio data bytes encoded as specified in the request.
  154. // Note: The output audio is generated based on the values of default platform
  155. // text responses found in the
  156. // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
  157. // multiple default text responses exist, they will be concatenated when
  158. // generating audio. If no default platform text responses exist, the
  159. // generated audio content will be empty.
  160. //
  161. // In some scenarios, multiple output audio fields may be present in the
  162. // response structure. In these cases, only the top-most-level audio output
  163. // has content.
  164. bytes output_audio = 4;
  165. // The config used by the speech synthesizer to generate the output audio.
  166. OutputAudioConfig output_audio_config = 5;
  167. // Response type.
  168. ResponseType response_type = 6;
  169. // Indicates whether the partial response can be cancelled when a later
  170. // response arrives. e.g. if the agent specified some music as partial
  171. // response, it can be cancelled.
  172. bool allow_cancellation = 7;
  173. }
  174. // The top-level message sent by the client to the
  175. // [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent] method.
  176. //
  177. // Multiple request messages should be sent in order:
  178. //
  179. // 1. The first message must contain
  180. // [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
  181. // [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] plus optionally
  182. // [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params]. If the client
  183. // wants to receive an audio response, it should also contain
  184. // [output_audio_config][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.output_audio_config].
  185. //
  186. // 2. If [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] was set to
  187. // [query_input.audio.config][google.cloud.dialogflow.cx.v3.AudioInput.config], all subsequent messages
  188. // must contain [query_input.audio.audio][google.cloud.dialogflow.cx.v3.AudioInput.audio] to continue with
  189. // Speech recognition.
  190. // If you decide to rather detect an intent from text
  191. // input after you already started Speech recognition, please send a message
  192. // with [query_input.text][google.cloud.dialogflow.cx.v3.QueryInput.text].
  193. //
  194. // However, note that:
  195. //
  196. // * Dialogflow will bill you for the audio duration so far.
  197. // * Dialogflow discards all Speech recognition results in favor of the
  198. // input text.
  199. // * Dialogflow will use the language code from the first message.
  200. //
  201. // After you sent all input, you must half-close or abort the request stream.
  202. message StreamingDetectIntentRequest {
  203. // The name of the session this query is sent to.
  204. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  205. // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  206. // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  207. // If `Environment ID` is not specified, we assume default 'draft'
  208. // environment.
  209. // It's up to the API caller to choose an appropriate `Session ID`. It can be
  210. // a random number or some type of session identifiers (preferably hashed).
  211. // The length of the `Session ID` must not exceed 36 characters.
  212. // Note: session must be set in the first request.
  213. //
  214. // For more information, see the [sessions
  215. // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  216. //
  217. // Note: Always use agent versions for production traffic.
  218. // See [Versions and
  219. // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  220. string session = 1 [(google.api.resource_reference) = {
  221. type: "dialogflow.googleapis.com/Session"
  222. }];
  223. // The parameters of this query.
  224. QueryParameters query_params = 2;
  225. // Required. The input specification.
  226. QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
  227. // Instructs the speech synthesizer how to generate the output audio.
  228. OutputAudioConfig output_audio_config = 4;
  229. // Enable partial detect intent response. If this flag is not enabled,
  230. // response stream still contains only one final `DetectIntentResponse` even
  231. // if some `Fulfillment`s in the agent have been configured to return partial
  232. // responses.
  233. bool enable_partial_response = 5;
  234. }
  235. // The top-level message returned from the `StreamingDetectIntent` method.
  236. //
  237. // Multiple response messages can be returned in order:
  238. //
  239. // 1. If the input was set to streaming audio, the first one or more messages
  240. // contain `recognition_result`. Each `recognition_result` represents a more
  241. // complete transcript of what the user said. The last `recognition_result`
  242. // has `is_final` set to `true`.
  243. //
  244. // 2. If `enable_partial_response` is true, the following N messages
  245. // (currently 1 <= N <= 4) contain `detect_intent_response`. The first (N-1)
  246. // `detect_intent_response`s will have `response_type` set to `PARTIAL`.
  247. // The last `detect_intent_response` has `response_type` set to `FINAL`.
  248. // If `response_type` is false, response stream only contains
  249. // the final `detect_intent_response`.
  250. message StreamingDetectIntentResponse {
  251. // The output response.
  252. oneof response {
  253. // The result of speech recognition.
  254. StreamingRecognitionResult recognition_result = 1;
  255. // The response from detect intent.
  256. DetectIntentResponse detect_intent_response = 2;
  257. }
  258. }
  259. // Contains a speech recognition result corresponding to a portion of the audio
  260. // that is currently being processed or an indication that this is the end
  261. // of the single requested utterance.
  262. //
  263. // Example:
  264. //
  265. // 1. transcript: "tube"
  266. //
  267. // 2. transcript: "to be a"
  268. //
  269. // 3. transcript: "to be"
  270. //
  271. // 4. transcript: "to be or not to be"
  272. // is_final: true
  273. //
  274. // 5. transcript: " that's"
  275. //
  276. // 6. transcript: " that is"
  277. //
  278. // 7. message_type: `END_OF_SINGLE_UTTERANCE`
  279. //
  280. // 8. transcript: " that is the question"
  281. // is_final: true
  282. //
  283. // Only two of the responses contain final results (#4 and #8 indicated by
  284. // `is_final: true`). Concatenating these generates the full transcript: "to be
  285. // or not to be that is the question".
  286. //
  287. // In each response we populate:
  288. //
  289. // * for `TRANSCRIPT`: `transcript` and possibly `is_final`.
  290. //
  291. // * for `END_OF_SINGLE_UTTERANCE`: only `message_type`.
  292. message StreamingRecognitionResult {
  293. // Type of the response message.
  294. enum MessageType {
  295. // Not specified. Should never be used.
  296. MESSAGE_TYPE_UNSPECIFIED = 0;
  297. // Message contains a (possibly partial) transcript.
  298. TRANSCRIPT = 1;
  299. // Event indicates that the server has detected the end of the user's speech
  300. // utterance and expects no additional speech. Therefore, the server will
  301. // not process additional audio (although it may subsequently return
  302. // additional results). The client should stop sending additional audio
  303. // data, half-close the gRPC connection, and wait for any additional results
  304. // until the server closes the gRPC connection. This message is only sent if
  305. // [`single_utterance`][google.cloud.dialogflow.cx.v3.InputAudioConfig.single_utterance] was set to
  306. // `true`, and is not used otherwise.
  307. END_OF_SINGLE_UTTERANCE = 2;
  308. }
  309. // Type of the result message.
  310. MessageType message_type = 1;
  311. // Transcript text representing the words that the user spoke.
  312. // Populated if and only if `message_type` = `TRANSCRIPT`.
  313. string transcript = 2;
  314. // If `false`, the `StreamingRecognitionResult` represents an
  315. // interim result that may change. If `true`, the recognizer will not return
  316. // any further hypotheses about this piece of the audio. May only be populated
  317. // for `message_type` = `TRANSCRIPT`.
  318. bool is_final = 3;
  319. // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
  320. // A higher number indicates an estimated greater likelihood that the
  321. // recognized words are correct. The default of 0.0 is a sentinel value
  322. // indicating that confidence was not set.
  323. //
  324. // This field is typically only provided if `is_final` is true and you should
  325. // not rely on it being accurate or even set.
  326. float confidence = 4;
  327. // An estimate of the likelihood that the speech recognizer will
  328. // not change its guess about this interim recognition result:
  329. // * If the value is unspecified or 0.0, Dialogflow didn't compute the
  330. // stability. In particular, Dialogflow will only provide stability for
  331. // `TRANSCRIPT` results with `is_final = false`.
  332. // * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
  333. // unstable and 1.0 means completely stable.
  334. float stability = 6;
  335. // Word-specific information for the words recognized by Speech in
  336. // [transcript][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
  337. // [InputAudioConfig.enable_word_info] is set.
  338. repeated SpeechWordInfo speech_word_info = 7;
  339. // Time offset of the end of this Speech recognition result relative to the
  340. // beginning of the audio. Only populated for `message_type` =
  341. // `TRANSCRIPT`.
  342. google.protobuf.Duration speech_end_offset = 8;
  343. // Detected language code for the transcript.
  344. string language_code = 10;
  345. }
  346. // Represents the parameters of a conversational query.
  347. message QueryParameters {
  348. // The time zone of this conversational query from the [time zone
  349. // database](https://www.iana.org/time-zones), e.g., America/New_York,
  350. // Europe/Paris. If not provided, the time zone specified in the agent is
  351. // used.
  352. string time_zone = 1;
  353. // The geo location of this conversational query.
  354. google.type.LatLng geo_location = 2;
  355. // Additional session entity types to replace or extend developer entity types
  356. // with. The entity synonyms apply to all languages and persist for the
  357. // session of this query.
  358. repeated SessionEntityType session_entity_types = 3;
  359. // This field can be used to pass custom data into the webhook associated with
  360. // the agent. Arbitrary JSON objects are supported.
  361. // Some integrations that query a Dialogflow agent may provide additional
  362. // information in the payload.
  363. // In particular, for the Dialogflow Phone Gateway integration, this field has
  364. // the form:
  365. // ```
  366. // {
  367. // "telephony": {
  368. // "caller_id": "+18558363987"
  369. // }
  370. // }
  371. // ```
  372. google.protobuf.Struct payload = 4;
  373. // Additional parameters to be put into [session
  374. // parameters][SessionInfo.parameters]. To remove a
  375. // parameter from the session, clients should explicitly set the parameter
  376. // value to null.
  377. //
  378. // You can reference the session parameters in the agent with the following
  379. // format: $session.params.parameter-id.
  380. //
  381. // Depending on your protocol or client library language, this is a
  382. // map, associative array, symbol table, dictionary, or JSON object
  383. // composed of a collection of (MapKey, MapValue) pairs:
  384. //
  385. // - MapKey type: string
  386. // - MapKey value: parameter name
  387. // - MapValue type:
  388. // - If parameter's entity type is a composite entity: map
  389. // - Else: depending on parameter value type, could be one of string,
  390. // number, boolean, null, list or map
  391. // - MapValue value:
  392. // - If parameter's entity type is a composite entity:
  393. // map from composite entity property names to property values
  394. // - Else: parameter value
  395. google.protobuf.Struct parameters = 5;
  396. // The unique identifier of the [page][google.cloud.dialogflow.cx.v3.Page] to override the [current
  397. // page][QueryResult.current_page] in the session.
  398. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  399. // ID>/flows/<Flow ID>/pages/<Page ID>`.
  400. //
  401. // If `current_page` is specified, the previous state of the session will be
  402. // ignored by Dialogflow, including the [previous
  403. // page][QueryResult.current_page] and the [previous session
  404. // parameters][QueryResult.parameters].
  405. // In most cases, [current_page][google.cloud.dialogflow.cx.v3.QueryParameters.current_page] and
  406. // [parameters][google.cloud.dialogflow.cx.v3.QueryParameters.parameters] should be configured together to
  407. // direct a session to a specific state.
  408. string current_page = 6 [(google.api.resource_reference) = {
  409. type: "dialogflow.googleapis.com/Page"
  410. }];
  411. // Whether to disable webhook calls for this request.
  412. bool disable_webhook = 7;
  413. // Configures whether sentiment analysis should be performed. If not
  414. // provided, sentiment analysis is not performed.
  415. bool analyze_query_text_sentiment = 8;
  416. // This field can be used to pass HTTP headers for a webhook
  417. // call. These headers will be sent to webhook along with the headers that
  418. // have been configured through Dialogflow web console. The headers defined
  419. // within this field will overwrite the headers configured through Dialogflow
  420. // console if there is a conflict. Header names are case-insensitive.
  421. // Google's specified headers are not allowed. Including: "Host",
  422. // "Content-Length", "Connection", "From", "User-Agent", "Accept-Encoding",
  423. // "If-Modified-Since", "If-None-Match", "X-Forwarded-For", etc.
  424. map<string, string> webhook_headers = 10;
  425. // A list of flow versions to override for the request.
  426. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  427. // ID>/flows/<Flow ID>/versions/<Version ID>`.
  428. //
  429. // If version 1 of flow X is included in this list, the traffic of
  430. // flow X will go through version 1 regardless of the version configuration in
  431. // the environment. Each flow can have at most one version specified in this
  432. // list.
  433. repeated string flow_versions = 14 [(google.api.resource_reference) = {
  434. type: "dialogflow.googleapis.com/Version"
  435. }];
  436. }
  437. // Represents the query input. It can contain one of:
  438. //
  439. // 1. A conversational query in the form of text.
  440. //
  441. // 2. An intent query that specifies which intent to trigger.
  442. //
  443. // 3. Natural language speech audio to be processed.
  444. //
  445. // 4. An event to be triggered.
  446. //
  447. message QueryInput {
  448. // Required. The input specification.
  449. oneof input {
  450. // The natural language text to be processed.
  451. TextInput text = 2;
  452. // The intent to be triggered.
  453. IntentInput intent = 3;
  454. // The natural language speech audio to be processed.
  455. AudioInput audio = 5;
  456. // The event to be triggered.
  457. EventInput event = 6;
  458. // The DTMF event to be handled.
  459. DtmfInput dtmf = 7;
  460. }
  461. // Required. The language of the input. See [Language
  462. // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  463. // for a list of the currently supported language codes. Note that queries in
  464. // the same session do not necessarily need to specify the same language.
  465. string language_code = 4 [(google.api.field_behavior) = REQUIRED];
  466. }
  467. // Represents the result of a conversational query.
  468. message QueryResult {
  469. // The original conversational query.
  470. oneof query {
  471. // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
  472. // will contain a copy of the text.
  473. string text = 1;
  474. // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
  475. // contain a copy of the intent identifier.
  476. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  477. // ID>/intents/<Intent ID>`.
  478. string trigger_intent = 11 [(google.api.resource_reference) = {
  479. type: "dialogflow.googleapis.com/Intent"
  480. }];
  481. // If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
  482. // this field will contain the transcript for the audio.
  483. string transcript = 12;
  484. // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will contain
  485. // the name of the event.
  486. string trigger_event = 14;
  487. }
  488. // The language that was triggered during intent detection.
  489. // See [Language
  490. // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  491. // for a list of the currently supported language codes.
  492. string language_code = 2;
  493. // The collected [session parameters][google.cloud.dialogflow.cx.v3.SessionInfo.parameters].
  494. //
  495. // Depending on your protocol or client library language, this is a
  496. // map, associative array, symbol table, dictionary, or JSON object
  497. // composed of a collection of (MapKey, MapValue) pairs:
  498. //
  499. // - MapKey type: string
  500. // - MapKey value: parameter name
  501. // - MapValue type:
  502. // - If parameter's entity type is a composite entity: map
  503. // - Else: depending on parameter value type, could be one of string,
  504. // number, boolean, null, list or map
  505. // - MapValue value:
  506. // - If parameter's entity type is a composite entity:
  507. // map from composite entity property names to property values
  508. // - Else: parameter value
  509. google.protobuf.Struct parameters = 3;
  510. // The list of rich messages returned to the client. Responses vary from
  511. // simple text messages to more sophisticated, structured payloads used
  512. // to drive complex logic.
  513. repeated ResponseMessage response_messages = 4;
  514. // The list of webhook call status in the order of call sequence.
  515. repeated google.rpc.Status webhook_statuses = 13;
  516. // The list of webhook payload in [WebhookResponse.payload][google.cloud.dialogflow.cx.v3.WebhookResponse.payload], in
  517. // the order of call sequence. If some webhook call fails or doesn't return
  518. // any payload, an empty `Struct` would be used instead.
  519. repeated google.protobuf.Struct webhook_payloads = 6;
  520. // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, not all fields are filled in this message,
  521. // including but not limited to `name` and `display_name`.
  522. Page current_page = 7;
  523. // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the conversational query. Some, not all fields
  524. // are filled in this message, including but not limited to: `name` and
  525. // `display_name`.
  526. // This field is deprecated, please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
  527. Intent intent = 8 [deprecated = true];
  528. // The intent detection confidence. Values range from 0.0 (completely
  529. // uncertain) to 1.0 (completely certain).
  530. // This value is for informational purpose only and is only used to
  531. // help match the best intent within the classification threshold.
  532. // This value may change for the same end-user expression at any time due to a
  533. // model retraining or change in implementation.
  534. // This field is deprecated, please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
  535. float intent_detection_confidence = 9 [deprecated = true];
  536. // Intent match result, could be an intent or an event.
  537. Match match = 15;
  538. // The free-form diagnostic info. For example, this field could contain
  539. // webhook call latency. The string keys of the Struct's fields map can change
  540. // without notice.
  541. google.protobuf.Struct diagnostic_info = 10;
  542. // The sentiment analyss result, which depends on
  543. // [`analyze_query_text_sentiment`]
  544. // [google.cloud.dialogflow.cx.v3.QueryParameters.analyze_query_text_sentiment], specified in the request.
  545. SentimentAnalysisResult sentiment_analysis_result = 17;
  546. }
  547. // Represents the natural language text to be processed.
  548. message TextInput {
  549. // Required. The UTF-8 encoded natural language text to be processed. Text length must
  550. // not exceed 256 characters.
  551. string text = 1 [(google.api.field_behavior) = REQUIRED];
  552. }
  553. // Represents the intent to trigger programmatically rather than as a result of
  554. // natural language processing.
  555. message IntentInput {
  556. // Required. The unique identifier of the intent.
  557. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  558. // ID>/intents/<Intent ID>`.
  559. string intent = 1 [
  560. (google.api.field_behavior) = REQUIRED,
  561. (google.api.resource_reference) = {
  562. type: "dialogflow.googleapis.com/Intent"
  563. }
  564. ];
  565. }
  566. // Represents the natural speech audio to be processed.
  567. message AudioInput {
  568. // Required. Instructs the speech recognizer how to process the speech audio.
  569. InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];
  570. // The natural language speech audio to be processed.
  571. // A single request can contain up to 1 minute of speech audio data.
  572. // The [transcribed text][google.cloud.dialogflow.cx.v3.QueryResult.transcript] cannot contain more than 256
  573. // bytes.
  574. //
  575. // For non-streaming audio detect intent, both `config` and `audio` must be
  576. // provided.
  577. // For streaming audio detect intent, `config` must be provided in
  578. // the first request and `audio` must be provided in all following requests.
  579. bytes audio = 2;
  580. }
  581. // Represents the event to trigger.
  582. message EventInput {
  583. // Name of the event.
  584. string event = 1;
  585. }
  586. // Represents the input for dtmf event.
  587. message DtmfInput {
  588. // The dtmf digits.
  589. string digits = 1;
  590. // The finish digit (if any).
  591. string finish_digit = 2;
  592. }
  593. // Represents one match result of [MatchIntent][].
  594. message Match {
  595. // Type of a Match.
  596. enum MatchType {
  597. // Not specified. Should never be used.
  598. MATCH_TYPE_UNSPECIFIED = 0;
  599. // The query was matched to an intent.
  600. INTENT = 1;
  601. // The query directly triggered an intent.
  602. DIRECT_INTENT = 2;
  603. // The query was used for parameter filling.
  604. PARAMETER_FILLING = 3;
  605. // No match was found for the query.
  606. NO_MATCH = 4;
  607. // Indicates an empty query.
  608. NO_INPUT = 5;
  609. // The query directly triggered an event.
  610. EVENT = 6;
  611. }
  612. // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the query. Some, not all fields are filled in
  613. // this message, including but not limited to: `name` and `display_name`. Only
  614. // filled for [`INTENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
  615. Intent intent = 1;
  616. // The event that matched the query. Only filled for
  617. // [`EVENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
  618. string event = 6;
  619. // The collection of parameters extracted from the query.
  620. //
  621. // Depending on your protocol or client library language, this is a
  622. // map, associative array, symbol table, dictionary, or JSON object
  623. // composed of a collection of (MapKey, MapValue) pairs:
  624. //
  625. // - MapKey type: string
  626. // - MapKey value: parameter name
  627. // - MapValue type:
  628. // - If parameter's entity type is a composite entity: map
  629. // - Else: depending on parameter value type, could be one of string,
  630. // number, boolean, null, list or map
  631. // - MapValue value:
  632. // - If parameter's entity type is a composite entity:
  633. // map from composite entity property names to property values
  634. // - Else: parameter value
  635. google.protobuf.Struct parameters = 2;
  636. // Final text input which was matched during MatchIntent. This value can be
  637. // different from original input sent in request because of spelling
  638. // correction or other processing.
  639. string resolved_input = 3;
  640. // Type of this [Match][google.cloud.dialogflow.cx.v3.Match].
  641. MatchType match_type = 4;
  642. // The confidence of this match. Values range from 0.0 (completely uncertain)
  643. // to 1.0 (completely certain).
  644. // This value is for informational purpose only and is only used to help match
  645. // the best intent within the classification threshold. This value may change
  646. // for the same end-user expression at any time due to a model retraining or
  647. // change in implementation.
  648. float confidence = 5;
  649. }
  650. // Request of [MatchIntent][].
  651. message MatchIntentRequest {
  652. // Required. The name of the session this query is sent to.
  653. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  654. // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
  655. // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
  656. // If `Environment ID` is not specified, we assume default 'draft'
  657. // environment.
  658. // It's up to the API caller to choose an appropriate `Session ID`. It can be
  659. // a random number or some type of session identifiers (preferably hashed).
  660. // The length of the `Session ID` must not exceed 36 characters.
  661. //
  662. // For more information, see the [sessions
  663. // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  664. string session = 1 [
  665. (google.api.field_behavior) = REQUIRED,
  666. (google.api.resource_reference) = {
  667. type: "dialogflow.googleapis.com/Session"
  668. }
  669. ];
  670. // The parameters of this query.
  671. QueryParameters query_params = 2;
  672. // Required. The input specification.
  673. QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
  674. }
  675. // Response of [MatchIntent][].
  676. message MatchIntentResponse {
  677. // The original conversational query.
  678. oneof query {
  679. // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
  680. // will contain a copy of the text.
  681. string text = 1;
  682. // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
  683. // contain a copy of the intent identifier.
  684. // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  685. // ID>/intents/<Intent ID>`.
  686. string trigger_intent = 2 [(google.api.resource_reference) = {
  687. type: "dialogflow.googleapis.com/Intent"
  688. }];
  689. // If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
  690. // this field will contain the transcript for the audio.
  691. string transcript = 3;
  692. // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will
  693. // contain a copy of the event name.
  694. string trigger_event = 6;
  695. }
  696. // Match results, if more than one, ordered descendingly by the confidence
  697. // we have that the particular intent matches the query.
  698. repeated Match matches = 4;
  699. // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some, not all fields are filled in this message,
  700. // including but not limited to `name` and `display_name`.
  701. Page current_page = 5;
  702. }
  703. // Request of [FulfillIntent][]
  704. message FulfillIntentRequest {
  705. // Must be same as the corresponding MatchIntent request, otherwise the
  706. // behavior is undefined.
  707. MatchIntentRequest match_intent_request = 1;
  708. // The matched intent/event to fulfill.
  709. Match match = 2;
  710. // Instructs the speech synthesizer how to generate output audio.
  711. OutputAudioConfig output_audio_config = 3;
  712. }
  713. // Response of [FulfillIntent][]
  714. message FulfillIntentResponse {
  715. // Output only. The unique identifier of the response. It can be used to
  716. // locate a response in the training example set or for reporting issues.
  717. string response_id = 1;
  718. // The result of the conversational query.
  719. QueryResult query_result = 2;
  720. // The audio data bytes encoded as specified in the request.
  721. // Note: The output audio is generated based on the values of default platform
  722. // text responses found in the
  723. // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
  724. // multiple default text responses exist, they will be concatenated when
  725. // generating audio. If no default platform text responses exist, the
  726. // generated audio content will be empty.
  727. //
  728. // In some scenarios, multiple output audio fields may be present in the
  729. // response structure. In these cases, only the top-most-level audio output
  730. // has content.
  731. bytes output_audio = 3;
  732. // The config used by the speech synthesizer to generate the output audio.
  733. OutputAudioConfig output_audio_config = 4;
  734. }
  735. // The result of sentiment analysis. Sentiment analysis inspects user input
  736. // and identifies the prevailing subjective opinion, especially to determine a
  737. // user's attitude as positive, negative, or neutral.
  738. message SentimentAnalysisResult {
  739. // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
  740. // sentiment).
  741. float score = 1;
  742. // A non-negative number in the [0, +inf) range, which represents the absolute
  743. // magnitude of sentiment, regardless of score (positive or negative).
  744. float magnitude = 2;
  745. }