// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.v2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2/audio_config.proto";
import "google/cloud/dialogflow/v2/context.proto";
import "google/cloud/dialogflow/v2/intent.proto";
import "google/cloud/dialogflow/v2/session_entity_type.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Session"
  pattern: "projects/{project}/agent/sessions/{session}"
  pattern: "projects/{project}/agent/environments/{environment}/users/{user}/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agent/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agent/environments/{environment}/users/{user}/sessions/{session}"
};

// A service used for session interactions.
//
// For more information, see the [API interactions
// guide](https://cloud.google.com/dialogflow/docs/api-overview).
service Sessions {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Processes a natural language query and returns structured, actionable data
  // as a result. This method is not idempotent, because it may cause contexts
  // and session entity types to be updated, which in turn might affect
  // results of future queries.
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v2/{session=projects/*/agent/sessions/*}:detectIntent"
      body: "*"
      additional_bindings {
        post: "/v2/{session=projects/*/agent/environments/*/users/*/sessions/*}:detectIntent"
        body: "*"
      }
      additional_bindings {
        post: "/v2/{session=projects/*/locations/*/agent/sessions/*}:detectIntent"
        body: "*"
      }
      additional_bindings {
        post: "/v2/{session=projects/*/locations/*/agent/environments/*/users/*/sessions/*}:detectIntent"
        body: "*"
      }
    };
    option (google.api.method_signature) = "session,query_input";
  }

  // Processes a natural language query in audio format in a streaming fashion
  // and returns structured, actionable data as a result. This method is only
  // available via the gRPC API (not REST).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
      returns (stream StreamingDetectIntentResponse) {}
}

// The request to detect the user's intent.
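//
// As an illustrative sketch (the query text and language code are assumed
// values, not defaults from this file), a minimal `detectIntent` REST request
// body could look like:
//
//     {
//       "queryInput": {
//         "text": {
//           "text": "book a room",
//           "languageCode": "en-US"
//         }
//       }
//     }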
message DetectIntentRequest {
  // Required. The name of the session this query is sent to. Format:
  // `projects/<Project ID>/agent/sessions/<Session ID>`, or
  // `projects/<Project ID>/agent/environments/<Environment ID>/users/<User
  // ID>/sessions/<Session ID>`. If `Environment ID` is not specified, we assume
  // the default 'draft' environment (`Environment ID` might be referred to as
  // the environment name in some places). If `User ID` is not specified, we are
  // using "-". It's up to the API caller to choose an appropriate `Session ID`
  // and `User ID`. They can be a random number or some type of user and session
  // identifiers (preferably hashed). The length of the `Session ID` and
  // `User ID` must not exceed 36 characters.
  //
  // For more information, see the [API interactions
  // guide](https://cloud.google.com/dialogflow/docs/api-overview).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification. It can be set to:
  //
  // 1. an audio config
  //    which instructs the speech recognizer how to process the speech audio,
  //
  // 2. a conversational query in the form of text, or
  //
  // 3. an event that specifies which intent to trigger.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output
  // audio. If this field is not set and agent-level speech synthesizer is not
  // configured, no output audio is generated.
  OutputAudioConfig output_audio_config = 4;

  // Mask for [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] indicating which settings in this
  // request-level config should override speech synthesizer settings defined at
  // agent-level.
  //
  // If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] replaces the agent-level
  // config in its entirety.
  google.protobuf.FieldMask output_audio_config_mask = 7;

  // The natural language speech audio to be processed. This field
  // should be populated if and only if `query_input` is set to an input audio
  // config. A single request can contain up to 1 minute of speech audio data.
  bytes input_audio = 5;
}

// The message returned from the DetectIntent method.
message DetectIntentResponse {
  // The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The selected results of the conversational query or event processing.
  // See `alternative_query_results` for additional potential results.
  QueryResult query_result = 2;

  // Specifies the status of the webhook request.
  google.rpc.Status webhook_status = 3;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the `query_result.fulfillment_messages` field. If
  // multiple default text responses exist, they will be concatenated when
  // generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 4;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 6;
}

// Represents the parameters of the conversational query.
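//
// As an illustrative sketch (the values below are assumptions, not defaults
// from this file), a REST `queryParams` object that overrides the time zone
// and passes a custom webhook payload could look like:
//
//     {
//       "timeZone": "America/New_York",
//       "payload": { "source": "my-integration" }
//     }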
message QueryParameters {
  // The time zone of this conversational query from the
  // [time zone database](https://www.iana.org/time-zones), e.g.,
  // America/New_York, Europe/Paris. If not provided, the time zone specified in
  // agent settings is used.
  string time_zone = 1;

  // The geo location of this conversational query.
  google.type.LatLng geo_location = 2;

  // The collection of contexts to be activated before this query is
  // executed.
  repeated Context contexts = 3;

  // Specifies whether to delete all contexts in the current session
  // before the new ones are activated.
  bool reset_contexts = 4;

  // Additional session entity types to replace or extend developer
  // entity types with. The entity synonyms apply to all languages and persist
  // for the session of this query.
  repeated SessionEntityType session_entity_types = 5;

  // This field can be used to pass custom data to your webhook.
  // Arbitrary JSON objects are supported.
  // If supplied, the value is used to populate the
  // `WebhookRequest.original_detect_intent_request.payload`
  // field sent to your webhook.
  google.protobuf.Struct payload = 6;

  // Configures the type of sentiment analysis to perform. If not
  // provided, sentiment analysis is not performed.
  SentimentAnalysisRequestConfig sentiment_analysis_request_config = 10;

  // This field can be used to pass HTTP headers for a webhook
  // call. These headers will be sent to the webhook along with the headers that
  // have been configured through the Dialogflow web console. The headers
  // defined within this field will overwrite the headers configured through the
  // Dialogflow console if there is a conflict. Header names are
  // case-insensitive. Google's specified headers are not allowed, including
  // "Host", "Content-Length", "Connection", "From", "User-Agent",
  // "Accept-Encoding", "If-Modified-Since", "If-None-Match", "X-Forwarded-For",
  // etc.
  map<string, string> webhook_headers = 14;
}

// Represents the query input. It can contain either:
//
// 1. An audio config which
//    instructs the speech recognizer how to process the speech audio.
//
// 2. A conversational query in the form of text.
//
// 3. An event that specifies which intent to trigger.
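//
// For example (an illustrative sketch; the event name is an assumed value,
// not one defined by this file), an event query input in JSON form could be:
//
//     { "event": { "name": "welcome_event", "languageCode": "en-US" } }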
message QueryInput {
  // Required. The input specification.
  oneof input {
    // Instructs the speech recognizer how to process the speech audio.
    InputAudioConfig audio_config = 1;

    // The natural language text to be processed.
    TextInput text = 2;

    // The event to be processed.
    EventInput event = 3;
  }
}

// Represents the result of conversational query or event processing.
message QueryResult {
  // The original conversational query text:
  //
  // - If natural language text was provided as input, `query_text` contains
  //   a copy of the input.
  // - If natural language speech audio was provided as input, `query_text`
  //   contains the speech recognition result. If the speech recognizer produced
  //   multiple alternatives, a particular one is picked.
  // - If automatic spell correction is enabled, `query_text` will contain the
  //   corrected user input.
  string query_text = 1;

  // The language that was triggered during intent detection.
  // See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 15;

  // The Speech recognition confidence between 0.0 and 1.0. A higher number
  // indicates an estimated greater likelihood that the recognized words are
  // correct. The default of 0.0 is a sentinel value indicating that confidence
  // was not set.
  //
  // This field is not guaranteed to be accurate or set. In particular this
  // field isn't set for StreamingDetectIntent since the streaming endpoint has
  // separate confidence estimates per portion of the audio in
  // StreamingRecognitionResult.
  float speech_recognition_confidence = 2;

  // The action name from the matched intent.
  string action = 3;

  // The collection of extracted parameters.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // - MapKey type: string
  // - MapKey value: parameter name
  // - MapValue type:
  //   - If parameter's entity type is a composite entity: map
  //   - Else: depending on parameter value type, could be one of string,
  //     number, boolean, null, list or map
  // - MapValue value:
  //   - If parameter's entity type is a composite entity:
  //     map from composite entity property names to property values
  //   - Else: parameter value
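  //
  // For example (an illustrative sketch; the parameter and property names are
  // assumptions, not defined by this file), a simple parameter and a composite
  // one could serialize to JSON as:
  //
  //     {
  //       "size": "large",
  //       "delivery-address": { "street": "1600 Amphitheatre Pkwy" }
  //     }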
  google.protobuf.Struct parameters = 4;

  // This field is set to:
  //
  // - `false` if the matched intent has required parameters and not all of
  //   the required parameter values have been collected.
  // - `true` if all required parameter values have been collected, or if the
  //   matched intent doesn't contain any required parameters.
  bool all_required_params_present = 5;

  // Indicates whether the conversational query triggers a cancellation for slot
  // filling.
  bool cancels_slot_filling = 21;

  // The text to be pronounced to the user or shown on the screen.
  // Note: This is a legacy field; `fulfillment_messages` should be preferred.
  string fulfillment_text = 6;

  // The collection of rich messages to present to the user.
  repeated Intent.Message fulfillment_messages = 7;

  // If the query was fulfilled by a webhook call, this field is set to the
  // value of the `source` field returned in the webhook response.
  string webhook_source = 8;

  // If the query was fulfilled by a webhook call, this field is set to the
  // value of the `payload` field returned in the webhook response.
  google.protobuf.Struct webhook_payload = 9;

  // The collection of output contexts. If applicable,
  // `output_contexts.parameters` contains entries with name
  // `<parameter name>.original` containing the original parameter values
  // before the query.
  repeated Context output_contexts = 10;

  // The intent that matched the conversational query. Some, but not
  // all, fields are filled in this message, including but not limited to:
  // `name`, `display_name`, `end_interaction` and `is_fallback`.
  Intent intent = 11;

  // The intent detection confidence. Values range from 0.0
  // (completely uncertain) to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to
  // help match the best intent within the classification threshold.
  // This value may change for the same end-user expression at any time due to a
  // model retraining or change in implementation.
  // If there are multiple `knowledge_answers` messages, this value is set to
  // the greatest `knowledgeAnswers.match_confidence` value in the list.
  float intent_detection_confidence = 12;

  // Free-form diagnostic information for the associated detect intent request.
  // The fields of this data can change without notice, so you should not write
  // code that depends on its structure.
  // The data may contain:
  //
  // - webhook call latency
  // - webhook errors
  google.protobuf.Struct diagnostic_info = 14;

  // The sentiment analysis result, which depends on the
  // `sentiment_analysis_request_config` specified in the request.
  SentimentAnalysisResult sentiment_analysis_result = 17;
}

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent] method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
//    [session][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.session],
//    [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] plus optionally
//    [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params]. If the client
//    wants to receive an audio response, it should also contain
//    [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config].
//    The message must not contain
//    [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio].
// 2. If [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] was set to
//    [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig], all subsequent
//    messages must contain
//    [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio] to continue with
//    Speech recognition.
//    If you decide to detect an intent from text input instead, after you have
//    already started Speech recognition, please send a message with
//    [query_input.text][google.cloud.dialogflow.v2.QueryInput.text].
//
//    However, note that:
//
//    * Dialogflow will bill you for the audio duration so far.
//    * Dialogflow discards all Speech recognition results in favor of the
//      input text.
//    * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request
// stream.
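//
// As an illustrative sketch (the encoding, sample rate, and language code
// below are assumed values, not defaults from this file), a streaming call
// might send, in proto text format:
//
//     # message 1: configuration only, no audio
//     session: "projects/<Project ID>/agent/sessions/<Session ID>"
//     query_input {
//       audio_config {
//         audio_encoding: AUDIO_ENCODING_LINEAR_16
//         sample_rate_hertz: 16000
//         language_code: "en-US"
//       }
//     }
//
//     # messages 2..N: audio chunks only
//     input_audio: "<raw audio bytes>"
//
// and then half-close the stream and read the responses.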
message StreamingDetectIntentRequest {
  // Required. The name of the session the query is sent to.
  // Format of the session name:
  // `projects/<Project ID>/agent/sessions/<Session ID>`, or
  // `projects/<Project ID>/agent/environments/<Environment ID>/users/<User
  // ID>/sessions/<Session ID>`. If `Environment ID` is not specified, we assume
  // the default 'draft' environment. If `User ID` is not specified, we are using
  // "-". It's up to the API caller to choose an appropriate `Session ID` and
  // `User ID`. They can be a random number or some type of user and session
  // identifiers (preferably hashed). The length of the `Session ID` and
  // `User ID` must not exceed 36 characters.
  //
  // For more information, see the [API interactions
  // guide](https://cloud.google.com/dialogflow/docs/api-overview).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification. It can be set to:
  //
  // 1. an audio config which instructs the speech recognizer how to process
  //    the speech audio,
  //
  // 2. a conversational query in the form of text, or
  //
  // 3. an event that specifies which intent to trigger.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Please use [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance] instead.
  // If `false` (default), recognition does not cease until
  // the client closes the stream. If `true`, the recognizer will detect a
  // single spoken utterance in input audio. Recognition ceases when it detects
  // the audio's voice has stopped or paused. In this case, once a detected
  // intent is received, the client should close the stream and start a new
  // request with a new stream as needed.
  // This setting is ignored when `query_input` is a piece of text or an event.
  bool single_utterance = 4 [deprecated = true];

  // Instructs the speech synthesizer how to generate the output
  // audio. If this field is not set and agent-level speech synthesizer is not
  // configured, no output audio is generated.
  OutputAudioConfig output_audio_config = 5;

  // Mask for [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] indicating which settings in this
  // request-level config should override speech synthesizer settings defined at
  // agent-level.
  //
  // If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] replaces the agent-level
  // config in its entirety.
  google.protobuf.FieldMask output_audio_config_mask = 7;

  // The input audio content to be recognized. Must be sent if
  // `query_input` was set to a streaming input audio config. The complete audio
  // over all streaming messages must not exceed 1 minute.
  bytes input_audio = 6;
}

// The top-level message returned from the
// `StreamingDetectIntent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
//    contain `recognition_result`. Each `recognition_result` represents a more
//    complete transcript of what the user said. The last `recognition_result`
//    has `is_final` set to `true`.
//
// 2. The next message contains `response_id`, `query_result`
//    and optionally `webhook_status` if a webhook was called.
message StreamingDetectIntentResponse {
  // The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of speech recognition.
  StreamingRecognitionResult recognition_result = 2;

  // The result of the conversational query or event processing.
  QueryResult query_result = 3;

  // Specifies the status of the webhook request.
  google.rpc.Status webhook_status = 4;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the `query_result.fulfillment_messages` field. If
  // multiple default text responses exist, they will be concatenated when
  // generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 5;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 6;
}

// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// Example:
//
// 1. transcript: "tube"
//
// 2. transcript: "to be a"
//
// 3. transcript: "to be"
//
// 4. transcript: "to be or not to be"
//    is_final: true
//
// 5. transcript: " that's"
//
// 6. transcript: " that is"
//
// 7. message_type: `END_OF_SINGLE_UTTERANCE`
//
// 8. transcript: " that is the question"
//    is_final: true
//
// Only two of the responses contain final results (#4 and #8, indicated by
// `is_final: true`). Concatenating these generates the full transcript: "to be
// or not to be that is the question".
//
// In each response we populate:
//
// * for `TRANSCRIPT`: `transcript` and possibly `is_final`.
//
// * for `END_OF_SINGLE_UTTERANCE`: only `message_type`.
message StreamingRecognitionResult {
  // Type of the response message.
  enum MessageType {
    // Not specified. Should never be used.
    MESSAGE_TYPE_UNSPECIFIED = 0;

    // Message contains a (possibly partial) transcript.
    TRANSCRIPT = 1;

    // Event indicates that the server has detected the end of the user's speech
    // utterance and expects no additional inputs.
    // Therefore, the server will not process additional audio (although it may
    // subsequently return additional results). The
    // client should stop sending additional audio data, half-close the gRPC
    // connection, and wait for any additional results until the server closes
    // the gRPC connection. This message is only sent if `single_utterance` was
    // set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 2;
  }

  // Type of the result message.
  MessageType message_type = 1;

  // Transcript text representing the words that the user spoke.
  // Populated if and only if `message_type` = `TRANSCRIPT`.
  string transcript = 2;

  // If `false`, the `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, the recognizer will not return
  // any further hypotheses about this piece of the audio. May only be populated
  // for `message_type` = `TRANSCRIPT`.
  bool is_final = 3;

  // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
  // A higher number indicates an estimated greater likelihood that the
  // recognized words are correct. The default of 0.0 is a sentinel value
  // indicating that confidence was not set.
  //
  // This field is typically only provided if `is_final` is true and you should
  // not rely on it being accurate or even set.
  float confidence = 4;

  // Word-specific information for the words recognized by Speech in
  // [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
  // [InputAudioConfig.enable_word_info] is set.
  repeated SpeechWordInfo speech_word_info = 7;

  // Time offset of the end of this Speech recognition result relative to the
  // beginning of the audio. Only populated for `message_type` = `TRANSCRIPT`.
  google.protobuf.Duration speech_end_offset = 8;
}

// Represents the natural language text to be processed.
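//
// For example (an illustrative sketch; the text is an assumed value), a
// `TextInput` in proto text format could look like:
//
//     text: "book a room for two"
//     language_code: "en-US"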
message TextInput {
  // Required. The UTF-8 encoded natural language text to be processed.
  // Text length must not exceed 256 characters.
  string text = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The language of this conversational query. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries in
  // the same session do not necessarily need to specify the same language.
  string language_code = 2 [(google.api.field_behavior) = REQUIRED];
}

// Events allow for matching intents by event name instead of by natural
// language input. For instance, the input `<event: { name: "welcome_event",
// parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
// The parameter `name` may be used by the agent in the response:
// `"Hello #welcome_event.name! What can I do for you today?"`.
message EventInput {
  // Required. The unique identifier of the event.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // The collection of parameters associated with the event.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // - MapKey type: string
  // - MapKey value: parameter name
  // - MapValue type:
  //   - If parameter's entity type is a composite entity: map
  //   - Else: depending on parameter value type, could be one of string,
  //     number, boolean, null, list or map
  // - MapValue value:
  //   - If parameter's entity type is a composite entity:
  //     map from composite entity property names to property values
  //   - Else: parameter value
  google.protobuf.Struct parameters = 2;

  // Required. The language of this query. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries in
  // the same session do not necessarily need to specify the same language.
  string language_code = 3 [(google.api.field_behavior) = REQUIRED];
}

// Configures the types of sentiment analysis to perform.
message SentimentAnalysisRequestConfig {
  // Instructs the service to perform sentiment analysis on
  // `query_text`. If not provided, sentiment analysis is not performed on
  // `query_text`.
  bool analyze_query_text_sentiment = 1;
}

// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
// For [Participants.DetectIntent][], it needs to be configured in
// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params]. For
// [Participants.StreamingDetectIntent][], it needs to be configured in
// [StreamingDetectIntentRequest.query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
// And for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent], it needs to be configured in
// [ConversationProfile.human_agent_assistant_config][google.cloud.dialogflow.v2.ConversationProfile.human_agent_assistant_config].
message SentimentAnalysisResult {
  // The sentiment analysis result for `query_text`.
  Sentiment query_text_sentiment = 1;
}

// The sentiment, such as positive/negative feeling or association, for a unit
// of analysis, such as the query text.
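//
// As an illustrative reading (the values and interpretations below are
// assumptions, not thresholds defined by this API):
//
//     score: 0.8   magnitude: 0.8   # clearly positive input
//     score: -0.6  magnitude: 0.6   # clearly negative input
//     score: 0.0   magnitude: 1.4   # mixed positive and negative signals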
message Sentiment {
  // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
  // sentiment).
  float score = 1;

  // A non-negative number in the [0, +inf) range, which represents the absolute
  // magnitude of sentiment, regardless of score (positive or negative).
  float magnitude = 2;
}