audio_config.proto

// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.v2;

import "google/api/annotations.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/duration.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow";
option java_multiple_files = true;
option java_outer_classname = "AudioConfigProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

// Audio encoding of the audio content sent in the conversational query
// request. Refer to the
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics) for more
// details.
enum AudioEncoding {
  // Not specified.
  AUDIO_ENCODING_UNSPECIFIED = 0;

  // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  AUDIO_ENCODING_LINEAR_16 = 1;

  // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
  // Codec) is the recommended encoding because it is lossless (therefore
  // recognition is not compromised) and requires only about half the
  // bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and
  // 24-bit samples; however, not all fields in `STREAMINFO` are supported.
  AUDIO_ENCODING_FLAC = 2;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  AUDIO_ENCODING_MULAW = 3;

  // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
  AUDIO_ENCODING_AMR = 4;

  // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_AMR_WB = 5;

  // Opus encoded audio frames in Ogg container
  // ([OggOpus](https://wiki.xiph.org/OggOpus)).
  // `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_OGG_OPUS = 6;

  // Although the use of lossy encodings is not recommended, if a very low
  // bitrate encoding is required, `OGG_OPUS` is highly preferred over
  // Speex encoding. The [Speex](https://speex.org/) encoding supported by
  // the Dialogflow API has a header byte in each block, as in MIME type
  // `audio/x-speex-with-header-byte`.
  // It is a variant of the RTP Speex encoding defined in
  // [RFC 5574](https://tools.ietf.org/html/rfc5574).
  // The stream is a sequence of blocks, one block per RTP packet. Each block
  // starts with a byte containing the length of the block, in bytes, followed
  // by one or more frames of Speex data, padded to an integral number of
  // bytes (octets) as specified in RFC 5574. In other words, each RTP header
  // is replaced with a single byte containing the block length. Only Speex
  // wideband is supported. `sample_rate_hertz` must be 16000.
  AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}
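
// As a quick orientation: a query sending AMR narrowband audio would pair the
// encoding with its mandatory 8000 Hz sample rate in the `InputAudioConfig`
// message defined below. A minimal sketch in proto text format (the language
// code is an illustrative assumption):
//
//   audio_encoding: AUDIO_ENCODING_AMR
//   sample_rate_hertz: 8000
//   language_code: "en-US"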

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
message SpeechContext {
  // Optional. A list of strings containing words and phrases that the speech
  // recognizer should recognize with higher likelihood.
  //
  // This list can be used to:
  //
  // * improve accuracy for words and phrases you expect the user to say,
  //   e.g. typical commands for your Dialogflow agent
  // * add additional words to the speech recognizer vocabulary
  // * ...
  //
  // See the [Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/quotas) for usage
  // limits.
  repeated string phrases = 1;

  // Optional. Boost for this context compared to other contexts:
  //
  // * If the boost is positive, Dialogflow will increase the probability that
  //   the phrases in this context are recognized over similar-sounding
  //   phrases.
  // * If the boost is unspecified or non-positive, Dialogflow will not apply
  //   any boost.
  //
  // Dialogflow recommends that you use boosts in the range (0, 20] and that
  // you find a value that fits your use case with binary search.
  float boost = 2;
}
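
// A minimal `SpeechContext` sketch in proto text format. The phrases and the
// boost value are illustrative; the boost simply follows the (0, 20] guidance
// above:
//
//   phrases: "check my balance"
//   phrases: "transfer funds"
//   boost: 10.0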

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
  // The word this info is for.
  string word = 3;

  // Time offset relative to the beginning of the audio that corresponds to
  // the start of the spoken word. This is an experimental feature and the
  // accuracy of the time offset can vary.
  google.protobuf.Duration start_offset = 1;

  // Time offset relative to the beginning of the audio that corresponds to
  // the end of the spoken word. This is an experimental feature and the
  // accuracy of the time offset can vary.
  google.protobuf.Duration end_offset = 2;

  // The Speech confidence between 0.0 and 1.0 for this word. A higher number
  // indicates an estimated greater likelihood that the recognized word is
  // correct. The default of 0.0 is a sentinel value indicating that
  // confidence was not set.
  //
  // This field is not guaranteed to be fully stable over time for the same
  // audio input. Users should also not rely on it to always be provided.
  float confidence = 4;
}
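
// For illustration, a single recognized word in a streaming result might be
// populated like this in proto text format (the word, offsets, and confidence
// are hypothetical):
//
//   word: "hello"
//   start_offset { seconds: 0 nanos: 400000000 }
//   end_offset { seconds: 0 nanos: 800000000 }
//   confidence: 0.93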

// Variant of the specified [Speech
// model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
// for which models have different variants. For example, the "phone_call"
// model has both a standard and an enhanced variant. When you use an enhanced
// model, you will generally receive higher quality results than for a
// standard model.
enum SpeechModelVariant {
  // No model variant specified. In this case Dialogflow defaults to
  // USE_BEST_AVAILABLE.
  SPEECH_MODEL_VARIANT_UNSPECIFIED = 0;

  // Use the best available variant of the [Speech
  // model][InputAudioConfig.model] that the caller is eligible for.
  //
  // Please see the [Dialogflow
  // docs](https://cloud.google.com/dialogflow/docs/data-logging) for
  // how to make your project eligible for enhanced models.
  USE_BEST_AVAILABLE = 1;

  // Use standard model variant even if an enhanced model is available. See
  // the [Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
  // for details about enhanced models.
  USE_STANDARD = 2;

  // Use an enhanced model variant:
  //
  // * If an enhanced variant does not exist for the given
  //   [model][google.cloud.dialogflow.v2.InputAudioConfig.model] and request
  //   language, Dialogflow falls back to the standard variant.
  //
  //   The [Cloud Speech
  //   documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
  //   describes which models have enhanced variants.
  //
  // * If the API caller isn't eligible for enhanced models, Dialogflow
  //   returns an error. Please see the [Dialogflow
  //   docs](https://cloud.google.com/dialogflow/docs/data-logging)
  //   for how to make your project eligible.
  USE_ENHANCED = 3;
}
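
// For example, explicitly requesting the enhanced "phone_call" model
// (eligibility permitting) combines the `model` and `model_variant` fields of
// the `InputAudioConfig` message below:
//
//   model: "phone_call"
//   model_variant: USE_ENHANCED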

// Instructs the speech recognizer how to process the audio content.
message InputAudioConfig {
  // Required. Audio encoding of the audio content to process.
  AudioEncoding audio_encoding = 1;

  // Required. Sample rate (in Hertz) of the audio content sent in the query.
  // Refer to
  // [Cloud Speech API
  // documentation](https://cloud.google.com/speech-to-text/docs/basics) for
  // more details.
  int32 sample_rate_hertz = 2;

  // Required. The language of the supplied audio. Dialogflow does not do
  // translations. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries
  // in the same session do not necessarily need to specify the same language.
  string language_code = 3;

  // If `true`, Dialogflow returns
  // [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
  // [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
  // with information about the recognized speech words, e.g. start and end
  // time offsets. If `false` or unspecified, Speech doesn't return any
  // word-level information.
  bool enable_word_info = 13;

  // A list of strings containing words and phrases that the speech
  // recognizer should recognize with higher likelihood.
  //
  // See [the Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
  // for more details.
  //
  // This field is deprecated. Please use `speech_contexts` instead. If you
  // specify both `phrase_hints` and `speech_contexts`, Dialogflow will treat
  // the `phrase_hints` as a single additional `SpeechContext`.
  repeated string phrase_hints = 4 [deprecated = true];

  // Context information to assist speech recognition.
  //
  // See [the Cloud Speech
  // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
  // for more details.
  repeated SpeechContext speech_contexts = 11;

  // Which Speech model to select for the given request. Select the
  // model best suited to your domain to get best results. If a model is not
  // explicitly specified, then a model is auto-selected based on the
  // parameters in the InputAudioConfig.
  // If an enhanced speech model is enabled for the agent and an enhanced
  // version of the specified model for the language does not exist, then the
  // speech is recognized using the standard version of the specified model.
  // Refer to
  // [Cloud Speech API
  // documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
  // for more details.
  string model = 7;

  // Which variant of the [Speech
  // model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
  SpeechModelVariant model_variant = 10;

  // If `false` (default), recognition does not cease until the
  // client closes the stream.
  // If `true`, the recognizer detects a single spoken utterance in the input
  // audio. Recognition ceases when it detects that the audio's voice has
  // stopped or paused. In this case, once a detected intent is received, the
  // client should close the stream and start a new request with a new stream
  // as needed.
  // Note: This setting is relevant only for streaming methods.
  // Note: When specified, InputAudioConfig.single_utterance takes precedence
  // over StreamingDetectIntentRequest.single_utterance.
  bool single_utterance = 8;

  // Only used in
  // [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
  // and
  // [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
  // If `false` and recognition doesn't return any result, a
  // `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow
  // agent.
  bool disable_no_speech_recognized_event = 14;
}
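
// A fuller `InputAudioConfig` sketch in proto text format. Only the first
// three fields are required; every value here is illustrative:
//
//   audio_encoding: AUDIO_ENCODING_LINEAR_16
//   sample_rate_hertz: 16000
//   language_code: "en-US"
//   enable_word_info: true
//   speech_contexts {
//     phrases: "weather forecast"
//     boost: 10.0
//   }
//   model_variant: USE_BEST_AVAILABLE
//   single_utterance: true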

// Description of which voice to use for speech synthesis.
message VoiceSelectionParams {
  // Optional. The name of the voice. If not set, the service will choose a
  // voice based on the other parameters such as language_code and
  // [ssml_gender][google.cloud.dialogflow.v2.VoiceSelectionParams.ssml_gender].
  string name = 1;

  // Optional. The preferred gender of the voice. If not set, the service
  // will choose a voice based on the other parameters such as language_code
  // and [name][google.cloud.dialogflow.v2.VoiceSelectionParams.name]. Note
  // that this is only a preference, not a requirement. If a voice of the
  // appropriate gender is not available, the synthesizer should substitute a
  // voice with a different gender rather than failing the request.
  SsmlVoiceGender ssml_gender = 2;
}
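
// A `VoiceSelectionParams` sketch in proto text format that leaves `name`
// unset, so the service picks a concrete voice matching the preference (the
// gender value is illustrative):
//
//   ssml_gender: SSML_VOICE_GENDER_NEUTRAL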

// Configuration of how speech should be synthesized.
message SynthesizeSpeechConfig {
  // Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the
  // normal native speed supported by the specific voice. 2.0 is twice as
  // fast, and 0.5 is half as fast. If unset (0.0), defaults to the native
  // 1.0 speed. Any other value < 0.25 or > 4.0 returns an error.
  double speaking_rate = 1;

  // Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase
  // 20 semitones from the original pitch. -20 means decrease 20 semitones
  // from the original pitch.
  double pitch = 2;

  // Optional. Volume gain (in dB) of the normal native volume supported by
  // the specific voice, in the range [-96.0, 16.0]. If unset, or set to a
  // value of 0.0 (dB), will play at normal native signal amplitude. A value
  // of -6.0 (dB) will play at approximately half the amplitude of the normal
  // native signal amplitude. A value of +6.0 (dB) will play at approximately
  // twice the amplitude of the normal native signal amplitude. We strongly
  // recommend not to exceed +10 (dB) as there's usually no effective
  // increase in loudness for any value greater than that.
  double volume_gain_db = 3;

  // Optional. An identifier which selects 'audio effects' profiles that are
  // applied on (post synthesized) text to speech. Effects are applied on top
  // of each other in the order they are given.
  repeated string effects_profile_id = 5;

  // Optional. The desired voice of the synthesized audio.
  VoiceSelectionParams voice = 4;
}
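
// A `SynthesizeSpeechConfig` sketch in proto text format. The voice name is
// hypothetical; the rate and gain follow the ranges documented above:
//
//   speaking_rate: 1.25
//   volume_gain_db: -6.0
//   voice {
//     name: "en-US-Wavenet-D"
//     ssml_gender: SSML_VOICE_GENDER_MALE
//   }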

// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
enum SsmlVoiceGender {
  // An unspecified gender, which means that the client doesn't care which
  // gender the selected voice will have.
  SSML_VOICE_GENDER_UNSPECIFIED = 0;

  // A male voice.
  SSML_VOICE_GENDER_MALE = 1;

  // A female voice.
  SSML_VOICE_GENDER_FEMALE = 2;

  // A gender-neutral voice.
  SSML_VOICE_GENDER_NEUTRAL = 3;
}

// Instructs the speech synthesizer on how to generate the output audio
// content.
// If this audio config is supplied in a request, it overrides all existing
// text-to-speech settings applied to the agent.
message OutputAudioConfig {
  // Required. Audio encoding of the synthesized audio content.
  OutputAudioEncoding audio_encoding = 1
      [(google.api.field_behavior) = REQUIRED];

  // The synthesis sample rate (in hertz) for this audio. If not
  // provided, then the synthesizer will use the default sample rate based on
  // the audio encoding. If this is different from the voice's natural sample
  // rate, then the synthesizer will honor this request by converting to the
  // desired sample rate (which might result in worse audio quality).
  int32 sample_rate_hertz = 2;

  // Configuration of how speech should be synthesized.
  SynthesizeSpeechConfig synthesize_speech_config = 3;
}
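
// An `OutputAudioConfig` sketch in proto text format. The sample rate is
// illustrative; omit it to use the encoding's default:
//
//   audio_encoding: OUTPUT_AUDIO_ENCODING_OGG_OPUS
//   sample_rate_hertz: 24000
//   synthesize_speech_config {
//     speaking_rate: 1.0
//   }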

// Configures speech transcription for
// [ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile].
message SpeechToTextConfig {
  // Optional. The speech model used in speech to text.
  // Both `SPEECH_MODEL_VARIANT_UNSPECIFIED` and `USE_BEST_AVAILABLE` are
  // treated as `USE_ENHANCED`. It can be overridden in
  // [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
  // and
  // [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
  // requests.
  SpeechModelVariant speech_model_variant = 1
      [(google.api.field_behavior) = OPTIONAL];
}

// Audio encoding of the output audio format in Text-To-Speech.
enum OutputAudioEncoding {
  // Not specified.
  OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0;

  // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  // Audio content returned as LINEAR16 also contains a WAV header.
  OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1;

  // MP3 audio at 32 kbps.
  OUTPUT_AUDIO_ENCODING_MP3 = 2;

  // MP3 audio at 64 kbps.
  OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4;

  // Opus encoded audio wrapped in an Ogg container. The result will be a
  // file which can be played natively on Android, and in browsers (at least
  // Chrome and Firefox). The quality of the encoding is considerably higher
  // than MP3 while using approximately the same bitrate.
  OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3;

  // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  OUTPUT_AUDIO_ENCODING_MULAW = 5;
}