prediction_service.proto 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. syntax = "proto3";
  15. package google.cloud.ml.v1;
  16. import "google/api/annotations.proto";
  17. import "google/api/httpbody.proto";
  18. option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
  19. option java_multiple_files = true;
  20. option java_outer_classname = "PredictionServiceProto";
  21. option java_package = "com.google.cloud.ml.api.v1";
  22. // Copyright 2017 Google Inc. All Rights Reserved.
  23. //
  24. // Proto file for the Google Cloud Machine Learning Engine.
  25. // Describes the online prediction service.
  26. // The Prediction API, which serves predictions for models managed by
  27. // ModelService.
  28. service OnlinePredictionService {
  29. // Performs prediction on the data in the request.
  30. //
  31. // **** REMOVE FROM GENERATED DOCUMENTATION
  32. rpc Predict(PredictRequest) returns (google.api.HttpBody) {
  33. option (google.api.http) = {
  34. post: "/v1/{name=projects/**}:predict"
  35. body: "*"
  36. };
  37. }
  38. }
  39. // Request for predictions to be issued against a trained model.
  40. //
  41. // The body of the request is a single JSON object with a single top-level
  42. // field:
  43. //
  44. // <dl>
  45. // <dt>instances</dt>
  46. // <dd>A JSON array containing values representing the instances to use for
  47. // prediction.</dd>
  48. // </dl>
  49. //
  50. // The structure of each element of the instances list is determined by your
  51. // model's input definition. Instances can include named inputs or can contain
  52. // only unlabeled values.
  53. //
  54. // Not all data includes named inputs. Some instances will be simple
  55. // JSON values (boolean, number, or string). However, instances are often lists
  56. // of simple values, or complex nested lists. Here are some examples of request
  57. // bodies:
  58. //
  59. // CSV data with each row encoded as a string value:
  60. // <pre>
  61. // {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
  62. // </pre>
  63. // Plain text:
  64. // <pre>
  65. // {"instances": ["the quick brown fox", "la bruja le dio"]}
  66. // </pre>
  67. // Sentences encoded as lists of words (vectors of strings):
  68. // <pre>
  69. // {
  70. // "instances": [
  71. // ["the","quick","brown"],
  72. // ["la","bruja","le"],
  73. // ...
  74. // ]
  75. // }
  76. // </pre>
  77. // Floating point scalar values:
  78. // <pre>
  79. // {"instances": [0.0, 1.1, 2.2]}
  80. // </pre>
  81. // Vectors of integers:
  82. // <pre>
  83. // {
  84. // "instances": [
  85. // [0, 1, 2],
  86. // [3, 4, 5],
  87. // ...
  88. // ]
  89. // }
  90. // </pre>
  91. // Tensors (in this case, two-dimensional tensors):
  92. // <pre>
  93. // {
  94. // "instances": [
  95. // [
  96. // [0, 1, 2],
  97. // [3, 4, 5]
  98. // ],
  99. // ...
  100. // ]
  101. // }
  102. // </pre>
  103. // Images can be represented different ways. In this encoding scheme the first
  104. // two dimensions represent the rows and columns of the image, and the third
  105. // contains lists (vectors) of the R, G, and B values for each pixel.
  106. // <pre>
  107. // {
  108. // "instances": [
  109. // [
  110. // [
  111. // [138, 30, 66],
  112. // [130, 20, 56],
  113. // ...
  114. // ],
  115. // [
  116. // [126, 38, 61],
  117. // [122, 24, 57],
  118. // ...
  119. // ],
  120. // ...
  121. // ],
  122. // ...
  123. // ]
  124. // }
  125. // </pre>
  126. // JSON strings must be encoded as UTF-8. To send binary data, you must
  127. // base64-encode the data and mark it as binary. To mark a JSON string
  128. // as binary, replace it with a JSON object with a single attribute named `b64`:
  129. // <pre>{"b64": "..."} </pre>
  130. // For example:
  131. //
  132. // Two Serialized tf.Examples (fake data, for illustrative purposes only):
  133. // <pre>
  134. // {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
  135. // </pre>
  136. // Two JPEG image byte strings (fake data, for illustrative purposes only):
  137. // <pre>
  138. // {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
  139. // </pre>
  140. // If your data includes named references, format each instance as a JSON object
  141. // with the named references as the keys:
  142. //
  143. // JSON input data to be preprocessed:
  144. // <pre>
  145. // {
  146. // "instances": [
  147. // {
  148. // "a": 1.0,
  149. // "b": true,
  150. // "c": "x"
  151. // },
  152. // {
  153. // "a": -2.0,
  154. // "b": false,
  155. // "c": "y"
  156. // }
  157. // ]
  158. // }
  159. // </pre>
  160. // Some models have an underlying TensorFlow graph that accepts multiple input
  161. // tensors. In this case, you should use the names of JSON name/value pairs to
  162. // identify the input tensors, as shown in the following exmaples:
  163. //
  164. // For a graph with input tensor aliases "tag" (string) and "image"
  165. // (base64-encoded string):
  166. // <pre>
  167. // {
  168. // "instances": [
  169. // {
  170. // "tag": "beach",
  171. // "image": {"b64": "ASa8asdf"}
  172. // },
  173. // {
  174. // "tag": "car",
  175. // "image": {"b64": "JLK7ljk3"}
  176. // }
  177. // ]
  178. // }
  179. // </pre>
  180. // For a graph with input tensor aliases "tag" (string) and "image"
  181. // (3-dimensional array of 8-bit ints):
  182. // <pre>
  183. // {
  184. // "instances": [
  185. // {
  186. // "tag": "beach",
  187. // "image": [
  188. // [
  189. // [138, 30, 66],
  190. // [130, 20, 56],
  191. // ...
  192. // ],
  193. // [
  194. // [126, 38, 61],
  195. // [122, 24, 57],
  196. // ...
  197. // ],
  198. // ...
  199. // ]
  200. // },
  201. // {
  202. // "tag": "car",
  203. // "image": [
  204. // [
  205. // [255, 0, 102],
  206. // [255, 0, 97],
  207. // ...
  208. // ],
  209. // [
  210. // [254, 1, 101],
  211. // [254, 2, 93],
  212. // ...
  213. // ],
  214. // ...
  215. // ]
  216. // },
  217. // ...
  218. // ]
  219. // }
  220. // </pre>
  221. // If the call is successful, the response body will contain one prediction
  222. // entry per instance in the request body. If prediction fails for any
  223. // instance, the response body will contain no predictions and will contian
  224. // a single error entry instead.
  225. message PredictRequest {
  226. // Required. The resource name of a model or a version.
  227. //
  228. // Authorization: requires `Viewer` role on the parent project.
  229. string name = 1;
  230. //
  231. // Required. The prediction request body.
  232. google.api.HttpBody http_body = 2;
  233. }