// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "protobuf.h"

#include <ruby/version.h>

#include "defs.h"
#include "map.h"
#include "message.h"
#include "repeated_field.h"

VALUE cParseError;
VALUE cTypeError;

const upb_fielddef* map_field_key(const upb_fielddef* field) {
  const upb_msgdef *entry = upb_fielddef_msgsubdef(field);
  return upb_msgdef_itof(entry, 1);
}

const upb_fielddef* map_field_value(const upb_fielddef* field) {
  const upb_msgdef *entry = upb_fielddef_msgsubdef(field);
  return upb_msgdef_itof(entry, 2);
}
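
// For illustration of the two helpers above (the .proto snippet and the
// "CountsEntry" name are hypothetical, not taken from this file): a field
// declared as
//
//   map<string, int32> counts = 1;
//
// is represented as a repeated field of an auto-generated entry message
//
//   message CountsEntry {
//     optional string key = 1;
//     optional int32 value = 2;
//   }
//
// so map_field_key() returns the fielddef for field number 1 and
// map_field_value() returns the fielddef for field number 2.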
// -----------------------------------------------------------------------------
// StringBuilder, for inspect
// -----------------------------------------------------------------------------

struct StringBuilder {
  size_t size;
  size_t cap;
  char *data;
};

typedef struct StringBuilder StringBuilder;

static size_t StringBuilder_SizeOf(size_t cap) {
  return sizeof(StringBuilder) + cap;
}

StringBuilder* StringBuilder_New() {
  const size_t cap = 128;
  StringBuilder* builder = malloc(sizeof(*builder));
  builder->size = 0;
  builder->cap = cap;
  builder->data = malloc(builder->cap);
  return builder;
}

void StringBuilder_Free(StringBuilder* b) {
  free(b->data);
  free(b);
}

void StringBuilder_Printf(StringBuilder* b, const char *fmt, ...) {
  size_t have = b->cap - b->size;
  size_t n;
  va_list args;

  va_start(args, fmt);
  n = vsnprintf(&b->data[b->size], have, fmt, args);
  va_end(args);

  if (have <= n) {
    // The output was truncated: grow the buffer until it fits, then format
    // again into the reallocated buffer.
    while (have <= n) {
      b->cap *= 2;
      have = b->cap - b->size;
    }
    b->data = realloc(b->data, StringBuilder_SizeOf(b->cap));
    va_start(args, fmt);
    n = vsnprintf(&b->data[b->size], have, fmt, args);
    va_end(args);
    PBRUBY_ASSERT(n < have);
  }

  b->size += n;
}

VALUE StringBuilder_ToRubyString(StringBuilder* b) {
  VALUE ret = rb_str_new(b->data, b->size);
  rb_enc_associate(ret, rb_utf8_encoding());
  return ret;
}
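
// Illustrative usage sketch only: the format string and values below are made
// up; the real callers are the inspect implementations elsewhere in this
// extension.
//
//   StringBuilder* b = StringBuilder_New();
//   StringBuilder_Printf(b, "<MyMessage: foo: %d>", 42);
//   VALUE str = StringBuilder_ToRubyString(b);
//   StringBuilder_Free(b);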
static void StringBuilder_PrintEnum(StringBuilder* b, int32_t val,
                                    const upb_enumdef* e) {
  const char *name = upb_enumdef_iton(e, val);
  if (name) {
    StringBuilder_Printf(b, ":%s", name);
  } else {
    StringBuilder_Printf(b, "%" PRId32, val);
  }
}

void StringBuilder_PrintMsgval(StringBuilder* b, upb_msgval val,
                               TypeInfo info) {
  switch (info.type) {
    case UPB_TYPE_BOOL:
      StringBuilder_Printf(b, "%s", val.bool_val ? "true" : "false");
      break;
    case UPB_TYPE_FLOAT: {
      VALUE str = rb_inspect(DBL2NUM(val.float_val));
      StringBuilder_Printf(b, "%s", RSTRING_PTR(str));
      break;
    }
    case UPB_TYPE_DOUBLE: {
      VALUE str = rb_inspect(DBL2NUM(val.double_val));
      StringBuilder_Printf(b, "%s", RSTRING_PTR(str));
      break;
    }
    case UPB_TYPE_INT32:
      StringBuilder_Printf(b, "%" PRId32, val.int32_val);
      break;
    case UPB_TYPE_UINT32:
      StringBuilder_Printf(b, "%" PRIu32, val.uint32_val);
      break;
    case UPB_TYPE_INT64:
      StringBuilder_Printf(b, "%" PRId64, val.int64_val);
      break;
    case UPB_TYPE_UINT64:
      StringBuilder_Printf(b, "%" PRIu64, val.uint64_val);
      break;
    case UPB_TYPE_STRING:
      StringBuilder_Printf(b, "\"%.*s\"", (int)val.str_val.size,
                           val.str_val.data);
      break;
    case UPB_TYPE_BYTES:
      StringBuilder_Printf(b, "\"%.*s\"", (int)val.str_val.size,
                           val.str_val.data);
      break;
    case UPB_TYPE_ENUM:
      StringBuilder_PrintEnum(b, val.int32_val, info.def.enumdef);
      break;
    case UPB_TYPE_MESSAGE:
      Message_PrintMessage(b, val.msg_val, info.def.msgdef);
      break;
  }
}
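
// For illustration, the fragments emitted above look like: true / false for
// bools; 1 or 2.5 for the numeric types (floats and doubles go through
// rb_inspect); "abc" for strings and bytes; :FOO for an enum value whose
// number has a name and the bare number otherwise; nested messages are
// rendered by Message_PrintMessage().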
// -----------------------------------------------------------------------------
// Arena
// -----------------------------------------------------------------------------

typedef struct {
  upb_arena *arena;
  VALUE pinned_objs;
} Arena;

static void Arena_mark(void *data) {
  Arena *arena = data;
  rb_gc_mark(arena->pinned_objs);
}

static void Arena_free(void *data) {
  Arena *arena = data;
  upb_arena_free(arena->arena);
  xfree(arena);
}

static VALUE cArena;

const rb_data_type_t Arena_type = {
    "Google::Protobuf::Internal::Arena",
    { Arena_mark, Arena_free, NULL },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE Arena_alloc(VALUE klass) {
  Arena *arena = ALLOC(Arena);
  arena->arena = upb_arena_new();
  arena->pinned_objs = Qnil;
  return TypedData_Wrap_Struct(klass, &Arena_type, arena);
}

upb_arena *Arena_get(VALUE _arena) {
  Arena *arena;
  TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
  return arena->arena;
}

void Arena_fuse(VALUE _arena, upb_arena *other) {
  Arena *arena;
  TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
  if (!upb_arena_fuse(arena->arena, other)) {
    rb_raise(rb_eRuntimeError,
             "Unable to fuse arenas. This should never happen since Ruby does "
             "not use initial blocks");
  }
}

VALUE Arena_new() { return Arena_alloc(cArena); }

void Arena_Pin(VALUE _arena, VALUE obj) {
  Arena *arena;
  TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
  if (arena->pinned_objs == Qnil) {
    arena->pinned_objs = rb_ary_new();
  }
  rb_ary_push(arena->pinned_objs, obj);
}
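
// Illustrative usage sketch only; `some_wrapper_rb` and the allocation step
// are hypothetical stand-ins for what the other translation units of this
// extension do:
//
//   VALUE arena_rb = Arena_new();
//   upb_arena* arena = Arena_get(arena_rb);
//   /* ... allocate upb data on `arena` ... */
//   Arena_Pin(arena_rb, some_wrapper_rb);  // keep a Ruby object alive at
//                                          // least as long as the arena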
void Arena_register(VALUE module) {
  VALUE internal = rb_define_module_under(module, "Internal");
  VALUE klass = rb_define_class_under(internal, "Arena", rb_cObject);
  rb_define_alloc_func(klass, Arena_alloc);
  rb_gc_register_address(&cArena);
  cArena = klass;
}
// -----------------------------------------------------------------------------
// Object Cache
// -----------------------------------------------------------------------------

// A pointer -> Ruby Object cache that keeps references to Ruby wrapper
// objects. This allows us to look up any Ruby wrapper object by the address
// of the object it is wrapping. That way we can avoid ever creating two
// different wrapper objects for the same C object, which saves memory and
// preserves object identity.
//
// We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash
// to store WeakMap keys, because Ruby <2.7 WeakMap doesn't allow
// non-finalizable keys.
//
// We also need the secondary Hash if sizeof(long) < sizeof(VALUE), because this
// means it may not be possible to fit a pointer into a Fixnum. Keys are
// pointers, and if they fit into a Fixnum, Ruby doesn't collect them, but if
// they overflow and require allocating a Bignum, they could get collected
// prematurely, thus removing the cache entry. This happens on 64-bit Windows,
// on which pointers are 64 bits but longs are 32 bits. In this case, we enable
// the secondary Hash to hold the keys and prevent them from being collected.
#if RUBY_API_VERSION_CODE >= 20700 && SIZEOF_LONG >= SIZEOF_VALUE
#define USE_SECONDARY_MAP 0
#else
#define USE_SECONDARY_MAP 1
#endif
#if USE_SECONDARY_MAP

// Maps Numeric -> Object. The object is then used as a key into the WeakMap.
// This is needed for Ruby <2.7 where a number cannot be a key to WeakMap.
// The object is used only for its identity; it does not contain any data.
VALUE secondary_map = Qnil;

// Mutations to the map are under a mutex, because SecondaryMap_MaybeGC()
// iterates over the map, which cannot happen in parallel with insertions, or
// Ruby will throw:
//   can't add a new key into hash during iteration (RuntimeError)
VALUE secondary_map_mutex = Qnil;

// Lambda that will GC entries from the secondary map that are no longer
// present in the primary map.
VALUE gc_secondary_map_lambda = Qnil;
ID length;

extern VALUE weak_obj_cache;

static void SecondaryMap_Init() {
  rb_gc_register_address(&secondary_map);
  rb_gc_register_address(&gc_secondary_map_lambda);
  rb_gc_register_address(&secondary_map_mutex);
  secondary_map = rb_hash_new();
  gc_secondary_map_lambda = rb_eval_string(
      "->(secondary, weak) {\n"
      "  secondary.delete_if { |k, v| !weak.key?(v) }\n"
      "}\n");
  secondary_map_mutex = rb_mutex_new();
  length = rb_intern("length");
}
// The secondary map is a regular Hash, and will never shrink on its own.
// The main object cache is a WeakMap that will automatically remove entries
// when the target object is no longer reachable, but unless we manually
// remove the corresponding entries from the secondary map, it will grow
// without bound.
//
// To avoid this unbounded growth we periodically remove entries from the
// secondary map that are no longer present in the WeakMap. The logic of
// how often to perform this GC is an arbitrary tuning parameter that
// represents a straightforward CPU/memory tradeoff.
//
// Requires: secondary_map_mutex is held.
static void SecondaryMap_MaybeGC() {
  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
  size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
  size_t secondary_len = RHASH_SIZE(secondary_map);
  if (secondary_len < weak_len) {
    // Logically this case should not be possible: a valid entry cannot exist
    // in the weak table unless there is a corresponding entry in the secondary
    // table. It should *always* be the case that secondary_len >= weak_len.
    //
    // However, ObjectSpace::WeakMap#length (and therefore weak_len) is
    // unreliable: it overreports its true length by including non-live
    // objects. Those non-live objects are not yielded in iteration, so we may
    // already have deleted them from the secondary map in a previous
    // invocation of SecondaryMap_MaybeGC().
    //
    // In this case, we can't measure any waste, so we just return.
    return;
  }
  size_t waste = secondary_len - weak_len;
  // GC if we could remove at least 2000 entries or 20% of the table size
  // (whichever is greater). Since the cost of the GC pass is O(N), we
  // want to make sure that we condition this on overall table size, to
  // avoid O(N^2) CPU costs.
  size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
  if (waste > threshold) {
    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2, secondary_map,
               weak_obj_cache);
  }
}
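
// A worked example of the threshold above (illustrative numbers only): with
// 50,000 entries in the secondary map and 30,000 reported by the WeakMap,
// waste = 20,000 and threshold = max(0.2 * 50,000, 2000) = 10,000, so the
// cleanup lambda runs; with 4,000 vs. 3,000 entries, waste = 1,000 is below
// the 2,000 floor and nothing happens.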
// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE SecondaryMap_Get(VALUE key, bool create) {
  PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
  VALUE ret = rb_hash_lookup(secondary_map, key);
  if (ret == Qnil && create) {
    SecondaryMap_MaybeGC();
    ret = rb_class_new_instance(0, NULL, rb_cObject);
    rb_hash_aset(secondary_map, key, ret);
  }
  return ret;
}

#endif
// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE ObjectCache_GetKey(const void* key, bool create) {
  VALUE key_val = (VALUE)key;
  PBRUBY_ASSERT((key_val & 3) == 0);
  // Pointers are always at least 4-byte aligned, so the low two bits are
  // zero; shifting them off keeps the key small enough to fit in a Fixnum
  // where possible (see the USE_SECONDARY_MAP discussion above).
  VALUE ret = LL2NUM(key_val >> 2);
#if USE_SECONDARY_MAP
  ret = SecondaryMap_Get(ret, create);
#endif
  return ret;
}

// Public ObjectCache API.

VALUE weak_obj_cache = Qnil;
ID item_get;
ID item_set;

static void ObjectCache_Init() {
  rb_gc_register_address(&weak_obj_cache);
  VALUE klass = rb_eval_string("ObjectSpace::WeakMap");
  weak_obj_cache = rb_class_new_instance(0, NULL, klass);
  item_get = rb_intern("[]");
  item_set = rb_intern("[]=");
#if USE_SECONDARY_MAP
  SecondaryMap_Init();
#endif
}

void ObjectCache_Add(const void* key, VALUE val) {
  PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
#if USE_SECONDARY_MAP
  rb_mutex_lock(secondary_map_mutex);
#endif
  VALUE key_rb = ObjectCache_GetKey(key, true);
  rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
#if USE_SECONDARY_MAP
  rb_mutex_unlock(secondary_map_mutex);
#endif
  PBRUBY_ASSERT(ObjectCache_Get(key) == val);
}

// Returns the cached object for this key, if any. Otherwise returns Qnil.
VALUE ObjectCache_Get(const void* key) {
  VALUE key_rb = ObjectCache_GetKey(key, false);
  return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
}
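
// Illustrative usage sketch only; `ptr`, `NewWrapperFor()` and `wrapper` are
// hypothetical stand-ins for the real wrapper constructors that live in the
// other translation units of this extension:
//
//   VALUE wrapper = ObjectCache_Get(ptr);
//   if (wrapper == Qnil) {
//     wrapper = NewWrapperFor(ptr);   // build the Ruby wrapper once...
//     ObjectCache_Add(ptr, wrapper);  // ...and cache it by C address
//   }
//   return wrapper;                   // same VALUE for the same ptr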
/*
 * call-seq:
 *     Google::Protobuf.discard_unknown(msg)
 *
 * Discard unknown fields in the given message object and recursively discard
 * unknown fields in submessages.
 */
static VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb) {
  const upb_msgdef *m;
  upb_msg *msg = Message_GetMutable(msg_rb, &m);
  if (!upb_msg_discardunknown(msg, m, 128)) {
    rb_raise(rb_eRuntimeError, "Messages nested too deeply.");
  }
  return Qnil;
}
/*
 * call-seq:
 *     Google::Protobuf.deep_copy(obj) => copy_of_obj
 *
 * Performs a deep copy of a RepeatedField instance, a Map instance, or a
 * message object, recursively copying its members.
 */
VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj) {
  VALUE klass = CLASS_OF(obj);
  if (klass == cRepeatedField) {
    return RepeatedField_deep_copy(obj);
  } else if (klass == cMap) {
    return Map_deep_copy(obj);
  } else {
    VALUE new_arena_rb = Arena_new();
    upb_arena *new_arena = Arena_get(new_arena_rb);
    const upb_msgdef *m;
    const upb_msg *msg = Message_Get(obj, &m);
    upb_msg* new_msg = Message_deep_copy(msg, m, new_arena);
    return Message_GetRubyWrapper(new_msg, m, new_arena_rb);
  }
}
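
// From Ruby this looks roughly like the following (illustrative; `msg` is any
// generated message instance):
//
//   copy = Google::Protobuf.deep_copy(msg)
//   # `copy` is backed by its own new arena, so mutating it does not
//   # affect `msg`.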
// -----------------------------------------------------------------------------
// Initialization/entry point.
// -----------------------------------------------------------------------------

// This must be named "Init_protobuf_c" because the Ruby module is named
// "protobuf_c" -- the VM looks for this symbol in our .so.
__attribute__((visibility("default"))) void Init_protobuf_c() {
  ObjectCache_Init();

  VALUE google = rb_define_module("Google");
  VALUE protobuf = rb_define_module_under(google, "Protobuf");

  Arena_register(protobuf);
  Defs_register(protobuf);
  RepeatedField_register(protobuf);
  Map_register(protobuf);
  Message_register(protobuf);

  cParseError = rb_const_get(protobuf, rb_intern("ParseError"));
  rb_gc_register_mark_object(cParseError);
  cTypeError = rb_const_get(protobuf, rb_intern("TypeError"));
  rb_gc_register_mark_object(cTypeError);

  rb_define_singleton_method(protobuf, "discard_unknown",
                             Google_Protobuf_discard_unknown, 1);
  rb_define_singleton_method(protobuf, "deep_copy", Google_Protobuf_deep_copy,
                             1);
}