Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 1 | // Protocol Buffers - Google's data interchange format |
| 2 | // Copyright 2014 Google Inc. All rights reserved. |
| 3 | // https://developers.google.com/protocol-buffers/ |
| 4 | // |
| 5 | // Redistribution and use in source and binary forms, with or without |
| 6 | // modification, are permitted provided that the following conditions are |
| 7 | // met: |
| 8 | // |
| 9 | // * Redistributions of source code must retain the above copyright |
| 10 | // notice, this list of conditions and the following disclaimer. |
| 11 | // * Redistributions in binary form must reproduce the above |
| 12 | // copyright notice, this list of conditions and the following disclaimer |
| 13 | // in the documentation and/or other materials provided with the |
| 14 | // distribution. |
| 15 | // * Neither the name of Google Inc. nor the names of its |
| 16 | // contributors may be used to endorse or promote products derived from |
| 17 | // this software without specific prior written permission. |
| 18 | // |
| 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | |
| 31 | #include "protobuf.h" |
| 32 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 33 | #include <ruby/version.h> |
| 34 | |
| 35 | #include "defs.h" |
| 36 | #include "map.h" |
| 37 | #include "message.h" |
| 38 | #include "repeated_field.h" |
| 39 | |
Josh Haberman | 181c7f2 | 2015-07-15 11:05:10 -0700 | [diff] [blame] | 40 | VALUE cError; |
Erik Benoist | 74f8e24 | 2018-06-26 22:24:24 -0500 | [diff] [blame] | 41 | VALUE cTypeError; |
Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 42 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 43 | const upb_fielddef* map_field_key(const upb_fielddef* field) { |
| 44 | const upb_msgdef *entry = upb_fielddef_msgsubdef(field); |
| 45 | return upb_msgdef_itof(entry, 1); |
Joshua Haberman | 1e37a94 | 2019-08-13 04:54:11 -0700 | [diff] [blame] | 46 | } |
| 47 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 48 | const upb_fielddef* map_field_value(const upb_fielddef* field) { |
| 49 | const upb_msgdef *entry = upb_fielddef_msgsubdef(field); |
| 50 | return upb_msgdef_itof(entry, 2); |
| 51 | } |
| 52 | |
| 53 | // ----------------------------------------------------------------------------- |
| 54 | // StringBuilder, for inspect |
| 55 | // ----------------------------------------------------------------------------- |
| 56 | |
// Growable character buffer used to build up #inspect output.
struct StringBuilder {
  size_t size;  // Number of bytes currently written into |data|.
  size_t cap;   // Allocated capacity of |data|, in bytes.
  char *data;   // Heap-allocated buffer; owned by the StringBuilder.
};

typedef struct StringBuilder StringBuilder;
| 64 | |
// Allocation size used when (re)sizing the character buffer to capacity |cap|.
// NOTE(review): this reserves sizeof(StringBuilder) extra bytes beyond |cap| —
// presumably a holdover from when the struct and buffer were one allocation.
static size_t StringBuilder_SizeOf(size_t cap) {
  return sizeof(StringBuilder) + cap;
}
| 68 | |
| 69 | StringBuilder* StringBuilder_New() { |
| 70 | const size_t cap = 128; |
| 71 | StringBuilder* builder = malloc(sizeof(*builder)); |
| 72 | builder->size = 0; |
| 73 | builder->cap = cap; |
| 74 | builder->data = malloc(builder->cap); |
| 75 | return builder; |
| 76 | } |
| 77 | |
// Releases the builder and its character buffer. |b| must be non-NULL.
void StringBuilder_Free(StringBuilder* b) {
  free(b->data);  // Free the payload first; |b| owns it.
  free(b);
}
| 82 | |
| 83 | void StringBuilder_Printf(StringBuilder* b, const char *fmt, ...) { |
| 84 | size_t have = b->cap - b->size; |
| 85 | size_t n; |
| 86 | va_list args; |
| 87 | |
| 88 | va_start(args, fmt); |
| 89 | n = vsnprintf(&b->data[b->size], have, fmt, args); |
| 90 | va_end(args); |
| 91 | |
| 92 | if (have <= n) { |
| 93 | while (have <= n) { |
| 94 | b->cap *= 2; |
| 95 | have = b->cap - b->size; |
| 96 | } |
| 97 | b->data = realloc(b->data, StringBuilder_SizeOf(b->cap)); |
| 98 | va_start(args, fmt); |
| 99 | n = vsnprintf(&b->data[b->size], have, fmt, args); |
| 100 | va_end(args); |
| 101 | PBRUBY_ASSERT(n < have); |
| 102 | } |
| 103 | |
| 104 | b->size += n; |
| 105 | } |
| 106 | |
| 107 | VALUE StringBuilder_ToRubyString(StringBuilder* b) { |
| 108 | VALUE ret = rb_str_new(b->data, b->size); |
| 109 | rb_enc_associate(ret, rb_utf8_encoding()); |
| 110 | return ret; |
| 111 | } |
| 112 | |
| 113 | static void StringBuilder_PrintEnum(StringBuilder* b, int32_t val, |
| 114 | const upb_enumdef* e) { |
| 115 | const char *name = upb_enumdef_iton(e, val); |
| 116 | if (name) { |
| 117 | StringBuilder_Printf(b, ":%s", name); |
Joshua Haberman | 1e37a94 | 2019-08-13 04:54:11 -0700 | [diff] [blame] | 118 | } else { |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 119 | StringBuilder_Printf(b, "%" PRId32, val); |
| 120 | } |
| 121 | } |
| 122 | |
| 123 | void StringBuilder_PrintMsgval(StringBuilder* b, upb_msgval val, |
| 124 | TypeInfo info) { |
| 125 | switch (info.type) { |
| 126 | case UPB_TYPE_BOOL: |
| 127 | StringBuilder_Printf(b, "%s", val.bool_val ? "true" : "false"); |
| 128 | break; |
| 129 | case UPB_TYPE_FLOAT: { |
| 130 | VALUE str = rb_inspect(DBL2NUM(val.float_val)); |
| 131 | StringBuilder_Printf(b, "%s", RSTRING_PTR(str)); |
| 132 | break; |
| 133 | } |
| 134 | case UPB_TYPE_DOUBLE: { |
| 135 | VALUE str = rb_inspect(DBL2NUM(val.double_val)); |
| 136 | StringBuilder_Printf(b, "%s", RSTRING_PTR(str)); |
| 137 | break; |
| 138 | } |
| 139 | case UPB_TYPE_INT32: |
| 140 | StringBuilder_Printf(b, "%" PRId32, val.int32_val); |
| 141 | break; |
| 142 | case UPB_TYPE_UINT32: |
| 143 | StringBuilder_Printf(b, "%" PRIu32, val.uint32_val); |
| 144 | break; |
| 145 | case UPB_TYPE_INT64: |
| 146 | StringBuilder_Printf(b, "%" PRId64, val.int64_val); |
| 147 | break; |
| 148 | case UPB_TYPE_UINT64: |
| 149 | StringBuilder_Printf(b, "%" PRIu64, val.uint64_val); |
| 150 | break; |
| 151 | case UPB_TYPE_STRING: |
| 152 | StringBuilder_Printf(b, "\"%.*s\"", (int)val.str_val.size, val.str_val.data); |
| 153 | break; |
| 154 | case UPB_TYPE_BYTES: |
| 155 | StringBuilder_Printf(b, "\"%.*s\"", (int)val.str_val.size, val.str_val.data); |
| 156 | break; |
| 157 | case UPB_TYPE_ENUM: |
| 158 | StringBuilder_PrintEnum(b, val.int32_val, info.def.enumdef); |
| 159 | break; |
| 160 | case UPB_TYPE_MESSAGE: |
| 161 | Message_PrintMessage(b, val.msg_val, info.def.msgdef); |
| 162 | break; |
Joshua Haberman | 1e37a94 | 2019-08-13 04:54:11 -0700 | [diff] [blame] | 163 | } |
| 164 | } |
| 165 | |
Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 166 | // ----------------------------------------------------------------------------- |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 167 | // Arena |
Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 168 | // ----------------------------------------------------------------------------- |
| 169 | |
// Wraps a upb_arena together with a Ruby array of objects that must stay
// alive at least as long as the arena.
typedef struct {
  upb_arena *arena;   // Owned; released in Arena_free().
  VALUE pinned_objs;  // Qnil, or an Array of VALUEs kept marked (Arena_Pin).
} Arena;
| 174 | |
| 175 | static void Arena_mark(void *data) { |
| 176 | Arena *arena = data; |
| 177 | rb_gc_mark(arena->pinned_objs); |
| 178 | } |
| 179 | |
| 180 | static void Arena_free(void *data) { |
| 181 | Arena *arena = data; |
| 182 | upb_arena_free(arena->arena); |
| 183 | } |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 184 | |
| 185 | static VALUE cArena; |
| 186 | |
// Type descriptor telling Ruby's GC how to mark and free Arena wrappers.
// RUBY_TYPED_FREE_IMMEDIATELY lets the VM free the struct during sweep
// without deferring to a finalizer queue.
const rb_data_type_t Arena_type = {
  "Google::Protobuf::Internal::Arena",
  { Arena_mark, Arena_free, NULL },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
| 192 | |
| 193 | static VALUE Arena_alloc(VALUE klass) { |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 194 | Arena *arena = ALLOC(Arena); |
| 195 | arena->arena = upb_arena_new(); |
| 196 | arena->pinned_objs = Qnil; |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 197 | return TypedData_Wrap_Struct(klass, &Arena_type, arena); |
Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 198 | } |
| 199 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 200 | upb_arena *Arena_get(VALUE _arena) { |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 201 | Arena *arena; |
| 202 | TypedData_Get_Struct(_arena, Arena, &Arena_type, arena); |
| 203 | return arena->arena; |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 204 | } |
Chris Fallin | 973f425 | 2014-11-18 14:19:58 -0800 | [diff] [blame] | 205 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 206 | VALUE Arena_new() { |
| 207 | return Arena_alloc(cArena); |
| 208 | } |
| 209 | |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 210 | void Arena_Pin(VALUE _arena, VALUE obj) { |
| 211 | Arena *arena; |
| 212 | TypedData_Get_Struct(_arena, Arena, &Arena_type, arena); |
| 213 | if (arena->pinned_objs == Qnil) { |
| 214 | arena->pinned_objs = rb_ary_new(); |
| 215 | } |
| 216 | rb_ary_push(arena->pinned_objs, obj); |
| 217 | } |
| 218 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 219 | void Arena_register(VALUE module) { |
| 220 | VALUE internal = rb_define_module_under(module, "Internal"); |
| 221 | VALUE klass = rb_define_class_under(internal, "Arena", rb_cObject); |
| 222 | rb_define_alloc_func(klass, Arena_alloc); |
| 223 | rb_gc_register_address(&cArena); |
| 224 | cArena = klass; |
| 225 | } |
| 226 | |
| 227 | // ----------------------------------------------------------------------------- |
| 228 | // Object Cache |
| 229 | // ----------------------------------------------------------------------------- |
| 230 | |
| 231 | // A pointer -> Ruby Object cache that keeps references to Ruby wrapper |
| 232 | // objects. This allows us to look up any Ruby wrapper object by the address |
| 233 | // of the object it is wrapping. That way we can avoid ever creating two |
| 234 | // different wrapper objects for the same C object, which saves memory and |
| 235 | // preserves object identity. |
Chris Fallin | 231886f | 2015-05-19 15:33:48 -0700 | [diff] [blame] | 236 | // |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 237 | // We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash |
| 238 | // to store WeakMap keys because Ruby <2.7 WeakMap doesn't allow non-finalizable |
| 239 | // keys. |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 240 | |
| 241 | #if RUBY_API_VERSION_CODE >= 20700 |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 242 | #define USE_SECONDARY_MAP 0 |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 243 | #else |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 244 | #define USE_SECONDARY_MAP 1 |
| 245 | #endif |
| 246 | |
| 247 | #if USE_SECONDARY_MAP |
| 248 | |
| 249 | // Maps Numeric -> Object. The object is then used as a key into the WeakMap. |
| 250 | // This is needed for Ruby <2.7 where a number cannot be a key to WeakMap. |
| 251 | // The object is used only for its identity; it does not contain any data. |
| 252 | VALUE secondary_map = Qnil; |
| 253 | |
// Mutations to the map are under a mutex, because SecondaryMap_MaybeGC()
// iterates over the map, which cannot happen in parallel with insertions, or
// Ruby will throw:
//   can't add a new key into hash during iteration (RuntimeError)
VALUE secondary_map_mutex = Qnil;
| 259 | |
Joshua Haberman | b75a49f | 2021-03-25 10:45:15 -0700 | [diff] [blame] | 260 | // Lambda that will GC entries from the secondary map that are no longer present |
| 261 | // in the primary map. |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 262 | VALUE gc_secondary_map_lambda = Qnil; |
Joshua Haberman | e1ac393 | 2021-03-25 11:34:29 -0700 | [diff] [blame] | 263 | ID length; |
Joshua Haberman | b75a49f | 2021-03-25 10:45:15 -0700 | [diff] [blame] | 264 | |
| 265 | extern VALUE weak_obj_cache; |
| 266 | |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 267 | static void SecondaryMap_Init() { |
| 268 | rb_gc_register_address(&secondary_map); |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 269 | rb_gc_register_address(&gc_secondary_map_lambda); |
Joshua Haberman | f0d6fcb | 2021-03-25 11:06:35 -0700 | [diff] [blame] | 270 | rb_gc_register_address(&secondary_map_mutex); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 271 | secondary_map = rb_hash_new(); |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 272 | gc_secondary_map_lambda = rb_eval_string( |
Joshua Haberman | b75a49f | 2021-03-25 10:45:15 -0700 | [diff] [blame] | 273 | "->(secondary, weak) {\n" |
| 274 | " secondary.delete_if { |k, v| !weak.key?(v) }\n" |
| 275 | "}\n"); |
Joshua Haberman | f0d6fcb | 2021-03-25 11:06:35 -0700 | [diff] [blame] | 276 | secondary_map_mutex = rb_mutex_new(); |
Joshua Haberman | e1ac393 | 2021-03-25 11:34:29 -0700 | [diff] [blame] | 277 | length = rb_intern("length"); |
Joshua Haberman | b75a49f | 2021-03-25 10:45:15 -0700 | [diff] [blame] | 278 | } |
| 279 | |
// The secondary map is a regular Hash, and will never shrink on its own.
// The main object cache is a WeakMap that will automatically remove entries
// when the target object is no longer reachable, but unless we manually
// remove the corresponding entries from the secondary map, it will grow
// without bound.
//
// To avoid this unbounded growth we periodically remove entries from the
// secondary map that are no longer present in the WeakMap. The logic of
// how often to perform this GC is an arbitrary tuning parameter that
// represents a straightforward CPU/memory tradeoff.
//
// Requires: secondary_map_mutex is held.
static void SecondaryMap_MaybeGC() {
  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
  size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
  size_t secondary_len = RHASH_SIZE(secondary_map);
  if (secondary_len < weak_len) {
    // Logically this case should not be possible: a valid entry cannot exist
    // in the weak table unless there is a corresponding entry in the secondary
    // table. It should *always* be the case that secondary_len >= weak_len.
    //
    // However ObjectSpace::WeakMap#length (and therefore weak_len) is
    // unreliable: it overreports its true length by including non-live
    // objects. However these non-live objects are not yielded in iteration,
    // so we may have previously deleted them from the secondary map in a
    // previous invocation of SecondaryMap_MaybeGC().
    //
    // In this case, we can't measure any waste, so we just return.
    return;
  }
  size_t waste = secondary_len - weak_len;
  // GC if we could remove at least 2000 entries or 20% of the table size
  // (whichever is greater). Since the cost of the GC pass is O(N), we
  // want to make sure that we condition this on overall table size, to
  // avoid O(N^2) CPU costs.
  size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
  if (waste > threshold) {
    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2,
               secondary_map, weak_obj_cache);
  }
}
| 321 | |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 322 | // Requires: secondary_map_mutex is held by this thread iff create == true. |
| 323 | static VALUE SecondaryMap_Get(VALUE key, bool create) { |
| 324 | PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 325 | VALUE ret = rb_hash_lookup(secondary_map, key); |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 326 | if (ret == Qnil && create) { |
Joshua Haberman | b75a49f | 2021-03-25 10:45:15 -0700 | [diff] [blame] | 327 | SecondaryMap_MaybeGC(); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 328 | ret = rb_eval_string("Object.new"); |
| 329 | rb_hash_aset(secondary_map, key, ret); |
| 330 | } |
| 331 | return ret; |
| 332 | } |
| 333 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 334 | #endif |
| 335 | |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 336 | // Requires: secondary_map_mutex is held by this thread iff create == true. |
| 337 | static VALUE ObjectCache_GetKey(const void* key, bool create) { |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 338 | char buf[sizeof(key)]; |
| 339 | memcpy(&buf, &key, sizeof(key)); |
| 340 | intptr_t key_int = (intptr_t)key; |
| 341 | PBRUBY_ASSERT((key_int & 3) == 0); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 342 | VALUE ret = LL2NUM(key_int >> 2); |
| 343 | #if USE_SECONDARY_MAP |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 344 | ret = SecondaryMap_Get(ret, create); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 345 | #endif |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 346 | return ret; |
| 347 | } |
| 348 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 349 | // Public ObjectCache API. |
| 350 | |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 351 | VALUE weak_obj_cache = Qnil; |
| 352 | ID item_get; |
| 353 | ID item_set; |
| 354 | |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 355 | static void ObjectCache_Init() { |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 356 | rb_gc_register_address(&weak_obj_cache); |
| 357 | VALUE klass = rb_eval_string("ObjectSpace::WeakMap"); |
| 358 | weak_obj_cache = rb_class_new_instance(0, NULL, klass); |
| 359 | item_get = rb_intern("[]"); |
| 360 | item_set = rb_intern("[]="); |
| 361 | #if USE_SECONDARY_MAP |
| 362 | SecondaryMap_Init(); |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 363 | #endif |
| 364 | } |
| 365 | |
// Inserts |key| -> |val| into the cache. The key must not already be present.
// On Ruby <2.7 the secondary-map mutex is held across the insert so that the
// lazily-created sentinel key and the WeakMap write are atomic with respect
// to SecondaryMap_MaybeGC().
void ObjectCache_Add(const void* key, VALUE val) {
  PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
#if USE_SECONDARY_MAP
  rb_mutex_lock(secondary_map_mutex);
#endif
  VALUE key_rb = ObjectCache_GetKey(key, true);
  rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
#if USE_SECONDARY_MAP
  rb_mutex_unlock(secondary_map_mutex);
#endif
  PBRUBY_ASSERT(ObjectCache_Get(key) == val);
}
| 378 | |
| 379 | // Returns the cached object for this key, if any. Otherwise returns Qnil. |
| 380 | VALUE ObjectCache_Get(const void* key) { |
Joshua Haberman | 2fe27d8 | 2021-03-29 12:30:49 -0700 | [diff] [blame^] | 381 | VALUE key_rb = ObjectCache_GetKey(key, false); |
Joshua Haberman | 9879f42 | 2021-02-24 16:41:35 -0800 | [diff] [blame] | 382 | return rb_funcall(weak_obj_cache, item_get, 1, key_rb); |
Joshua Haberman | 9abf6e2 | 2021-01-13 12:16:25 -0800 | [diff] [blame] | 383 | } |
| 384 | |
/*
 * call-seq:
 *     Google::Protobuf.discard_unknown(msg)
 *
 * Discard unknown fields in the given message object and recursively discard
 * unknown fields in submessages.
 */
static VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb) {
  const upb_msgdef *m;
  upb_msg *msg = Message_GetMutable(msg_rb, &m);
  // 128 is the maximum submessage nesting depth traversed before giving up.
  if (!upb_msg_discardunknown(msg, m, 128)) {
    rb_raise(rb_eRuntimeError, "Messages nested too deeply.");
  }

  return Qnil;
}
| 401 | |
| 402 | /* |
| 403 | * call-seq: |
| 404 | * Google::Protobuf.deep_copy(obj) => copy_of_obj |
| 405 | * |
| 406 | * Performs a deep copy of a RepeatedField instance, a Map instance, or a |
| 407 | * message object, recursively copying its members. |
| 408 | */ |
| 409 | VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj) { |
| 410 | VALUE klass = CLASS_OF(obj); |
| 411 | if (klass == cRepeatedField) { |
| 412 | return RepeatedField_deep_copy(obj); |
| 413 | } else if (klass == cMap) { |
| 414 | return Map_deep_copy(obj); |
| 415 | } else { |
| 416 | VALUE new_arena_rb = Arena_new(); |
| 417 | upb_arena *new_arena = Arena_get(new_arena_rb); |
| 418 | const upb_msgdef *m; |
| 419 | const upb_msg *msg = Message_Get(obj, &m); |
| 420 | upb_msg* new_msg = Message_deep_copy(msg, m, new_arena); |
| 421 | return Message_GetRubyWrapper(new_msg, m, new_arena_rb); |
| 422 | } |
| 423 | } |
Chris Fallin | 231886f | 2015-05-19 15:33:48 -0700 | [diff] [blame] | 424 | |
// -----------------------------------------------------------------------------
// Initialization/entry point.
// -----------------------------------------------------------------------------

// This must be named "Init_protobuf_c" because the Ruby module is named
// "protobuf_c" -- the VM looks for this symbol in our .so.
__attribute__ ((visibility ("default")))
void Init_protobuf_c() {
  // The object cache must exist before any wrapper objects are created by
  // the registrations below.
  ObjectCache_Init();

  VALUE google = rb_define_module("Google");
  VALUE protobuf = rb_define_module_under(google, "Protobuf");

  Arena_register(protobuf);
  Defs_register(protobuf);
  RepeatedField_register(protobuf);
  Map_register(protobuf);
  Message_register(protobuf);

  // These exception classes are looked up, not defined, here — presumably
  // defined on the Ruby side before this extension loads (TODO confirm).
  cError = rb_const_get(protobuf, rb_intern("Error"));
  cTypeError = rb_const_get(protobuf, rb_intern("TypeError"));

  rb_define_singleton_method(protobuf, "discard_unknown",
                             Google_Protobuf_discard_unknown, 1);
  rb_define_singleton_method(protobuf, "deep_copy",
                             Google_Protobuf_deep_copy, 1);
}