// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "protobuf.h"

#include <ruby/version.h>

#include "defs.h"
#include "map.h"
#include "message.h"
#include "repeated_field.h"

VALUE cError;
VALUE cTypeError;

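// Map entries are modeled in protobuf as a nested "entry" message type with
// two fields: by convention, field number 1 holds the key and field number 2
// holds the value. These helpers resolve the key/value field definitions from
// a map field's entry message.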
const upb_fielddef* map_field_key(const upb_fielddef* field) {
  const upb_msgdef *entry = upb_fielddef_msgsubdef(field);
  return upb_msgdef_itof(entry, 1);
}

const upb_fielddef* map_field_value(const upb_fielddef* field) {
  const upb_msgdef *entry = upb_fielddef_msgsubdef(field);
  return upb_msgdef_itof(entry, 2);
}

// -----------------------------------------------------------------------------
// StringBuilder, for inspect
// -----------------------------------------------------------------------------

struct StringBuilder {
  size_t size;
  size_t cap;
  char *data;
};

typedef struct StringBuilder StringBuilder;

static size_t StringBuilder_SizeOf(size_t cap) {
  return sizeof(StringBuilder) + cap;
}

StringBuilder* StringBuilder_New() {
  const size_t cap = 128;
  StringBuilder* builder = malloc(sizeof(*builder));
  builder->size = 0;
  builder->cap = cap;
  builder->data = malloc(builder->cap);
  return builder;
}

void StringBuilder_Free(StringBuilder* b) {
  free(b->data);
  free(b);
}

void StringBuilder_Printf(StringBuilder* b, const char *fmt, ...) {
  size_t have = b->cap - b->size;
  size_t n;
  va_list args;

  va_start(args, fmt);
  n = vsnprintf(&b->data[b->size], have, fmt, args);
  va_end(args);

  if (have <= n) {
    // The output was truncated: vsnprintf() returns the number of characters
    // the full result requires (excluding the NUL terminator). Double the
    // capacity until the result fits, then format again into the larger
    // buffer.
    while (have <= n) {
      b->cap *= 2;
      have = b->cap - b->size;
    }
    b->data = realloc(b->data, StringBuilder_SizeOf(b->cap));
    va_start(args, fmt);
    n = vsnprintf(&b->data[b->size], have, fmt, args);
    va_end(args);
    PBRUBY_ASSERT(n < have);
  }

  b->size += n;
}
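
// Usage sketch (illustrative only; mirrors how the inspect code below uses
// this type):
//   StringBuilder* b = StringBuilder_New();
//   StringBuilder_Printf(b, "x = %d", 42);
//   VALUE str = StringBuilder_ToRubyString(b);  // => "x = 42"
//   StringBuilder_Free(b);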

VALUE StringBuilder_ToRubyString(StringBuilder* b) {
  VALUE ret = rb_str_new(b->data, b->size);
  rb_enc_associate(ret, rb_utf8_encoding());
  return ret;
}

static void StringBuilder_PrintEnum(StringBuilder* b, int32_t val,
                                    const upb_enumdef* e) {
  const char *name = upb_enumdef_iton(e, val);
  if (name) {
    StringBuilder_Printf(b, ":%s", name);
  } else {
    StringBuilder_Printf(b, "%" PRId32, val);
  }
}

void StringBuilder_PrintMsgval(StringBuilder* b, upb_msgval val,
                               TypeInfo info) {
  switch (info.type) {
    case UPB_TYPE_BOOL:
      StringBuilder_Printf(b, "%s", val.bool_val ? "true" : "false");
      break;
    case UPB_TYPE_FLOAT: {
      VALUE str = rb_inspect(DBL2NUM(val.float_val));
      StringBuilder_Printf(b, "%s", RSTRING_PTR(str));
      break;
    }
    case UPB_TYPE_DOUBLE: {
      VALUE str = rb_inspect(DBL2NUM(val.double_val));
      StringBuilder_Printf(b, "%s", RSTRING_PTR(str));
      break;
    }
    case UPB_TYPE_INT32:
      StringBuilder_Printf(b, "%" PRId32, val.int32_val);
      break;
    case UPB_TYPE_UINT32:
      StringBuilder_Printf(b, "%" PRIu32, val.uint32_val);
      break;
    case UPB_TYPE_INT64:
      StringBuilder_Printf(b, "%" PRId64, val.int64_val);
      break;
    case UPB_TYPE_UINT64:
      StringBuilder_Printf(b, "%" PRIu64, val.uint64_val);
      break;
    case UPB_TYPE_STRING:
    case UPB_TYPE_BYTES:
      StringBuilder_Printf(b, "\"%.*s\"", (int)val.str_val.size,
                           val.str_val.data);
      break;
    case UPB_TYPE_ENUM:
      StringBuilder_PrintEnum(b, val.int32_val, info.def.enumdef);
      break;
    case UPB_TYPE_MESSAGE:
      Message_PrintMessage(b, val.msg_val, info.def.msgdef);
      break;
  }
}

// -----------------------------------------------------------------------------
// Arena
// -----------------------------------------------------------------------------

typedef struct {
  upb_arena *arena;
  VALUE pinned_objs;
} Arena;

static void Arena_mark(void *data) {
  Arena *arena = data;
  rb_gc_mark(arena->pinned_objs);
}

static void Arena_free(void *data) {
  Arena *arena = data;
  upb_arena_free(arena->arena);
}

static VALUE cArena;

const rb_data_type_t Arena_type = {
  "Google::Protobuf::Internal::Arena",
  { Arena_mark, Arena_free, NULL },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE Arena_alloc(VALUE klass) {
  Arena *arena = ALLOC(Arena);
  arena->arena = upb_arena_new();
  arena->pinned_objs = Qnil;
  return TypedData_Wrap_Struct(klass, &Arena_type, arena);
}

upb_arena *Arena_get(VALUE _arena) {
  Arena *arena;
  TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
  return arena->arena;
}

VALUE Arena_new() {
  return Arena_alloc(cArena);
}

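// Pins a Ruby object to the lifetime of this arena: the object is appended to
// pinned_objs, which Arena_mark() reports to the GC, so it stays reachable at
// least as long as the arena. Callers use this when arena-allocated data
// refers to Ruby-owned memory.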
void Arena_Pin(VALUE _arena, VALUE obj) {
  Arena *arena;
  TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
  if (arena->pinned_objs == Qnil) {
    arena->pinned_objs = rb_ary_new();
  }
  rb_ary_push(arena->pinned_objs, obj);
}

void Arena_register(VALUE module) {
  VALUE internal = rb_define_module_under(module, "Internal");
  VALUE klass = rb_define_class_under(internal, "Arena", rb_cObject);
  rb_define_alloc_func(klass, Arena_alloc);
  rb_gc_register_address(&cArena);
  cArena = klass;
}

// -----------------------------------------------------------------------------
// Object Cache
// -----------------------------------------------------------------------------

// A pointer -> Ruby Object cache that keeps references to Ruby wrapper
// objects. This allows us to look up any Ruby wrapper object by the address
// of the object it is wrapping. That way we can avoid ever creating two
// different wrapper objects for the same C object, which saves memory and
// preserves object identity.
//
// We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash
// to store WeakMap keys because Ruby <2.7 WeakMap doesn't allow non-finalizable
// keys.

#if RUBY_API_VERSION_CODE >= 20700
#define USE_SECONDARY_MAP 0
#else
#define USE_SECONDARY_MAP 1
#endif

#if USE_SECONDARY_MAP

// Maps Numeric -> Object. The object is then used as a key into the WeakMap.
// This is needed for Ruby <2.7 where a number cannot be a key to WeakMap.
// The object is used only for its identity; it does not contain any data.
VALUE secondary_map = Qnil;

// Mutations to the map are under a mutex, because SecondaryMap_MaybeGC()
// iterates over the map, which cannot happen in parallel with insertions, or
// Ruby will throw:
//   can't add a new key into hash during iteration (RuntimeError)
VALUE secondary_map_mutex = Qnil;

// Lambda that will GC entries from the secondary map that are no longer
// present in the primary map.
VALUE gc_secondary_map_lambda = Qnil;
ID length;

extern VALUE weak_obj_cache;

static void SecondaryMap_Init() {
  rb_gc_register_address(&secondary_map);
  rb_gc_register_address(&gc_secondary_map_lambda);
  rb_gc_register_address(&secondary_map_mutex);
  secondary_map = rb_hash_new();
  gc_secondary_map_lambda = rb_eval_string(
      "->(secondary, weak) {\n"
      "  secondary.delete_if { |k, v| !weak.key?(v) }\n"
      "}\n");
  secondary_map_mutex = rb_mutex_new();
  length = rb_intern("length");
}

// The secondary map is a regular Hash, and will never shrink on its own.
// The main object cache is a WeakMap that will automatically remove entries
// when the target object is no longer reachable, but unless we manually
// remove the corresponding entries from the secondary map, it will grow
// without bound.
//
// To avoid this unbounded growth we periodically remove entries from the
// secondary map that are no longer present in the WeakMap. How often to
// perform this GC is an arbitrary tuning parameter that represents a
// straightforward CPU/memory tradeoff.
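//
// For example (illustrative numbers only): if the secondary map holds 10,000
// entries while the WeakMap reports 7,000 live ones, waste = 3,000, which
// exceeds the threshold max(10,000 * 0.2, 2,000) = 2,000, so a pass runs.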
//
// Requires: secondary_map_mutex is held.
static void SecondaryMap_MaybeGC() {
  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
  size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
  size_t secondary_len = RHASH_SIZE(secondary_map);
  if (secondary_len < weak_len) {
    // Logically this case should not be possible: a valid entry cannot exist
    // in the weak table unless there is a corresponding entry in the
    // secondary table. It should *always* be the case that
    // secondary_len >= weak_len.
    //
    // However, ObjectSpace::WeakMap#length (and therefore weak_len) is
    // unreliable: it overreports its true length by including non-live
    // objects. Those non-live objects are not yielded in iteration, so we
    // may have already deleted them from the secondary map in a previous
    // invocation of SecondaryMap_MaybeGC().
    //
    // In this case we can't measure any waste, so we just return.
    return;
  }
  size_t waste = secondary_len - weak_len;
  // GC if we could remove at least 2000 entries or 20% of the table size
  // (whichever is greater). Since the cost of the GC pass is O(N), we
  // want to make sure that we condition this on overall table size, to
  // avoid O(N^2) CPU costs.
  size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
  if (waste > threshold) {
    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2,
               secondary_map, weak_obj_cache);
  }
}

// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE SecondaryMap_Get(VALUE key, bool create) {
  PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
  VALUE ret = rb_hash_lookup(secondary_map, key);
  if (ret == Qnil && create) {
    SecondaryMap_MaybeGC();
    ret = rb_eval_string("Object.new");
    rb_hash_aset(secondary_map, key, ret);
  }
  return ret;
}

#endif  // USE_SECONDARY_MAP

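// Maps a C pointer to the Ruby value used as its cache key. The pointer is
// assumed to be at least 4-byte aligned (asserted below), so the low two bits
// carry no information and are shifted off; presumably this also helps the
// key stay within the immediate Fixnum range. On Ruby <2.7 the resulting
// Integer is then exchanged for a unique Object via the secondary map, since
// WeakMap there cannot use non-finalizable keys.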
// Requires: secondary_map_mutex is held by this thread iff create == true.
static VALUE ObjectCache_GetKey(const void* key, bool create) {
  intptr_t key_int = (intptr_t)key;
  PBRUBY_ASSERT((key_int & 3) == 0);
  VALUE ret = LL2NUM(key_int >> 2);
#if USE_SECONDARY_MAP
  ret = SecondaryMap_Get(ret, create);
#endif
  return ret;
}

// Public ObjectCache API.

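// Typical flow (illustrative): before wrapping a C object, call
// ObjectCache_Get(ptr); if it returns Qnil, create the Ruby wrapper and
// register it with ObjectCache_Add(ptr, wrapper), so that later lookups for
// the same pointer return the same VALUE.
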
VALUE weak_obj_cache = Qnil;
ID item_get;
ID item_set;

static void ObjectCache_Init() {
  rb_gc_register_address(&weak_obj_cache);
  VALUE klass = rb_eval_string("ObjectSpace::WeakMap");
  weak_obj_cache = rb_class_new_instance(0, NULL, klass);
  item_get = rb_intern("[]");
  item_set = rb_intern("[]=");
#if USE_SECONDARY_MAP
  SecondaryMap_Init();
#endif
}

void ObjectCache_Add(const void* key, VALUE val) {
  PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
#if USE_SECONDARY_MAP
  rb_mutex_lock(secondary_map_mutex);
#endif
  VALUE key_rb = ObjectCache_GetKey(key, true);
  rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
#if USE_SECONDARY_MAP
  rb_mutex_unlock(secondary_map_mutex);
#endif
  PBRUBY_ASSERT(ObjectCache_Get(key) == val);
}

// Returns the cached object for this key, if any. Otherwise returns Qnil.
VALUE ObjectCache_Get(const void* key) {
  VALUE key_rb = ObjectCache_GetKey(key, false);
  return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
}

/*
 * call-seq:
 *     Google::Protobuf.discard_unknown(msg)
 *
 * Discard unknown fields in the given message object and recursively discard
 * unknown fields in submessages.
 */
static VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb) {
  const upb_msgdef *m;
  upb_msg *msg = Message_GetMutable(msg_rb, &m);
  if (!upb_msg_discardunknown(msg, m, 128)) {  // 128 = max recursion depth.
    rb_raise(rb_eRuntimeError, "Messages nested too deeply.");
  }

  return Qnil;
}

/*
 * call-seq:
 *     Google::Protobuf.deep_copy(obj) => copy_of_obj
 *
 * Performs a deep copy of a RepeatedField instance, a Map instance, or a
 * message object, recursively copying its members.
 */
VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj) {
  VALUE klass = CLASS_OF(obj);
  if (klass == cRepeatedField) {
    return RepeatedField_deep_copy(obj);
  } else if (klass == cMap) {
    return Map_deep_copy(obj);
  } else {
    VALUE new_arena_rb = Arena_new();
    upb_arena *new_arena = Arena_get(new_arena_rb);
    const upb_msgdef *m;
    const upb_msg *msg = Message_Get(obj, &m);
    upb_msg* new_msg = Message_deep_copy(msg, m, new_arena);
    return Message_GetRubyWrapper(new_msg, m, new_arena_rb);
  }
}

// -----------------------------------------------------------------------------
// Initialization/entry point.
// -----------------------------------------------------------------------------

// This must be named "Init_protobuf_c" because the Ruby module is named
// "protobuf_c" -- the VM looks for this symbol in our .so.
__attribute__ ((visibility ("default")))
void Init_protobuf_c() {
  ObjectCache_Init();

  VALUE google = rb_define_module("Google");
  VALUE protobuf = rb_define_module_under(google, "Protobuf");

  Arena_register(protobuf);
  Defs_register(protobuf);
  RepeatedField_register(protobuf);
  Map_register(protobuf);
  Message_register(protobuf);

  cError = rb_const_get(protobuf, rb_intern("Error"));
  cTypeError = rb_const_get(protobuf, rb_intern("TypeError"));

  rb_define_singleton_method(protobuf, "discard_unknown",
                             Google_Protobuf_discard_unknown, 1);
  rb_define_singleton_method(protobuf, "deep_copy",
                             Google_Protobuf_deep_copy, 1);
}