Auto-generate files after cl/592345066
diff --git a/ruby/ext/google/protobuf_c/ruby-upb.h b/ruby/ext/google/protobuf_c/ruby-upb.h
index 5e3b140..d67f235 100755
--- a/ruby/ext/google/protobuf_c/ruby-upb.h
+++ b/ruby/ext/google/protobuf_c/ruby-upb.h
@@ -557,7 +557,6 @@
#include <stddef.h>
#include <stdint.h>
-#include <string.h>
#ifndef UPB_MEM_ALLOC_H_
@@ -630,17 +629,114 @@
#endif /* UPB_MEM_ALLOC_H_ */
+#ifndef UPB_MEM_INTERNAL_ARENA_H_
+#define UPB_MEM_INTERNAL_ARENA_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+// Must be last.
+
+// This is QUITE an ugly hack, which specifies the number of pointers needed
+// to equal (or exceed) the storage required for one upb_Arena.
+//
+// We need this because the decoder inlines a upb_Arena for performance but
+// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
+#define UPB_ARENA_SIZE_HACK 7
+
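+// A compile-time check in arena.c is expected to keep this constant honest;
+// sketched here with an illustrative name (upb_ArenaState stands in for the
+// private struct that only arena.c can see):
+//
+//   _Static_assert(UPB_ARENA_SIZE_HACK * sizeof(void*) >=
+//                      sizeof(upb_ArenaState),
+//                  "UPB_ARENA_SIZE_HACK too small");
+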
+// LINT.IfChange(upb_Arena)
+
+struct upb_Arena {
+ char* UPB_ONLYBITS(ptr);
+ char* UPB_ONLYBITS(end);
+};
+
+// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts:upb_Arena)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
+ const struct upb_Arena* src);
+void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
+ const struct upb_Arena* src);
+
+UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
+ return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
+}
+
+UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Malloc)(struct upb_Arena* a,
+ size_t size) {
+ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(struct upb_Arena* a, size_t size);
+
+ size = UPB_ALIGN_MALLOC(size);
+ const size_t span = size + UPB_ASAN_GUARD_SIZE;
+ if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
+ return UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
+ }
+
+ // We have enough space to do a fast malloc.
+ void* ret = a->UPB_ONLYBITS(ptr);
+ UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
+ UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
+ UPB_UNPOISON_MEMORY_REGION(ret, size);
+
+ a->UPB_ONLYBITS(ptr) += span;
+
+ return ret;
+}
+
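+// Worked example of the fast path in _upb_Arena_Malloc() above, assuming
+// UPB_MALLOC_ALIGN == 8 and UPB_ASAN_GUARD_SIZE == 0 (a non-ASAN build):
+// with ptr == 0x1000 and end == 0x1100, a 20-byte request rounds up to 24;
+// the caller receives 0x1000 and ptr advances to 0x1018.
+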
+UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Realloc)(struct upb_Arena* a, void* ptr,
+ size_t oldsize, size_t size) {
+ oldsize = UPB_ALIGN_MALLOC(oldsize);
+ size = UPB_ALIGN_MALLOC(size);
+ bool is_most_recent_alloc =
+ (uintptr_t)ptr + oldsize == (uintptr_t)a->UPB_ONLYBITS(ptr);
+
+ if (is_most_recent_alloc) {
+ ptrdiff_t diff = size - oldsize;
+ if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
+ a->UPB_ONLYBITS(ptr) += diff;
+ return ptr;
+ }
+ } else if (size <= oldsize) {
+ return ptr;
+ }
+
+ void* ret = UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
+
+ if (ret && oldsize > 0) {
+ memcpy(ret, ptr, UPB_MIN(oldsize, size));
+ }
+
+ return ret;
+}
+
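+// Usage sketch for the realloc fast path (sizes illustrative): growing the
+// most recent allocation just bumps the arena pointer with no copy; any
+// other grow falls through to a fresh malloc plus memcpy(), and the old
+// bytes are simply abandoned to the arena.
+//
+//   char* buf = (char*)UPB_PRIVATE(_upb_Arena_Malloc)(a, 16);
+//   buf = (char*)UPB_PRIVATE(_upb_Arena_Realloc)(a, buf, 16, 32);  // in place
+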
+UPB_INLINE void UPB_PRIVATE(_upb_Arena_ShrinkLast)(struct upb_Arena* a,
+ void* ptr, size_t oldsize,
+ size_t size) {
+ oldsize = UPB_ALIGN_MALLOC(oldsize);
+ size = UPB_ALIGN_MALLOC(size);
+ // Must be the last alloc.
+ UPB_ASSERT((char*)ptr + oldsize ==
+ a->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
+ UPB_ASSERT(size <= oldsize);
+ a->UPB_ONLYBITS(ptr) = (char*)ptr + size;
+}
+
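+// Usage sketch (fill() is an illustrative stand-in): over-allocate, then
+// return the unused tail of the most recent allocation to the arena.
+//
+//   char* buf = (char*)UPB_PRIVATE(_upb_Arena_Malloc)(a, 64);
+//   size_t used = fill(buf, 64);  // writes at most 64 bytes, returns count
+//   UPB_PRIVATE(_upb_Arena_ShrinkLast)(a, buf, 64, used);
+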
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
+
// Must be last.
typedef struct upb_Arena upb_Arena;
-// LINT.IfChange(struct_definition)
-typedef struct {
- char* UPB_ONLYBITS(ptr);
- char* UPB_ONLYBITS(end);
-} _upb_ArenaHead;
-// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -656,33 +752,20 @@
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
-void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size);
-
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
-UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(upb_Arena* a) {
- const _upb_ArenaHead* h = (_upb_ArenaHead*)a;
- return (size_t)(h->UPB_ONLYBITS(end) - h->UPB_ONLYBITS(ptr));
+UPB_API_INLINE upb_Arena* upb_Arena_New(void) {
+ return upb_Arena_Init(NULL, 0, &upb_alloc_global);
}
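+
+// Typical lifecycle sketch: one upb_Arena_Free() call releases everything
+// allocated from the arena (upb_Arena_Free is the existing public API; the
+// size 128 is arbitrary):
+//
+//   upb_Arena* a = upb_Arena_New();
+//   void* mem = upb_Arena_Malloc(a, 128);
+//   /* ... use mem ... */
+//   upb_Arena_Free(a);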
-UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
- size = UPB_ALIGN_MALLOC(size);
- const size_t span = size + UPB_ASAN_GUARD_SIZE;
- if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
- return UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
- }
+UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
+ return UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
+}
- // We have enough space to do a fast malloc.
- _upb_ArenaHead* h = (_upb_ArenaHead*)a;
- void* ret = h->UPB_ONLYBITS(ptr);
- UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
- UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
- UPB_UNPOISON_MEMORY_REGION(ret, size);
-
- h->UPB_ONLYBITS(ptr) += span;
-
- return ret;
+UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
+ size_t size) {
+ return UPB_PRIVATE(_upb_Arena_Realloc)(a, ptr, oldsize, size);
}
// Shrinks the last alloc from arena.
@@ -691,45 +774,7 @@
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
- _upb_ArenaHead* h = (_upb_ArenaHead*)a;
- oldsize = UPB_ALIGN_MALLOC(oldsize);
- size = UPB_ALIGN_MALLOC(size);
- // Must be the last alloc.
- UPB_ASSERT((char*)ptr + oldsize ==
- h->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
- UPB_ASSERT(size <= oldsize);
- h->UPB_ONLYBITS(ptr) = (char*)ptr + size;
-}
-
-UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
- size_t size) {
- _upb_ArenaHead* h = (_upb_ArenaHead*)a;
- oldsize = UPB_ALIGN_MALLOC(oldsize);
- size = UPB_ALIGN_MALLOC(size);
- bool is_most_recent_alloc =
- (uintptr_t)ptr + oldsize == (uintptr_t)h->UPB_ONLYBITS(ptr);
-
- if (is_most_recent_alloc) {
- ptrdiff_t diff = size - oldsize;
- if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
- h->UPB_ONLYBITS(ptr) += diff;
- return ptr;
- }
- } else if (size <= oldsize) {
- return ptr;
- }
-
- void* ret = upb_Arena_Malloc(a, size);
-
- if (ret && oldsize > 0) {
- memcpy(ret, ptr, UPB_MIN(oldsize, size));
- }
-
- return ret;
-}
-
-UPB_API_INLINE upb_Arena* upb_Arena_New(void) {
- return upb_Arena_Init(NULL, 0, &upb_alloc_global);
+ return UPB_PRIVATE(_upb_Arena_ShrinkLast)(a, ptr, oldsize, size);
}
#ifdef __cplusplus
@@ -11926,48 +11971,6 @@
#endif /* UPB_LEX_STRTOD_H_ */
-#ifndef UPB_MEM_INTERNAL_ARENA_H_
-#define UPB_MEM_INTERNAL_ARENA_H_
-
-
-// Must be last.
-
-typedef struct _upb_MemBlock _upb_MemBlock;
-
-// LINT.IfChange(struct_definition)
-struct upb_Arena {
- _upb_ArenaHead head;
-
- // upb_alloc* together with a low bit which signals if there is an initial
- // block.
- uintptr_t block_alloc;
-
- // When multiple arenas are fused together, each arena points to a parent
- // arena (root points to itself). The root tracks how many live arenas
- // reference it.
-
- // The low bit is tagged:
- // 0: pointer to parent
- // 1: count, left shifted by one
- UPB_ATOMIC(uintptr_t) parent_or_count;
-
- // All nodes that are fused together are in a singly-linked list.
- UPB_ATOMIC(upb_Arena*) next; // NULL at end of list.
-
- // The last element of the linked list. This is present only as an
- // optimization, so that we do not have to iterate over all members for every
- // fuse. Only significant for an arena root. In other cases it is ignored.
- UPB_ATOMIC(upb_Arena*) tail; // == self when no other list members.
-
- // Linked list of blocks to free/cleanup. Atomic only for the benefit of
- // upb_Arena_SpaceAllocated().
- UPB_ATOMIC(_upb_MemBlock*) blocks;
-};
-// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
-
-
-#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
-
#ifndef UPB_PORT_ATOMIC_H_
#define UPB_PORT_ATOMIC_H_
@@ -13183,7 +13186,10 @@
uint32_t end_group; // field number of END_GROUP tag, else DECODE_NOGROUP.
uint16_t options;
bool missing_required;
- upb_Arena arena;
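+ // Reserve raw storage big enough for the full (private) upb_Arena without
+ // exposing its layout here; see UPB_ARENA_SIZE_HACK.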
+ union {
+ upb_Arena arena;
+ void* foo[UPB_ARENA_SIZE_HACK];
+ };
upb_DecodeStatus status;
jmp_buf err;