/*
 * Copyright (c) 2009-2021, Google LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Google LLC nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "upb/mem/arena_internal.h"
#include "upb/port/atomic.h"

// Must be last.
#include "upb/port/def.inc"

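// The low bit of `cleanup_metadata` records whether the arena was seeded with
// a caller-provided initial block (such blocks cannot be lifetime extended,
// so they also preclude fusing; see upb_Arena_Fuse below). The remaining bits
// hold a pointer to the cleanup counter of the block currently accepting
// cleanups. The packing is valid because the counter's alignment guarantees
// the pointer's low bit is zero.
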
static uint32_t* upb_cleanup_pointer(uintptr_t cleanup_metadata) {
  return (uint32_t*)(cleanup_metadata & ~0x1);
}

static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
  return cleanup_metadata & 0x1;
}

static uintptr_t upb_cleanup_metadata(uint32_t* cleanup,
                                      bool has_initial_block) {
  return (uintptr_t)cleanup | has_initial_block;
}

struct _upb_MemBlock {
  struct _upb_MemBlock* next;
  uint32_t size;
  uint32_t cleanups;
  // Data follows.
};

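// Cleanup entries are stored at the tail of each memory block, growing down
// from the block's end toward the bump pointer; `_upb_MemBlock.cleanups`
// counts how many entries the block holds (see upb_Arena_AddCleanup and
// arena_dofree below).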
typedef struct cleanup_ent {
  upb_CleanupFunc* cleanup;
  void* ud;
} cleanup_ent;

static const size_t memblock_reserve =
    UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);

static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
  uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  while (_upb_Arena_IsTaggedPointer(poc)) {
    upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
    uintptr_t next_poc = upb_Atomic_LoadAcquire(&next->parent_or_count);

    if (_upb_Arena_IsTaggedPointer(next_poc)) {
      // To keep complexity down, we lazily collapse levels of the tree. This
      // keeps it flat in the final case, but doesn't cost much incrementally.
      //
      // Path splitting keeps time complexity down, see:
      //   https://en.wikipedia.org/wiki/Disjoint-set_data_structure
      //
      // We can safely use a relaxed atomic here because all threads doing this
      // will converge on the same value and we don't need memory orderings to
      // be visible.
      //
      // This is true because:
      // - If no fuses occur, this will eventually become the root.
      // - If fuses are actively occurring, the root may change, but the
      //   invariant is that `parent_or_count` merely points to *a* parent.
      //
      // In other words, it is moving towards "the" root, and that root may
      // move further away over time, but the path towards that root will
      // continue to be valid and the creation of the path carries all the
      // memory orderings required.
      upb_Atomic_StoreRelaxed(&a->parent_or_count, next_poc);
    }
    a = next;
    poc = next_poc;
  }
  return a;
}
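
// Illustrative sketch (not part of the library): the same path-splitting walk
// on a plain parent-pointer tree, without the atomics and pointer tagging used
// above. Each visited node is re-pointed at its grandparent, so a chain
// d -> c -> b -> a flattens toward the root as a side effect of the lookup:
//
//   typedef struct Node { struct Node* parent; } Node;  // NULL parent: root.
//
//   static Node* find_root(Node* n) {
//     while (n->parent) {
//       Node* next = n->parent;
//       if (next->parent) n->parent = next->parent;  // Path splitting.
//       n = next;
//     }
//     return n;
//   }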

size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
  arena = _upb_Arena_FindRoot(arena);
  size_t memsize = 0;

  _upb_MemBlock* block = arena->freelist;

  while (block) {
    memsize += sizeof(_upb_MemBlock) + block->size;
    block = block->next;
  }

  return memsize;
}
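
// Example (illustrative): callers can poll this to observe arena growth. The
// total counts bytes obtained from the underlying allocator, including block
// headers and unused tail space, not just bytes handed out:
//
//   upb_Arena* a = upb_Arena_New();      // Convenience malloc-backed ctor.
//   upb_Arena_Malloc(a, 1024);
//   size_t total = upb_Arena_SpaceAllocated(a);  // >= 1024.
//   upb_Arena_Free(a);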

uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
  // These loads could probably be relaxed, but given that this is debug-only,
  // it's not worth introducing a new variant for it.
  uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  while (_upb_Arena_IsTaggedPointer(poc)) {
    a = _upb_Arena_PointerFromTagged(poc);
    poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  }
  return _upb_Arena_RefCountFromTagged(poc);
}
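
// Example (illustrative): the refcount counts how many arena handles share
// one lifetime, so freshly created arenas report 1 and each fuse folds the
// counts together:
//
//   upb_Arena* a = upb_Arena_New();  // upb_Arena_DebugRefCount(a) == 1
//   upb_Arena* b = upb_Arena_New();
//   upb_Arena_Fuse(a, b);            // upb_Arena_DebugRefCount(a) == 2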

static void upb_Arena_addblock(upb_Arena* a, upb_Arena* root, void* ptr,
                               size_t size) {
  _upb_MemBlock* block = ptr;

  /* The block is for arena |a|, but should appear in the freelist of |root|. */
  block->next = root->freelist;
  block->size = (uint32_t)size;
  block->cleanups = 0;
  root->freelist = block;
  a->last_size = block->size;
  if (!root->freelist_tail) root->freelist_tail = block;

  a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
  a->head.end = UPB_PTR_AT(block, size, char);
  a->cleanup_metadata = upb_cleanup_metadata(
      &block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));

  UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}

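// Allocates a fresh block from the root's allocator. Growth is geometric:
// each new block is at least double the previous block's size, so the number
// of underlying malloc calls stays logarithmic in the total bytes requested.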
static bool upb_Arena_Allocblock(upb_Arena* a, size_t size) {
  upb_Arena* root = _upb_Arena_FindRoot(a);
  size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
  _upb_MemBlock* block = upb_malloc(root->block_alloc, block_size);

  if (!block) return false;
  upb_Arena_addblock(a, root, block, block_size);
  return true;
}

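// Out-of-line slow path behind the inline upb_Arena_Malloc() fast path: it is
// reached only when the current block cannot satisfy `size`. It allocates a
// block guaranteed to fit and then retries the ordinary bump allocation.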
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
  if (!upb_Arena_Allocblock(a, size)) return NULL; /* Out of memory. */
  UPB_ASSERT(_upb_ArenaHas(a) >= size);
  return upb_Arena_Malloc(a, size);
}

/* Public Arena API ***********************************************************/

static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
  const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
  upb_Arena* a;

  /* We need to malloc the initial block. */
  n = first_block_overhead + 256;
  if (!alloc || !(mem = upb_malloc(alloc, n))) {
    return NULL;
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
  n -= sizeof(*a);

  a->block_alloc = alloc;
  upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
  a->freelist = NULL;
  a->freelist_tail = NULL;
  a->cleanup_metadata = upb_cleanup_metadata(NULL, false);

  upb_Arena_addblock(a, a, mem, n);

  return a;
}

upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
  upb_Arena* a;

  if (n) {
    /* Align initial pointer up so that we return properly-aligned pointers. */
    void* aligned = (void*)UPB_ALIGN_UP((uintptr_t)mem, UPB_MALLOC_ALIGN);
    size_t delta = (uintptr_t)aligned - (uintptr_t)mem;
    n = delta <= n ? n - delta : 0;
    mem = aligned;
  }

  /* Round block size down to alignof(*a) since we will allocate the arena
   * itself at the end. */
  n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));

  if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
    return arena_initslow(mem, n, alloc);
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);

  a->block_alloc = alloc;
  upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
  a->last_size = UPB_MAX(128, n);
  a->head.ptr = mem;
  a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
  a->freelist = NULL;
  a->freelist_tail = NULL;
  a->cleanup_metadata = upb_cleanup_metadata(NULL, true);

  return a;
}
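
// Example (illustrative): seeding an arena with a stack buffer so that small
// workloads avoid the heap entirely. `upb_alloc_global` is upb's malloc-backed
// allocator, used here as the fallback once `buf` fills up. Note that arenas
// seeded this way cannot be fused (see upb_Arena_Fuse below):
//
//   char buf[512];
//   upb_Arena* a = upb_Arena_Init(buf, sizeof(buf), &upb_alloc_global);
//   void* p = upb_Arena_Malloc(a, 64);  // Served from `buf`.
//   upb_Arena_Free(a);  // Frees overflow blocks; `buf` itself is untouched.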

static void arena_dofree(upb_Arena* a) {
  _upb_MemBlock* block = a->freelist;
  UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);

  while (block) {
    /* Load first since we are deleting block. */
    _upb_MemBlock* next = block->next;

    if (block->cleanups > 0) {
      cleanup_ent* end = UPB_PTR_AT(block, block->size, void);
      cleanup_ent* ptr = end - block->cleanups;

      for (; ptr < end; ptr++) {
        ptr->cleanup(ptr->ud);
      }
    }

    upb_free(a->block_alloc, block);
    block = next;
  }
}

void upb_Arena_Free(upb_Arena* a) {
  uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
retry:
  while (_upb_Arena_IsTaggedPointer(poc)) {
    a = _upb_Arena_PointerFromTagged(poc);
    poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  }

  // compare_exchange and fetch_sub are RMW operations, which are more
  // expensive than direct loads. As an optimization, we only do RMW ops
  // when we need to update things for other threads to see.
  if (poc == _upb_Arena_TaggedFromRefcount(1)) {
    arena_dofree(a);
    return;
  }

  if (upb_Atomic_CompareExchangeStrongAcqRel(
          &a->parent_or_count, &poc,
          _upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) -
                                        1))) {
    // We were >1 and we decremented it successfully, so we are done.
    return;
  }

  // We failed our update, so someone else has changed the state; retry the
  // whole process, but note that the failed exchange reloaded `poc` for us.
  goto retry;
}

bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func) {
  cleanup_ent* ent;
  uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);

  if (!cleanups || _upb_ArenaHas(a) < sizeof(cleanup_ent)) {
    if (!upb_Arena_Allocblock(a, 128)) return false; /* Out of memory. */
    UPB_ASSERT(_upb_ArenaHas(a) >= sizeof(cleanup_ent));
    cleanups = upb_cleanup_pointer(a->cleanup_metadata);
  }

  a->head.end -= sizeof(cleanup_ent);
  ent = (cleanup_ent*)a->head.end;
  (*cleanups)++;
  UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));

  ent->cleanup = func;
  ent->ud = ud;

  return true;
}
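
// Example (illustrative): tying an externally allocated object's lifetime to
// the arena. `free` matches the `void (*)(void*)` shape of upb_CleanupFunc and
// is the right cleanup here because the pointer came from malloc:
//
//   char* scratch = malloc(64);
//   if (!scratch || !upb_Arena_AddCleanup(arena, scratch, free)) {
//     free(scratch);  // Registration failed; release manually.
//   }
//   // Otherwise free(scratch) runs automatically in arena_dofree().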

bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
  // SAFE IN THE PRESENCE OF FUSE/FREE RACES BUT NOT IN THE
  // PRESENCE OF FUSE/FUSE RACES!!!
  //
  // `parent_or_count` has two distinct modes:
  // - parent pointer mode
  // - refcount mode
  //
  // In parent pointer mode, it may change what pointer it refers to in the
  // tree, but it will always approach a root. Any operation that walks the
  // tree to the root may collapse levels of the tree concurrently.
  //
  // In refcount mode, any free operation may lower the refcount.
  //
  // Only a fuse operation may increase the refcount.
  // Only a fuse operation may switch `parent_or_count` from refcount mode to
  // parent pointer mode.
  //
  // Given that we do not allow fuse/fuse races, we may rely on the invariant
  // that only refcounts can change once we have found the root. Because the
  // threads doing the fuse must hold references, we can guarantee that no
  // refcounts will reach zero concurrently.
  upb_Arena* r1 = _upb_Arena_FindRoot(a1);
  upb_Arena* r2 = _upb_Arena_FindRoot(a2);

  if (r1 == r2) return true;  // Already fused.

  // Do not fuse initial blocks since we cannot lifetime extend them.
  if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
  if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;

  // Only allow fuse with a common allocator.
  if (r1->block_alloc != r2->block_alloc) return false;

  uintptr_t r1_poc = upb_Atomic_LoadAcquire(&r1->parent_or_count);
  uintptr_t r2_poc = upb_Atomic_LoadAcquire(&r2->parent_or_count);
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r1_poc));
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));

  // Keep the tree shallow by joining the smaller tree to the larger.
  if (_upb_Arena_RefCountFromTagged(r1_poc) <
      _upb_Arena_RefCountFromTagged(r2_poc)) {
    upb_Arena* tmp = r1;
    r1 = r2;
    r2 = tmp;

    uintptr_t tmp_poc = r1_poc;
    r1_poc = r2_poc;
    r2_poc = tmp_poc;
  }

  // r1 takes over r2's freelist; this must happen before we update refcounts,
  // since the refcount carries the memory dependencies.
  if (r2->freelist_tail) {
    UPB_ASSERT(r2->freelist_tail->next == NULL);
    r2->freelist_tail->next = r1->freelist;
    r1->freelist = r2->freelist;
  }

  // The moment we install `r1` as the parent for `r2`, all racing frees may
  // immediately begin decrementing `r1`'s refcount. So we must install all the
  // refcounts that we know about first to prevent a premature unref to zero.
  uint32_t r2_refcount = _upb_Arena_RefCountFromTagged(r2_poc);
  upb_Atomic_AddRelease(&r1->parent_or_count, ((uintptr_t)r2_refcount) << 1);
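  // (The shift by 1 converts a plain count into its tagged encoding: the low
  // bit of `parent_or_count` distinguishes pointer mode from refcount mode,
  // so the count itself lives in the bits above it; see the tagging helpers
  // in arena_internal.h.)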

  // When installing `r1` as the parent for `r2`, racing frees may have changed
  // the refcount for `r2`, so we need to capture the old value to fix up
  // `r1`'s refcount based on the delta from what we saw the first time.
  r2_poc = upb_Atomic_ExchangeAcqRel(&r2->parent_or_count,
                                     _upb_Arena_TaggedFromPointer(r1));
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
  uint32_t delta_refcount = r2_refcount - _upb_Arena_RefCountFromTagged(r2_poc);
  if (delta_refcount != 0) {
    upb_Atomic_SubRelease(&r1->parent_or_count, ((uintptr_t)delta_refcount)
                                                    << 1);
  }
  return true;
}
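
// Example (illustrative): fusing merges two arenas' lifetimes, which is useful
// when objects on one arena end up pointing at memory owned by the other:
//
//   upb_Arena* a = upb_Arena_New();
//   upb_Arena* b = upb_Arena_New();
//   if (upb_Arena_Fuse(a, b)) {
//     upb_Arena_Free(a);  // Memory is NOT reclaimed yet...
//     upb_Arena_Free(b);  // ...only now, when the last reference drops.
//   }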