/*
 * Copyright (c) 2009-2021, Google LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Google LLC nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "upb/mem/arena_internal.h"
#include "upb/port/atomic.h"

// Must be last.
#include "upb/port/def.inc"

static uint32_t* upb_cleanup_pointer(uintptr_t cleanup_metadata) {
  return (uint32_t*)(cleanup_metadata & ~0x1);
}

static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
  return cleanup_metadata & 0x1;
}

static uintptr_t upb_cleanup_metadata(uint32_t* cleanup,
                                      bool has_initial_block) {
  return (uintptr_t)cleanup | has_initial_block;
}

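// Illustrative note (not part of the library): `cleanup_metadata` packs a
// pointer and a flag into one word. Because a `uint32_t*` is at least 4-byte
// aligned, its low bit is always zero and can carry `has_initial_block`:
//
//   uint32_t counter;
//   uintptr_t md = upb_cleanup_metadata(&counter, true);
//   assert(upb_cleanup_pointer(md) == &counter);
//   assert(upb_cleanup_has_initial_block(md));
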
struct _upb_MemBlock {
  struct _upb_MemBlock* next;
  uint32_t size;
  uint32_t cleanups;
  // Data follows.
};

typedef struct cleanup_ent {
  upb_CleanupFunc* cleanup;
  void* ud;
} cleanup_ent;

static const size_t memblock_reserve =
    UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);

static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
  uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  while (_upb_Arena_IsTaggedPointer(poc)) {
    upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
    uintptr_t next_poc = upb_Atomic_LoadAcquire(&next->parent_or_count);

    if (_upb_Arena_IsTaggedPointer(next_poc)) {
      // To keep complexity down, we lazily collapse levels of the tree. This
      // keeps it flat in the final case, but doesn't cost much incrementally.
      //
      // Path splitting keeps time complexity down, see:
      //   https://en.wikipedia.org/wiki/Disjoint-set_data_structure
      //
      // We can safely use a relaxed atomic here because all threads doing this
      // will converge on the same value and we don't need any particular
      // memory ordering to be visible.
      //
      // This is true because:
      // - If no fuses occur, this will eventually become the root.
      // - If fuses are actively occurring, the root may change, but the
      //   invariant is that `parent_or_count` merely points to *a* parent.
      //
      // In other words, it is moving towards "the" root, and that root may
      // move further away over time, but the path towards that root will
      // continue to be valid and the creation of the path carries all the
      // memory orderings required.
      upb_Atomic_StoreRelaxed(&a->parent_or_count, next_poc);
    }
    a = next;
    poc = next_poc;
  }
  return a;
}

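// Illustrative sketch (not part of the library): path splitting in action.
// Suppose fuses have built the parent chain a -> b -> c -> root. As the walk
// above passes through each node, it re-points that node at its grandparent:
//
//   before:  a -> b -> c -> root
//   after:   a -> c,  b -> root,  c -> root
//
// Subsequent walks from `a` or `b` are therefore shorter, and repeated walks
// flatten the tree using only the relaxed store shown above.
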
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
  arena = _upb_Arena_FindRoot(arena);
  size_t memsize = 0;

  _upb_MemBlock* block = arena->freelist;

  while (block) {
    memsize += sizeof(_upb_MemBlock) + block->size;
    block = block->next;
  }

  return memsize;
}

uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
  // These loads could probably be relaxed, but given that this is debug-only,
  // it's not worth introducing a new variant for it.
  uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  while (_upb_Arena_IsTaggedPointer(poc)) {
    a = _upb_Arena_PointerFromTagged(poc);
    poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
  }
  return _upb_Arena_RefCountFromTagged(poc);
}

static void upb_Arena_addblock(upb_Arena* a, upb_Arena* root, void* ptr,
                               size_t size) {
  _upb_MemBlock* block = ptr;

  /* The block is for arena |a|, but should appear in the freelist of |root|. */
  block->next = root->freelist;
  block->size = (uint32_t)size;
  block->cleanups = 0;
  root->freelist = block;
  a->last_size = block->size;
  if (!root->freelist_tail) root->freelist_tail = block;

  a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
  a->head.end = UPB_PTR_AT(block, size, char);
  a->cleanup_metadata = upb_cleanup_metadata(
      &block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));

  UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}

static bool upb_Arena_Allocblock(upb_Arena* a, size_t size) {
  upb_Arena* root = _upb_Arena_FindRoot(a);
  size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
  _upb_MemBlock* block = upb_malloc(root->block_alloc, block_size);

  if (!block) return false;
  upb_Arena_addblock(a, root, block, block_size);
  return true;
}

void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
  if (!upb_Arena_Allocblock(a, size)) return NULL; /* Out of memory. */
  UPB_ASSERT(_upb_ArenaHas(a) >= size);
  return upb_Arena_Malloc(a, size);
}

/* Public Arena API ***********************************************************/

static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
  const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
  upb_Arena* a;

  /* We need to malloc the initial block. */
  n = first_block_overhead + 256;
  if (!alloc || !(mem = upb_malloc(alloc, n))) {
    return NULL;
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
  n -= sizeof(*a);

  a->block_alloc = alloc;
  upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
  a->freelist = NULL;
  a->freelist_tail = NULL;
  a->cleanup_metadata = upb_cleanup_metadata(NULL, false);

  upb_Arena_addblock(a, a, mem, n);

  return a;
}

upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
  upb_Arena* a;

  if (n) {
    /* Align initial pointer up so that we return properly-aligned pointers. */
    void* aligned = (void*)UPB_ALIGN_UP((uintptr_t)mem, UPB_MALLOC_ALIGN);
    size_t delta = (uintptr_t)aligned - (uintptr_t)mem;
    n = delta <= n ? n - delta : 0;
    mem = aligned;
  }

  /* Round block size down to alignof(*a) since we will allocate the arena
   * itself at the end. */
  n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));

  if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
    return arena_initslow(mem, n, alloc);
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);

  a->block_alloc = alloc;
  upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
  a->last_size = UPB_MAX(128, n);
  a->head.ptr = mem;
  a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
  a->freelist = NULL;
  a->freelist_tail = NULL;
  a->cleanup_metadata = upb_cleanup_metadata(NULL, true);

  return a;
}

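// Illustrative usage sketch (not part of this file): seeding an arena with a
// caller-provided initial block. The names `buf` and `p` are hypothetical.
//
//   char buf[4096];
//   upb_Arena* arena = upb_Arena_Init(buf, sizeof(buf), &upb_alloc_global);
//   void* p = upb_Arena_Malloc(arena, 128);  // Served from `buf` while it lasts.
//   upb_Arena_Free(arena);  // Frees any heap blocks; `buf` itself is untouched.
//
// Passing mem=NULL and n=0 falls below sizeof(upb_Arena) and takes the
// arena_initslow() path, which allocates the first block (and the upb_Arena
// struct at its tail) from `alloc`.
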
static void arena_dofree(upb_Arena* a) {
  _upb_MemBlock* block = a->freelist;
  UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);

  while (block) {
    /* Load first since we are deleting block. */
    _upb_MemBlock* next = block->next;

    if (block->cleanups > 0) {
      cleanup_ent* end = UPB_PTR_AT(block, block->size, void);
      cleanup_ent* ptr = end - block->cleanups;

      for (; ptr < end; ptr++) {
        ptr->cleanup(ptr->ud);
      }
    }

    upb_free(a->block_alloc, block);
    block = next;
  }
}

| 238 | void upb_Arena_Free(upb_Arena* a) { |
Joshua Haberman | d450990 | 2023-03-22 08:21:49 -0700 | [diff] [blame] | 239 | uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count); |
| 240 | retry: |
| 241 | while (_upb_Arena_IsTaggedPointer(poc)) { |
| 242 | a = _upb_Arena_PointerFromTagged(poc); |
| 243 | poc = upb_Atomic_LoadAcquire(&a->parent_or_count); |
| 244 | } |
| 245 | |
| 246 | // compare_exchange or fetch_sub are RMW operations, which are more |
| 247 | // expensive then direct loads. As an optimization, we only do RMW ops |
| 248 | // when we need to update things for other threads to see. |
| 249 | if (poc == _upb_Arena_TaggedFromRefcount(1)) { |
| 250 | arena_dofree(a); |
| 251 | return; |
| 252 | } |
| 253 | |
| 254 | if (upb_Atomic_CompareExchangeStrongAcqRel( |
| 255 | &a->parent_or_count, &poc, |
| 256 | _upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) - |
| 257 | 1))) { |
| 258 | // We were >1 and we decremented it successfully, so we are done. |
| 259 | return; |
| 260 | } |
| 261 | |
| 262 | // We failed our update, so someone has done something, retry the whole |
| 263 | // process, but the failed exchange reloaded `poc` for us. |
| 264 | goto retry; |
Protobuf Team Bot | e4635f2 | 2022-06-21 10:43:08 -0700 | [diff] [blame] | 265 | } |
| 266 | |
bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func) {
  cleanup_ent* ent;
  uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);

  if (!cleanups || _upb_ArenaHas(a) < sizeof(cleanup_ent)) {
    if (!upb_Arena_Allocblock(a, 128)) return false; /* Out of memory. */
    UPB_ASSERT(_upb_ArenaHas(a) >= sizeof(cleanup_ent));
    cleanups = upb_cleanup_pointer(a->cleanup_metadata);
  }

  a->head.end -= sizeof(cleanup_ent);
  ent = (cleanup_ent*)a->head.end;
  (*cleanups)++;
  UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));

  ent->cleanup = func;
  ent->ud = ud;

  return true;
}

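// Illustrative usage sketch (not part of this file): registering a destructor
// that runs when the last arena in the fused group is freed. `close_file` and
// `f` are hypothetical.
//
//   static void close_file(void* ud) { fclose((FILE*)ud); }
//   ...
//   FILE* f = fopen("data.bin", "rb");
//   if (f && !upb_Arena_AddCleanup(arena, f, &close_file)) {
//     fclose(f);  // Arena was out of memory; clean up manually.
//   }
//
// Cleanup entries are carved from the *end* of the current block, growing
// toward the bump pointer at the front, so the most recently added cleanup
// sits at the lowest address and runs first in arena_dofree().
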
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
  // SAFE IN THE PRESENCE OF FUSE/FREE RACES BUT NOT IN THE
  // PRESENCE OF FUSE/FUSE RACES!!!
  //
  // `parent_or_count` has two distinct modes:
  // - parent pointer mode
  // - refcount mode
  //
  // In parent pointer mode, it may change what pointer it refers to in the
  // tree, but it will always approach a root. Any operation that walks the
  // tree to the root may collapse levels of the tree concurrently.
  //
  // In refcount mode, any free operation may lower the refcount.
  //
  // Only a fuse operation may increase the refcount.
  // Only a fuse operation may switch `parent_or_count` from parent mode to
  // refcount mode.
  //
  // Given that we do not allow fuse/fuse races, we may rely on the invariant
  // that only refcounts can change once we have found the root. Because the
  // threads doing the fuse must hold references, we can guarantee that no
  // refcounts will reach zero concurrently.
  upb_Arena* r1 = _upb_Arena_FindRoot(a1);
  upb_Arena* r2 = _upb_Arena_FindRoot(a2);

  if (r1 == r2) return true;  // Already fused.

  // Do not fuse initial blocks since we cannot lifetime-extend them.
  if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
  if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;

  // Only allow fuse with a common allocator.
  if (r1->block_alloc != r2->block_alloc) return false;

  uintptr_t r1_poc = upb_Atomic_LoadAcquire(&r1->parent_or_count);
  uintptr_t r2_poc = upb_Atomic_LoadAcquire(&r2->parent_or_count);
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r1_poc));
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));

  // Keep the tree shallow by joining the smaller tree to the larger.
  if (_upb_Arena_RefCountFromTagged(r1_poc) <
      _upb_Arena_RefCountFromTagged(r2_poc)) {
    upb_Arena* tmp = r1;
    r1 = r2;
    r2 = tmp;

    uintptr_t tmp_poc = r1_poc;
    r1_poc = r2_poc;
    r2_poc = tmp_poc;
  }

  // r1 takes over r2's freelist. This must happen before we update refcounts,
  // since the refcount carries the memory dependencies.
  if (r2->freelist_tail) {
    UPB_ASSERT(r2->freelist_tail->next == NULL);
    r2->freelist_tail->next = r1->freelist;
    r1->freelist = r2->freelist;
  }

  // The moment we install `r1` as the parent for `r2`, all racing frees may
  // immediately begin decrementing `r1`'s refcount. So we must install all the
  // refcounts that we know about first to prevent a premature unref to zero.
  uint32_t r2_refcount = _upb_Arena_RefCountFromTagged(r2_poc);
  upb_Atomic_AddRelease(&r1->parent_or_count, ((uintptr_t)r2_refcount) << 1);

  // When installing `r1` as the parent for `r2`, racing frees may have changed
  // the refcount for `r2`, so we need to capture the old value to fix up
  // `r1`'s refcount based on the delta from what we saw the first time.
  r2_poc = upb_Atomic_ExchangeAcqRel(&r2->parent_or_count,
                                     _upb_Arena_TaggedFromPointer(r1));
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
  uint32_t delta_refcount = r2_refcount - _upb_Arena_RefCountFromTagged(r2_poc);
  if (delta_refcount != 0) {
    upb_Atomic_SubRelease(&r1->parent_or_count,
                          ((uintptr_t)delta_refcount) << 1);
  }
  return true;
}
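
// Illustrative usage sketch (not part of this file): fusing two arenas so that
// neither's memory is freed until both have been freed. Assumes upb's public
// upb_Arena_New() helper, which calls upb_Arena_Init(NULL, 0, ...) and so
// avoids the initial-block restriction above.
//
//   upb_Arena* a = upb_Arena_New();
//   upb_Arena* b = upb_Arena_New();
//   if (upb_Arena_Fuse(a, b)) {
//     upb_Arena_Free(a);  // Nothing is freed yet; b still holds a reference.
//     upb_Arena_Free(b);  // Now all blocks from both arenas are freed.
//   }
//
// Fuse fails (returns false) if either arena was created with a user-provided
// initial block, or if the two arenas use different upb_alloc allocators.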