/*
 * Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# pragma once

# include <openssl/e_os2.h>
# include <openssl/trace.h>

# ifndef OPENSSL_DEV_NO_ATOMICS
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret,
                                ossl_unused void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to a shared structure other than its reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself
 * can be relaxed.  If the counter hits zero, the object is about to be
 * destroyed, and since that is its last use, the destructor's author may
 * reasonably assume that access to mutable members no longer has to be
 * serialized; making that assumption safe would otherwise require an
 * acquire fence.  Hence the conditional acquire fence below.
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

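/*
 * A minimal caller sketch (not part of this header; FOO, foo->references,
 * foo->lock and FOO_free() are hypothetical names).  The lock argument is
 * ignored by the atomic implementations but must be a valid CRYPTO_RWLOCK
 * for the lock-based fallback at the end of this file:
 *
 *     void FOO_free(FOO *foo)
 *     {
 *         int i;
 *
 *         if (foo == NULL)
 *             return;
 *         CRYPTO_DOWN_REF(&foo->references, &i, foo->lock);
 *         REF_PRINT_COUNT("FOO", foo);
 *         if (i > 0)
 *             return;
 *         REF_ASSERT_ISNT(i < 0);
 *         ... free foo's members, free foo->lock, then foo itself ...
 *     }
 */
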
#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(int *val, int *ret, ossl_unused void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret,
                                      ossl_unused void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}
#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd(val, -1) - 1;
    return 1;
}
#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef int CRYPTO_REF_COUNT;

#  define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
#  define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif
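
/*
 * In the fallback case, CRYPTO_atomic_add() (declared in <openssl/crypto.h>)
 * updates the counter under the given lock and returns the new value in
 * *ret, so the lock argument must then be a real CRYPTO_RWLOCK.  A sketch
 * of refcount initialisation that works with either implementation:
 *
 *     CRYPTO_REF_COUNT refs = 1;
 *     CRYPTO_RWLOCK *lock = CRYPTO_THREAD_lock_new();
 *     int count;
 *
 *     if (lock != NULL && CRYPTO_UP_REF(&refs, &count, lock))
 *         ... count is now 2 ...
 */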

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references, (void *)object)
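
/*
 * REF_PRINT_COUNT() traces the object's address, current count and a label
 * through the OSSL_TRACE API.  A sketch of enabling that output on a build
 * with tracing compiled in (the BIO setup is illustrative):
 *
 *     BIO *bio = BIO_new_fp(stderr, BIO_NOCLOSE);
 *
 *     OSSL_trace_set_channel(OSSL_TRACE_CATEGORY_REF_COUNT, bio);
 *     ... subsequent REF_PRINT_COUNT() calls now trace to stderr ...
 */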

#endif