Tag unused function arguments as ossl_unused
Reviewed-by: Tomas Mraz <tmraz@fedoraproject.org>
(Merged from https://github.com/openssl/openssl/pull/13577)
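
For context: ossl_unused is provided by <openssl/e_os2.h>, which is why the
header gains that include below. A minimal sketch of how such an attribute
macro is commonly defined (the exact e_os2.h definition may differ):

    /* Expand to GCC/Clang's "unused" attribute where available,
     * and to nothing on compilers without it. */
    #if defined(__GNUC__) || defined(__clang__)
    # define ossl_unused __attribute__((unused))
    #else
    # define ossl_unused
    #endif

Marking the lock parameter this way silences unused-parameter warnings on the
atomic code paths, where the argument exists only so every implementation of
CRYPTO_UP_REF/CRYPTO_DOWN_REF shares one signature.
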
diff --git a/include/internal/refcount.h b/include/internal/refcount.h
index 0c07c42..5899e8c 100644
--- a/include/internal/refcount.h
+++ b/include/internal/refcount.h
@@ -9,6 +9,8 @@
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
+# include <openssl/e_os2.h>
+
/* Used to check reference counts, mostly while doing perl5 stuff :-) */
# if defined(OPENSSL_NO_STDIO)
# if defined(REF_PRINT)
@@ -30,7 +32,8 @@
typedef _Atomic int CRYPTO_REF_COUNT;
-static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
+static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
return 1;
@@ -46,7 +49,8 @@
* to mutable members doesn't have to be serialized anymore, which would
* otherwise imply an acquire fence. Hence conditional acquire fence...
*/
-static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
+static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
if (*ret == 0)
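
The hunk above is truncated at the zero check; in the file the zero case is
followed by an acquire fence, exactly as the comment explains. A
self-contained sketch of the full C11 pattern (illustrative names, not the
file's exact code):

    #include <stdatomic.h>

    /* The increment, and the decrement itself, can be relaxed; only the
     * thread that drops the count to zero needs an acquire fence before
     * tearing the object down, to observe all earlier releases. */
    static inline int down_ref_sketch(_Atomic int *val, int *ret)
    {
        *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
        if (*ret == 0)
            atomic_thread_fence(memory_order_acquire);
        return 1;
    }
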
@@ -60,13 +64,14 @@
typedef int CRYPTO_REF_COUNT;
-static __inline__ int CRYPTO_UP_REF(int *val, int *ret, void *lock)
+static __inline__ int CRYPTO_UP_REF(int *val, int *ret, ossl_unused void *lock)
{
*ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
return 1;
}
-static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
+static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
if (*ret == 0)
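
The same conditional-fence pattern expressed with the GCC/Clang __atomic
builtins that this branch of the header targets (a sketch; the truncated hunk
above is assumed to end the same way):

    /* Relaxed decrement; acquire fence only on the final reference. */
    static __inline__ int down_ref_gcc_sketch(int *val, int *ret)
    {
        *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
        if (*ret == 0)
            __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return 1;
    }
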
@@ -77,13 +82,15 @@
# define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;
-static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
return 1;
}
-static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
return 1;
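
No fence appears in this branch because on x86 the Interlocked* intrinsics
are full-barrier read-modify-write operations. A minimal MSVC-flavored sketch
(note the intrinsic operates on long and returns the old value):

    #include <intrin.h>

    static __inline int down_ref_msvc_sketch(volatile long *val, long *ret)
    {
        /* Full barrier implied; the zero check needs no extra fence. */
        *ret = _InterlockedExchangeAdd(val, -1) - 1;
        return 1;
    }
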
@@ -101,13 +108,15 @@
# define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
# endif
-static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
return 1;
}
-static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
if (*ret == 0)
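
The _nf ("no fence") intrinsics used here carry no barrier at all, which is
why this branch defines _ARM_BARRIER_ISH above: in the file, the truncated
zero case is followed by an explicit data memory barrier. A sketch of the
assumed shape:

    /* Relaxed RMW via the no-fence intrinsic; an inner-shareable DMB
     * stands in for the acquire fence when the count hits zero. */
    static __inline int down_ref_arm_sketch(volatile int *val, int *ret)
    {
        *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
        if (*ret == 0)
            __dmb(_ARM_BARRIER_ISH);
        return 1;
    }
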
@@ -127,13 +136,15 @@
# endif
# endif
-static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd(val, 1) + 1;
return 1;
}
-static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
+static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
+ ossl_unused void *lock)
{
*ret = _InterlockedExchangeAdd(val, -1) - 1;
return 1;
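
Why keep the lock parameter at all? The no-atomics fallback at the bottom of
the header (outside this diff) is its one consumer; every other
implementation must match that signature, hence ossl_unused rather than
removal. A hedged sketch of such a fallback, assuming the public
CRYPTO_atomic_add() helper:

    /* Fallback when no compiler atomics are available: delegate to
     * CRYPTO_atomic_add(), which actually takes the lock that the
     * atomic implementations above ignore. */
    # define CRYPTO_UP_REF(val, ret, lock)   CRYPTO_atomic_add(val, 1, ret, lock)
    # define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)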