gh-108337: Add pyatomic.h header (#108701)

This adds a new header that provides atomic operations on common data
types. The intention is that this will be exposed through Python.h,
although that is not the case yet. The only immediate use is in
the test file.

Co-authored-by: Sam Gross <colesbury@gmail.com>
This commit is contained in:
Victor Stinner 2023-08-31 23:41:18 +02:00 committed by GitHub
parent c6d56135e1
commit 2bd960b579
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 3049 additions and 4 deletions

View File

@ -833,6 +833,9 @@ Build Changes
:ref:`debug build <debug-build>`.
(Contributed by Victor Stinner in :gh:`108634`.)
* Building CPython now requires a compiler with support for the C11 atomic
library, GCC built-in atomic functions, or MSVC interlocked intrinsics.
C API Changes
=============

506
Include/cpython/pyatomic.h Normal file
View File

@ -0,0 +1,506 @@
// This header provides cross-platform low-level atomic operations
// similar to C11 atomics.
//
// Operations are sequentially consistent unless they have a suffix indicating
// otherwise. If in doubt, prefer the sequentially consistent operations.
//
// The "_relaxed" suffix for load and store operations indicates the "relaxed"
// memory order. They don't provide synchronization, but (roughly speaking)
// guarantee somewhat sane behavior for races instead of undefined behavior.
// In practice, they correspond to "normal" hardware load and store
// instructions, so they are almost as inexpensive as plain loads and stores
// in C.
//
// Note that atomic read-modify-write operations like _Py_atomic_add_* return
// the previous value of the atomic variable, not the new value.
//
// See https://en.cppreference.com/w/c/atomic for more information on C11
// atomics.
// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
// "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage
// of relaxed atomics.
//
// Functions with pseudo Python code:
//
// def _Py_atomic_load(obj):
// return obj # sequential consistency
//
// def _Py_atomic_load_relaxed(obj):
// return obj # relaxed consistency
//
// def _Py_atomic_store(obj, value):
// obj = value # sequential consistency
//
// def _Py_atomic_store_relaxed(obj, value):
// obj = value # relaxed consistency
//
// def _Py_atomic_exchange(obj, value):
// # sequential consistency
// old_obj = obj
// obj = value
// return old_obj
//
// def _Py_atomic_compare_exchange(obj, expected, desired):
// # sequential consistency
// if obj == expected:
// obj = desired
// return True
// else:
// expected = obj
// return False
//
// def _Py_atomic_add(obj, value):
// # sequential consistency
// old_obj = obj
// obj += value
// return old_obj
//
// def _Py_atomic_and(obj, value):
// # sequential consistency
// old_obj = obj
// obj &= value
// return old_obj
//
// def _Py_atomic_or(obj, value):
// # sequential consistency
// old_obj = obj
// obj |= value
// return old_obj
//
// Other functions:
//
// def _Py_atomic_load_ptr_acquire(obj):
// return obj # acquire
//
// def _Py_atomic_store_ptr_release(obj, value):
// obj = value # release
//
// def _Py_atomic_fence_seq_cst():
// # sequential consistency
// ...
//
// def _Py_atomic_fence_release():
// # release
// ...
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
// NOTE(review): these declarations use int8_t..uint64_t, intptr_t and
// Py_ssize_t; the header appears to assume <stdint.h> and the Python
// configuration headers are already in scope via the including file —
// TODO confirm the Python.h include ordering.
// --- _Py_atomic_add --------------------------------------------------------
// Atomically adds `value` to `obj` and returns the previous value
// (fetch-then-add; the returned value does NOT include `value`).
static inline int
_Py_atomic_add_int(int *obj, int value);
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value);
// --- _Py_atomic_compare_exchange -------------------------------------------
// Performs an atomic compare-and-exchange.
//
// - If `*obj` and `*expected` are equal, store `desired` into `*obj`
// and return 1 (success).
// - Otherwise, store the `*obj` current value into `*expected`
// and return 0 (failure).
//
// These correspond to the C11 atomic_compare_exchange_strong() function.
// Being the "strong" variant, they do not fail spuriously: a 0 return
// means the values genuinely differed (and `*expected` was updated).
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired);
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired);
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired);
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired);
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired);
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired);
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired);
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired);
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired);
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired);
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired);
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired);
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired);
// NOTE: `obj` and `expected` are logically `void**` types, but we use `void*`
// so that we can pass types like `PyObject**` without a cast.
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value);
// --- _Py_atomic_exchange ---------------------------------------------------
// Atomically replaces `*obj` with `value` and returns the previous value of `*obj`.
static inline int
_Py_atomic_exchange_int(int *obj, int value);
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value);
// NOTE: as with compare_exchange_ptr, `obj` is logically `void**` but is
// taken as `void*` so callers can pass e.g. `PyObject**` without a cast.
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value);
// --- _Py_atomic_and --------------------------------------------------------
// Performs `*obj &= value` atomically and returns the previous value of `*obj`.
// Only unsigned variants are provided (bitwise ops on signed types are
// rarely wanted and would be less portable).
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value);
// --- _Py_atomic_or ---------------------------------------------------------
// Performs `*obj |= value` atomically and returns the previous value of `*obj`.
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value);
// --- _Py_atomic_load -------------------------------------------------------
// Atomically loads `*obj` (sequential consistency)
static inline int
_Py_atomic_load_int(const int *obj);
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj);
// NOTE: `obj` is logically `void * const *` (a pointer to the pointer being
// loaded) but is taken as `const void *` to avoid casts at call sites.
static inline void *
_Py_atomic_load_ptr(const void *obj);
// --- _Py_atomic_load_relaxed -----------------------------------------------
// Loads `*obj` (relaxed consistency, i.e., no ordering)
static inline int
_Py_atomic_load_int_relaxed(const int *obj);
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj);
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj);
// --- _Py_atomic_store ------------------------------------------------------
// Atomically performs `*obj = value` (sequential consistency)
static inline void
_Py_atomic_store_int(int *obj, int value);
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value);
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value);
// NOTE: `obj` is logically `void **` but is taken as `void *` to avoid
// casts at call sites (e.g. when passing `PyObject **`).
static inline void
_Py_atomic_store_ptr(void *obj, void *value);
static inline void
_Py_atomic_store_ssize(Py_ssize_t* obj, Py_ssize_t value);
// --- _Py_atomic_store_relaxed ----------------------------------------------
// Stores `*obj = value` (relaxed consistency, i.e., no ordering)
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value);
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t* obj, uint8_t value);
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value);
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value);
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value);
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
// This acquire/release pair is the usual idiom for publishing a pointer:
// the release store makes prior writes visible to a thread that observes
// the pointer via the acquire load.
// Loads `*obj` (acquire operation)
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj);
// Stores `*obj = value` (release operation)
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value);
// --- _Py_atomic_fence ------------------------------------------------------
// Sequential consistency fence. C11 fences have complex semantics. When
// possible, use the atomic operations on variables defined above, which
// generally do not require explicit use of a fence.
// See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence
static inline void _Py_atomic_fence_seq_cst(void);
// Release fence
static inline void _Py_atomic_fence_release(void);
// Implementation selection: prefer GCC/Clang __atomic builtins, then C11
// <stdatomic.h>, then MSVC interlocked intrinsics. The including build may
// pre-define _Py_USE_GCC_BUILTIN_ATOMICS to force or suppress the first.
#ifndef _Py_USE_GCC_BUILTIN_ATOMICS
# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# elif defined(__clang__)
# if __has_builtin(__atomic_load)
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# endif
# endif
#endif
// An undefined _Py_USE_GCC_BUILTIN_ATOMICS evaluates as 0 in #if, so the
// chain falls through to the C11 and MSVC branches below. Each Py_ATOMIC_*_H
// macro is a handshake proving the sub-header was reached via this file.
#if _Py_USE_GCC_BUILTIN_ATOMICS
# define Py_ATOMIC_GCC_H
# include "cpython/pyatomic_gcc.h"
# undef Py_ATOMIC_GCC_H
#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
# define Py_ATOMIC_STD_H
# include "cpython/pyatomic_std.h"
# undef Py_ATOMIC_STD_H
#elif defined(_MSC_VER)
# define Py_ATOMIC_MSC_H
# include "cpython/pyatomic_msc.h"
# undef Py_ATOMIC_MSC_H
#else
# error "no available pyatomic implementation for this platform/compiler"
#endif
#endif /* Py_ATOMIC_H */

View File

@ -0,0 +1,499 @@
// This is the implementation of Python atomic operations using GCC's built-in
// functions that match the C11 memory model. This implementation is preferred
// for GCC compatible compilers, such as Clang. These functions are available
// in GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11.
#ifndef Py_ATOMIC_GCC_H
# error "this header file must not be included directly"
#endif
// --- _Py_atomic_add --------------------------------------------------------
// __atomic_fetch_add returns the value held *before* the addition, matching
// the fetch-then-add contract documented in pyatomic.h.
static inline int
_Py_atomic_add_int(int *obj, int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_compare_exchange -------------------------------------------
// The `0` argument selects the strong (non-spuriously-failing) variant.
// Both the success and failure memory orders are seq_cst; on failure the
// builtin writes the current value of *obj back into *expected.
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
// `obj`/`expected` arrive as `void *` for caller convenience; they are
// re-interpreted here as the `void **` they logically are.
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{ return __atomic_compare_exchange_n((void **)obj, (void **)expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_exchange ---------------------------------------------------
// __atomic_exchange_n stores `value` and returns the previous contents in a
// single atomic read-modify-write.
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
// `obj` is logically `void **`; see the note in pyatomic.h.
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value)
{ return __atomic_exchange_n((void **)obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_and --------------------------------------------------------
// Each helper performs `*obj &= value` as one atomic read-modify-write with
// sequentially consistent ordering, returning the value `*obj` held before
// the operation.

static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{
    return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{
    return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{
    return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{
    return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST);
}

static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{
    return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST);
}
// --- _Py_atomic_or ---------------------------------------------------------
// Each helper performs `*obj |= value` as one atomic read-modify-write with
// sequentially consistent ordering, returning the value `*obj` held before
// the operation.

static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{
    return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{
    return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{
    return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST);
}

static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{
    return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST);
}

static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{
    return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST);
}
// --- _Py_atomic_load -------------------------------------------------------
// Sequentially consistent loads; `const` on the argument is honored because
// __atomic_load_n only reads through the pointer.
static inline int
_Py_atomic_load_int(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
// Sequentially consistent load of a pointer. `obj` is logically
// `void * const *` but is declared `const void *` so callers can pass e.g.
// `PyObject **` without a cast. Casting to `void * const *` (rather than
// `void **`) keeps the pointee const-qualified, so the cast does not strip
// the const promised by the parameter.
static inline void *
_Py_atomic_load_ptr(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_load_relaxed -----------------------------------------------
// Relaxed loads: atomic (no torn reads) but impose no ordering on other
// memory operations.
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
// Relaxed load of a pointer; `obj` is logically `void * const *` (see
// _Py_atomic_load_ptr). The original cast to `const void **` made the load
// yield `const void *` and then cast the const away on the result; casting
// to `void * const *` instead keeps the pointee const and avoids the
// const-discarding conversion, matching the seq_cst variant.
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_RELAXED); }
// --- _Py_atomic_store ------------------------------------------------------
// Sequentially consistent stores.
static inline void
_Py_atomic_store_int(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
// `obj` is logically `void **`; see the note in pyatomic.h.
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_store_relaxed ----------------------------------------------
// Relaxed stores: atomic (no torn writes) but impose no ordering on other
// memory operations.
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
// `obj` is logically `void **`; see the note in pyatomic.h.
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
// Acquire/release pair for pointer publication: writes made before the
// release store are visible to a thread that observes the pointer via the
// acquire load.

// Loads `*obj` (acquire). `obj` is logically `void * const *` but declared
// `const void *` for caller convenience; casting to `void * const *`
// (instead of `void **`) keeps the pointee const-qualified, so no const is
// cast away.
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_ACQUIRE); }

// Stores `*obj = value` (release). `obj` is logically `void **`.
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
// --- _Py_atomic_fence ------------------------------------------------------
// Standalone memory fences. Prefer the per-variable atomic operations above;
// fences are only needed for ordering that cannot be attached to a single
// atomic access.

// Full (sequentially consistent) fence.
static inline void
_Py_atomic_fence_seq_cst(void)
{
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
}

// Release fence: orders prior writes before subsequent atomic stores.
static inline void
_Py_atomic_fence_release(void)
{
    __atomic_thread_fence(__ATOMIC_RELEASE);
}

View File

@ -0,0 +1,944 @@
// This is the implementation of Python atomic operations for MSVC if the
// compiler does not support C11 or C++11 atomics.
//
// MSVC intrinsics are defined on char, short, long, __int64, and pointer
// types. Note that long and int are both 32-bits even on 64-bit Windows,
// so operations on int are cast to long.
//
// The volatile keyword has additional memory ordering semantics on MSVC. On
// x86 and x86-64, volatile accesses have acquire-release semantics. On ARM64,
// volatile accesses behave like C11's memory_order_relaxed.
#ifndef Py_ATOMIC_MSC_H
# error "this header file must not be included directly"
#endif
#include <intrin.h>
#define _Py_atomic_ASSERT_ARG_TYPE(TYPE) \
Py_BUILD_ASSERT(sizeof(*obj) == sizeof(TYPE))
// --- _Py_atomic_add --------------------------------------------------------
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(char);
return (int8_t)_InterlockedExchangeAdd8((volatile char *)obj, (char)value);
}
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(short);
return (int16_t)_InterlockedExchangeAdd16((volatile short *)obj, (short)value);
}
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(long);
return (int32_t)_InterlockedExchangeAdd((volatile long *)obj, (long)value);
}
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{
#if defined(_M_X64) || defined(_M_ARM64)
    _Py_atomic_ASSERT_ARG_TYPE(__int64);
    return (int64_t)_InterlockedExchangeAdd64((volatile __int64 *)obj, (__int64)value);
#else
    // 32-bit x86: there is no 64-bit interlocked-add intrinsic, so emulate
    // fetch-add with a compare-exchange loop.  On CAS failure,
    // _Py_atomic_compare_exchange_int64 refreshes old_value with the
    // observed contents, so each retry recomputes the sum from fresh data.
    int64_t old_value = _Py_atomic_load_int64_relaxed(obj);
    for (;;) {
        int64_t new_value = old_value + value;
        if (_Py_atomic_compare_exchange_int64(obj, &old_value, new_value)) {
            return old_value;
        }
    }
#endif
}
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{
return (uint8_t)_Py_atomic_add_int8((int8_t *)obj, (int8_t)value);
}
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{
return (uint16_t)_Py_atomic_add_int16((int16_t *)obj, (int16_t)value);
}
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{
return (uint32_t)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value);
}
static inline int
_Py_atomic_add_int(int *obj, int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return (int)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value);
}
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return (unsigned int)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value);
}
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{
return (uint64_t)_Py_atomic_add_int64((int64_t *)obj, (int64_t)value);
}
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{
#if SIZEOF_VOID_P == 8
_Py_atomic_ASSERT_ARG_TYPE(int64_t);
return (intptr_t)_Py_atomic_add_int64((int64_t *)obj, (int64_t)value);
#else
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return (intptr_t)_Py_atomic_add_int32((int32_t *)obj, (int32_t)value);
#endif
}
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(intptr_t);
return (uintptr_t)_Py_atomic_add_intptr((intptr_t *)obj, (intptr_t)value);
}
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(intptr_t);
return (Py_ssize_t)_Py_atomic_add_intptr((intptr_t *)obj, (intptr_t)value);
}
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(char);
int8_t initial = (int8_t)_InterlockedCompareExchange8(
(volatile char *)obj,
(char)value,
(char)*expected);
if (initial == *expected) {
return 1;
}
*expected = initial;
return 0;
}
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(short);
int16_t initial = (int16_t)_InterlockedCompareExchange16(
(volatile short *)obj,
(short)value,
(short)*expected);
if (initial == *expected) {
return 1;
}
*expected = initial;
return 0;
}
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(long);
int32_t initial = (int32_t)_InterlockedCompareExchange(
(volatile long *)obj,
(long)value,
(long)*expected);
if (initial == *expected) {
return 1;
}
*expected = initial;
return 0;
}
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(__int64);
int64_t initial = (int64_t)_InterlockedCompareExchange64(
(volatile __int64 *)obj,
(__int64)value,
(__int64)*expected);
if (initial == *expected) {
return 1;
}
*expected = initial;
return 0;
}
// Strong compare-exchange on a pointer.  'obj' and 'expected' are typed
// void* (rather than void**) so callers can pass pointers to any pointer
// type without a cast.  Returns 1 on success; on failure returns 0 and
// writes the observed value back into *expected, matching the C11
// atomic_compare_exchange_strong contract.
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value)
{
    void *initial = _InterlockedCompareExchangePointer(
        (void**)obj,
        value,
        *(void**)expected);
    if (initial == *(void**)expected) {
        return 1;
    }
    *(void**)expected = initial;
    return 0;
}
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t value)
{
return _Py_atomic_compare_exchange_int8((int8_t *)obj,
(int8_t *)expected,
(int8_t)value);
}
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t value)
{
return _Py_atomic_compare_exchange_int16((int16_t *)obj,
(int16_t *)expected,
(int16_t)value);
}
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t value)
{
return _Py_atomic_compare_exchange_int32((int32_t *)obj,
(int32_t *)expected,
(int32_t)value);
}
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return _Py_atomic_compare_exchange_int32((int32_t *)obj,
(int32_t *)expected,
(int32_t)value);
}
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return _Py_atomic_compare_exchange_int32((int32_t *)obj,
(int32_t *)expected,
(int32_t)value);
}
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t value)
{
return _Py_atomic_compare_exchange_int64((int64_t *)obj,
(int64_t *)expected,
(int64_t)value);
}
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return _Py_atomic_compare_exchange_ptr((void**)obj,
(void**)expected,
(void*)value);
}
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return _Py_atomic_compare_exchange_ptr((void**)obj,
(void**)expected,
(void*)value);
}
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return _Py_atomic_compare_exchange_ptr((void**)obj,
(void**)expected,
(void*)value);
}
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(char);
return (int8_t)_InterlockedExchange8((volatile char *)obj, (char)value);
}
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(short);
return (int16_t)_InterlockedExchange16((volatile short *)obj, (short)value);
}
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(long);
return (int32_t)_InterlockedExchange((volatile long *)obj, (long)value);
}
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{
#if defined(_M_X64) || defined(_M_ARM64)
    _Py_atomic_ASSERT_ARG_TYPE(__int64);
    return (int64_t)_InterlockedExchange64((volatile __int64 *)obj, (__int64)value);
#else
    // 32-bit x86: emulate a 64-bit exchange with a compare-exchange loop.
    // On CAS failure, old_value is refreshed with the observed contents,
    // so the loop simply retries until the swap lands.
    int64_t old_value = _Py_atomic_load_int64_relaxed(obj);
    for (;;) {
        if (_Py_atomic_compare_exchange_int64(obj, &old_value, value)) {
            return old_value;
        }
    }
#endif
}
static inline void*
_Py_atomic_exchange_ptr(void *obj, void *value)
{
return (void*)_InterlockedExchangePointer((void * volatile *)obj, (void *)value);
}
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{
return (uint8_t)_Py_atomic_exchange_int8((int8_t *)obj,
(int8_t)value);
}
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{
return (uint16_t)_Py_atomic_exchange_int16((int16_t *)obj,
(int16_t)value);
}
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{
return (uint32_t)_Py_atomic_exchange_int32((int32_t *)obj,
(int32_t)value);
}
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return (int)_Py_atomic_exchange_int32((int32_t *)obj,
(int32_t)value);
}
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{
_Py_atomic_ASSERT_ARG_TYPE(int32_t);
return (unsigned int)_Py_atomic_exchange_int32((int32_t *)obj,
(int32_t)value);
}
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{
return (uint64_t)_Py_atomic_exchange_int64((int64_t *)obj,
(int64_t)value);
}
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (intptr_t)_Py_atomic_exchange_ptr((void**)obj,
(void*)value);
}
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (uintptr_t)_Py_atomic_exchange_ptr((void**)obj,
(void*)value);
}
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (Py_ssize_t)_Py_atomic_exchange_ptr((void**)obj,
(void*)value);
}
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(char);
return (uint8_t)_InterlockedAnd8((volatile char *)obj, (char)value);
}
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(short);
return (uint16_t)_InterlockedAnd16((volatile short *)obj, (short)value);
}
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(long);
return (uint32_t)_InterlockedAnd((volatile long *)obj, (long)value);
}
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{
#if defined(_M_X64) || defined(_M_ARM64)
    _Py_atomic_ASSERT_ARG_TYPE(__int64);
    return (uint64_t)_InterlockedAnd64((volatile __int64 *)obj, (__int64)value);
#else
    // 32-bit x86: emulate fetch-AND with a compare-exchange loop.  On CAS
    // failure, old_value is refreshed with the observed contents, so the
    // masked value is recomputed on every retry.
    uint64_t old_value = _Py_atomic_load_uint64_relaxed(obj);
    for (;;) {
        uint64_t new_value = old_value & value;
        if (_Py_atomic_compare_exchange_uint64(obj, &old_value, new_value)) {
            return old_value;
        }
    }
#endif
}
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{
#if SIZEOF_VOID_P == 8
_Py_atomic_ASSERT_ARG_TYPE(uint64_t);
return (uintptr_t)_Py_atomic_and_uint64((uint64_t *)obj,
(uint64_t)value);
#else
_Py_atomic_ASSERT_ARG_TYPE(uint32_t);
return (uintptr_t)_Py_atomic_and_uint32((uint32_t *)obj,
(uint32_t)value);
#endif
}
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(char);
return (uint8_t)_InterlockedOr8((volatile char *)obj, (char)value);
}
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(short);
return (uint16_t)_InterlockedOr16((volatile short *)obj, (short)value);
}
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{
_Py_atomic_ASSERT_ARG_TYPE(long);
return (uint32_t)_InterlockedOr((volatile long *)obj, (long)value);
}
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{
#if defined(_M_X64) || defined(_M_ARM64)
    _Py_atomic_ASSERT_ARG_TYPE(__int64);
    return (uint64_t)_InterlockedOr64((volatile __int64 *)obj, (__int64)value);
#else
    // 32-bit x86: emulate fetch-OR with a compare-exchange loop.  On CAS
    // failure, old_value is refreshed with the observed contents, so the
    // OR-ed value is recomputed on every retry.
    uint64_t old_value = _Py_atomic_load_uint64_relaxed(obj);
    for (;;) {
        uint64_t new_value = old_value | value;
        if (_Py_atomic_compare_exchange_uint64(obj, &old_value, new_value)) {
            return old_value;
        }
    }
#endif
}
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{
#if SIZEOF_VOID_P == 8
_Py_atomic_ASSERT_ARG_TYPE(uint64_t);
return (uintptr_t)_Py_atomic_or_uint64((uint64_t *)obj,
(uint64_t)value);
#else
_Py_atomic_ASSERT_ARG_TYPE(uint32_t);
return (uintptr_t)_Py_atomic_or_uint32((uint32_t *)obj,
(uint32_t)value);
#endif
}
// --- _Py_atomic_load -------------------------------------------------------
// Sequentially consistent loads.  On x86/x86-64, an MSVC volatile load has
// acquire semantics (see the header comment above), which is sufficient
// for a seq_cst load given the architecture's strong memory model (the
// seq_cst ordering cost is paid on the store side).  On ARM64, the
// __ldar* intrinsics emit load-acquire instructions.
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
    return *(volatile uint8_t *)obj;
#elif defined(_M_ARM64)
    return (uint8_t)__ldar8((unsigned __int8 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_uint8"
#endif
}
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
    return *(volatile uint16_t *)obj;
#elif defined(_M_ARM64)
    return (uint16_t)__ldar16((unsigned __int16 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_uint16"
#endif
}
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
    return *(volatile uint32_t *)obj;
#elif defined(_M_ARM64)
    return (uint32_t)__ldar32((unsigned __int32 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_uint32"
#endif
}
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
    return *(volatile uint64_t *)obj;
#elif defined(_M_ARM64)
    return (uint64_t)__ldar64((unsigned __int64 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_uint64"
#endif
}
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{
return (int8_t)_Py_atomic_load_uint8((const uint8_t *)obj);
}
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{
return (int16_t)_Py_atomic_load_uint16((const uint16_t *)obj);
}
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{
return (int32_t)_Py_atomic_load_uint32((const uint32_t *)obj);
}
static inline int
_Py_atomic_load_int(const int *obj)
{
_Py_atomic_ASSERT_ARG_TYPE(uint32_t);
return (int)_Py_atomic_load_uint32((uint32_t *)obj);
}
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{
_Py_atomic_ASSERT_ARG_TYPE(uint32_t);
return (unsigned int)_Py_atomic_load_uint32((uint32_t *)obj);
}
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{
return (int64_t)_Py_atomic_load_uint64((const uint64_t *)obj);
}
static inline void*
_Py_atomic_load_ptr(const void *obj)
{
#if SIZEOF_VOID_P == 8
return (void*)_Py_atomic_load_uint64((const uint64_t *)obj);
#else
return (void*)_Py_atomic_load_uint32((const uint32_t *)obj);
#endif
}
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (intptr_t)_Py_atomic_load_ptr((void*)obj);
}
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (uintptr_t)_Py_atomic_load_ptr((void*)obj);
}
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{
_Py_atomic_ASSERT_ARG_TYPE(void*);
return (Py_ssize_t)_Py_atomic_load_ptr((void*)obj);
}
// --- _Py_atomic_load_relaxed -----------------------------------------------
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{
return *(volatile int *)obj;
}
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{
return *(volatile int8_t *)obj;
}
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{
return *(volatile int16_t *)obj;
}
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{
return *(volatile int32_t *)obj;
}
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{
return *(volatile int64_t *)obj;
}
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{
return *(volatile intptr_t *)obj;
}
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{
return *(volatile uint8_t *)obj;
}
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{
return *(volatile uint16_t *)obj;
}
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{
return *(volatile uint32_t *)obj;
}
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{
return *(volatile uint64_t *)obj;
}
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{
return *(volatile uintptr_t *)obj;
}
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{
return *(volatile unsigned int *)obj;
}
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{
return *(volatile Py_ssize_t *)obj;
}
static inline void*
_Py_atomic_load_ptr_relaxed(const void *obj)
{
return *(void * volatile *)obj;
}
// --- _Py_atomic_store ------------------------------------------------------
static inline void
_Py_atomic_store_int(int *obj, int value)
{
(void)_Py_atomic_exchange_int(obj, value);
}
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{
(void)_Py_atomic_exchange_int8(obj, value);
}
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{
(void)_Py_atomic_exchange_int16(obj, value);
}
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{
(void)_Py_atomic_exchange_int32(obj, value);
}
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{
(void)_Py_atomic_exchange_int64(obj, value);
}
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{
(void)_Py_atomic_exchange_intptr(obj, value);
}
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{
(void)_Py_atomic_exchange_uint8(obj, value);
}
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{
(void)_Py_atomic_exchange_uint16(obj, value);
}
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{
(void)_Py_atomic_exchange_uint32(obj, value);
}
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{
(void)_Py_atomic_exchange_uint64(obj, value);
}
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{
(void)_Py_atomic_exchange_uintptr(obj, value);
}
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{
(void)_Py_atomic_exchange_uint(obj, value);
}
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{
(void)_Py_atomic_exchange_ptr(obj, value);
}
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
(void)_Py_atomic_exchange_ssize(obj, value);
}
// --- _Py_atomic_store_relaxed ----------------------------------------------
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{
*(volatile int *)obj = value;
}
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{
*(volatile int8_t *)obj = value;
}
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{
*(volatile int16_t *)obj = value;
}
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{
*(volatile int32_t *)obj = value;
}
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{
*(volatile int64_t *)obj = value;
}
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{
*(volatile intptr_t *)obj = value;
}
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{
*(volatile uint8_t *)obj = value;
}
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{
*(volatile uint16_t *)obj = value;
}
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{
*(volatile uint32_t *)obj = value;
}
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{
*(volatile uint64_t *)obj = value;
}
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{
*(volatile uintptr_t *)obj = value;
}
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{
*(volatile unsigned int *)obj = value;
}
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void* value)
{
*(void * volatile *)obj = value;
}
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{
*(volatile Py_ssize_t *)obj = value;
}
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
return *(void * volatile *)obj;
#elif defined(_M_ARM64)
return (void *)__ldar64((unsigned __int64 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_ptr_acquire"
#endif
}
// Release-store a pointer.  On x86/x86-64, an MSVC volatile store already
// has release semantics (see the header comment above).  On ARM64, use the
// store-release intrinsic.
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{
#if defined(_M_X64) || defined(_M_IX86)
    *(void * volatile *)obj = value;
#elif defined(_M_ARM64)
    // Bug fix: __stlr64 takes an 'unsigned __int64 volatile *' destination;
    // passing the raw 'void *obj' is a type mismatch.  Cast both arguments
    // explicitly, mirroring the __ldar64 call in _Py_atomic_load_ptr_acquire.
    __stlr64((unsigned __int64 volatile *)obj, (unsigned __int64)value);
#else
# error "no implementation of _Py_atomic_store_ptr_release"
#endif
}
// --- _Py_atomic_fence ------------------------------------------------------
// Full (sequentially consistent) thread fence.
static inline void
_Py_atomic_fence_seq_cst(void)
{
#if defined(_M_ARM64)
    // Full data memory barrier over the inner-shareable domain.
    __dmb(_ARM64_BARRIER_ISH);
#elif defined(_M_X64)
    // Per MSVC docs, __faststorefence generates a full-barrier sequence
    // that is faster than mfence on x64.
    __faststorefence();
#elif defined(_M_IX86)
    _mm_mfence();
#else
# error "no implementation of _Py_atomic_fence_seq_cst"
#endif
}
// Release fence.
static inline void
_Py_atomic_fence_release(void)
{
#if defined(_M_ARM64)
    __dmb(_ARM64_BARRIER_ISH);
#elif defined(_M_X64) || defined(_M_IX86)
    // On x86 a compiler-only barrier suffices for release ordering;
    // _ReadWriteBarrier emits no instructions.
    _ReadWriteBarrier();
#else
# error "no implementation of _Py_atomic_fence_release"
#endif
}
#undef _Py_atomic_ASSERT_ARG_TYPE

View File

@ -0,0 +1,872 @@
// This is the implementation of Python atomic operations using C++11 or C11
// atomics. Note that the pyatomic_gcc.h implementation is preferred for GCC
// compatible compilers, even if they support C++11 atomics.
#ifndef Py_ATOMIC_STD_H
# error "this header file must not be included directly"
#endif
#ifdef __cplusplus
extern "C++" {
# include <atomic>
}
# define _Py_USING_STD using namespace std
# define _Atomic(tp) atomic<tp>
#else
# define _Py_USING_STD
# include <stdatomic.h>
#endif
// --- _Py_atomic_add --------------------------------------------------------
static inline int
_Py_atomic_add_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(Py_ssize_t)*)obj, value);
}
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(intptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(unsigned int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(void *)*)obj,
(void **)expected, desired);
}
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(Py_ssize_t)*)obj, value);
}
static inline void*
_Py_atomic_exchange_ptr(void *obj, void *value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(void *)*)obj, value);
}
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_load -------------------------------------------------------
// Sequentially consistent loads, spelled with the explicit C11 form:
// atomic_load(p) is defined as atomic_load_explicit(p, memory_order_seq_cst).

static inline int
_Py_atomic_load_int(const int *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(int)*)obj,
                                memory_order_seq_cst);
}

static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(int8_t)*)obj,
                                memory_order_seq_cst);
}

static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(int16_t)*)obj,
                                memory_order_seq_cst);
}

static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(int32_t)*)obj,
                                memory_order_seq_cst);
}

static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(int64_t)*)obj,
                                memory_order_seq_cst);
}

static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(intptr_t)*)obj,
                                memory_order_seq_cst);
}

static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(uint8_t)*)obj,
                                memory_order_seq_cst);
}
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{
    _Py_USING_STD;
    // Bug fix: this previously cast to `_Atomic(uint32_t)*`, which made
    // atomic_load() read 4 bytes from a 2-byte object -- an out-of-bounds
    // read that also returns the wrong value (always on big-endian, and
    // whenever the adjacent bytes are nonzero elsewhere). The cast must
    // match the pointed-to type, as in every sibling function here.
    return atomic_load((const _Atomic(uint16_t)*)obj);
}
// Sequentially consistent loads (continued), spelled with the explicit
// C11 form (atomic_load(p) == atomic_load_explicit(p,
// memory_order_seq_cst)).
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
                                memory_order_seq_cst);
}

static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(uint64_t)*)obj,
                                memory_order_seq_cst);
}

static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
                                memory_order_seq_cst);
}

static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(unsigned int)*)obj,
                                memory_order_seq_cst);
}

static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
                                memory_order_seq_cst);
}

static inline void*
_Py_atomic_load_ptr(const void *obj)
{
    _Py_USING_STD;
    return atomic_load_explicit((const _Atomic(void*)*)obj,
                                memory_order_seq_cst);
}
// --- _Py_atomic_load_relaxed -----------------------------------------------
// Relaxed loads: no synchronization, only atomicity (roughly a plain
// hardware load). Each wrapper reads into a named temporary and returns it.

static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{
    _Py_USING_STD;
    int value = atomic_load_explicit((const _Atomic(int)*)obj,
                                     memory_order_relaxed);
    return value;
}

static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{
    _Py_USING_STD;
    int8_t value = atomic_load_explicit((const _Atomic(int8_t)*)obj,
                                        memory_order_relaxed);
    return value;
}

static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{
    _Py_USING_STD;
    int16_t value = atomic_load_explicit((const _Atomic(int16_t)*)obj,
                                         memory_order_relaxed);
    return value;
}

static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{
    _Py_USING_STD;
    int32_t value = atomic_load_explicit((const _Atomic(int32_t)*)obj,
                                         memory_order_relaxed);
    return value;
}

static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{
    _Py_USING_STD;
    int64_t value = atomic_load_explicit((const _Atomic(int64_t)*)obj,
                                         memory_order_relaxed);
    return value;
}

static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{
    _Py_USING_STD;
    intptr_t value = atomic_load_explicit((const _Atomic(intptr_t)*)obj,
                                          memory_order_relaxed);
    return value;
}

static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{
    _Py_USING_STD;
    uint8_t value = atomic_load_explicit((const _Atomic(uint8_t)*)obj,
                                         memory_order_relaxed);
    return value;
}

static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{
    _Py_USING_STD;
    uint16_t value = atomic_load_explicit((const _Atomic(uint16_t)*)obj,
                                          memory_order_relaxed);
    return value;
}

static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{
    _Py_USING_STD;
    uint32_t value = atomic_load_explicit((const _Atomic(uint32_t)*)obj,
                                          memory_order_relaxed);
    return value;
}

static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{
    _Py_USING_STD;
    uint64_t value = atomic_load_explicit((const _Atomic(uint64_t)*)obj,
                                          memory_order_relaxed);
    return value;
}

static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{
    _Py_USING_STD;
    uintptr_t value = atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
                                           memory_order_relaxed);
    return value;
}

static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{
    _Py_USING_STD;
    unsigned int value = atomic_load_explicit((const _Atomic(unsigned int)*)obj,
                                              memory_order_relaxed);
    return value;
}

static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{
    _Py_USING_STD;
    Py_ssize_t value = atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
                                            memory_order_relaxed);
    return value;
}

static inline void*
_Py_atomic_load_ptr_relaxed(const void *obj)
{
    _Py_USING_STD;
    void *value = atomic_load_explicit((const _Atomic(void*)*)obj,
                                       memory_order_relaxed);
    return value;
}
// --- _Py_atomic_store ------------------------------------------------------
// Sequentially consistent stores, spelled with the explicit C11 form:
// atomic_store(p, v) is defined as atomic_store_explicit(p, v,
// memory_order_seq_cst).

static inline void
_Py_atomic_store_int(int *obj, int value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(int)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(int8_t)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(int16_t)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(int32_t)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(int64_t)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(intptr_t)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(uint8_t)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(uint16_t)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(uint32_t)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(uint64_t)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(unsigned int)*)obj, value,
                          memory_order_seq_cst);
}

static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(void*)*)obj, value, memory_order_seq_cst);
}

static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
    _Py_USING_STD;
    atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value,
                          memory_order_seq_cst);
}
// --- _Py_atomic_store_relaxed ----------------------------------------------
// Relaxed stores: no synchronization, only atomicity (roughly a plain
// hardware store). Each wrapper first names the atomic view of `obj`,
// then stores through it.

static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{
    _Py_USING_STD;
    _Atomic(int) *p = (_Atomic(int)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{
    _Py_USING_STD;
    _Atomic(int8_t) *p = (_Atomic(int8_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{
    _Py_USING_STD;
    _Atomic(int16_t) *p = (_Atomic(int16_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{
    _Py_USING_STD;
    _Atomic(int32_t) *p = (_Atomic(int32_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{
    _Py_USING_STD;
    _Atomic(int64_t) *p = (_Atomic(int64_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{
    _Py_USING_STD;
    _Atomic(intptr_t) *p = (_Atomic(intptr_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{
    _Py_USING_STD;
    _Atomic(uint8_t) *p = (_Atomic(uint8_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{
    _Py_USING_STD;
    _Atomic(uint16_t) *p = (_Atomic(uint16_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{
    _Py_USING_STD;
    _Atomic(uint32_t) *p = (_Atomic(uint32_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{
    _Py_USING_STD;
    _Atomic(uint64_t) *p = (_Atomic(uint64_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{
    _Py_USING_STD;
    _Atomic(uintptr_t) *p = (_Atomic(uintptr_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{
    _Py_USING_STD;
    _Atomic(unsigned int) *p = (_Atomic(unsigned int)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{
    _Py_USING_STD;
    _Atomic(void*) *p = (_Atomic(void*)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}

static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{
    _Py_USING_STD;
    _Atomic(Py_ssize_t) *p = (_Atomic(Py_ssize_t)*)obj;
    atomic_store_explicit(p, value, memory_order_relaxed);
}
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
// Acquire/release pair for pointers: a release store that is observed by an
// acquire load also publishes all writes made before the store.

static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{
    _Py_USING_STD;
    void *value = atomic_load_explicit((const _Atomic(void*)*)obj,
                                       memory_order_acquire);
    return value;
}

static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{
    _Py_USING_STD;
    _Atomic(void*) *p = (_Atomic(void*)*)obj;
    atomic_store_explicit(p, value, memory_order_release);
}
// --- _Py_atomic_fence ------------------------------------------------------
// Stand-alone memory barriers (not tied to any particular atomic object).
// Full sequentially-consistent barrier.
static inline void
_Py_atomic_fence_seq_cst(void)
{
    _Py_USING_STD;
    atomic_thread_fence(memory_order_seq_cst);
}
// Release barrier: prior writes may not be reordered past this fence.
static inline void
_Py_atomic_fence_release(void)
{
    _Py_USING_STD;
    atomic_thread_fence(memory_order_release);
}

View File

@ -1,5 +1,5 @@
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifndef Py_INTERNAL_ATOMIC_H
#define Py_INTERNAL_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
@ -554,4 +554,4 @@ typedef struct _Py_atomic_int {
#ifdef __cplusplus
}
#endif
#endif /* Py_ATOMIC_H */
#endif /* Py_INTERNAL_ATOMIC_H */

View File

@ -0,0 +1,15 @@
import unittest

from test.support import import_helper

# The actual test functions live in the _testcapi C extension; skip the
# whole module if that extension is not available in this build.
_testcapi = import_helper.import_module('_testcapi')


class PyAtomicTests(unittest.TestCase):
    pass


# Attach every C-level function named "test_atomic*" to the TestCase so
# unittest discovers and runs each one as an individual test method.
for attr_name in sorted(dir(_testcapi)):
    if attr_name.startswith('test_atomic'):
        setattr(PyAtomicTests, attr_name, getattr(_testcapi, attr_name))


if __name__ == "__main__":
    unittest.main()

View File

@ -1713,6 +1713,9 @@ PYTHON_HEADERS= \
$(srcdir)/Include/cpython/optimizer.h \
$(srcdir)/Include/cpython/picklebufobject.h \
$(srcdir)/Include/cpython/pthread_stubs.h \
$(srcdir)/Include/cpython/pyatomic.h \
$(srcdir)/Include/cpython/pyatomic_gcc.h \
$(srcdir)/Include/cpython/pyatomic_std.h \
$(srcdir)/Include/cpython/pyctype.h \
$(srcdir)/Include/cpython/pydebug.h \
$(srcdir)/Include/cpython/pyerrors.h \

View File

@ -0,0 +1 @@
Add atomic operations on additional data types in pyatomic.h.

View File

@ -159,7 +159,7 @@
@MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c
@MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c
@MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c
@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c
@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c
@MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c
@MODULE__TESTCLINIC_LIMITED_TRUE@_testclinic_limited _testclinic_limited.c

View File

@ -22,6 +22,7 @@ int _PyTestCapi_Init_Structmember(PyObject *module);
int _PyTestCapi_Init_Exceptions(PyObject *module);
int _PyTestCapi_Init_Code(PyObject *module);
int _PyTestCapi_Init_Buffer(PyObject *module);
int _PyTestCapi_Init_PyAtomic(PyObject *module);
int _PyTestCapi_Init_PyOS(PyObject *module);
int _PyTestCapi_Init_Immortal(PyObject *module);
int _PyTestCapi_Init_GC(PyObject *mod);

View File

@ -0,0 +1,180 @@
/*
 * C Extension module to smoke test pyatomic.h API.
 *
 * This only tests basic functionality, not any synchronizing ordering.
 */
/* Always enable assertions */
#undef NDEBUG
#include "Python.h"
#include "cpython/pyatomic.h"
#include "parts.h"
// The FOR_*_TYPES(V) X-macros below expand V(suffix, dtype) once per
// supported type; the IMPL_* macros then stamp out one test function
// per (operation, type) combination.
// We define atomic bitwise operations on these types
#define FOR_BITWISE_TYPES(V) \
    V(uint8, uint8_t) \
    V(uint16, uint16_t) \
    V(uint32, uint32_t) \
    V(uint64, uint64_t) \
    V(uintptr, uintptr_t)
// We define atomic addition on these types
#define FOR_ARITHMETIC_TYPES(V) \
    FOR_BITWISE_TYPES(V) \
    V(int, int) \
    V(uint, unsigned int) \
    V(int8, int8_t) \
    V(int16, int16_t) \
    V(int32, int32_t) \
    V(int64, int64_t) \
    V(intptr, intptr_t) \
    V(ssize, Py_ssize_t)
// We define atomic load, store, exchange, and compare_exchange on these types
#define FOR_ALL_TYPES(V) \
    FOR_ARITHMETIC_TYPES(V) \
    V(ptr, void*)
// _Py_atomic_add_* returns the PREVIOUS value; the (dtype)-1 casts also
// exercise wrap-around below zero for the unsigned types.
#define IMPL_TEST_ADD(suffix, dtype) \
static PyObject * \
test_atomic_add_##suffix(PyObject *self, PyObject *obj) { \
    dtype x = 0; \
    assert(_Py_atomic_add_##suffix(&x, 1) == 0); \
    assert(x == 1); \
    assert(_Py_atomic_add_##suffix(&x, 2) == 1); \
    assert(x == 3); \
    assert(_Py_atomic_add_##suffix(&x, -2) == 3); \
    assert(x == 1); \
    assert(_Py_atomic_add_##suffix(&x, -1) == 1); \
    assert(x == 0); \
    assert(_Py_atomic_add_##suffix(&x, -1) == 0); \
    assert(x == (dtype)-1); \
    assert(_Py_atomic_add_##suffix(&x, -2) == (dtype)-1); \
    assert(x == (dtype)-3); \
    assert(_Py_atomic_add_##suffix(&x, 2) == (dtype)-3); \
    assert(x == (dtype)-1); \
    Py_RETURN_NONE; \
}
FOR_ARITHMETIC_TYPES(IMPL_TEST_ADD)
// compare_exchange(obj, expected, desired) returns 0 on failure and copies
// the current value of *obj into *expected; returns 1 and stores `desired`
// on success.
#define IMPL_TEST_COMPARE_EXCHANGE(suffix, dtype) \
static PyObject * \
test_atomic_compare_exchange_##suffix(PyObject *self, PyObject *obj) { \
    dtype x = (dtype)0; \
    dtype y = (dtype)1; \
    dtype z = (dtype)2; \
    assert(_Py_atomic_compare_exchange_##suffix(&x, &y, z) == 0); \
    assert(x == 0); \
    assert(y == 0); \
    assert(_Py_atomic_compare_exchange_##suffix(&x, &y, z) == 1); \
    assert(x == z); \
    assert(y == 0); \
    assert(_Py_atomic_compare_exchange_##suffix(&x, &y, z) == 0); \
    assert(x == z); \
    assert(y == z); \
    Py_RETURN_NONE; \
}
FOR_ALL_TYPES(IMPL_TEST_COMPARE_EXCHANGE)
// exchange stores the new value and returns the previous one.
#define IMPL_TEST_EXCHANGE(suffix, dtype) \
static PyObject * \
test_atomic_exchange_##suffix(PyObject *self, PyObject *obj) { \
    dtype x = (dtype)0; \
    dtype y = (dtype)1; \
    dtype z = (dtype)2; \
    assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)0); \
    assert(x == (dtype)1); \
    assert(_Py_atomic_exchange_##suffix(&x, z) == (dtype)1); \
    assert(x == (dtype)2); \
    assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)2); \
    assert(x == (dtype)1); \
    Py_RETURN_NONE; \
}
FOR_ALL_TYPES(IMPL_TEST_EXCHANGE)
// Covers both the seq_cst and the _relaxed load/store variants.
#define IMPL_TEST_LOAD_STORE(suffix, dtype) \
static PyObject * \
test_atomic_load_store_##suffix(PyObject *self, PyObject *obj) { \
    dtype x = (dtype)0; \
    dtype y = (dtype)1; \
    dtype z = (dtype)2; \
    assert(_Py_atomic_load_##suffix(&x) == (dtype)0); \
    assert(x == (dtype)0); \
    _Py_atomic_store_##suffix(&x, y); \
    assert(_Py_atomic_load_##suffix(&x) == (dtype)1); \
    assert(x == (dtype)1); \
    _Py_atomic_store_##suffix##_relaxed(&x, z); \
    assert(_Py_atomic_load_##suffix##_relaxed(&x) == (dtype)2); \
    assert(x == (dtype)2); \
    Py_RETURN_NONE; \
}
FOR_ALL_TYPES(IMPL_TEST_LOAD_STORE)
// _Py_atomic_or/_Py_atomic_and return the previous value, like add.
#define IMPL_TEST_AND_OR(suffix, dtype) \
static PyObject * \
test_atomic_and_or_##suffix(PyObject *self, PyObject *obj) { \
    dtype x = (dtype)0; \
    dtype y = (dtype)1; \
    dtype z = (dtype)3; \
    assert(_Py_atomic_or_##suffix(&x, z) == (dtype)0); \
    assert(x == (dtype)3); \
    assert(_Py_atomic_and_##suffix(&x, y) == (dtype)3); \
    assert(x == (dtype)1); \
    Py_RETURN_NONE; \
}
FOR_BITWISE_TYPES(IMPL_TEST_AND_OR)
// Smoke test for the stand-alone fences.
static PyObject *
test_atomic_fences(PyObject *self, PyObject *obj) {
    // Just make sure that the fences compile. We are not
    // testing any synchronizing ordering.
    _Py_atomic_fence_seq_cst();
    _Py_atomic_fence_release();
    Py_RETURN_NONE;
}
// Smoke test for the acquire-load / release-store pointer pair.
static PyObject *
test_atomic_release_acquire(PyObject *self, PyObject *obj) {
    void *x = NULL;
    // y points at itself: a convenient non-NULL address distinct from &x.
    void *y = &y;
    assert(_Py_atomic_load_ptr_acquire(&x) == NULL);
    _Py_atomic_store_ptr_release(&x, y);
    assert(x == y);
    assert(_Py_atomic_load_ptr_acquire(&x) == y);
    Py_RETURN_NONE;
}
// NOTE: all tests should start with "test_atomic_" to be included
// in test_pyatomic.py
// The BIND_* macros emit one PyMethodDef entry per generated test function.
#define BIND_TEST_ADD(suffix, dtype) \
    {"test_atomic_add_" #suffix, test_atomic_add_##suffix, METH_NOARGS},
#define BIND_TEST_COMPARE_EXCHANGE(suffix, dtype) \
    {"test_atomic_compare_exchange_" #suffix, test_atomic_compare_exchange_##suffix, METH_NOARGS},
#define BIND_TEST_EXCHANGE(suffix, dtype) \
    {"test_atomic_exchange_" #suffix, test_atomic_exchange_##suffix, METH_NOARGS},
#define BIND_TEST_LOAD_STORE(suffix, dtype) \
    {"test_atomic_load_store_" #suffix, test_atomic_load_store_##suffix, METH_NOARGS},
#define BIND_TEST_AND_OR(suffix, dtype) \
    {"test_atomic_and_or_" #suffix, test_atomic_and_or_##suffix, METH_NOARGS},
// Method table: expands to one entry per (operation, type) test above.
static PyMethodDef test_methods[] = {
    FOR_ARITHMETIC_TYPES(BIND_TEST_ADD)
    FOR_ALL_TYPES(BIND_TEST_COMPARE_EXCHANGE)
    FOR_ALL_TYPES(BIND_TEST_EXCHANGE)
    FOR_ALL_TYPES(BIND_TEST_LOAD_STORE)
    FOR_BITWISE_TYPES(BIND_TEST_AND_OR)
    {"test_atomic_fences", test_atomic_fences, METH_NOARGS},
    {"test_atomic_release_acquire", test_atomic_release_acquire, METH_NOARGS},
    {NULL, NULL} /* sentinel */
};
// Register the pyatomic smoke-test functions on the _testcapi module.
// Returns 0 on success, -1 (with an exception set) on failure.
int
_PyTestCapi_Init_PyAtomic(PyObject *mod)
{
    return (PyModule_AddFunctions(mod, test_methods) < 0) ? -1 : 0;
}

View File

@ -4004,6 +4004,9 @@ PyInit__testcapi(void)
if (_PyTestCapi_Init_GC(m) < 0) {
return NULL;
}
if (_PyTestCapi_Init_PyAtomic(m) < 0) {
return NULL;
}
#ifndef LIMITED_API_AVAILABLE
PyModule_AddObjectRef(m, "LIMITED_API_AVAILABLE", Py_False);

View File

@ -112,6 +112,7 @@
<ClCompile Include="..\Modules\_testcapi\exceptions.c" />
<ClCompile Include="..\Modules\_testcapi\code.c" />
<ClCompile Include="..\Modules\_testcapi\buffer.c" />
<ClCompile Include="..\Modules\_testcapi\pyatomic.c" />
<ClCompile Include="..\Modules\_testcapi\pyos.c" />
<ClCompile Include="..\Modules\_testcapi\immortal.c" />
<ClCompile Include="..\Modules\_testcapi\gc.c" />

View File

@ -66,6 +66,9 @@
<ClCompile Include="..\Modules\_testcapi\buffer.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\Modules\_testcapi\pyatomic.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\Modules\_testcapi\pyos.c">
<Filter>Source Files</Filter>
</ClCompile>

View File

@ -166,6 +166,8 @@
<ClInclude Include="..\Include\cpython\parser_interface.h" />
<ClInclude Include="..\Include\cpython\picklebufobject.h" />
<ClInclude Include="..\Include\cpython\pyarena.h" />
<ClInclude Include="..\Include\cpython\pyatomic.h" />
<ClInclude Include="..\Include\cpython\pyatomic_msc.h" />
<ClInclude Include="..\Include\cpython\pyctype.h" />
<ClInclude Include="..\Include\cpython\pydebug.h" />
<ClInclude Include="..\Include\cpython\pyerrors.h" />

View File

@ -423,6 +423,18 @@
<ClInclude Include="..\Include\cpython\pyarena.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\cpython\pyatomic.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\cpython\pyatomic_gcc.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\cpython\pyatomic_msc.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\cpython\pyatomic_std.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\cpython\pyctype.h">
<Filter>Include</Filter>
</ClInclude>