| From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
| From: Tyler Veness <calcmogul@gmail.com> |
| Date: Thu, 13 Jul 2023 22:13:47 -0700 |
| Subject: [PATCH 08/10] Use C++ atomics |
| |
| --- |
| src/unix/async.c | 25 +++++++++++++------------ |
| src/unix/core.c | 3 ++- |
| src/unix/fs.c | 32 +++++++++++++++++--------------- |
| src/unix/kqueue.c | 10 ++++++---- |
| src/unix/linux.c | 45 +++++++++++++++++++++++---------------------- |
| src/unix/tty.c | 5 +++-- |
| src/uv-common.c | 2 +- |
| src/uv-common.h | 8 +++----- |
| 8 files changed, 68 insertions(+), 62 deletions(-) |
| |
| diff --git a/src/unix/async.c b/src/unix/async.c |
| index 0ff2669e30a628dbb2df9e28ba14b38cf14114e5..fef4ae93343edc0341179a1c4739dcd831ef6e26 100644 |
| --- a/src/unix/async.c |
| +++ b/src/unix/async.c |
| @@ -26,7 +26,6 @@ |
| #include "internal.h" |
| |
| #include <errno.h> |
| -#include <stdatomic.h> |
| #include <stdio.h> /* snprintf() */ |
| #include <assert.h> |
| #include <stdlib.h> |
| @@ -38,6 +37,8 @@ |
| #include <sys/eventfd.h> |
| #endif |
| |
| +#include <atomic> |
| + |
| static void uv__async_send(uv_loop_t* loop); |
| static int uv__async_start(uv_loop_t* loop); |
| static void uv__cpu_relax(void); |
| @@ -63,14 +64,14 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { |
| |
| |
| int uv_async_send(uv_async_t* handle) { |
| - _Atomic int* pending; |
| - _Atomic int* busy; |
| + std::atomic<int>* pending; |
| + std::atomic<int>* busy; |
| |
| - pending = (_Atomic int*) &handle->pending; |
| - busy = (_Atomic int*) &handle->u.fd; |
| + pending = (std::atomic<int>*) &handle->pending; |
| + busy = (std::atomic<int>*) &handle->u.fd; |
| |
| /* Do a cheap read first. */ |
| - if (atomic_load_explicit(pending, memory_order_relaxed) != 0) |
| + if (atomic_load_explicit(pending, std::memory_order_relaxed) != 0) |
| return 0; |
| |
| /* Set the loop to busy. */ |
| @@ -90,12 +91,12 @@ int uv_async_send(uv_async_t* handle) { |
| /* Wait for the busy flag to clear before closing. |
| * Only call this from the event loop thread. */ |
| static void uv__async_spin(uv_async_t* handle) { |
| - _Atomic int* pending; |
| - _Atomic int* busy; |
| + std::atomic<int>* pending; |
| + std::atomic<int>* busy; |
| int i; |
| |
| - pending = (_Atomic int*) &handle->pending; |
| - busy = (_Atomic int*) &handle->u.fd; |
| + pending = (std::atomic<int>*) &handle->pending; |
| + busy = (std::atomic<int>*) &handle->u.fd; |
| |
| /* Set the pending flag first, so no new events will be added by other |
| * threads after this function returns. */ |
| @@ -135,7 +136,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { |
| struct uv__queue queue; |
| struct uv__queue* q; |
| uv_async_t* h; |
| - _Atomic int *pending; |
| + std::atomic<int> *pending; |
| |
| assert(w == &loop->async_io_watcher); |
| |
| @@ -166,7 +167,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { |
| uv__queue_insert_tail(&loop->async_handles, q); |
| |
| /* Atomically fetch and clear pending flag */ |
| - pending = (_Atomic int*) &h->pending; |
| + pending = (std::atomic<int>*) &h->pending; |
| if (atomic_exchange(pending, 0) == 0) |
| continue; |
| |
| diff --git a/src/unix/core.c b/src/unix/core.c |
| index f53adc156a7c454c492abaeac29d90be436785fc..ce7fd2cdfdd53410dc694450bd56dffc26ff4792 100644 |
| --- a/src/unix/core.c |
| +++ b/src/unix/core.c |
| @@ -45,6 +45,7 @@ |
| #include <sys/utsname.h> |
| #include <sys/time.h> |
| #include <time.h> /* clock_gettime */ |
| +#include <atomic> |
| |
| #ifdef __sun |
| # include <sys/filio.h> |
| @@ -263,7 +264,7 @@ int uv__getiovmax(void) { |
| #if defined(IOV_MAX) |
| return IOV_MAX; |
| #elif defined(_SC_IOV_MAX) |
| - static _Atomic int iovmax_cached = -1; |
| + static std::atomic<int> iovmax_cached = -1; |
| int iovmax; |
| |
| -  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed); |
| +  iovmax = atomic_load_explicit(&iovmax_cached, std::memory_order_relaxed); |
| diff --git a/src/unix/fs.c b/src/unix/fs.c |
| index e25d02e54dbe93e4b9c22b0140108c99ae2cb4f7..aba190a9c0240fba0128fb7fbc5d92d7fa86214b 100644 |
| --- a/src/unix/fs.c |
| +++ b/src/unix/fs.c |
| @@ -46,6 +46,8 @@ |
| #include <fcntl.h> |
| #include <poll.h> |
| |
| +#include <atomic> |
| + |
| #if defined(__DragonFly__) || \ |
| defined(__FreeBSD__) || \ |
| defined(__OpenBSD__) || \ |
| @@ -313,7 +315,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) { |
| static uv_once_t once = UV_ONCE_INIT; |
| int r; |
| #ifdef O_CLOEXEC |
| - static _Atomic int no_cloexec_support; |
| + static std::atomic<int> no_cloexec_support; |
| #endif |
| static const char pattern[] = "XXXXXX"; |
| static const size_t pattern_size = sizeof(pattern) - 1; |
| @@ -338,7 +340,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) { |
| uv_once(&once, uv__mkostemp_initonce); |
| |
| #ifdef O_CLOEXEC |
| - if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 && |
| + if (atomic_load_explicit(&no_cloexec_support, std::memory_order_relaxed) == 0 && |
| uv__mkostemp != NULL) { |
| r = uv__mkostemp(path, O_CLOEXEC); |
| |
| @@ -352,7 +354,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) { |
| |
| /* We set the static variable so that next calls don't even |
| try to use mkostemp. */ |
| - atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed); |
| + atomic_store_explicit(&no_cloexec_support, 1, std::memory_order_relaxed); |
| } |
| #endif /* O_CLOEXEC */ |
| |
| @@ -462,7 +464,7 @@ static ssize_t uv__fs_preadv(uv_file fd, |
| |
| static ssize_t uv__fs_read(uv_fs_t* req) { |
| #if TRY_PREADV |
| - static _Atomic int no_preadv; |
| + static std::atomic<int> no_preadv; |
| #endif |
| unsigned int iovmax; |
| ssize_t result; |
| @@ -486,7 +488,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) { |
| result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); |
| #else |
| # if TRY_PREADV |
| - if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry: |
| + if (atomic_load_explicit(&no_preadv, std::memory_order_relaxed)) retry: |
| # endif |
| { |
| result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off); |
| @@ -498,7 +500,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) { |
| req->nbufs, |
| req->off); |
| if (result == -1 && errno == ENOSYS) { |
| - atomic_store_explicit(&no_preadv, 1, memory_order_relaxed); |
| + atomic_store_explicit(&no_preadv, 1, std::memory_order_relaxed); |
| goto retry; |
| } |
| } |
| @@ -939,10 +941,10 @@ static int uv__is_cifs_or_smb(int fd) { |
| |
| static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off, |
| int out_fd, size_t len) { |
| - static _Atomic int no_copy_file_range_support; |
| + static std::atomic<int> no_copy_file_range_support; |
| ssize_t r; |
| |
| - if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) { |
| + if (atomic_load_explicit(&no_copy_file_range_support, std::memory_order_relaxed)) { |
| errno = ENOSYS; |
| return -1; |
| } |
| @@ -961,7 +963,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off, |
| errno = ENOSYS; /* Use fallback. */ |
| break; |
| case ENOSYS: |
| - atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed); |
| + atomic_store_explicit(&no_copy_file_range_support, 1, std::memory_order_relaxed); |
| break; |
| case EPERM: |
| /* It's been reported that CIFS spuriously fails. |
| @@ -1162,7 +1164,7 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) { |
| |
| static ssize_t uv__fs_write(uv_fs_t* req) { |
| #if TRY_PREADV |
| - static _Atomic int no_pwritev; |
| + static std::atomic<int> no_pwritev; |
| #endif |
| ssize_t r; |
| |
| @@ -1191,7 +1193,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) { |
| r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); |
| #else |
| # if TRY_PREADV |
| - if (atomic_load_explicit(&no_pwritev, memory_order_relaxed)) retry: |
| + if (atomic_load_explicit(&no_pwritev, std::memory_order_relaxed)) retry: |
| # endif |
| { |
| r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); |
| @@ -1203,7 +1205,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) { |
| req->nbufs, |
| req->off); |
| if (r == -1 && errno == ENOSYS) { |
| - atomic_store_explicit(&no_pwritev, 1, memory_order_relaxed); |
| + atomic_store_explicit(&no_pwritev, 1, std::memory_order_relaxed); |
| goto retry; |
| } |
| } |
| @@ -1483,14 +1485,14 @@ static int uv__fs_statx(int fd, |
| uv_stat_t* buf) { |
| STATIC_ASSERT(UV_ENOSYS != -1); |
| #ifdef __linux__ |
| - static _Atomic int no_statx; |
| + static std::atomic<int> no_statx; |
| struct uv__statx statxbuf; |
| int dirfd; |
| int flags; |
| int mode; |
| int rc; |
| |
| - if (atomic_load_explicit(&no_statx, memory_order_relaxed)) |
| + if (atomic_load_explicit(&no_statx, std::memory_order_relaxed)) |
| return UV_ENOSYS; |
| |
| dirfd = AT_FDCWD; |
| @@ -1524,7 +1526,7 @@ static int uv__fs_statx(int fd, |
| * implemented, rc might return 1 with 0 set as the error code in which |
| * case we return ENOSYS. |
| */ |
| - atomic_store_explicit(&no_statx, 1, memory_order_relaxed); |
| + atomic_store_explicit(&no_statx, 1, std::memory_order_relaxed); |
| return UV_ENOSYS; |
| } |
| |
| diff --git a/src/unix/kqueue.c b/src/unix/kqueue.c |
| index 28e55aae6c613576ede7024a5c73d746e134d865..ffe0f9191cc7b0c233447db358077d8814e0217e 100644 |
| --- a/src/unix/kqueue.c |
| +++ b/src/unix/kqueue.c |
| @@ -34,6 +34,8 @@ |
| #include <fcntl.h> |
| #include <time.h> |
| |
| +#include <atomic> |
| + |
| /* |
| * Required on |
| * - Until at least FreeBSD 11.0 |
| @@ -60,7 +62,7 @@ int uv__kqueue_init(uv_loop_t* loop) { |
| |
| |
| #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 |
| -static _Atomic int uv__has_forked_with_cfrunloop; |
| +static std::atomic<int> uv__has_forked_with_cfrunloop; |
| #endif |
| |
| int uv__io_fork(uv_loop_t* loop) { |
| @@ -84,7 +86,7 @@ int uv__io_fork(uv_loop_t* loop) { |
| */ |
| atomic_store_explicit(&uv__has_forked_with_cfrunloop, |
| 1, |
| - memory_order_relaxed); |
| + std::memory_order_relaxed); |
| uv__free(loop->cf_state); |
| loop->cf_state = NULL; |
| } |
| @@ -541,7 +543,7 @@ int uv_fs_event_start(uv_fs_event_t* handle, |
| goto fallback; |
| |
| if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop, |
| - memory_order_relaxed)) { |
| + std::memory_order_relaxed)) { |
| int r; |
| /* The fallback fd is no longer needed */ |
| uv__close_nocheckstdio(fd); |
| @@ -577,7 +579,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { |
| |
| #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 |
| if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop, |
| - memory_order_relaxed)) |
| + std::memory_order_relaxed)) |
| if (handle->cf_cb != NULL) |
| r = uv__fsevents_close(handle); |
| #endif |
| diff --git a/src/unix/linux.c b/src/unix/linux.c |
| index 157443792f1216c83b4221c3810d17c81c5913c4..e3dfb186dc531e5c8197a81681c00d693e0913c6 100644 |
| --- a/src/unix/linux.c |
| +++ b/src/unix/linux.c |
| @@ -27,7 +27,6 @@ |
| #include "internal.h" |
| |
| #include <inttypes.h> |
| -#include <stdatomic.h> |
| #include <stddef.h> /* offsetof */ |
| #include <stdint.h> |
| #include <stdio.h> |
| @@ -133,6 +132,8 @@ |
| # include <netpacket/packet.h> |
| #endif /* HAVE_IFADDRS_H */ |
| |
| +#include <atomic> |
| + |
| enum { |
| UV__IORING_SETUP_SQPOLL = 2u, |
| }; |
| @@ -311,14 +312,14 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) { |
| |
| |
| unsigned uv__kernel_version(void) { |
| - static _Atomic unsigned cached_version; |
| + static std::atomic<unsigned int> cached_version; |
| struct utsname u; |
| unsigned version; |
| unsigned major; |
| unsigned minor; |
| unsigned patch; |
| |
| - version = atomic_load_explicit(&cached_version, memory_order_relaxed); |
| + version = std::atomic_load_explicit(&cached_version, std::memory_order_relaxed); |
| if (version != 0) |
| return version; |
| |
| @@ -329,7 +330,7 @@ unsigned uv__kernel_version(void) { |
| return 0; |
| |
| version = major * 65536 + minor * 256 + patch; |
| - atomic_store_explicit(&cached_version, version, memory_order_relaxed); |
| + std::atomic_store_explicit(&cached_version, version, std::memory_order_relaxed); |
| |
| return version; |
| } |
| @@ -424,16 +425,16 @@ static int uv__use_io_uring(void) { |
| return 0; /* Possibly available but blocked by seccomp. */ |
| #else |
| /* Ternary: unknown=0, yes=1, no=-1 */ |
| - static _Atomic int use_io_uring; |
| + static std::atomic<int> use_io_uring; |
| char* val; |
| int use; |
| |
| - use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); |
| + use = std::atomic_load_explicit(&use_io_uring, std::memory_order_relaxed); |
| |
| if (use == 0) { |
| val = getenv("UV_USE_IO_URING"); |
| use = val == NULL || atoi(val) ? 1 : -1; |
| - atomic_store_explicit(&use_io_uring, use, memory_order_relaxed); |
| + std::atomic_store_explicit(&use_io_uring, use, std::memory_order_relaxed); |
| } |
| |
| return use > 0; |
| @@ -709,8 +710,8 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, |
| if (iou->ringfd == -1) |
| return NULL; |
| |
| - head = atomic_load_explicit((_Atomic uint32_t*) iou->sqhead, |
| - memory_order_acquire); |
| + head = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqhead, |
| + std::memory_order_acquire); |
| tail = *iou->sqtail; |
| mask = iou->sqmask; |
| |
| @@ -739,12 +740,12 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, |
| static void uv__iou_submit(struct uv__iou* iou) { |
| uint32_t flags; |
| |
| - atomic_store_explicit((_Atomic uint32_t*) iou->sqtail, |
| + std::atomic_store_explicit((std::atomic<uint32_t>*) iou->sqtail, |
| *iou->sqtail + 1, |
| - memory_order_release); |
| + std::memory_order_release); |
| |
| - flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, |
| - memory_order_acquire); |
| + flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags, |
| + std::memory_order_acquire); |
| |
| if (flags & UV__IORING_SQ_NEED_WAKEUP) |
| if (uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_SQ_WAKEUP)) |
| @@ -1076,8 +1077,8 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { |
| int rc; |
| |
| head = *iou->cqhead; |
| - tail = atomic_load_explicit((_Atomic uint32_t*) iou->cqtail, |
| - memory_order_acquire); |
| + tail = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->cqtail, |
| + std::memory_order_acquire); |
| mask = iou->cqmask; |
| cqe = (uv__io_uring_cqe*)iou->cqe; |
| nevents = 0; |
| @@ -1109,15 +1110,15 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { |
| nevents++; |
| } |
| |
| - atomic_store_explicit((_Atomic uint32_t*) iou->cqhead, |
| + std::atomic_store_explicit((std::atomic<uint32_t>*) iou->cqhead, |
| tail, |
| - memory_order_release); |
| + std::memory_order_release); |
| |
| /* Check whether CQE's overflowed, if so enter the kernel to make them |
| * available. Don't grab them immediately but in the next loop iteration to |
| * avoid loop starvation. */ |
| - flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, |
| - memory_order_acquire); |
| + flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags, |
| + std::memory_order_acquire); |
| |
| if (flags & UV__IORING_SQ_CQ_OVERFLOW) { |
| do |
| @@ -1531,7 +1532,7 @@ update_timeout: |
| } |
| |
| uint64_t uv__hrtime(uv_clocktype_t type) { |
| - static _Atomic clock_t fast_clock_id = -1; |
| + static std::atomic<clock_t> fast_clock_id = -1; |
| struct timespec t; |
| clock_t clock_id; |
| |
| @@ -1547,7 +1548,7 @@ uint64_t uv__hrtime(uv_clocktype_t type) { |
| if (type != UV_CLOCK_FAST) |
| goto done; |
| |
| - clock_id = atomic_load_explicit(&fast_clock_id, memory_order_relaxed); |
| + clock_id = std::atomic_load_explicit(&fast_clock_id, std::memory_order_relaxed); |
| if (clock_id != -1) |
| goto done; |
| |
| @@ -1556,7 +1557,7 @@ uint64_t uv__hrtime(uv_clocktype_t type) { |
| if (t.tv_nsec <= 1 * 1000 * 1000) |
| clock_id = CLOCK_MONOTONIC_COARSE; |
| |
| - atomic_store_explicit(&fast_clock_id, clock_id, memory_order_relaxed); |
| + std::atomic_store_explicit(&fast_clock_id, clock_id, std::memory_order_relaxed); |
| |
| done: |
| |
| diff --git a/src/unix/tty.c b/src/unix/tty.c |
| index 1bd217b5a15eed13a8349c479b53471dd36ca216..1304c6d8685cfd122cffea066dc668d1dfc9ae02 100644 |
| --- a/src/unix/tty.c |
| +++ b/src/unix/tty.c |
| @@ -22,7 +22,6 @@ |
| #include "uv.h" |
| #include "internal.h" |
| |
| -#include <stdatomic.h> |
| #include <stdlib.h> |
| #include <assert.h> |
| #include <unistd.h> |
| @@ -30,6 +29,8 @@ |
| #include <errno.h> |
| #include <sys/ioctl.h> |
| |
| +#include <atomic> |
| + |
| #if defined(__MVS__) && !defined(IMAXBEL) |
| #define IMAXBEL 0 |
| #endif |
| @@ -64,7 +65,7 @@ static int isreallyatty(int file) { |
| |
| static int orig_termios_fd = -1; |
| static struct termios orig_termios; |
| -static _Atomic int termios_spinlock; |
| +static std::atomic<int> termios_spinlock; |
| |
| int uv__tcsetattr(int fd, int how, const struct termios *term) { |
| int rc; |
| diff --git a/src/uv-common.c b/src/uv-common.c |
| index bfcc3ef10f4fd7763221638947da6e02e7a17c33..5c6d84155408ae4f7c3c6ff9b48bd09ccd16a92e 100644 |
| --- a/src/uv-common.c |
| +++ b/src/uv-common.c |
| @@ -951,7 +951,7 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { |
| __attribute__((destructor)) |
| #endif |
| void uv_library_shutdown(void) { |
| - static int was_shutdown; |
| + static std::atomic<int> was_shutdown; |
| |
| if (uv__exchange_int_relaxed(&was_shutdown, 1)) |
| return; |
| diff --git a/src/uv-common.h b/src/uv-common.h |
| index cd57e5a35153d0557351b60cce0c5be7a4468b60..5dce8eaf2705b47935b218181f6dd69af0d5b61b 100644 |
| --- a/src/uv-common.h |
| +++ b/src/uv-common.h |
| @@ -32,15 +32,13 @@ |
| #include <stddef.h> |
| #include <stdint.h> |
| |
| +#include <atomic> |
| + |
| #include "uv.h" |
| #include "uv/tree.h" |
| #include "queue.h" |
| #include "strscpy.h" |
| |
| -#ifndef _MSC_VER |
| -# include <stdatomic.h> |
| -#endif |
| - |
| #if EDOM > 0 |
| # define UV__ERR(x) (-(x)) |
| #else |
| @@ -70,7 +68,7 @@ extern int snprintf(char*, size_t, const char*, ...); |
| InterlockedExchangeNoFence((LONG volatile*)(p), v) |
| #else |
| #define uv__exchange_int_relaxed(p, v) \ |
| - atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed) |
| + std::atomic_exchange_explicit((std::atomic<int>*)(p), v, std::memory_order_relaxed) |
| #endif |
| |
| #define UV__UDP_DGRAM_MAXSIZE (64 * 1024) |