From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Tyler Veness <calcmogul@gmail.com>
Date: Thu, 13 Jul 2023 22:13:47 -0700
Subject: [PATCH 7/9] Use C++ atomics
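
Replace the C11 <stdatomic.h> / _Atomic atomics with their C++ <atomic> equivalents so these files also build as C++: _Atomic T becomes std::atomic<T>, memory_order_* becomes std::memory_order_*, and plain int/uint32_t fields are cast to std::atomic<T>* before being passed to the atomic load/store/exchange free functions. A minimal sketch of the mapping, using a hypothetical flag variable rather than a libuv symbol:

    #include <atomic>

    /* was: static _Atomic int flag; */
    static std::atomic<int> flag;

    int load_then_set(void) {
      /* was: atomic_load_explicit(&flag, memory_order_relaxed) */
      int v = std::atomic_load_explicit(&flag, std::memory_order_relaxed);
      /* was: atomic_store_explicit(&flag, 1, memory_order_relaxed) */
      std::atomic_store_explicit(&flag, 1, std::memory_order_relaxed);
      return v;
    }

Keeping the free-function form (rather than member load()/store() calls) keeps the diff close to the original C code.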
---
src/unix/async.c | 25 +++++++++++++------------
src/unix/core.c | 3 ++-
src/unix/fs.c | 20 +++++++++++---------
src/unix/kqueue.c | 10 ++++++----
src/unix/linux.c | 45 +++++++++++++++++++++++----------------------
src/unix/tty.c | 5 +++--
src/uv-common.c | 2 +-
src/uv-common.h | 8 +++-----
8 files changed, 62 insertions(+), 56 deletions(-)
diff --git a/src/unix/async.c b/src/unix/async.c
index 0ff2669e30a628dbb2df9e28ba14b38cf14114e5..fef4ae93343edc0341179a1c4739dcd831ef6e26 100644
--- a/src/unix/async.c
+++ b/src/unix/async.c
@@ -26,7 +26,6 @@
#include "internal.h"
#include <errno.h>
-#include <stdatomic.h>
#include <stdio.h> /* snprintf() */
#include <assert.h>
#include <stdlib.h>
@@ -38,6 +37,8 @@
#include <sys/eventfd.h>
#endif
+#include <atomic>
+
static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);
@@ -63,14 +64,14 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
int uv_async_send(uv_async_t* handle) {
- _Atomic int* pending;
- _Atomic int* busy;
+ std::atomic<int>* pending;
+ std::atomic<int>* busy;
- pending = (_Atomic int*) &handle->pending;
- busy = (_Atomic int*) &handle->u.fd;
+ pending = (std::atomic<int>*) &handle->pending;
+ busy = (std::atomic<int>*) &handle->u.fd;
/* Do a cheap read first. */
- if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
+ if (atomic_load_explicit(pending, std::memory_order_relaxed) != 0)
return 0;
/* Set the loop to busy. */
@@ -90,12 +91,12 @@ int uv_async_send(uv_async_t* handle) {
/* Wait for the busy flag to clear before closing.
* Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
- _Atomic int* pending;
- _Atomic int* busy;
+ std::atomic<int>* pending;
+ std::atomic<int>* busy;
int i;
- pending = (_Atomic int*) &handle->pending;
- busy = (_Atomic int*) &handle->u.fd;
+ pending = (std::atomic<int>*) &handle->pending;
+ busy = (std::atomic<int>*) &handle->u.fd;
/* Set the pending flag first, so no new events will be added by other
* threads after this function returns. */
@@ -135,7 +136,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
struct uv__queue queue;
struct uv__queue* q;
uv_async_t* h;
- _Atomic int *pending;
+ std::atomic<int> *pending;
assert(w == &loop->async_io_watcher);
@@ -166,7 +167,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv__queue_insert_tail(&loop->async_handles, q);
/* Atomically fetch and clear pending flag */
- pending = (_Atomic int*) &h->pending;
+ pending = (std::atomic<int>*) &h->pending;
if (atomic_exchange(pending, 0) == 0)
continue;
diff --git a/src/unix/core.c b/src/unix/core.c
index 268fc9652f437eb0d0cda2a9e0b06b9e91eb9742..1a52716792d2dbbb71a4ebdd2255173b7447979b 100644
--- a/src/unix/core.c
+++ b/src/unix/core.c
@@ -45,6 +45,7 @@
#include <sys/utsname.h>
#include <sys/time.h>
#include <time.h> /* clock_gettime */
+#include <atomic>
#ifdef __sun
# include <sys/filio.h>
@@ -263,7 +264,7 @@ int uv__getiovmax(void) {
#if defined(IOV_MAX)
return IOV_MAX;
#elif defined(_SC_IOV_MAX)
- static _Atomic int iovmax_cached = -1;
+ static std::atomic<int> iovmax_cached = -1;
int iovmax;
iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
diff --git a/src/unix/fs.c b/src/unix/fs.c
index bc00c90b07a32a823412b216df6f2d758dbc423b..afdf0c6e592c69961b1286a87b61e62601331763 100644
--- a/src/unix/fs.c
+++ b/src/unix/fs.c
@@ -45,6 +45,8 @@
#include <fcntl.h>
#include <poll.h>
+#include <atomic>
+
#if defined(__linux__)
# include <sys/sendfile.h>
#endif
@@ -307,7 +309,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
static uv_once_t once = UV_ONCE_INIT;
int r;
#ifdef O_CLOEXEC
- static _Atomic int no_cloexec_support;
+ static std::atomic<int> no_cloexec_support;
#endif
static const char pattern[] = "XXXXXX";
static const size_t pattern_size = sizeof(pattern) - 1;
@@ -332,7 +334,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
uv_once(&once, uv__mkostemp_initonce);
#ifdef O_CLOEXEC
- if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
+ if (atomic_load_explicit(&no_cloexec_support, std::memory_order_relaxed) == 0 &&
uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC);
@@ -346,7 +348,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
/* We set the static variable so that next calls don't even
try to use mkostemp. */
- atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
+ atomic_store_explicit(&no_cloexec_support, 1, std::memory_order_relaxed);
}
#endif /* O_CLOEXEC */
@@ -867,10 +869,10 @@ static int uv__is_cifs_or_smb(int fd) {
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
int out_fd, size_t len) {
- static _Atomic int no_copy_file_range_support;
+ static std::atomic<int> no_copy_file_range_support;
ssize_t r;
- if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
+ if (atomic_load_explicit(&no_copy_file_range_support, std::memory_order_relaxed)) {
errno = ENOSYS;
return -1;
}
@@ -889,7 +891,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
errno = ENOSYS; /* Use fallback. */
break;
case ENOSYS:
- atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
+ atomic_store_explicit(&no_copy_file_range_support, 1, std::memory_order_relaxed);
break;
case EPERM:
/* It's been reported that CIFS spuriously fails.
@@ -1380,14 +1382,14 @@ static int uv__fs_statx(int fd,
uv_stat_t* buf) {
STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
- static _Atomic int no_statx;
+ static std::atomic<int> no_statx;
struct uv__statx statxbuf;
int dirfd;
int flags;
int mode;
int rc;
- if (atomic_load_explicit(&no_statx, memory_order_relaxed))
+ if (atomic_load_explicit(&no_statx, std::memory_order_relaxed))
return UV_ENOSYS;
dirfd = AT_FDCWD;
@@ -1421,7 +1423,7 @@ static int uv__fs_statx(int fd,
* implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS.
*/
- atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
+ atomic_store_explicit(&no_statx, 1, std::memory_order_relaxed);
return UV_ENOSYS;
}
diff --git a/src/unix/kqueue.c b/src/unix/kqueue.c
index 06fbdb24b4adc4adb781d32150d40836fa745531..939a42696778741f9184be29dcea7c858736d181 100644
--- a/src/unix/kqueue.c
+++ b/src/unix/kqueue.c
@@ -37,6 +37,8 @@
#include <fcntl.h>
#include <time.h>
+#include <atomic>
+
/*
* Required on
* - Until at least FreeBSD 11.0
@@ -63,7 +65,7 @@ int uv__kqueue_init(uv_loop_t* loop) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
-static _Atomic int uv__has_forked_with_cfrunloop;
+static std::atomic<int> uv__has_forked_with_cfrunloop;
#endif
int uv__io_fork(uv_loop_t* loop) {
@@ -87,7 +89,7 @@ int uv__io_fork(uv_loop_t* loop) {
*/
atomic_store_explicit(&uv__has_forked_with_cfrunloop,
1,
- memory_order_relaxed);
+ std::memory_order_relaxed);
uv__free(loop->cf_state);
loop->cf_state = NULL;
}
@@ -555,7 +557,7 @@ int uv_fs_event_start(uv_fs_event_t* handle,
goto fallback;
if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
- memory_order_relaxed)) {
+ std::memory_order_relaxed)) {
int r;
/* The fallback fd is no longer needed */
uv__close_nocheckstdio(fd);
@@ -591,7 +593,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
- memory_order_relaxed))
+ std::memory_order_relaxed))
if (handle->cf_cb != NULL)
r = uv__fsevents_close(handle);
#endif
diff --git a/src/unix/linux.c b/src/unix/linux.c
index 2b8e1d8fe593a181d049aa50ff9edaf6da258a24..b23d88bd824843eebc3b439e5a18e6f796a747be 100644
--- a/src/unix/linux.c
+++ b/src/unix/linux.c
@@ -27,7 +27,6 @@
#include "internal.h"
#include <inttypes.h>
-#include <stdatomic.h>
#include <stddef.h> /* offsetof */
#include <stdint.h>
#include <stdio.h>
@@ -139,6 +138,8 @@
# include <netpacket/packet.h>
#endif /* HAVE_IFADDRS_H */
+#include <atomic>
+
enum {
UV__IORING_SETUP_SQPOLL = 2u,
};
@@ -317,7 +318,7 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) {
unsigned uv__kernel_version(void) {
- static _Atomic unsigned cached_version;
+ static std::atomic<unsigned int> cached_version;
struct utsname u;
unsigned version;
unsigned major;
@@ -326,7 +327,7 @@ unsigned uv__kernel_version(void) {
char v_sig[256];
char* needle;
- version = atomic_load_explicit(&cached_version, memory_order_relaxed);
+ version = std::atomic_load_explicit(&cached_version, std::memory_order_relaxed);
if (version != 0)
return version;
@@ -382,7 +383,7 @@ unsigned uv__kernel_version(void) {
calculate_version:
version = major * 65536 + minor * 256 + patch;
- atomic_store_explicit(&cached_version, version, memory_order_relaxed);
+ std::atomic_store_explicit(&cached_version, version, std::memory_order_relaxed);
return version;
}
@@ -480,11 +481,11 @@ static int uv__use_io_uring(void) {
return 0; /* All 32 bits kernels appear buggy. */
#else
/* Ternary: unknown=0, yes=1, no=-1 */
- static _Atomic int use_io_uring;
+ static std::atomic<int> use_io_uring;
char* val;
int use;
- use = atomic_load_explicit(&use_io_uring, memory_order_relaxed);
+ use = std::atomic_load_explicit(&use_io_uring, std::memory_order_relaxed);
if (use == 0) {
/* Older kernels have a bug where the sqpoll thread uses 100% CPU. */
@@ -495,7 +496,7 @@ static int uv__use_io_uring(void) {
if (val != NULL)
use = atoi(val) ? 1 : -1;
- atomic_store_explicit(&use_io_uring, use, memory_order_relaxed);
+ std::atomic_store_explicit(&use_io_uring, use, std::memory_order_relaxed);
}
return use > 0;
@@ -771,8 +772,8 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
if (iou->ringfd == -1)
return NULL;
- head = atomic_load_explicit((_Atomic uint32_t*) iou->sqhead,
- memory_order_acquire);
+ head = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqhead,
+ std::memory_order_acquire);
tail = *iou->sqtail;
mask = iou->sqmask;
@@ -801,12 +802,12 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
static void uv__iou_submit(struct uv__iou* iou) {
uint32_t flags;
- atomic_store_explicit((_Atomic uint32_t*) iou->sqtail,
+ std::atomic_store_explicit((std::atomic<uint32_t>*) iou->sqtail,
*iou->sqtail + 1,
- memory_order_release);
+ std::memory_order_release);
- flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags,
- memory_order_acquire);
+ flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags,
+ std::memory_order_acquire);
if (flags & UV__IORING_SQ_NEED_WAKEUP)
if (uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_SQ_WAKEUP))
@@ -1147,8 +1148,8 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
int rc;
head = *iou->cqhead;
- tail = atomic_load_explicit((_Atomic uint32_t*) iou->cqtail,
- memory_order_acquire);
+ tail = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->cqtail,
+ std::memory_order_acquire);
mask = iou->cqmask;
cqe = (uv__io_uring_cqe*)iou->cqe;
nevents = 0;
@@ -1180,15 +1181,15 @@ static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
nevents++;
}
- atomic_store_explicit((_Atomic uint32_t*) iou->cqhead,
+ std::atomic_store_explicit((std::atomic<uint32_t>*) iou->cqhead,
tail,
- memory_order_release);
+ std::memory_order_release);
/* Check whether CQE's overflowed, if so enter the kernel to make them
* available. Don't grab them immediately but in the next loop iteration to
* avoid loop starvation. */
- flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags,
- memory_order_acquire);
+ flags = std::atomic_load_explicit((std::atomic<uint32_t>*) iou->sqflags,
+ std::memory_order_acquire);
if (flags & UV__IORING_SQ_CQ_OVERFLOW) {
do
@@ -1581,7 +1582,7 @@ update_timeout:
}
uint64_t uv__hrtime(uv_clocktype_t type) {
- static _Atomic clock_t fast_clock_id = -1;
+ static std::atomic<clock_t> fast_clock_id = -1;
struct timespec t;
clock_t clock_id;
@@ -1597,7 +1598,7 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
if (type != UV_CLOCK_FAST)
goto done;
- clock_id = atomic_load_explicit(&fast_clock_id, memory_order_relaxed);
+ clock_id = std::atomic_load_explicit(&fast_clock_id, std::memory_order_relaxed);
if (clock_id != -1)
goto done;
@@ -1606,7 +1607,7 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
if (t.tv_nsec <= 1 * 1000 * 1000)
clock_id = CLOCK_MONOTONIC_COARSE;
- atomic_store_explicit(&fast_clock_id, clock_id, memory_order_relaxed);
+ std::atomic_store_explicit(&fast_clock_id, clock_id, std::memory_order_relaxed);
done:
diff --git a/src/unix/tty.c b/src/unix/tty.c
index 1bd217b5a15eed13a8349c479b53471dd36ca216..1304c6d8685cfd122cffea066dc668d1dfc9ae02 100644
--- a/src/unix/tty.c
+++ b/src/unix/tty.c
@@ -22,7 +22,6 @@
#include "uv.h"
#include "internal.h"
-#include <stdatomic.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
@@ -30,6 +29,8 @@
#include <errno.h>
#include <sys/ioctl.h>
+#include <atomic>
+
#if defined(__MVS__) && !defined(IMAXBEL)
#define IMAXBEL 0
#endif
@@ -64,7 +65,7 @@ static int isreallyatty(int file) {
static int orig_termios_fd = -1;
static struct termios orig_termios;
-static _Atomic int termios_spinlock;
+static std::atomic<int> termios_spinlock;
int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc;
diff --git a/src/uv-common.c b/src/uv-common.c
index 49126e50f07bac16d198775454b731f40630d1d1..1ce25c24d6c046f7aaeaa52dcfc4fafa5a738650 100644
--- a/src/uv-common.c
+++ b/src/uv-common.c
@@ -953,7 +953,7 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
- static int was_shutdown;
+ static std::atomic<int> was_shutdown;
if (uv__exchange_int_relaxed(&was_shutdown, 1))
return;
diff --git a/src/uv-common.h b/src/uv-common.h
index cd57e5a35153d0557351b60cce0c5be7a4468b60..5dce8eaf2705b47935b218181f6dd69af0d5b61b 100644
--- a/src/uv-common.h
+++ b/src/uv-common.h
@@ -32,15 +32,13 @@
#include <stddef.h>
#include <stdint.h>
+#include <atomic>
+
#include "uv.h"
#include "uv/tree.h"
#include "queue.h"
#include "strscpy.h"
-#ifndef _MSC_VER
-# include <stdatomic.h>
-#endif
-
#if EDOM > 0
# define UV__ERR(x) (-(x))
#else
@@ -70,7 +68,7 @@ extern int snprintf(char*, size_t, const char*, ...);
InterlockedExchangeNoFence((LONG volatile*)(p), v)
#else
#define uv__exchange_int_relaxed(p, v) \
- atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed)
+ std::atomic_exchange_explicit((std::atomic<int>*)(p), v, std::memory_order_relaxed)
#endif
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)