Skip to content

UCS: Introduce lightweight rwlock #10355

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/ucs/Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@ noinst_HEADERS = \
time/timerq.h \
time/timer_wheel.h \
type/serialize.h \
type/rwlock.h \
type/float8.h \
async/async.h \
async/pipe.h \
Expand Down
5 changes: 5 additions & 0 deletions src/ucs/arch/aarch64/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,11 @@ static inline ucs_status_t ucs_arch_get_cache_size(size_t *cache_sizes)
return UCS_ERR_UNSUPPORTED;
}

/**
 * CPU busy-wait hint for spin loops: "yield" lets the core give way to
 * other hardware threads. The "memory" clobber is a compiler barrier that
 * forces shared state to be re-read on every spin iteration.
 */
static UCS_F_ALWAYS_INLINE void ucs_cpu_relax(void)
{
    asm volatile ("yield" ::: "memory");
}

END_C_DECLS

#endif
1 change: 1 addition & 0 deletions src/ucs/arch/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

#include <ucs/sys/compiler_def.h>
#include <stddef.h>
#include <sched.h>

BEGIN_C_DECLS

Expand Down
7 changes: 7 additions & 0 deletions src/ucs/arch/ppc64/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,13 @@ static inline ucs_status_t ucs_arch_get_cache_size(size_t *cache_sizes)
return UCS_ERR_UNSUPPORTED;
}

/**
 * CPU busy-wait hint for spin loops: temporarily drop and then restore the
 * SMT hardware-thread priority, giving sibling threads more issue slots.
 * The final compiler barrier forces shared state to be re-read on every
 * spin iteration.
 */
static UCS_F_ALWAYS_INLINE void ucs_cpu_relax(void)
{
    asm volatile ("or 1, 1, 1 \n"); /* hw threading low priority */
    asm volatile ("or 2, 2, 2 \n"); /* hw threading normal priority */
    asm volatile ("" ::: "memory"); /* compiler barrier */
}

END_C_DECLS

#endif
5 changes: 5 additions & 0 deletions src/ucs/arch/rv64/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,11 @@ ucs_memcpy_nontemporal(void *dst, const void *src, size_t len)
memcpy(dst, src, len);
}

/**
 * CPU busy-wait hint for spin loops. No architecture-specific pause
 * instruction is used here; the "memory" clobber is a compiler barrier
 * that forces shared state to be re-read on every spin iteration.
 */
static UCS_F_ALWAYS_INLINE void ucs_cpu_relax(void)
{
    asm volatile ("" ::: "memory");
}

END_C_DECLS

#endif
12 changes: 12 additions & 0 deletions src/ucs/arch/x86_64/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@
#ifdef __AVX__
# include <immintrin.h>
#endif
#ifdef __SSE2__
# include <emmintrin.h>
#endif

BEGIN_C_DECLS

Expand Down Expand Up @@ -132,6 +135,15 @@ ucs_memcpy_nontemporal(void *dst, const void *src, size_t len)
ucs_x86_memcpy_sse_movntdqa(dst, src, len);
}

/**
 * CPU busy-wait hint for spin loops: PAUSE reduces power consumption and
 * avoids a memory-order mis-speculation penalty when exiting the loop.
 * Without SSE2, fall back to a plain compiler barrier so shared state is
 * still re-read on every spin iteration.
 */
static UCS_F_ALWAYS_INLINE void ucs_cpu_relax(void)
{
#ifdef __SSE2__
    _mm_pause();
#else
    asm volatile ("" ::: "memory");
#endif
}

END_C_DECLS

#endif
Expand Down
149 changes: 149 additions & 0 deletions src/ucs/type/rwlock.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
/*
* Copyright (c) NVIDIA CORPORATION & AFFILIATES, 2025. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/

#ifndef UCS_RWLOCK_H
#define UCS_RWLOCK_H

#include <ucs/arch/cpu.h>
#include <ucs/debug/assert.h>
#include <ucs/sys/compiler_def.h>

/**
* The ucs_rw_spinlock_t type.
*
* Readers increment the counter by UCS_RWLOCK_READ (4)
* Writers set the UCS_RWLOCK_WRITE bit when lock is held
* and set the UCS_RWLOCK_WAIT bit while waiting.
* UCS_RWLOCK_WAIT bit is meant for all subsequent reader
* to let any writer go first to avoid write starvation.
*
* 31 2 1 0
* +-------------------+-+-+
* | readers | | |
* +-------------------+-+-+
* ^ ^
* | |
* WRITE: lock held ----/ |
* WAIT: writer pending --/
*/

#define UCS_RWLOCK_WAIT UCS_BIT(0) /* Writer is waiting */
#define UCS_RWLOCK_WRITE UCS_BIT(1) /* Writer has the lock */
#define UCS_RWLOCK_MASK (UCS_RWLOCK_WAIT | UCS_RWLOCK_WRITE)
#define UCS_RWLOCK_READ UCS_BIT(2) /* Reader increment */

#define UCS_RWLOCK_STATIC_INITIALIZER {0}


#define ucs_rw_spinlock_assert(_lock, _cond, _desc) \
ucs_assertv((_lock)->state _cond, "lock=%p " _desc " state=0x%x%s%s", \
(_lock), (_lock)->state, \
(_lock)->state & UCS_RWLOCK_WAIT ? " WAIT" : "", \
(_lock)->state & UCS_RWLOCK_WRITE ? " WRITE" : "")


/**
* Reader-writer spin lock.
*/
/**
 * Reader-writer spin lock.
 */
typedef struct {
    /* Bit 0: writer waiting, bit 1: writer holds the lock,
     * bits 2..31: number of readers holding the lock */
    uint32_t state;
} ucs_rw_spinlock_t;


/**
 * Acquire the lock for reading. Spins until no writer holds or waits for
 * the lock, then registers as a reader; if a writer slipped in between the
 * wait loop and the increment, the increment is rolled back and the whole
 * sequence retried.
 *
 * @param [in] lock  Lock to acquire.
 */
static UCS_F_ALWAYS_INLINE void
ucs_rw_spinlock_read_lock(ucs_rw_spinlock_t *lock)
{
    uint32_t x;

    for (;;) {
        /* Back off while a writer holds the lock or is waiting for it -
         * yielding to pending writers avoids writer starvation */
        while (__atomic_load_n(&lock->state, __ATOMIC_RELAXED) &
               UCS_RWLOCK_MASK) {
            ucs_cpu_relax();
        }

        /* Optimistically add ourselves as a reader; acquire ordering pairs
         * with the release in the unlock paths */
        x = __atomic_fetch_add(&lock->state, UCS_RWLOCK_READ, __ATOMIC_ACQUIRE);
        if (!(x & UCS_RWLOCK_MASK)) {
            return;
        }

        /* A writer raced with us - undo the reader increment and retry */
        __atomic_fetch_sub(&lock->state, UCS_RWLOCK_READ, __ATOMIC_RELAXED);
    }
}


/**
 * Release the lock previously acquired for reading. Release ordering makes
 * the critical section visible before the reader count is decremented.
 *
 * @param [in] lock  Lock to release.
 */
static UCS_F_ALWAYS_INLINE void
ucs_rw_spinlock_read_unlock(ucs_rw_spinlock_t *lock)
{
    ucs_rw_spinlock_assert(lock, >= UCS_RWLOCK_READ, "read underrun");
    __atomic_fetch_sub(&lock->state, UCS_RWLOCK_READ, __ATOMIC_RELEASE);
}


/**
 * Acquire the lock for writing. Spins until the lock is fully released
 * (no readers, no writer), advertising intent through the WAIT bit so
 * that incoming readers back off.
 *
 * @param [in] lock  Lock to acquire.
 */
static UCS_F_ALWAYS_INLINE void
ucs_rw_spinlock_write_lock(ucs_rw_spinlock_t *lock)
{
    uint32_t x;

    x = __atomic_load_n(&lock->state, __ATOMIC_RELAXED);
    /* state > WAIT means readers are present or a writer holds the lock;
     * otherwise (state is 0 or just the WAIT bit) the CAS below may
     * succeed immediately */
    if (ucs_unlikely(x > UCS_RWLOCK_WAIT)) {
        goto wait;
    }

    for (;;) {
        /* Swap {0 or WAIT} -> WRITE. Clearing the WAIT bit here is safe:
         * any other pending writer will set it again below */
        if (__atomic_compare_exchange_n(&lock->state, &x, UCS_RWLOCK_WRITE, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
            return;
        }

wait:
        /* Announce a pending writer so new readers hold off */
        if (ucs_likely(!(x & UCS_RWLOCK_WAIT))) {
            __atomic_fetch_or(&lock->state, UCS_RWLOCK_WAIT, __ATOMIC_RELAXED);
        }

        /* Spin until at most the WAIT bit remains set, then retry the CAS */
        while ((x = __atomic_load_n(&lock->state, __ATOMIC_RELAXED)) >
               UCS_RWLOCK_WAIT) {
            ucs_cpu_relax();
        }
    }
}


/**
 * Try to acquire the lock for writing without blocking.
 *
 * @param [in] lock  Lock to acquire.
 *
 * @return 1 if the lock was acquired, 0 otherwise.
 */
static UCS_F_ALWAYS_INLINE int
ucs_rw_spinlock_write_trylock(ucs_rw_spinlock_t *lock)
{
    uint32_t x;

    x = __atomic_load_n(&lock->state, __ATOMIC_RELAXED);
    /* x < WRITE means no readers and no writer hold the lock (only the
     * WAIT bit may be set, and it is preserved by x | WRITE). The weak
     * CAS may fail spuriously, which is acceptable for a trylock. */
    if ((x < UCS_RWLOCK_WRITE) &&
        (__atomic_compare_exchange_n(&lock->state, &x, x | UCS_RWLOCK_WRITE, 1,
                                     __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))) {
        return 1;
    }

    return 0;
}


/**
 * Release the lock previously acquired for writing. Subtracting WRITE
 * clears the held bit while preserving a concurrently-set WAIT bit, so
 * pending writers keep their priority over new readers.
 *
 * @param [in] lock  Lock to release.
 */
static UCS_F_ALWAYS_INLINE void
ucs_rw_spinlock_write_unlock(ucs_rw_spinlock_t *lock)
{
    ucs_rw_spinlock_assert(lock, >= UCS_RWLOCK_WRITE, "write underrun");
    __atomic_fetch_sub(&lock->state, UCS_RWLOCK_WRITE, __ATOMIC_RELEASE);
}


/**
 * Initialize the lock to the unlocked state (no readers, no writer).
 * Not atomic - must not race with other lock operations.
 *
 * @param [in] lock  Lock to initialize.
 */
static UCS_F_ALWAYS_INLINE void ucs_rw_spinlock_init(ucs_rw_spinlock_t *lock)
{
    lock->state = 0;
}


/**
 * Destroy the lock. Only asserts (in debug builds) that the lock has been
 * fully released; no resources are freed.
 *
 * @param [in] lock  Lock to destroy.
 */
static UCS_F_ALWAYS_INLINE void ucs_rw_spinlock_cleanup(ucs_rw_spinlock_t *lock)
{
    ucs_rw_spinlock_assert(lock, == 0, "not released");
}

#endif
Loading
Loading