author      Julia Suvorova <jusual@redhat.com>       2019-08-19 08:45:28 -0600
committer   Jens Axboe <axboe@kernel.dk>             2019-08-19 08:45:28 -0600
commit      552c6a08d04c74d20eeaa86f535bfd553b352370 (patch)
tree        2929f38ee29c4757ff78e815792c646736e3b378 /src
parent      0520db454c29f1d96cda6cf6cedeb93df65301e8 (diff)
liburing/barrier.h: Add prefix io_uring to barriers
The barrier names conflict with identifiers in other projects when
liburing.h is included directly. Avoid using popular global names.
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Julia Suvorova <jusual@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
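
To illustrate the conflict the rename avoids, here is a minimal, hypothetical consumer sketch (the application-side barrier() macro and the file name are invented for the example, not taken from the patch): before this change, liburing/barrier.h defined the unprefixed barrier()/smp_*() names, so an application that already used those names for its own purposes could not include liburing.h cleanly; with the io_uring_ prefix, both sets of names coexist.

/* collision_example.c - hypothetical application code, not part of liburing */

/* The application already has its own compiler barrier, a very common name. */
#define barrier() __asm__ __volatile__("" ::: "memory")

/*
 * Before this patch, liburing/barrier.h (pulled in by liburing.h) also
 * defined barrier(), smp_mb(), smp_store_release(), ..., clashing with the
 * macro above. After the rename, liburing only defines io_uring_barrier(),
 * io_uring_smp_mb(), io_uring_smp_store_release(), ... and the include is
 * safe.
 */
#include <liburing.h>

int main(void)
{
	barrier();           /* the application's own macro, untouched */
	io_uring_barrier();  /* liburing's prefixed compiler barrier   */
	return 0;
}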
Diffstat (limited to 'src')
-rw-r--r--	src/include/liburing.h			 9
-rw-r--r--	src/include/liburing/barrier.h		42
-rw-r--r--	src/queue.c				 2
3 files changed, 29 insertions, 24 deletions
diff --git a/src/include/liburing.h b/src/include/liburing.h
index fb78cd3..7d7c9df 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -88,9 +88,12 @@ extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
 extern int io_uring_unregister_eventfd(struct io_uring *ring);
 
 #define io_uring_for_each_cqe(ring, head, cqe) \
-	/* smp_load_acquire() enforces the order of tail and CQE reads. */ \
+	/* \
+	 * io_uring_smp_load_acquire() enforces the order of tail \
+	 * and CQE reads. \
+	 */ \
 	for (head = *(ring)->cq.khead; \
-	     (cqe = (head != smp_load_acquire((ring)->cq.ktail) ? \
+	     (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
 		&(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
 	     head++) \
 
@@ -108,7 +111,7 @@ static inline void io_uring_cq_advance(struct io_uring *ring,
 		 * Ensure that the kernel only sees the new value of the head
 		 * index after the CQEs have been read.
 		 */
-		smp_store_release(cq->khead, *cq->khead + nr);
+		io_uring_smp_store_release(cq->khead, *cq->khead + nr);
 	}
 }
 
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
index b98193b..8efa6dd 100644
--- a/src/include/liburing/barrier.h
+++ b/src/include/liburing/barrier.h
@@ -23,7 +23,7 @@ after the acquire operation executes. This is implemented using
 /* From tools/include/linux/compiler.h */
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
+#define io_uring_barrier() __asm__ __volatile__("": : :"memory")
 
 /* From tools/virtio/linux/compiler.h */
 #define WRITE_ONCE(var, val) \
@@ -33,27 +33,29 @@ after the acquire operation executes. This is implemented using
 
 #if defined(__x86_64__) || defined(__i386__)
 /* Adapted from arch/x86/include/asm/barrier.h */
-#define mb() asm volatile("mfence" ::: "memory")
-#define rmb() asm volatile("lfence" ::: "memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
+#define io_uring_mb() asm volatile("mfence" ::: "memory")
+#define io_uring_rmb() asm volatile("lfence" ::: "memory")
+#define io_uring_wmb() asm volatile("sfence" ::: "memory")
+#define io_uring_smp_rmb() io_uring_barrier()
+#define io_uring_smp_wmb() io_uring_barrier()
 #if defined(__i386__)
-#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#define io_uring_smp_mb() asm volatile("lock; addl $0,0(%%esp)" \
+				       ::: "memory", "cc")
 #else
-#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
+#define io_uring_smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" \
+				       ::: "memory", "cc")
 #endif
 
-#define smp_store_release(p, v) \
+#define io_uring_smp_store_release(p, v) \
 do { \
-	barrier(); \
+	io_uring_barrier(); \
 	WRITE_ONCE(*(p), (v)); \
 } while (0)
 
-#define smp_load_acquire(p) \
+#define io_uring_smp_load_acquire(p) \
 ({ \
 	__typeof(*p) ___p1 = READ_ONCE(*(p)); \
-	barrier(); \
+	io_uring_barrier(); \
 	___p1; \
 })
 
@@ -74,25 +76,25 @@ do { \
  * Add arch appropriate definitions. Be safe and use full barriers for
  * archs we don't have support for.
  */
-#define smp_rmb() __sync_synchronize()
-#define smp_wmb() __sync_synchronize()
+#define io_uring_smp_rmb() __sync_synchronize()
+#define io_uring_smp_wmb() __sync_synchronize()
 #endif /* defined(__x86_64__) || defined(__i386__) */
 
 /* From tools/include/asm/barrier.h */
 
-#ifndef smp_store_release
-# define smp_store_release(p, v) \
+#ifndef io_uring_smp_store_release
+# define io_uring_smp_store_release(p, v) \
 do { \
-	smp_mb(); \
+	io_uring_smp_mb(); \
 	WRITE_ONCE(*p, v); \
 } while (0)
 #endif
 
-#ifndef smp_load_acquire
-# define smp_load_acquire(p) \
+#ifndef io_uring_smp_load_acquire
+# define io_uring_smp_load_acquire(p) \
 ({ \
 	__typeof(*p) ___p1 = READ_ONCE(*p); \
-	smp_mb(); \
+	io_uring_smp_mb(); \
 	___p1; \
 })
 #endif
 
diff --git a/src/queue.c b/src/queue.c
index 74a077f..007733c 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -104,7 +104,7 @@ static int __io_uring_submit(struct io_uring *ring, unsigned wait_nr)
 	 * Ensure that the kernel sees the SQE updates before it sees the tail
 	 * update.
 	 */
-	smp_store_release(sq->ktail, ktail);
+	io_uring_smp_store_release(sq->ktail, ktail);
 
 	flags = 0;
 	if (wait_nr || sq_ring_needs_enter(ring, &flags)) {
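
For reference, here is a sketch of how the renamed primitives are used from application code. It is a hypothetical manual drain of the completion ring, assuming an already initialized struct io_uring; in practice the io_uring_for_each_cqe()/io_uring_cq_advance() helpers updated above do exactly this, and the sketch only spells out the acquire/release pairing the macros provide.

/* drain_example.c - hypothetical usage sketch, assuming an initialized ring */
#include <liburing.h>

static unsigned drain_cq(struct io_uring *ring)
{
	struct io_uring_cq *cq = &ring->cq;
	unsigned head = *cq->khead;
	/* Load-acquire the tail so CQE contents are read only after it. */
	unsigned tail = io_uring_smp_load_acquire(cq->ktail);
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
		(void)cqe; /* ... inspect cqe->user_data / cqe->res here ... */
		head++;
		seen++;
	}

	/*
	 * Store-release the new head so the kernel sees it only after the
	 * CQEs above have been consumed.
	 */
	if (seen)
		io_uring_smp_store_release(cq->khead, head);
	return seen;
}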