author    Stefan Hajnoczi <stefanha@redhat.com>    2019-07-24 09:24:50 +0100
committer Jens Axboe <axboe@kernel.dk>             2019-07-24 09:11:44 -0600
commit    c31c7ec4bcd7bb0d7b28897d730431c02b9d4ea1
tree      baffe189e6dc69fa17adc8fe08ad1d133b328ade
parent    b42c59d7afc79370cf0140d5b9978b1e8c350408
src/Makefile: keep private headers in <liburing/*.h>
It is not possible to install barrier.h and compat.h directly into the
top-level /usr/include since they are likely to conflict with other
software. io_uring.h could be confused with the system's kernel header
file.

Put the liburing headers into <liburing/*.h> so there is no chance of
conflicts or confusion.

Existing applications continue to build successfully since the location
of <liburing.h> is unchanged. In-tree examples and tests require
modification because src/liburing.h is moved to src/include/liburing.h.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src/include/liburing/barrier.h')
-rw-r--r--   src/include/liburing/barrier.h   87
1 file changed, 87 insertions(+), 0 deletions(-)
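
To make the relocation concrete, here is a small sketch of the include lines
involved (the specific include sites shown are illustrative; only the
<liburing.h> and <liburing/*.h> paths come from this patch):

    /* Application code: unchanged, the public header keeps its location. */
    #include <liburing.h>

    /* liburing's own sources, examples and tests now reference the private
     * headers through the liburing/ subdirectory introduced here, e.g.: */
    #include "liburing/compat.h"
    #include "liburing/barrier.h"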
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
new file mode 100644
index 0000000..98be9e5
--- /dev/null
+++ b/src/include/liburing/barrier.h
@@ -0,0 +1,87 @@
+#ifndef LIBURING_BARRIER_H
+#define LIBURING_BARRIER_H
+
+/*
+From the kernel documentation file refcount-vs-atomic.rst:
+
+A RELEASE memory ordering guarantees that all prior loads and
+stores (all po-earlier instructions) on the same CPU are completed
+before the operation. It also guarantees that all po-earlier
+stores on the same CPU and all propagated stores from other CPUs
+must propagate to all other CPUs before the release operation
+(A-cumulative property). This is implemented using
+:c:func:`smp_store_release`.
+
+An ACQUIRE memory ordering guarantees that all post loads and
+stores (all po-later instructions) on the same CPU are
+completed after the acquire operation. It also guarantees that all
+po-later stores on the same CPU must propagate to all other CPUs
+after the acquire operation executes. This is implemented using
+:c:func:`smp_acquire__after_ctrl_dep`.
+*/
+
+/* From tools/include/linux/compiler.h */
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+/* From tools/virtio/linux/compiler.h */
+#define WRITE_ONCE(var, val) \
+ (*((volatile __typeof(val) *)(&(var))) = (val))
+#define READ_ONCE(var) (*((volatile __typeof(var) *)(&(var))))
+
+
+#if defined(__x86_64__) || defined(__i386__)
+/* Adapted from arch/x86/include/asm/barrier.h */
+#define mb() asm volatile("mfence" ::: "memory")
+#define rmb() asm volatile("lfence" ::: "memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#if defined(__i386__)
+#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#else
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
+#endif
+
+#define smp_store_release(p, v) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*(p), (v)); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ __typeof(*p) ___p1 = READ_ONCE(*(p)); \
+ barrier(); \
+ ___p1; \
+})
+#else /* defined(__x86_64__) || defined(__i386__) */
+/*
+ * Add arch appropriate definitions. Be safe and use full barriers for
+ * archs we don't have support for.
+ */
+#define smp_rmb() __sync_synchronize()
+#define smp_wmb() __sync_synchronize()
+#endif /* defined(__x86_64__) || defined(__i386__) */
+
+/* From tools/include/asm/barrier.h */
+
+#ifndef smp_store_release
+# define smp_store_release(p, v) \
+do { \
+ smp_mb(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+# define smp_load_acquire(p) \
+({ \
+ __typeof(*p) ___p1 = READ_ONCE(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+#endif
+
+#endif /* defined(LIBURING_BARRIER_H) */
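
As a usage note, the release/acquire macros above let shared ring state be
published from a producer to a consumer without full barriers on x86. Below is
a minimal, self-contained sketch of the intended pattern; the names msg, ready,
producer and consumer are illustrative and not part of liburing, and the
include path assumes the installed <liburing/*.h> layout from this patch:

    #include <liburing/barrier.h>

    static int msg;            /* payload written by the producer */
    static unsigned ready;     /* publication flag: 0 = empty, 1 = full */

    /* Producer: fill in the payload with plain stores, then publish it.
     * smp_store_release() orders the payload store before the flag store,
     * so a consumer that observes ready == 1 also observes msg. */
    static void producer(int value)
    {
        msg = value;
        smp_store_release(&ready, 1);
    }

    /* Consumer: read the flag with acquire semantics; once it is set, the
     * subsequent load of msg is ordered after the flag load.
     * Returns -1 if nothing has been published yet. */
    static int consumer(void)
    {
        if (!smp_load_acquire(&ready))
            return -1;
        return msg;
    }

This is the same pairing the io_uring rings rely on: userspace store-releases
the SQ tail that the kernel load-acquires, and the kernel store-releases the
CQ tail that userspace load-acquires before reading completion entries.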