author     Bart Van Assche <bvanassche@acm.org>  2019-07-01 14:42:32 -0700
committer  Jens Axboe <axboe@kernel.dk>          2019-07-02 07:33:52 -0600
commit     ecefd7958eb32602df07f12e9808598b2c2de84b (patch)
tree       789bcf30a838d99b02237568dae0b215cc13f944 /src/queue.c
parent     bbb30995a0b4c9e3489aa5d66d1807425734b791 (diff)
Fix the use of memory barriers
Introduce the smp_load_acquire() and smp_store_release() macros. Fix
synchronization in io_uring_cq_advance() and __io_uring_get_cqe(). Remove a
superfluous local variable, if-test and write barrier from __io_uring_submit().
Remove a superfluous barrier from test/io_uring_enter.c.

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Roman Penyaev <rpenyaev@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
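The definitions of the new macros are not part of this queue.c diff (liburing
keeps its barrier helpers in a separate header), so what follows is only a
minimal sketch of the semantics they provide, assuming GCC/Clang __atomic
builtins; the actual definitions may use per-architecture code instead.

/*
 * Illustrative only, not the definitions added by this commit.
 * smp_store_release() publishes all stores issued before the write to *p;
 * smp_load_acquire() guarantees that loads issued after it observe data
 * written before the paired release.
 */
#define smp_store_release(p, v)	__atomic_store_n((p), (v), __ATOMIC_RELEASE)
#define smp_load_acquire(p)	__atomic_load_n((p), __ATOMIC_ACQUIRE)

With an acquire/release pair like this, the explicit read_barrier() and
write_barrier() calls around the SQ tail update become unnecessary, which is
what the hunks below remove.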
Diffstat (limited to 'src/queue.c')
-rw-r--r--  src/queue.c  30
1 file changed, 8 insertions, 22 deletions
diff --git a/src/queue.c b/src/queue.c
index bec363f..72b2293 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -77,7 +77,7 @@ static int __io_uring_submit(struct io_uring *ring, unsigned wait_nr)
{
struct io_uring_sq *sq = &ring->sq;
const unsigned mask = *sq->kring_mask;
- unsigned ktail, ktail_next, submitted, to_submit;
+ unsigned ktail, submitted, to_submit;
unsigned flags;
int ret;
@@ -88,15 +88,11 @@ static int __io_uring_submit(struct io_uring *ring, unsigned wait_nr)
* Fill in sqes that we have queued up, adding them to the kernel ring
*/
submitted = 0;
- ktail = ktail_next = *sq->ktail;
+ ktail = *sq->ktail;
to_submit = sq->sqe_tail - sq->sqe_head;
while (to_submit--) {
- ktail_next++;
- read_barrier();
-
sq->array[ktail & mask] = sq->sqe_head & mask;
- ktail = ktail_next;
-
+ ktail++;
sq->sqe_head++;
submitted++;
}
@@ -104,21 +100,11 @@ static int __io_uring_submit(struct io_uring *ring, unsigned wait_nr)
if (!submitted)
return 0;
- if (*sq->ktail != ktail) {
- /*
- * First write barrier ensures that the SQE stores are updated
- * with the tail update. This is needed so that the kernel
- * will never see a tail update without the preceeding sQE
- * stores being done.
- */
- write_barrier();
- *sq->ktail = ktail;
- /*
- * The kernel has the matching read barrier for reading the
- * SQ tail.
- */
- write_barrier();
- }
+ /*
+ * Ensure that the kernel sees the SQE updates before it sees the tail
+ * update.
+ */
+ smp_store_release(sq->ktail, ktail);
flags = 0;
if (wait_nr || sq_ring_needs_enter(ring, &flags)) {
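The acquire side named in the commit message (io_uring_cq_advance() and
__io_uring_get_cqe()) is not visible in the hunks shown above. A hedged sketch
of how reading the CQ tail pairs with the kernel's store-release, using the
liburing field names but otherwise illustrative; the helper name is
hypothetical and not taken from this commit:

/*
 * Illustrative sketch, assuming the liburing headers (struct io_uring,
 * struct io_uring_cq) and the smp_load_acquire() macro sketched earlier.
 */
static struct io_uring_cqe *peek_cqe_sketch(struct io_uring *ring)
{
	struct io_uring_cq *cq = &ring->cq;
	const unsigned mask = *cq->kring_mask;
	unsigned head = *cq->khead;

	/*
	 * smp_load_acquire() on the CQ tail pairs with the kernel's
	 * store-release, so the CQE contents written before the tail
	 * update are visible before cq->cqes[] is dereferenced.
	 */
	if (head == smp_load_acquire(cq->ktail))
		return NULL;		/* ring empty */

	return &cq->cqes[head & mask];
}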