From bbb30995a0b4c9e3489aa5d66d1807425734b791 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Mon, 1 Jul 2019 14:42:31 -0700
Subject: __io_uring_get_cqe(): Use io_uring_for_each_cqe()

Use io_uring_for_each_cqe() inside __io_uring_get_cqe() such that it
becomes possible to test the io_uring_for_each_cqe() implementation
from inside the liburing project.

Cc: Stefan Hajnoczi
Cc: Roman Penyaev
Signed-off-by: Bart Van Assche
Signed-off-by: Jens Axboe
---
 src/queue.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/src/queue.c b/src/queue.c
index 85e0c1e..bec363f 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -14,26 +14,14 @@ static int __io_uring_get_cqe(struct io_uring *ring,
                               struct io_uring_cqe **cqe_ptr, int wait)
 {
-        struct io_uring_cq *cq = &ring->cq;
-        const unsigned mask = *cq->kring_mask;
         unsigned head;
         int ret;
 
-        *cqe_ptr = NULL;
-        head = *cq->khead;
         do {
-                /*
-                 * It's necessary to use a read_barrier() before reading
-                 * the CQ tail, since the kernel updates it locklessly. The
-                 * kernel has the matching store barrier for the update. The
-                 * kernel also ensures that previous stores to CQEs are ordered
-                 * with the tail update.
-                 */
-                read_barrier();
-                if (head != *cq->ktail) {
-                        *cqe_ptr = &cq->cqes[head & mask];
+                io_uring_for_each_cqe(ring, head, *cqe_ptr)
+                        break;
+                if (*cqe_ptr)
                         break;
-                }
                 if (!wait)
                         break;
                 ret = io_uring_enter(ring->ring_fd, 0, 1,
--
cgit
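
For readers unfamiliar with the macro this patch starts exercising internally,
below is a minimal sketch of how an application might drain the completion
ring with io_uring_for_each_cqe() and then retire the batch with
io_uring_cq_advance(). The NOP request, queue depth, and user_data value are
illustrative only and are not part of the patch above; build against liburing
(link with -luring).

/* sketch: drain all currently visible CQEs in one pass */
#include <liburing.h>
#include <stdio.h>

#define QUEUE_DEPTH 8	/* illustrative ring size */

static void drain_completions(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head;
	unsigned seen = 0;

	/* Walk every CQE already posted, without entering the kernel. */
	io_uring_for_each_cqe(ring, head, cqe) {
		printf("cqe: res=%d user_data=%llu\n",
		       cqe->res, (unsigned long long) cqe->user_data);
		seen++;
	}

	/* Advance the CQ head once for the whole batch of CQEs consumed. */
	io_uring_cq_advance(ring, seen);
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(QUEUE_DEPTH, &ring, 0) < 0)
		return 1;

	/* Submit a no-op request so there is at least one completion. */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, (void *) 42);
	io_uring_submit_and_wait(&ring, 1);

	drain_completions(&ring);
	io_uring_queue_exit(&ring);
	return 0;
}

The patched __io_uring_get_cqe() uses the same macro but breaks out on the
first CQE it sees, which is why the loop body in the diff is just a break
statement followed by a check of *cqe_ptr.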