author		Roman Penyaev <rpenyaev@suse.de>	2019-05-27 21:05:12 +0200
committer	Jens Axboe <axboe@kernel.dk>		2019-05-27 14:54:18 -0600
commit		f3b78a78240e2cf8826291501f9ee3f2112e7dfe
tree		a6049ff2ae436238267e54492db822e1542170fe /src
parent		e60729e64189c908c61254e9743daee7db47f88f
liburing: introduce io_uring_for_each_cqe() and io_uring_cq_advance()
io_uring_cqe_seen() contains a write memory barrier (at least on !x86),
so calling it once per cqe can be very expensive when a large number of
cqes is pending. It would be better to loop over all available cqes and
only then advance the cq ring once, e.g.:
	struct io_uring_cqe *cqe;
	unsigned nr, head;

	nr = 0;
	io_uring_for_each_cqe(&ring, head, cqe) {
		/* handle cqe */
		if (++nr == max)
			break;
	}
	/* one head update (and one barrier) for the whole batch */
	io_uring_cq_advance(&ring, nr);
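
For contrast, the same drain written against the existing per-cqe API
pays the barrier on every iteration. A minimal sketch, where handle_cqe()
and max are hypothetical stand-ins for application logic:

	struct io_uring_cqe *cqe;
	unsigned nr = 0;

	/* each io_uring_cqe_seen() call advances the head and issues
	 * the write barrier, once per cqe */
	while (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe != NULL) {
		handle_cqe(cqe);	/* hypothetical handler */
		io_uring_cqe_seen(&ring, cqe);
		if (++nr == max)
			break;
	}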
Signed-off-by: Roman Penyaev <rpenyaev@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src')
-rw-r--r--	src/liburing.h	32
1 file changed, 26 insertions, 6 deletions
diff --git a/src/liburing.h b/src/liburing.h
index 7973af9..df01040 100644
--- a/src/liburing.h
+++ b/src/liburing.h
@@ -74,17 +74,26 @@ extern int io_uring_wait_cqe(struct io_uring *ring,
 extern int io_uring_submit(struct io_uring *ring);
 extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
 
+#define io_uring_for_each_cqe(ring, head, cqe)					\
+	for (head = *(ring)->cq.khead;						\
+	     /* See read_barrier() explanation in __io_uring_get_cqe() */	\
+	     ({read_barrier();							\
+	     cqe = (head != *(ring)->cq.ktail ?					\
+		&(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL);});	\
+	     head++)								\
+
+
 /*
- * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
- * been processed by the application.
+ * Must be called after io_uring_for_each_cqe()
  */
-static inline void io_uring_cqe_seen(struct io_uring *ring,
-				     struct io_uring_cqe *cqe)
+static inline void io_uring_cq_advance(struct io_uring *ring,
+				       unsigned nr)
 {
-	if (cqe) {
+	if (nr) {
 		struct io_uring_cq *cq = &ring->cq;
 
-		(*cq->khead)++;
+		(*cq->khead) += nr;
+
 		/*
 		 * Ensure that the kernel sees our new head, the kernel has
 		 * the matching read barrier.
@@ -94,6 +103,17 @@ static inline void io_uring_cqe_seen(struct io_uring *ring,
 }
 
 /*
+ * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
+ * been processed by the application.
+ */
+static inline void io_uring_cqe_seen(struct io_uring *ring,
+				     struct io_uring_cqe *cqe)
+{
+	if (cqe)
+		io_uring_cq_advance(ring, 1);
+}
+
+/*
  * Command prep helpers
  */
 static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
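
As a usage note, here is a minimal sketch of how the two new helpers
compose with io_uring_wait_cqe() in an event loop; reap_completions()
and process_cqe() are hypothetical names, and error handling is elided:

	#include <liburing.h>

	static void reap_completions(struct io_uring *ring)
	{
		struct io_uring_cqe *cqe;
		unsigned head, nr = 0;

		/* block until at least one completion is available */
		if (io_uring_wait_cqe(ring, &cqe) < 0)
			return;

		/* walk every completion currently in the ring... */
		io_uring_for_each_cqe(ring, head, cqe) {
			process_cqe(cqe);	/* hypothetical handler */
			nr++;
		}

		/* ...then publish the new head to the kernel once */
		io_uring_cq_advance(ring, nr);
	}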