author	Jens Axboe <axboe@kernel.dk>	2019-01-09 15:26:20 -0700
committer	Jens Axboe <axboe@kernel.dk>	2019-01-09 15:26:20 -0700
commit	7bf7e8e8e5cf1fe46194c6faf58e94cee815ac6a (patch)
tree	648474e57bbee8f992a87a1c3f8e756747203808 /src
parent	66a7d05bd82b362942c4f540754b94723c74e804 (diff)

Update to newer API

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src')
-rw-r--r--	src/io_uring.c	66
-rw-r--r--	src/io_uring.h	27
-rw-r--r--	src/liburing.h	20
3 files changed, 56 insertions(+), 57 deletions(-)
diff --git a/src/io_uring.c b/src/io_uring.c
index b3996b5..6168b56 100644
--- a/src/io_uring.c
+++ b/src/io_uring.c
@@ -10,18 +10,18 @@
#include "barrier.h"
static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
- struct io_uring_event **ev_ptr, int wait)
+ struct io_uring_cqe **cqe_ptr, int wait)
{
const unsigned mask = *cq->kring_mask;
unsigned head;
int ret;
- *ev_ptr = NULL;
+ *cqe_ptr = NULL;
head = *cq->khead;
do {
read_barrier();
if (head != *cq->ktail) {
- *ev_ptr = &cq->events[head & mask];
+ *cqe_ptr = &cq->cqes[head & mask];
break;
}
if (!wait)
@@ -31,7 +31,7 @@ static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
return -errno;
} while (1);
- if (*ev_ptr) {
+ if (*cqe_ptr) {
*cq->khead = head + 1;
write_barrier();
}
@@ -43,24 +43,24 @@ static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
* Return an IO completion, if one is readily available
*/
int io_uring_get_completion(struct io_uring *ring,
- struct io_uring_event **ev_ptr)
+ struct io_uring_cqe **cqe_ptr)
{
- return __io_uring_get_completion(ring->ring_fd, &ring->cq, ev_ptr, 0);
+ return __io_uring_get_completion(ring->ring_fd, &ring->cq, cqe_ptr, 0);
}
/*
* Return an IO completion, waiting for it if necessary
*/
int io_uring_wait_completion(struct io_uring *ring,
- struct io_uring_event **ev_ptr)
+ struct io_uring_cqe **cqe_ptr)
{
- return __io_uring_get_completion(ring->ring_fd, &ring->cq, ev_ptr, 1);
+ return __io_uring_get_completion(ring->ring_fd, &ring->cq, cqe_ptr, 1);
}
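For context on how these two entry points are meant to be used after the rename, here is a minimal caller sketch, assuming a ring already set up via io_uring_queue_init() from src/liburing.h. The helper name drain_and_wait() is hypothetical; per the code above, the non-waiting variant leaves *cqe_ptr NULL when nothing is pending and returns -errno on failure.

#include "liburing.h"

/* Hypothetical caller: drain any ready completions, then block for one. */
static int drain_and_wait(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	do {
		/* non-blocking: cqe stays NULL if the CQ ring is empty */
		ret = io_uring_get_completion(ring, &cqe);
		if (ret < 0)
			return ret;	/* -errno, per __io_uring_get_completion() */
	} while (cqe);			/* results discarded in this sketch */

	/* blocking: returns once at least one completion is posted */
	ret = io_uring_wait_completion(ring, &cqe);
	if (ret < 0)
		return ret;
	return cqe->res;		/* result code of the completed request */
}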
/*
- * Submit iocbs acquired from io_uring_get_iocb() to the kernel.
+ * Submit sqes acquired from io_uring_get_sqe() to the kernel.
*
- * Returns number of iocbs submitted
+ * Returns number of sqes submitted
*/
int io_uring_submit(struct io_uring *ring)
{
@@ -77,24 +77,24 @@ int io_uring_submit(struct io_uring *ring)
goto submit;
}
- if (sq->iocb_head == sq->iocb_tail)
+ if (sq->sqe_head == sq->sqe_tail)
return 0;
/*
- * Fill in iocbs that we have queued up, adding them to the kernel ring
+ * Fill in sqes that we have queued up, adding them to the kernel ring
*/
submitted = 0;
ktail = ktail_next = *sq->ktail;
- while (sq->iocb_head < sq->iocb_tail) {
+ while (sq->sqe_head < sq->sqe_tail) {
ktail_next++;
read_barrier();
if (ktail_next == *sq->khead)
break;
- sq->array[ktail & mask] = sq->iocb_head & mask;
+ sq->array[ktail & mask] = sq->sqe_head & mask;
ktail = ktail_next;
- sq->iocb_head++;
+ sq->sqe_head++;
submitted++;
}
@@ -113,27 +113,27 @@ submit:
}
/*
- * Return an iocb to fill. Application must later call io_uring_submit()
+ * Return an sqe to fill. Application must later call io_uring_submit()
* when it's ready to tell the kernel about it. The caller may call this
* function multiple times before calling io_uring_submit().
*
- * Returns a vacant iocb, or NULL if we're full.
+ * Returns a vacant sqe, or NULL if we're full.
*/
-struct io_uring_iocb *io_uring_get_iocb(struct io_uring *ring)
+struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
struct io_uring_sq *sq = &ring->sq;
- unsigned next = sq->iocb_tail + 1;
- struct io_uring_iocb *iocb;
+ unsigned next = sq->sqe_tail + 1;
+ struct io_uring_sqe *sqe;
/*
- * All iocbs are used
+ * All sqes are used
*/
- if (next - sq->iocb_head > *sq->kring_entries)
+ if (next - sq->sqe_head > *sq->kring_entries)
return NULL;
- iocb = &sq->iocbs[sq->iocb_tail & *sq->kring_mask];
- sq->iocb_tail = next;
- return iocb;
+ sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+ sq->sqe_tail = next;
+ return sqe;
}
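The comment above describes the intended batching pattern. The following sketch (a hypothetical helper, assuming the renamed API in this patch) grabs vacant sqes, marks each as an fsync using the IORING_OP_FSYNC opcode from src/io_uring.h, and pushes the batch with io_uring_submit(). Fields of struct io_uring_sqe beyond opcode are not visible in this diff, so they are simply zeroed here.

#include <string.h>
#include "liburing.h"

/* Hypothetical caller: queue up to 'nr' fsync requests, then submit them. */
static int queue_fsyncs(struct io_uring *ring, int nr)
{
	struct io_uring_sqe *sqe;
	int i;

	for (i = 0; i < nr; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			break;		/* ring full: submit what we have */
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_FSYNC;
		/* fd and the remaining sqe fields are elided in this diff */
	}

	/* tells the kernel about the queued sqes; returns how many went in */
	return io_uring_submit(ring);
}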
static int io_uring_mmap(int fd, struct io_uring_params *p,
@@ -156,23 +156,23 @@ static int io_uring_mmap(int fd, struct io_uring_params *p,
sq->kdropped = ptr + p->sq_off.dropped;
sq->array = ptr + p->sq_off.array;
- size = p->sq_entries * sizeof(struct io_uring_iocb);
- sq->iocbs = mmap(0, size, PROT_READ | PROT_WRITE,
+ size = p->sq_entries * sizeof(struct io_uring_sqe);
+ sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd,
- IORING_OFF_IOCB);
- if (sq->iocbs == MAP_FAILED) {
+ IORING_OFF_SQES);
+ if (sq->sqes == MAP_FAILED) {
ret = -errno;
err:
munmap(sq->khead, sq->ring_sz);
return ret;
}
- cq->ring_sz = p->cq_off.events + p->cq_entries * sizeof(struct io_uring_event);
+ cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
if (ptr == MAP_FAILED) {
ret = -errno;
- munmap(sq->iocbs, p->sq_entries * sizeof(struct io_uring_iocb));
+ munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
goto err;
}
cq->khead = ptr + p->cq_off.head;
@@ -180,7 +180,7 @@ err:
cq->kring_mask = ptr + p->cq_off.ring_mask;
cq->kring_entries = ptr + p->cq_off.ring_entries;
cq->koverflow = ptr + p->cq_off.overflow;
- cq->events = ptr + p->cq_off.events;
+ cq->cqes = ptr + p->cq_off.cqes;
return 0;
}
@@ -209,7 +209,7 @@ void io_uring_queue_exit(struct io_uring *ring)
struct io_uring_sq *sq = &ring->sq;
struct io_uring_cq *cq = &ring->cq;
- munmap(sq->iocbs, *sq->kring_entries * sizeof(struct io_uring_iocb));
+ munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
munmap(sq->khead, sq->ring_sz);
munmap(cq->khead, cq->ring_sz);
close(ring->ring_fd);
diff --git a/src/io_uring.h b/src/io_uring.h
index 7dd2112..20e4c22 100644
--- a/src/io_uring.h
+++ b/src/io_uring.h
@@ -12,9 +12,9 @@
#include <linux/types.h>
/*
- * IO submission data structure
+ * IO submission data structure (Submission Queue Entry)
*/
-struct io_uring_iocb {
+struct io_uring_sqe {
__u8 opcode;
__u8 flags;
__u16 ioprio;
@@ -35,23 +35,22 @@ struct io_uring_iocb {
* io_uring_setup() flags
*/
#define IORING_SETUP_IOPOLL (1 << 0) /* io_context is polled */
-#define IORING_SETUP_FIXEDBUFS (1 << 1) /* IO buffers are fixed */
-#define IORING_SETUP_SQTHREAD (1 << 2) /* Use SQ thread */
-#define IORING_SETUP_SQWQ (1 << 3) /* Use SQ workqueue */
-#define IORING_SETUP_SQPOLL (1 << 4) /* SQ thread polls */
+#define IORING_SETUP_SQTHREAD (1 << 1) /* Use SQ thread */
+#define IORING_SETUP_SQWQ (1 << 2) /* Use SQ workqueue */
+#define IORING_SETUP_SQPOLL (1 << 3) /* SQ thread polls */
-#define IORING_OP_READ 1
-#define IORING_OP_WRITE 2
+#define IORING_OP_READV 1
+#define IORING_OP_WRITEV 2
#define IORING_OP_FSYNC 3
#define IORING_OP_FDSYNC 4
#define IORING_OP_READ_FIXED 5
#define IORING_OP_WRITE_FIXED 6
/*
- * IO completion data structure
+ * IO completion data structure (Completion Queue Entry)
*/
-struct io_uring_event {
- __u64 index; /* what iocb this event came from */
+struct io_uring_cqe {
+ __u64 index; /* what sqe this event came from */
__s32 res; /* result code for this event */
__u32 flags;
};
@@ -59,14 +58,14 @@ struct io_uring_event {
/*
- * io_uring_event->flags
+ * io_uring_cqe->flags
 */
-#define IOEV_FLAG_CACHEHIT (1 << 0) /* IO did not hit media */
+#define IOCQE_FLAG_CACHEHIT (1 << 0) /* IO did not hit media */
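As a usage note for the renamed flag, a short, hypothetical completion handler follows, assuming the usual kernel convention that a negative res carries the error code (the struct definition above only says "result code").

#include <stdio.h>
#include "io_uring.h"

/* Hypothetical handler: report errors, count page-cache hits. */
static void handle_cqe(struct io_uring_cqe *cqe, unsigned long *cachehits)
{
	if (cqe->res < 0)
		fprintf(stderr, "IO failed: %d\n", cqe->res);
	else if (cqe->flags & IOCQE_FLAG_CACHEHIT)
		(*cachehits)++;	/* completed without touching media */
}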
/*
* Magic offsets for the application to mmap the data it needs
*/
#define IORING_OFF_SQ_RING 0ULL
#define IORING_OFF_CQ_RING 0x8000000ULL
-#define IORING_OFF_IOCB 0x10000000ULL
+#define IORING_OFF_SQES 0x10000000ULL
/*
* Filled with the offset for mmap(2)
@@ -90,7 +89,7 @@ struct io_cqring_offsets {
__u32 ring_mask;
__u32 ring_entries;
__u32 overflow;
- __u32 events;
+ __u32 cqes;
__u32 resv[4];
};
diff --git a/src/liburing.h b/src/liburing.h
index ed23747..8adc9ec 100644
--- a/src/liburing.h
+++ b/src/liburing.h
@@ -15,10 +15,10 @@ struct io_uring_sq {
unsigned *kflags;
unsigned *kdropped;
unsigned *array;
- struct io_uring_iocb *iocbs;
+ struct io_uring_sqe *sqes;
- unsigned iocb_head;
- unsigned iocb_tail;
+ unsigned sqe_head;
+ unsigned sqe_tail;
size_t ring_sz;
};
@@ -29,7 +29,7 @@ struct io_uring_cq {
unsigned *kring_mask;
unsigned *kring_entries;
unsigned *koverflow;
- struct io_uring_event *events;
+ struct io_uring_cqe *cqes;
size_t ring_sz;
};
@@ -55,16 +55,16 @@ extern int io_uring_queue_init(unsigned entries, struct io_uring_params *p,
struct iovec *iovecs, struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_get_completion(struct io_uring *ring,
- struct io_uring_event **ev_ptr);
+ struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_completion(struct io_uring *ring,
- struct io_uring_event **ev_ptr);
+ struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
-extern struct io_uring_iocb *io_uring_get_iocb(struct io_uring *ring);
+extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
-static inline struct io_uring_iocb *
-io_uring_iocb_from_ev(struct io_uring *ring, struct io_uring_event *ev)
+static inline struct io_uring_sqe *
+io_uring_sqe_from_cqe(struct io_uring *ring, struct io_uring_cqe *cqe)
{
- return &ring->sq.iocbs[ev->index];
+ return &ring->sq.sqes[cqe->index];
}
#endif
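Finally, a sketch of how the renamed inline helper ties a completion back to its submission: per the struct comment, the cqe's index field records which sqe slot the event came from, and io_uring_sqe_from_cqe() resolves it. The reap_one() helper is hypothetical; note that an sqe slot can be reused once the kernel has consumed it, so this lookup is only meaningful while the slot is still live.

#include <stdio.h>
#include "liburing.h"

/* Hypothetical reaper: wait for one completion, look up its sqe. */
static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_wait_completion(ring, &cqe);
	if (ret < 0)
		return ret;

	/* resolves to &ring->sq.sqes[cqe->index], per the inline above */
	sqe = io_uring_sqe_from_cqe(ring, cqe);
	fprintf(stderr, "op %u completed, res %d\n", sqe->opcode, cqe->res);
	return cqe->res;
}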