| author | Jens Axboe <axboe@kernel.dk> | 2019-01-17 21:40:30 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-01-17 21:40:30 -0700 |
| commit | 213d6f39efcfc4a6758d50b383a699b98190aad7 (patch) | |
| tree | 391bee5bb65c84101b1b76120a495bcd7a889feb | |
| parent | 257578052a4dacd891e96e90b7fe775bb73325a5 (diff) | |
Split src/io_uring.c up
Let's have the various helpers live in usefully named files instead of
bundling them all into the same one.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | src/Makefile | 2 |
| -rw-r--r-- | src/queue.c (renamed from src/io_uring.c) | 92 |
| -rw-r--r-- | src/setup.c | 92 |

3 files changed, 99 insertions, 87 deletions
diff --git a/src/Makefile b/src/Makefile
index 635f65a..3899680 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -22,7 +22,7 @@ endif
 
 all: $(all_targets)
 
-liburing_srcs := io_uring.c syscall.c
+liburing_srcs := setup.c queue.c syscall.c
 
 liburing_objs := $(patsubst %.c,%.ol,$(liburing_srcs))
 liburing_sobjs := $(patsubst %.c,%.os,$(liburing_srcs))
diff --git a/src/io_uring.c b/src/queue.c
index 7205914..b8788b3 100644
--- a/src/io_uring.c
+++ b/src/queue.c
@@ -10,9 +10,10 @@
 #include "liburing.h"
 #include "barrier.h"
 
-static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
+static int __io_uring_get_completion(struct io_uring *ring,
 				     struct io_uring_cqe **cqe_ptr, int wait)
 {
+	struct io_uring_cq *cq = &ring->cq;
 	const unsigned mask = *cq->kring_mask;
 	unsigned head;
 	int ret;
@@ -27,7 +28,8 @@ static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
 		}
 		if (!wait)
 			break;
-		ret = io_uring_enter(fd, 0, 1, IORING_ENTER_GETEVENTS);
+		ret = io_uring_enter(ring->ring_fd, 0, 1,
+				     IORING_ENTER_GETEVENTS);
 		if (ret < 0)
 			return -errno;
 	} while (1);
@@ -46,7 +48,7 @@ static int __io_uring_get_completion(int fd, struct io_uring_cq *cq,
 int io_uring_get_completion(struct io_uring *ring,
 			    struct io_uring_cqe **cqe_ptr)
 {
-	return __io_uring_get_completion(ring->ring_fd, &ring->cq, cqe_ptr, 0);
+	return __io_uring_get_completion(ring, cqe_ptr, 0);
 }
 
 /*
@@ -55,7 +57,7 @@ int io_uring_get_completion(struct io_uring *ring,
 int io_uring_wait_completion(struct io_uring *ring,
 			     struct io_uring_cqe **cqe_ptr)
 {
-	return __io_uring_get_completion(ring->ring_fd, &ring->cq, cqe_ptr, 1);
+	return __io_uring_get_completion(ring, cqe_ptr, 1);
 }
 
 /*
@@ -136,85 +138,3 @@ struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
 	sq->sqe_tail = next;
 	return sqe;
 }
-
-static int io_uring_mmap(int fd, struct io_uring_params *p,
-			 struct io_uring_sq *sq, struct io_uring_cq *cq)
-{
-	size_t size;
-	void *ptr;
-	int ret;
-
-	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
-	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
-			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
-	if (ptr == MAP_FAILED)
-		return -errno;
-	sq->khead = ptr + p->sq_off.head;
-	sq->ktail = ptr + p->sq_off.tail;
-	sq->kring_mask = ptr + p->sq_off.ring_mask;
-	sq->kring_entries = ptr + p->sq_off.ring_entries;
-	sq->kflags = ptr + p->sq_off.flags;
-	sq->kdropped = ptr + p->sq_off.dropped;
-	sq->array = ptr + p->sq_off.array;
-
-	size = p->sq_entries * sizeof(struct io_uring_sqe),
-	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
-			MAP_SHARED | MAP_POPULATE, fd,
-			IORING_OFF_SQES);
-	if (sq->sqes == MAP_FAILED) {
-		ret = -errno;
-err:
-		munmap(sq->khead, sq->ring_sz);
-		return ret;
-	}
-
-	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
-	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
-			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
-	if (ptr == MAP_FAILED) {
-		ret = -errno;
-		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
-		goto err;
-	}
-	cq->khead = ptr + p->cq_off.head;
-	cq->ktail = ptr + p->cq_off.tail;
-	cq->kring_mask = ptr + p->cq_off.ring_mask;
-	cq->kring_entries = ptr + p->cq_off.ring_entries;
-	cq->koverflow = ptr + p->cq_off.overflow;
-	cq->cqes = ptr + p->cq_off.cqes;
-	return 0;
-}
-
-/*
- * Returns -1 on error, or zero on success. On success, 'ring'
- * contains the necessary information to read/write to the rings.
- */
-int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
-{
-	struct io_uring_params p;
-	int fd, ret;
-
-	memset(&p, 0, sizeof(p));
-	p.flags = flags;
-
-	fd = io_uring_setup(entries, &p);
-	if (fd < 0)
-		return fd;
-
-	memset(ring, 0, sizeof(*ring));
-	ret = io_uring_mmap(fd, &p, &ring->sq, &ring->cq);
-	if (!ret)
-		ring->ring_fd = fd;
-	return ret;
-}
-
-void io_uring_queue_exit(struct io_uring *ring)
-{
-	struct io_uring_sq *sq = &ring->sq;
-	struct io_uring_cq *cq = &ring->cq;
-
-	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
-	munmap(sq->khead, sq->ring_sz);
-	munmap(cq->khead, cq->ring_sz);
-	close(ring->ring_fd);
-}
diff --git a/src/setup.c b/src/setup.c
new file mode 100644
index 0000000..eeb5fef
--- /dev/null
+++ b/src/setup.c
@@ -0,0 +1,92 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "compat.h"
+#include "io_uring.h"
+#include "liburing.h"
+
+static int io_uring_mmap(int fd, struct io_uring_params *p,
+			 struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+	size_t size;
+	void *ptr;
+	int ret;
+
+	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
+	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+	if (ptr == MAP_FAILED)
+		return -errno;
+	sq->khead = ptr + p->sq_off.head;
+	sq->ktail = ptr + p->sq_off.tail;
+	sq->kring_mask = ptr + p->sq_off.ring_mask;
+	sq->kring_entries = ptr + p->sq_off.ring_entries;
+	sq->kflags = ptr + p->sq_off.flags;
+	sq->kdropped = ptr + p->sq_off.dropped;
+	sq->array = ptr + p->sq_off.array;
+
+	size = p->sq_entries * sizeof(struct io_uring_sqe),
+	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE, fd,
+			IORING_OFF_SQES);
+	if (sq->sqes == MAP_FAILED) {
+		ret = -errno;
+err:
+		munmap(sq->khead, sq->ring_sz);
+		return ret;
+	}
+
+	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+	if (ptr == MAP_FAILED) {
+		ret = -errno;
+		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+		goto err;
+	}
+	cq->khead = ptr + p->cq_off.head;
+	cq->ktail = ptr + p->cq_off.tail;
+	cq->kring_mask = ptr + p->cq_off.ring_mask;
+	cq->kring_entries = ptr + p->cq_off.ring_entries;
+	cq->koverflow = ptr + p->cq_off.overflow;
+	cq->cqes = ptr + p->cq_off.cqes;
+	return 0;
+}
+
+/*
+ * Returns -1 on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
+{
+	struct io_uring_params p;
+	int fd, ret;
+
+	memset(&p, 0, sizeof(p));
+	p.flags = flags;
+
+	fd = io_uring_setup(entries, &p);
+	if (fd < 0)
+		return fd;
+
+	memset(ring, 0, sizeof(*ring));
+	ret = io_uring_mmap(fd, &p, &ring->sq, &ring->cq);
+	if (!ret)
+		ring->ring_fd = fd;
+	return ret;
+}
+
+void io_uring_queue_exit(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	struct io_uring_cq *cq = &ring->cq;
+
+	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
+	munmap(sq->khead, sq->ring_sz);
+	munmap(cq->khead, cq->ring_sz);
+	close(ring->ring_fd);
+}
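As a quick orientation for the new layout, here is a minimal usage sketch (not part of the commit) that calls only functions visible in this diff: io_uring_queue_init() and io_uring_queue_exit() now live in src/setup.c, while io_uring_get_sqe() and the completion helpers stay in src/queue.c. Preparing and submitting the SQE uses helpers outside the scope of this change, so that step is only indicated in a comment.

```c
/*
 * Minimal sketch against the liburing API as it stands in this commit.
 * Error handling is kept to a bare minimum.
 */
#include <stdio.h>

#include "liburing.h"

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int ret;

	/* setup.c: io_uring_setup() plus mmap of the SQ/CQ rings */
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "io_uring_queue_init: %d\n", ret);
		return 1;
	}

	/* queue.c: reserve a submission queue entry to fill in */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		fprintf(stderr, "SQ ring is full\n");

	/*
	 * Filling in and submitting the sqe, then reaping it with
	 * io_uring_get_completion()/io_uring_wait_completion(), uses
	 * helpers not touched by this commit.
	 */

	/* setup.c: munmap the rings and close the ring fd */
	io_uring_queue_exit(&ring);
	return 0;
}
```

Checking for a negative return from io_uring_queue_init() covers both failure paths shown in the diff: io_uring_setup() failing and io_uring_mmap() returning -errno.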