author    Jens Axboe <axboe@kernel.dk>    2019-01-09 17:10:43 +0100
committer Jens Axboe <axboe@kernel.dk>    2019-02-28 16:24:23 +0100
commit 2579f913d41a086563bb81762c519f3d62ddee37
tree   46195e72b0a1e8ece5ea9ab662c327ac23b23aef /fs/io_uring.c
parent io_uring: use fget/fput_many() for file references
io_uring: batch io_kiocb allocation
Similarly to how we use the state->ios_left to know how many references
to get to a file, we can use it to allocate the io_kiocb's we need in
bulk.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
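For reference, here is a minimal, self-contained sketch of the batching
pattern the patch applies: bulk-allocate up to a fixed batch of objects
from a slab cache, hand them out one at a time, and return the unused
tail in a single call. kmem_cache_alloc_bulk() and kmem_cache_free_bulk()
are the real slab APIs used in the diff below; the demo_* names,
DEMO_BATCH, and the placement of the ios_left accounting are illustrative
only, not part of this patch.

#include <linux/kernel.h>
#include <linux/slab.h>

#define DEMO_BATCH	16

struct demo_state {
	void *reqs[DEMO_BATCH];
	unsigned int free_reqs;	/* cached objects not yet handed out */
	unsigned int cur_req;	/* index of the next object to hand out */
	unsigned int ios_left;	/* requests remaining in this submit */
};

/* Hand out one object, refilling the cache in bulk when it runs dry. */
static void *demo_get(struct kmem_cache *cachep, struct demo_state *state)
{
	void *obj;

	if (!state->free_reqs) {
		size_t sz = min_t(size_t, state->ios_left,
				  ARRAY_SIZE(state->reqs));
		int ret;

		/* returns the number of objects allocated, <= 0 on failure */
		ret = kmem_cache_alloc_bulk(cachep, __GFP_NOWARN, sz,
					    state->reqs);
		if (ret <= 0)
			return NULL;
		/* the first object of a fresh batch goes to the caller */
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		obj = state->reqs[0];
	} else {
		obj = state->reqs[state->cur_req++];
		state->free_reqs--;
	}
	state->ios_left--;
	return obj;
}

/* Return the unused tail of the batch to the slab cache in one call. */
static void demo_end(struct kmem_cache *cachep, struct demo_state *state)
{
	if (state->free_reqs)
		kmem_cache_free_bulk(cachep, state->free_reqs,
				     &state->reqs[state->cur_req]);
}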
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 45 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index dce7242f19fc..31f43ed894ba 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -169,6 +169,13 @@ struct io_submit_state {
 	struct blk_plug plug;
 
 	/*
+	 * io_kiocb alloc cache
+	 */
+	void *reqs[IO_IOPOLL_BATCH];
+	unsigned int free_reqs;
+	unsigned int cur_req;
+
+	/*
 	 * File reference cache
 	 */
 	struct file *file;
@@ -305,20 +312,40 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
 		wake_up(&ctx->wait);
 }
 
-static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
+static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
+				   struct io_submit_state *state)
 {
 	struct io_kiocb *req;
 
 	if (!percpu_ref_tryget(&ctx->refs))
 		return NULL;
 
-	req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
-	if (req) {
-		req->ctx = ctx;
-		req->flags = 0;
-		return req;
+	if (!state) {
+		req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
+		if (unlikely(!req))
+			goto out;
+	} else if (!state->free_reqs) {
+		size_t sz;
+		int ret;
+
+		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
+		ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
+					    state->reqs);
+		if (unlikely(ret <= 0))
+			goto out;
+		state->free_reqs = ret - 1;
+		state->cur_req = 1;
+		req = state->reqs[0];
+	} else {
+		req = state->reqs[state->cur_req];
+		state->free_reqs--;
+		state->cur_req++;
 	}
 
+	req->ctx = ctx;
+	req->flags = 0;
+	return req;
+out:
 	io_ring_drop_ctx_refs(ctx, 1);
 	return NULL;
 }
@@ -1007,7 +1034,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(s->sqe->flags))
 		return -EINVAL;
 
-	req = io_get_req(ctx);
+	req = io_get_req(ctx, state);
 	if (unlikely(!req))
 		return -EAGAIN;
@@ -1041,6 +1068,9 @@ static void io_submit_state_end(struct io_submit_state *state)
 {
 	blk_finish_plug(&state->plug);
 	io_file_put(state, NULL);
+	if (state->free_reqs)
+		kmem_cache_free_bulk(req_cachep, state->free_reqs,
+					&state->reqs[state->cur_req]);
 }
 
 /*
@@ -1050,6 +1080,7 @@ static void io_submit_state_start(struct io_submit_state *state,
 				  struct io_ring_ctx *ctx, unsigned max_ios)
 {
 	blk_start_plug(&state->plug);
+	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
 }
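A closing note on the lifecycle this diff sets up: io_submit_state_start()
zeroes free_reqs, io_get_req() refills the cache only when it is empty and
caps the bulk request at min(ios_left, IO_IOPOLL_BATCH), so a submit never
allocates more io_kiocb's than it has SQEs left, and io_submit_state_end()
bulk-frees only the tail of the batch that was never handed out, starting
at &state->reqs[state->cur_req]. The plain kmem_cache_alloc() path is
preserved for callers that pass a NULL submit state.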