Commit ca0a2651 authored by Jens Axboe

io_uring: don't keep looping for more events if we can't flush overflow


It doesn't make sense to wait for more events to come in, if we can't
even flush the overflow we already have to the ring. Return -EBUSY for
that condition, just like we do for attempts to submit with overflow
pending.

Cc: stable@vger.kernel.org # 5.11
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 46fe18b1
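
For context, a minimal userspace sketch of how a waiter might handle the -EBUSY that a wait for completions can now return, assuming liburing; the 8-entry ring, the submission placeholder, and the drain_cq() helper are illustrative and not part of this commit. The idea is simply to reap the completions that are already visible, which frees CQ space so the kernel can flush the overflow, and then wait again.

/* Hypothetical example, not part of this commit: reap visible CQEs when a
 * wait returns -EBUSY because overflowed CQEs could not be flushed. */
#include <errno.h>
#include <liburing.h>

/* Consume whatever completions are already visible in the CQ ring. */
static void drain_cq(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (io_uring_peek_cqe(ring, &cqe) == 0) {
		/* handle cqe->res / cqe->user_data here */
		io_uring_cqe_seen(ring, cqe);
	}
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* ... prepare and submit requests with io_uring_get_sqe()/io_uring_submit() ... */

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == -EBUSY) {
		/* The kernel could not flush overflowed CQEs into the ring:
		 * make room by reaping, then wait again. */
		drain_cq(&ring);
		ret = io_uring_wait_cqe(&ring, &cqe);
	}
	if (ret == 0)
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}

A submit loop can treat -EBUSY the same way; as the commit message notes, the wait path now mirrors the existing behavior for submission attempts with overflow pending.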
@@ -1451,18 +1451,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return all_flushed;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
 				     struct files_struct *files)
 {
+	bool ret = true;
+
 	if (test_bit(0, &ctx->cq_check_overflow)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
+
+	return ret;
 }
 
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
@@ -6883,11 +6887,16 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		/* if we can't even flush overflow, don't wait for more */
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+			ret = -EBUSY;
+			break;
+		}
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
 		finish_wait(&ctx->wait, &iowq.wq);
+		cond_resched();
 	} while (ret > 0);
 
 	restore_saved_sigmask_unless(ret == -EINTR);