diff options
| author | Jens Axboe <axboe@kernel.dk> | 2025-08-20 20:03:33 -0600 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2025-08-24 11:41:12 -0600 |
| commit | 1b5add75d7c894c62506c9b55f1d9eaadae50ef1 (patch) | |
| tree | e80f2812163cf1ad5a3dbbd308cdf6dc94accb39 /io_uring/kbuf.h | |
| parent | b22743f29b7d3dc68c68f9bd39a1b2600ec6434e (diff) | |
io_uring/kbuf: pass in struct io_buffer_list to commit/recycle helpers
Rather than have this implied being in the io_kiocb, pass it in directly
so it's immediately obvious where these users of ->buf_list are coming
from.
Link: https://lore.kernel.org/r/20250821020750.598432-6-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/kbuf.h')
| -rw-r--r-- | io_uring/kbuf.h | 22 |
1 file changed, 13 insertions, 9 deletions
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index 3d01778f378b..58451ab4ab8a 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -80,14 +80,16 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg); bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); void io_kbuf_drop_legacy(struct io_kiocb *req); -unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs); +unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl, + int len, int nbufs); bool io_kbuf_commit(struct io_kiocb *req, struct io_buffer_list *bl, int len, int nr); struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx, unsigned int bgid); -static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) +static inline bool io_kbuf_recycle_ring(struct io_kiocb *req, + struct io_buffer_list *bl) { /* * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear @@ -96,7 +98,7 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) * The exception is partial io, that case we should increment bl->head * to monopolize the buffer. 
*/ - if (req->buf_list) { + if (bl) { req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT); return true; } @@ -110,29 +112,31 @@ static inline bool io_do_buffer_select(struct io_kiocb *req) return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)); } -static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) +static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl, + unsigned issue_flags) { if (req->flags & REQ_F_BL_NO_RECYCLE) return false; if (req->flags & REQ_F_BUFFER_SELECTED) return io_kbuf_recycle_legacy(req, issue_flags); if (req->flags & REQ_F_BUFFER_RING) - return io_kbuf_recycle_ring(req); + return io_kbuf_recycle_ring(req, bl); return false; } -static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len) +static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len, + struct io_buffer_list *bl) { if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED))) return 0; - return __io_put_kbufs(req, len, 1); + return __io_put_kbufs(req, bl, len, 1); } static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len, - int nbufs) + struct io_buffer_list *bl, int nbufs) { if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED))) return 0; - return __io_put_kbufs(req, len, nbufs); + return __io_put_kbufs(req, bl, len, nbufs); } #endif |
