// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
	struct compat_iovec iov;

	if (copy_from_user(&iov, uiov, sizeof(iov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

	if (io_is_compat(req->ctx))
		return io_iov_compat_buffer_select_prep(rw);

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_import_vec(int ddir, struct io_kiocb *req,
			 struct io_async_rw *io,
			 const struct iovec __user *uvec,
			 size_t uvec_segs)
{
	int ret, nr_segs;
	struct iovec *iov;

	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		nr_segs = 1;
		iov = &io->fast_iov;
	}

	ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
			     io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
				 struct io_async_rw *io,
				 unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	void __user *buf = u64_to_user_ptr(rw->addr);
	size_t sqe_len = rw->len;

	if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
		return io_import_vec(ddir, req, io, buf, sqe_len);

	if (io_do_buffer_select(req)) {
		buf = io_buffer_select(req, &sqe_len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		rw->addr = (unsigned long) buf;
		rw->len = sqe_len;
	}
	return import_ubuf(ddir, buf, sqe_len, &io->iter);
}

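/*
 * Import the user buffer(s) into io->iter and snapshot the iterator state,
 * so that a short or nonblocking attempt that must be retried can restore
 * the iterator to its freshly imported state.
 */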
static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
				      struct io_async_rw *io,
				      unsigned int issue_flags)
{
	int ret;

	ret = __io_import_rw_buffer(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

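/*
 * Recycle the request's async data into the per-ring cache. This is only
 * done when the ring lock is held; unlocked (io-wq) completions leave the
 * async data attached to the request.
 */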
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		return;

	io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *     punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			      iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this; any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
	if (!rw)
		return -ENOMEM;
	if (rw->free_iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	rw->bytes_done = 0;
	return 0;
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_HAS_METADATA) {
		io->meta.seed = io->meta_state.seed;
		iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
	}
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
			 u64 attr_ptr, u64 attr_type_mask)
{
	struct io_uring_attr_pi pi_attr;
	struct io_async_rw *io;
	int ret;

	if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
			   sizeof(pi_attr)))
		return -EFAULT;

	if (pi_attr.rsvd)
		return -EINVAL;

	io = req->async_data;
	io->meta.flags = pi_attr.flags;
	io->meta.app_tag = pi_attr.app_tag;
	io->meta.seed = pi_attr.seed;
	ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
			  pi_attr.len, &io->meta.iter);
	if (unlikely(ret < 0))
		return ret;
	req->flags |= REQ_F_HAS_METADATA;
	io_meta_save_state(io);
	return ret;
}

static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	u64 attr_type_mask;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;
	rw->kiocb.ki_flags = 0;

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		rw->kiocb.ki_complete = io_complete_rw_iopoll;
	else
		rw->kiocb.ki_complete = io_complete_rw;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);

	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
	if (attr_type_mask) {
		u64 attr_ptr;

		/* only PI attribute is supported currently */
		if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
			return -EINVAL;

		attr_ptr = READ_ONCE(sqe->attr_ptr);
		return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
	}
	return 0;
}

static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
	if (io_do_buffer_select(req))
		return 0;

	return io_import_rw_buffer(ddir, req, req->async_data, 0);
}

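/*
 * Common prep for READ/WRITE: parse the SQE and, unless a provided buffer
 * will be selected at issue time, import the user buffer right away.
 */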
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;

	return io_rw_do_import(req, ddir);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	int ret;

	ret = io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return 0;

	/*
	 * Have to do this validation here, as by the time this is in
	 * io_read() rw->len might have changed due to buffer selection
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	int ret;

	if (io->bytes_done)
		return 0;

	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
				issue_flags);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference being that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->uring_lock);
	io_rw_recycle(req, 0);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

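/*
 * Decide whether a request that got -EAGAIN can be transparently reissued.
 * Only regular files and block devices qualify, and the iterator (and any
 * PI metadata) state is restored before the retry.
 */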
static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_async_rw *io = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;

	io_meta_restore(io, &rw->kiocb);
	iov_iter_restore(&io->iter, &io->iter_state);
	return true;
#else
	return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res == req->cqe.res)
		return;
	if (res == -EAGAIN && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
	} else {
		req_set_fail(req);
		req->cqe.res = res;
	}
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, tw);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		__io_complete_rw_common(req, res);
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req))
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
		else
			req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

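/*
 * Complete a request that was issued synchronously: map internal restart
 * error codes to -EINTR and hand the result to the iopoll or task_work
 * based completion handler.
 */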
static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		io_complete_rw_iopoll(&rw->kiocb, ret);
	else
		io_complete_rw(&rw->kiocb, ret);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		__io_complete_rw_common(req, ret);
		/*
		 * Safe to call io_end from here as we're inline
		 * from the submission path.
		 */
		io_req_io_end(req);
		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
		io_req_rw_cleanup(req, issue_flags);
		return IOU_OK;
	} else {
		io_rw_done(req, ret);
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
		return -EFAULT;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO with the iocb's
 * waitqueue armed. This gets called when the page is unlocked, and we
 * generally expect that to happen when the page IO is completed and the
 * page is now uptodate. This will queue a task_work based retry of the
 * operation, attempting to copy the data again. If the latter fails because
 * the page was NOT uptodate, then we will do a thread based blocking retry
 * of the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/*
	 * Never retry for NOWAIT or a request with metadata, we just complete
	 * with -EAGAIN.
	 */
	if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

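/*
 * Per-issue kiocb setup: validate the file mode, apply the SQE rw_flags to
 * the kiocb, work out whether the request may be retried (REQ_F_NOWAIT),
 * and configure IOPOLL and PI metadata state as needed.
 */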
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
	}

	if (req->flags & REQ_F_HAS_METADATA) {
		struct io_async_rw *io = req->async_data;

		/*
		 * We have a union of meta fields with wpq used for buffered-io
		 * in io_async_rw, so fail it here.
		 */
		if (!(req->file->f_flags & O_DIRECT))
			return -EOPNOTSUPP;
		kiocb->ki_flags |= IOCB_HAS_METADATA;
		kiocb->private = &io->meta;
	}

	return 0;
}

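/*
 * Core read path: import the buffer if needed, attempt the read
 * (nonblocking first when issued with IO_URING_F_NONBLOCK), and handle
 * short reads by retrying with the page waitqueue armed or by punting
 * back with -EAGAIN.
 */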
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_rw_buffer(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN) {
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);
	io_meta_restore(io, kiocb);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

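/*
 * Multishot read: each successful fill of a provided buffer posts a CQE
 * with IORING_CQE_F_MORE and leaves the request armed, until an error or
 * CQE overflow terminates it.
 */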
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	/* make it sync, multishot doesn't support async execution */
	rw->kiocb.ki_complete = NULL;
	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		cflags = io_put_kbuf(req, ret, issue_flags);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, issue_flags);
		rw->len = 0;	/* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

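/*
 * Begin write accounting for regular files. For nonblocking issue, only
 * take the superblock write reference if it can be acquired without
 * blocking; otherwise return false so the write is retried from a
 * blocking context.
 */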
static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, set up the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_meta_restore(io, kiocb);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
	if (unlikely(ret))
		return ret;

	return io_read(req, issue_flags);
}

int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	return io_write(req, issue_flags);
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

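/*
 * Hybrid iopoll: before busy-polling, sleep for half of the shortest
 * completion time observed so far on this ring, instead of burning CPU
 * polling for a completion that is known to be some time away. Each
 * request sleeps at most once, tracked via REQ_F_IOPOLL_STATE.
 */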
static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* Use half of the observed completion time for the sleep */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Track the minimum observed runtime as the sleep basis, so that
	 * when we're polling devices with different latencies we still get
	 * completions from the faster ones in time.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

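/*
 * Reap completions for IOPOLL requests: poll the pending requests on
 * ->iopoll_list, then flush the ones that have completed through the
 * normal completion path. Returns the number of completion events found.
 */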
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec)
		kfree(rw->free_iovec);
	kfree(rw);
}