Lines Matching full:imu (io_uring/rsrc.c)

115   struct io_mapped_ubuf *imu = *slot;   in io_buffer_unmap()  local
119   if (imu != &dummy_ubuf) {   in io_buffer_unmap()
120       if (!refcount_dec_and_test(&imu->refs))   in io_buffer_unmap()
122       for (i = 0; i < imu->nr_bvecs; i++)   in io_buffer_unmap()
123           unpin_user_page(imu->bvec[i].bv_page);   in io_buffer_unmap()
124       if (imu->acct_pages)   in io_buffer_unmap()
125           io_unaccount_mem(ctx, imu->acct_pages);   in io_buffer_unmap()
126       kvfree(imu);   in io_buffer_unmap()
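
Read together, these matches outline the teardown path for one registered buffer: once the last reference is dropped, every backing page is unpinned, the accounted pages are uncharged, and the imu itself is freed. A minimal reconstruction from the matched lines; the unmatched lines in between (the declaration of i, the early clearing of *slot) are assumptions:

    static void io_buffer_unmap(struct io_ring_ctx *ctx,
                                struct io_mapped_ubuf **slot)
    {
        struct io_mapped_ubuf *imu = *slot;
        unsigned int i;

        *slot = NULL;                       /* assumed: slot cleared before teardown */
        if (imu != &dummy_ubuf) {           /* dummy_ubuf marks an empty/sparse slot */
            if (!refcount_dec_and_test(&imu->refs))
                return;                     /* imu still referenced elsewhere */
            /* drop the pin taken on every backing page at registration */
            for (i = 0; i < imu->nr_bvecs; i++)
                unpin_user_page(imu->bvec[i].bv_page);
            /* return the pages charged against the user's memlock limit */
            if (imu->acct_pages)
                io_unaccount_mem(ctx, imu->acct_pages);
            kvfree(imu);
        }
    }
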
414   struct io_mapped_ubuf *imu;   in __io_sqe_buffers_update()  local
434   err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);   in __io_sqe_buffers_update()
443           io_buffer_unmap(ctx, &imu);   in __io_sqe_buffers_update()
449   ctx->user_bufs[i] = imu;   in __io_sqe_buffers_update()
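
The update path pins and registers the replacement buffer first, and only installs it once the old entry has been queued for removal; if that queuing fails, the freshly registered imu is unmapped again. A condensed sketch of the copy loop's body; the removal plumbing between the matched lines (io_queue_rsrc_removal() and its exact signature) is paraphrased from context and should be treated as an assumption:

    /* body of the per-iovec loop in __io_sqe_buffers_update() (sketch) */
    err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
    if (err)
        break;
    i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
    if (ctx->user_bufs[i] != &dummy_ubuf) {
        /* assumed: defer teardown of the old imu to the rsrc machinery */
        err = io_queue_rsrc_removal(ctx->buf_data, i, ctx->user_bufs[i]);
        if (unlikely(err)) {
            io_buffer_unmap(ctx, &imu);     /* undo the registration above */
            break;
        }
    }
    ctx->user_bufs[i] = imu;
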
816   struct io_mapped_ubuf *imu = ctx->user_bufs[i];   in headpage_already_acct()  local
818   for (j = 0; j < imu->nr_bvecs; j++) {   in headpage_already_acct()
819       if (!PageCompound(imu->bvec[j].bv_page))   in headpage_already_acct()
821       if (compound_head(imu->bvec[j].bv_page) == hpage)   in headpage_already_acct()
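
headpage_already_acct() answers one question for huge pages: has this compound head already been charged, either earlier in the page array being registered or by a previously registered buffer? The matched lines form the scan over existing registrations; a sketch of that walk, with the surrounding declarations assumed:

    /* scan previously registered buffers for the same compound head (sketch) */
    for (i = 0; i < ctx->nr_user_bufs; i++) {
        struct io_mapped_ubuf *imu = ctx->user_bufs[i];

        for (j = 0; j < imu->nr_bvecs; j++) {
            if (!PageCompound(imu->bvec[j].bv_page))
                continue;                   /* order-0 page, charged individually */
            if (compound_head(imu->bvec[j].bv_page) == hpage)
                return true;                /* this huge page is already charged */
        }
    }
    return false;
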
830   int nr_pages, struct io_mapped_ubuf *imu,   in io_buffer_account_pin()  argument
835   imu->acct_pages = 0;   in io_buffer_account_pin()
838           imu->acct_pages++;   in io_buffer_account_pin()
848           imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;   in io_buffer_account_pin()
852   if (!imu->acct_pages)   in io_buffer_account_pin()
855   ret = io_account_mem(ctx, imu->acct_pages);   in io_buffer_account_pin()
857       imu->acct_pages = 0;   in io_buffer_account_pin()
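
Accounting charges each order-0 page once; a compound (huge) page is charged in full, page_size(hpage) >> PAGE_SHIFT base pages at a time, and only if neither the current array nor an earlier registration has already charged its head. A reconstruction of the loop; the compound-page branch between the matched lines (the *last_hpage cache and the headpage_already_acct() call) is inferred from the function's parameters:

    imu->acct_pages = 0;
    for (i = 0; i < nr_pages; i++) {
        if (!PageCompound(pages[i])) {
            imu->acct_pages++;              /* plain page: charge exactly one */
        } else {
            struct page *hpage = compound_head(pages[i]);

            /* assumed: *last_hpage caches the most recent head seen */
            if (hpage == *last_hpage)
                continue;
            *last_hpage = hpage;
            if (headpage_already_acct(ctx, pages, i, hpage))
                continue;
            /* charge the whole huge page once */
            imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
        }
    }

    if (!imu->acct_pages)
        return 0;

    ret = io_account_mem(ctx, imu->acct_pages);
    if (ret)
        imu->acct_pages = 0;                /* nothing to unaccount on teardown */
    return ret;
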
957   struct io_mapped_ubuf *imu = NULL;   in io_sqe_buffer_register()  local
981   imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);   in io_sqe_buffer_register()
982   if (!imu)   in io_sqe_buffer_register()
985   ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);   in io_sqe_buffer_register()
993   imu->ubuf = (unsigned long) iov->iov_base;   in io_sqe_buffer_register()
994   imu->len = iov->iov_len;   in io_sqe_buffer_register()
995   imu->nr_bvecs = nr_pages;   in io_sqe_buffer_register()
996   imu->folio_shift = PAGE_SHIFT;   in io_sqe_buffer_register()
998       imu->folio_shift = data.folio_shift;   in io_sqe_buffer_register()
999   refcount_set(&imu->refs, 1);   in io_sqe_buffer_register()
1000  off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);   in io_sqe_buffer_register()
1001  *pimu = imu;   in io_sqe_buffer_register()
1007      vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);   in io_sqe_buffer_register()
1008      bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);   in io_sqe_buffer_register()
1014      kvfree(imu);   in io_sqe_buffer_register()
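
Registration sizes the imu with struct_size() so the bvec[] flexible array holds one entry per pinned page, charges those pages, then records the user address, length, and folio shift before filling the bvecs. Only the first segment may start at a non-zero offset within its folio; each vec_len is capped at whatever remains of the current folio. A condensed reconstruction; the pinning step before line 981 and the coalesced flag guarding data.folio_shift are assumptions:

    imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
    if (!imu)
        goto done;                          /* assumed: error path kvfree()s and returns */

    ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
    if (ret)
        goto done;

    size = iov->iov_len;
    imu->ubuf = (unsigned long) iov->iov_base;
    imu->len = iov->iov_len;
    imu->nr_bvecs = nr_pages;
    imu->folio_shift = PAGE_SHIFT;
    if (coalesced)                          /* assumed: pages merged into larger folios */
        imu->folio_shift = data.folio_shift;
    refcount_set(&imu->refs, 1);
    /* offset of the buffer start within its first folio */
    off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
    *pimu = imu;

    for (i = 0; i < nr_pages; i++) {
        size_t vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);

        bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
        off = 0;                            /* only the first segment is offset */
        size -= vec_len;
    }
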
1089      struct io_mapped_ubuf *imu,   in io_import_fixed()  argument
1095  if (WARN_ON_ONCE(!imu))   in io_import_fixed()
1100  if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))   in io_import_fixed()
1107  offset = buf_addr - imu->ubuf;   in io_import_fixed()
1108  iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);   in io_import_fixed()
1127      const struct bio_vec *bvec = imu->bvec;   in io_import_fixed()
1138          seg_skip = 1 + (offset >> imu->folio_shift);   in io_import_fixed()
1143          iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);   in io_import_fixed()
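
Import validates that [buf_addr, buf_addr + len) lies within the registered range [imu->ubuf, imu->ubuf + imu->len), builds a bvec iterator over the whole mapping, then fast-forwards it without iov_iter_advance(): since every segment except possibly the first and last spans exactly one folio, the segment to start in can be computed directly from the offset. A reconstruction of that fast-forward; the small-offset branch between lines 1127 and 1138 is an assumption:

    offset = buf_addr - imu->ubuf;
    iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

    if (offset) {
        const struct bio_vec *bvec = imu->bvec;

        if (offset < bvec->bv_len) {
            /* assumed: still inside the first (possibly unaligned) segment */
            iter->iov_offset = offset;
            iter->count -= offset;
        } else {
            unsigned long seg_skip;

            /* skip the first segment, then whole folio-sized segments */
            offset -= bvec->bv_len;
            seg_skip = 1 + (offset >> imu->folio_shift);

            iter->bvec += seg_skip;
            iter->nr_segs -= seg_skip;
            iter->count -= bvec->bv_len + offset;
            iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
        }
    }
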