/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/hmm.h>
#include <linux/hmm-dma.h>
#include <linux/pci-p2pdma.h>

#include "mlx5_ib.h"
#include "cmd.h"
#include "umr.h"
#include "qp.h"

#include <linux/mlx5/eq.h>

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32	bytes_committed;
	u64	token;
	u8	event_subtype;
	u8	type;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * Number of resource holding WQE, depends on type.
			 */
			u32	wq_num;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
		struct {
			u64	va;
			u32	mkey;
			u32	fault_byte_count;
			u32	prefetch_before_byte_count;
			u32	prefetch_after_byte_count;
			u8	flags;
		} memory;
	};

	struct mlx5_ib_pf_eq	*eq;
	struct work_struct	work;
};
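
/*
 * A struct mlx5_pagefault is allocated from the EQ's mempool for each
 * page-fault EQE in mlx5_ib_eq_pf_process(), filled in according to the
 * event subtype (WQE, RDMA or memory scheme), and then handed to
 * mlx5_ib_pfault() on the page-fault workqueue via mlx5_ib_eqe_pf_action().
 */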

#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault.
 */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT

static u64 mlx5_imr_ksm_entries;

static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *imr, int flags)
{
	struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev;
	struct mlx5_klm *end = pklm + nentries;
	int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0;
	__be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ?
			     cpu_to_be32(imr->null_mmkey.key) :
			     mr_to_mdev(imr)->mkeys.null_mkey;
	u64 va =
		MLX5_CAP_ODP(dev, mem_page_fault) ? idx * MLX5_IMR_MTT_SIZE : 0;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (; pklm != end; pklm++, idx++, va += step) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = key;
			pklm->va = cpu_to_be64(va);
		}
		return;
	}

	/*
	 * The locking here is pretty subtle. Ideally the implicit_children
	 * xarray would be protected by the umem_mutex, however that is not
	 * possible. Instead this uses a weaker update-then-lock pattern:
	 *
	 *    xa_store()
	 *    mutex_lock(umem_mutex)
	 *     mlx5r_umr_update_xlt()
	 *    mutex_unlock(umem_mutex)
	 *    destroy lkey
	 *
	 * i.e. any change to the xarray must be followed by the locked
	 * update_xlt before destroying.
	 *
	 * The umem_mutex provides the acquire/release semantic needed to make
	 * the xa_store() visible to a racing thread.
	 */
	lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);

	for (; pklm != end; pklm++, idx++, va += step) {
		struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);

		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		if (mtt) {
			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
		} else {
			pklm->key = key;
			pklm->va = cpu_to_be64(va);
		}
	}
}

static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
			struct mlx5_ib_mr *mr, int flags)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct ib_device *dev = odp->umem.ibdev;
	size_t i;

	if (flags & MLX5_IB_UPD_XLT_ZAP)
		return 0;

	for (i = 0; i < nentries; i++) {
		unsigned long pfn = odp->map.pfn_list[start + i];
		dma_addr_t dma_addr;

		if (!(pfn & HMM_PFN_VALID))
			/* ODP initialization */
			continue;

		dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
					   start + i, &p2pdma_state);
		if (ib_dma_mapping_error(dev, dma_addr))
			return -EFAULT;

		dma_addr |= MLX5_IB_MTT_READ;
		if ((pfn & HMM_PFN_WRITE) && !downgrade)
			dma_addr |= MLX5_IB_MTT_WRITE;

		pas[i] = cpu_to_be64(dma_addr);
		odp->npages++;
	}
	return 0;
}

int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			  struct mlx5_ib_mr *mr, int flags)
{
	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		populate_klm(xlt, idx, nentries, mr, flags);
		return 0;
	} else {
		return populate_mtt(xlt, idx, nentries, mr, flags);
	}
}
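
/*
 * Note on granularity: the implicit (parent) MR is described by KSM/KLM
 * entries, each covering MLX5_IMR_MTT_SIZE (1GB) of virtual address space,
 * while a leaf MR is described by per-page MTT entries.
 * mlx5_odp_populate_xlt() above selects between the two based on
 * MLX5_IB_UPD_XLT_INDIRECT.
 */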

/*
 * This must be called after the mr has been removed from implicit_children.
 * NOTE: The MR does not necessarily have to be empty here, parallel page
 * faults could have raced with the free process and added pages to it.
 */
static void free_implicit_child_mr_work(struct work_struct *work)
{
	struct mlx5_ib_mr *mr =
		container_of(work, struct mlx5_ib_mr, odp_destroy.work);
	struct mlx5_ib_mr *imr = mr->parent;
	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	mutex_lock(&odp_imr->umem_mutex);
	mlx5r_umr_update_xlt(mr->parent,
			     ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
			     MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);

	mlx5r_deref_odp_mkey(&imr->mmkey);
}

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *imr = mr->parent;

	/*
	 * If userspace is racing freeing the parent implicit ODP MR then we
	 * can lose the race with parent destruction. In this case
	 * mlx5_ib_free_odp_mr() will free everything in the implicit_children
	 * xarray so NOP is fine. This child MR cannot be destroyed here
	 * because we are under its umem_mutex.
	 */
	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
		return;

	xa_lock(&imr->implicit_children);
	if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
	    mr) {
		xa_unlock(&imr->implicit_children);
		mlx5r_deref_odp_mkey(&imr->mmkey);
		return;
	}

	if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
		xa_erase(&mr_to_mdev(mr)->odp_mkeys,
			 mlx5_base_mkey(mr->mmkey.key));
	xa_unlock(&imr->implicit_children);

	/* Freeing an MR is a sleeping operation, so bounce to a work queue */
	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
	queue_work(system_unbound_wq, &mr->odp_destroy.work);
}
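
/*
 * mmu_interval_notifier callback, invoked when the CPU mapping backing part
 * of an ODP umem is changed or torn down. It zaps the affected MTTs in the
 * device's page tables, DMA-unmaps the pages, and tears down a child
 * implicit MR that has become empty.
 */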
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
	u64 idx = 0, blk_start_idx = 0;
	u64 invalidations = 0;
	unsigned long start;
	unsigned long end;
	int in_block = 0;
	u64 addr;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/*
	 * If npages is zero then umem_odp->private may not be setup yet. This
	 * does not complete until after the first page is mapped for DMA.
	 */
	if (!umem_odp->npages)
		goto out;
	mr = umem_odp->private;
	if (!mr)
		goto out;

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */
	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
		 */
		if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5r_umr_update_xlt(mr, blk_start_idx,
						     idx - blk_start_idx, 0,
						     MLX5_IB_UPD_XLT_ZAP |
						     MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
				/* Count page invalidations */
				invalidations += idx - blk_start_idx + 1;
			}
		}
	}
	if (in_block) {
		mlx5r_umr_update_xlt(mr, blk_start_idx,
				     idx - blk_start_idx + 1, 0,
				     MLX5_IB_UPD_XLT_ZAP |
				     MLX5_IB_UPD_XLT_ATOMIC);
		/* Count page invalidations */
		invalidations += idx - blk_start_idx + 1;
	}

	mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);

	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

	if (unlikely(!umem_odp->npages && mr->parent))
		destroy_unused_implicit_child_mr(mr);
out:
	mutex_unlock(&umem_odp->umem_mutex);
	return true;
}

const struct mmu_interval_notifier_ops mlx5_mn_ops = {
	.invalidate = mlx5_ib_invalidate_range,
};

static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
			     pfault->wqe.wq_num : pfault->token;
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
	void *info;
	int err;

	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);

	if (pfault->event_subtype == MLX5_PFAULT_SUBTYPE_MEMORY) {
		info = MLX5_ADDR_OF(page_fault_resume_in, in,
				    page_fault_info.mem_page_fault_info);
		MLX5_SET(mem_page_fault_info, info, fault_token_31_0,
			 pfault->token & 0xffffffff);
		MLX5_SET(mem_page_fault_info, info, fault_token_47_32,
			 (pfault->token >> 32) & 0xffff);
		MLX5_SET(mem_page_fault_info, info, error, !!error);
	} else {
		info = MLX5_ADDR_OF(page_fault_resume_in, in,
				    page_fault_info.trans_page_fault_info);
		MLX5_SET(trans_page_fault_info, info, page_fault_type,
			 pfault->type);
		MLX5_SET(trans_page_fault_info, info, fault_token,
			 pfault->token);
		MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
		MLX5_SET(trans_page_fault_info, info, error, !!error);
	}

	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
	if (err)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
			    wq_num, err);
}

static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
						unsigned long idx)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	struct mlx5_ib_mr *ret;
	int err;

	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
				      idx * MLX5_IMR_MTT_SIZE,
				      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
				 MLX5_MKC_ACCESS_MODE_MTT,
				 MLX5_IMR_MTT_ENTRIES);
	if (IS_ERR(mr)) {
		ib_umem_odp_release(odp);
		return mr;
	}

	mr->access_flags = imr->access_flags;
	mr->ibmr.pd = imr->ibmr.pd;
	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
	mr->umem = &odp->umem;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
	mr->parent = imr;
	odp->private = mr;

	/*
	 * First refcount is owned by the xarray and second refcount
	 * is returned to the caller.
	 */
	refcount_set(&mr->mmkey.usecount, 2);

	err = mlx5r_umr_update_xlt(mr, 0,
				   MLX5_IMR_MTT_ENTRIES,
				   PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ENABLE);
	if (err) {
		ret = ERR_PTR(err);
		goto out_mr;
	}

	xa_lock(&imr->implicit_children);
	ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
			   GFP_KERNEL);
	if (unlikely(ret)) {
		if (xa_is_err(ret)) {
			ret = ERR_PTR(xa_err(ret));
			goto out_lock;
		}
		/*
		 * Another thread beat us to creating the child mr, use
		 * theirs.
		 */
		refcount_inc(&ret->mmkey.usecount);
		goto out_lock;
	}

	if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
		ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
			       &mr->mmkey, GFP_KERNEL);
		if (xa_is_err(ret)) {
			ret = ERR_PTR(xa_err(ret));
			__xa_erase(&imr->implicit_children, idx);
			goto out_lock;
		}
		mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
	}
	xa_unlock(&imr->implicit_children);
	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
	return mr;

out_lock:
	xa_unlock(&imr->implicit_children);
out_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ret;
}

/*
 * When using memory scheme ODP, implicit MRs can't use the reserved null
 * mkey, so each implicit MR needs a private null mkey of its own to receive
 * its page faults on.
 * The null mkey is created with properties that trigger a page fault on
 * every access and with all relevant access flags set.
 */
static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mr *imr,
				       struct mlx5_ib_pd *pd)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 64;
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 4);
	MLX5_SET(create_mkey_in, in, pg_access, 1);

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, a, 1);
	MLX5_SET(mkc, mkc, rw, 1);
	MLX5_SET(mkc, mkc, rr, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, free, 0);
	MLX5_SET(mkc, mkc, umr_en, 0);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, translations_octword_size, 4);
	MLX5_SET(mkc, mkc, log_page_size, 61);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, pd->pdn);
	MLX5_SET64(mkc, mkc, start_addr, 0);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &imr->null_mmkey.key, in, inlen);
	if (err)
		goto free_in;

	imr->null_mmkey.type = MLX5_MKEY_NULL;

free_in:
	kfree(in);
	return err;
}

struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	struct ib_umem_odp *umem_odp;
	struct mlx5_ib_mr *imr;
	int err;

	if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
		return ERR_PTR(-EOPNOTSUPP);

	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
	if (IS_ERR(umem_odp))
		return ERR_CAST(umem_odp);

	imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
				  mlx5_imr_ksm_entries);
	if (IS_ERR(imr)) {
		ib_umem_odp_release(umem_odp);
		return imr;
	}

	imr->access_flags = access_flags;
	imr->ibmr.pd = &pd->ibpd;
	imr->ibmr.iova = 0;
	imr->umem = &umem_odp->umem;
	imr->ibmr.lkey = imr->mmkey.key;
	imr->ibmr.rkey = imr->mmkey.key;
	imr->ibmr.device = &dev->ib_dev;
	imr->is_odp_implicit = true;
	xa_init(&imr->implicit_children);

	if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
		err = alloc_implicit_mr_null_mkey(dev, imr, pd);
		if (err)
			goto out_mr;

		err = mlx5r_store_odp_mkey(dev, &imr->null_mmkey);
		if (err)
			goto out_mr;
	}

	err = mlx5r_umr_update_xlt(imr, 0,
				   mlx5_imr_ksm_entries,
				   MLX5_KSM_PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_INDIRECT |
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ENABLE);
	if (err)
		goto out_mr;

	err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
	if (err)
		goto out_mr;

	mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
	return imr;
out_mr:
	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_ib_dereg_mr(&imr->ibmr, NULL);
	return ERR_PTR(err);
}

void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_mr *mtt;
	unsigned long idx;

	/*
	 * If this is an implicit MR it is already invalidated so we can just
	 * delete the children mkeys.
	 */
	xa_for_each(&mr->implicit_children, idx, mtt) {
		xa_erase(&mr->implicit_children, idx);
		mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
	}

	if (mr->null_mmkey.key) {
		xa_erase(&mr_to_mdev(mr)->odp_mkeys,
			 mlx5_base_mkey(mr->null_mmkey.key));

		mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev,
				       mr->null_mmkey.key);
	}
}

#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
#define MLX5_PF_FLAGS_ENABLE BIT(3)
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
			     u32 flags)
{
	int page_shift, ret, np;
	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
	u64 access_mask = 0;
	u64 start_idx;
	bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
	u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;

	if (flags & MLX5_PF_FLAGS_ENABLE)
		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;

	if (flags & MLX5_PF_FLAGS_DOWNGRADE)
		xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;

	page_shift = odp->page_shift;
	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;

	if (odp->umem.writable && !downgrade)
		access_mask |= HMM_PFN_WRITE;

	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
	if (np < 0)
		return np;

	/*
	 * No need to check whether the MTTs really belong to this MR, since
	 * ib_umem_odp_map_dma_and_lock already checks this.
	 */
	ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
	mutex_unlock(&odp->umem_mutex);

	if (ret < 0) {
		if (ret != -EAGAIN)
			mlx5_ib_err(mr_to_mdev(mr),
				    "Failed to update mkey page tables\n");
		goto out;
	}

	if (bytes_mapped) {
		u32 new_mappings = (np << page_shift) -
			(user_va - round_down(user_va, 1 << page_shift));

		*bytes_mapped += min_t(u32, new_mappings, bcnt);
	}

	return np << (page_shift - PAGE_SHIFT);

out:
	return ret;
}

static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
				 struct ib_umem_odp *odp_imr, u64 user_va,
				 size_t bcnt, u32 *bytes_mapped, u32 flags)
{
	unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
	unsigned long upd_start_idx = end_idx + 1;
	unsigned long upd_len = 0;
	unsigned long npages = 0;
	int err;
	int ret;

	if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
		     mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
		return -EFAULT;

	/* Fault each child mr that intersects with our interval. */
	while (bcnt) {
		unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
		struct ib_umem_odp *umem_odp;
		struct mlx5_ib_mr *mtt;
		u64 len;

		xa_lock(&imr->implicit_children);
		mtt = xa_load(&imr->implicit_children, idx);
		if (unlikely(!mtt)) {
			xa_unlock(&imr->implicit_children);
			mtt = implicit_get_child_mr(imr, idx);
			if (IS_ERR(mtt)) {
				ret = PTR_ERR(mtt);
				goto out;
			}
			upd_start_idx = min(upd_start_idx, idx);
			upd_len = idx - upd_start_idx + 1;
		} else {
			refcount_inc(&mtt->mmkey.usecount);
			xa_unlock(&imr->implicit_children);
		}

		umem_odp = to_ib_umem_odp(mtt->umem);
		len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
		      user_va;

		ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
					bytes_mapped, flags);

		mlx5r_deref_odp_mkey(&mtt->mmkey);

		if (ret < 0)
			goto out;
		user_va += len;
		bcnt -= len;
		npages += ret;
	}

	ret = npages;

	/*
	 * Any time the implicit_children are changed we must perform an
	 * update of the xlt before exiting to ensure the HW and the
	 * implicit_children remain synchronized.
	 */
out:
	if (likely(!upd_len))
		return ret;

	/*
	 * Note that this is not strictly ordered correctly: the KSM is
	 * updated after the implicit_children is updated, so a parallel page
	 * fault could see an MR that is not yet visible in the KSM. This is
	 * similar to a parallel page fault seeing an MR that is being
	 * concurrently removed from the KSM. Both of these improbable
	 * situations are resolved safely by resuming the HW and then taking
	 * another page fault. The next pagefault handler will see the new
	 * information.
822 */ 823 mutex_lock(&odp_imr->umem_mutex); 824 err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0, 825 MLX5_IB_UPD_XLT_INDIRECT | 826 MLX5_IB_UPD_XLT_ATOMIC); 827 mutex_unlock(&odp_imr->umem_mutex); 828 if (err) { 829 mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n"); 830 return err; 831 } 832 return ret; 833 } 834 835 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, 836 u32 *bytes_mapped, u32 flags) 837 { 838 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); 839 u32 xlt_flags = 0; 840 int err; 841 unsigned long page_size; 842 843 if (flags & MLX5_PF_FLAGS_ENABLE) 844 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE; 845 846 dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL); 847 err = ib_umem_dmabuf_map_pages(umem_dmabuf); 848 if (err) { 849 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 850 return err; 851 } 852 853 page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf); 854 if (!page_size) { 855 ib_umem_dmabuf_unmap_pages(umem_dmabuf); 856 err = -EINVAL; 857 } else { 858 if (mr->data_direct) 859 err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags); 860 else 861 err = mlx5r_umr_update_mr_pas(mr, xlt_flags); 862 } 863 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 864 865 if (err) 866 return err; 867 868 if (bytes_mapped) 869 *bytes_mapped += bcnt; 870 871 return ib_umem_num_pages(mr->umem); 872 } 873 874 /* 875 * Returns: 876 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are 877 * not accessible, or the MR is no longer valid. 878 * -EAGAIN/-ENOMEM: The operation should be retried 879 * 880 * -EINVAL/others: General internal malfunction 881 * >0: Number of pages mapped 882 */ 883 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, 884 u32 *bytes_mapped, u32 flags, bool permissive_fault) 885 { 886 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); 887 888 if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault) 889 return -EFAULT; 890 891 if (mr->umem->is_dmabuf) 892 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); 893 894 if (!odp->is_implicit_odp) { 895 u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova; 896 u64 user_va; 897 898 if (check_add_overflow(offset, (u64)odp->umem.address, 899 &user_va)) 900 return -EFAULT; 901 902 if (permissive_fault) { 903 if (user_va < ib_umem_start(odp)) 904 user_va = ib_umem_start(odp); 905 if ((user_va + bcnt) > ib_umem_end(odp)) 906 bcnt = ib_umem_end(odp) - user_va; 907 } else if (unlikely(user_va >= ib_umem_end(odp) || 908 ib_umem_end(odp) - user_va < bcnt)) 909 return -EFAULT; 910 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, 911 flags); 912 } 913 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, 914 flags); 915 } 916 917 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr) 918 { 919 int ret; 920 921 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address, 922 mr->umem->length, NULL, 923 MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE); 924 return ret >= 0 ? 0 : ret; 925 } 926 927 int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr) 928 { 929 int ret; 930 931 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL, 932 MLX5_PF_FLAGS_ENABLE); 933 934 return ret >= 0 ? 
0 : ret; 935 } 936 937 struct pf_frame { 938 struct pf_frame *next; 939 u32 key; 940 u64 io_virt; 941 size_t bcnt; 942 int depth; 943 }; 944 945 static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) 946 { 947 if (!mmkey) 948 return false; 949 if (mmkey->type == MLX5_MKEY_MW || 950 mmkey->type == MLX5_MKEY_INDIRECT_DEVX) 951 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key); 952 return mmkey->key == key; 953 } 954 955 static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key) 956 { 957 struct mlx5_ib_mkey *mmkey; 958 959 xa_lock(&dev->odp_mkeys); 960 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key)); 961 if (!mmkey) { 962 mmkey = ERR_PTR(-ENOENT); 963 goto out; 964 } 965 if (!mkey_is_eq(mmkey, key)) { 966 mmkey = ERR_PTR(-EFAULT); 967 goto out; 968 } 969 refcount_inc(&mmkey->usecount); 970 out: 971 xa_unlock(&dev->odp_mkeys); 972 973 return mmkey; 974 } 975 976 /* 977 * Handle a single data segment in a page-fault WQE or RDMA region. 978 * 979 * Returns zero on success. The caller may continue to the next data segment. 980 * Can return the following error codes: 981 * -EAGAIN to designate a temporary error. The caller will abort handling the 982 * page fault and resolve it. 983 * -EFAULT when there's an error mapping the requested pages. The caller will 984 * abort the page fault handling. 985 */ 986 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, 987 struct ib_pd *pd, u32 key, 988 u64 io_virt, size_t bcnt, 989 u32 *bytes_committed, 990 u32 *bytes_mapped) 991 { 992 int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range; 993 struct pf_frame *head = NULL, *frame; 994 struct mlx5_ib_mkey *mmkey; 995 struct mlx5_ib_mr *mr; 996 struct mlx5_klm *pklm; 997 u32 *out = NULL; 998 size_t offset; 999 1000 io_virt += *bytes_committed; 1001 bcnt -= *bytes_committed; 1002 next_mr: 1003 mmkey = find_odp_mkey(dev, key); 1004 if (IS_ERR(mmkey)) { 1005 ret = PTR_ERR(mmkey); 1006 if (ret == -ENOENT) { 1007 mlx5_ib_dbg( 1008 dev, 1009 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", 1010 key); 1011 if (bytes_mapped) 1012 *bytes_mapped += bcnt; 1013 /* 1014 * The user could specify a SGL with multiple lkeys and 1015 * only some of them are ODP. Treat the non-ODP ones as 1016 * fully faulted. 
1017 */ 1018 ret = 0; 1019 } 1020 goto end; 1021 } 1022 1023 switch (mmkey->type) { 1024 case MLX5_MKEY_MR: 1025 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); 1026 1027 pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) - 1028 (io_virt & PAGE_MASK)) >> 1029 PAGE_SHIFT; 1030 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false); 1031 if (ret < 0) 1032 goto end; 1033 1034 mlx5_update_odp_stats_with_handled(mr, faults, ret); 1035 1036 if (ret < pages_in_range) { 1037 ret = -EFAULT; 1038 goto end; 1039 } 1040 1041 ret = 0; 1042 break; 1043 1044 case MLX5_MKEY_MW: 1045 case MLX5_MKEY_INDIRECT_DEVX: 1046 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { 1047 mlx5_ib_dbg(dev, "indirection level exceeded\n"); 1048 ret = -EFAULT; 1049 goto end; 1050 } 1051 1052 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + 1053 sizeof(*pklm) * (mmkey->ndescs - 2); 1054 1055 if (outlen > cur_outlen) { 1056 kfree(out); 1057 out = kzalloc(outlen, GFP_KERNEL); 1058 if (!out) { 1059 ret = -ENOMEM; 1060 goto end; 1061 } 1062 cur_outlen = outlen; 1063 } 1064 1065 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, 1066 bsf0_klm0_pas_mtt0_1); 1067 1068 ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); 1069 if (ret) 1070 goto end; 1071 1072 offset = io_virt - MLX5_GET64(query_mkey_out, out, 1073 memory_key_mkey_entry.start_addr); 1074 1075 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { 1076 if (offset >= be32_to_cpu(pklm->bcount)) { 1077 offset -= be32_to_cpu(pklm->bcount); 1078 continue; 1079 } 1080 1081 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 1082 if (!frame) { 1083 ret = -ENOMEM; 1084 goto end; 1085 } 1086 1087 frame->key = be32_to_cpu(pklm->key); 1088 frame->io_virt = be64_to_cpu(pklm->va) + offset; 1089 frame->bcnt = min_t(size_t, bcnt, 1090 be32_to_cpu(pklm->bcount) - offset); 1091 frame->depth = depth + 1; 1092 frame->next = head; 1093 head = frame; 1094 1095 bcnt -= frame->bcnt; 1096 offset = 0; 1097 } 1098 break; 1099 1100 default: 1101 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type); 1102 ret = -EFAULT; 1103 goto end; 1104 } 1105 1106 if (head) { 1107 frame = head; 1108 head = frame->next; 1109 1110 key = frame->key; 1111 io_virt = frame->io_virt; 1112 bcnt = frame->bcnt; 1113 depth = frame->depth; 1114 kfree(frame); 1115 1116 mlx5r_deref_odp_mkey(mmkey); 1117 goto next_mr; 1118 } 1119 1120 end: 1121 if (!IS_ERR(mmkey)) 1122 mlx5r_deref_odp_mkey(mmkey); 1123 while (head) { 1124 frame = head; 1125 head = frame->next; 1126 kfree(frame); 1127 } 1128 kfree(out); 1129 1130 *bytes_committed = 0; 1131 return ret; 1132 } 1133 1134 /* 1135 * Parse a series of data segments for page fault handling. 1136 * 1137 * @dev: Pointer to mlx5 IB device 1138 * @pfault: contains page fault information. 1139 * @wqe: points at the first data segment in the WQE. 1140 * @wqe_end: points after the end of the WQE. 1141 * @bytes_mapped: receives the number of bytes that the function was able to 1142 * map. This allows the caller to decide intelligently whether 1143 * enough memory was mapped to resolve the page fault 1144 * successfully (e.g. enough for the next MTU, or the entire 1145 * WQE). 1146 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus 1147 * the committed bytes). 1148 * @receive_queue: receive WQE end of sg list 1149 * 1150 * Returns zero for success or a negative error code. 
1151 */ 1152 static int pagefault_data_segments(struct mlx5_ib_dev *dev, 1153 struct mlx5_pagefault *pfault, 1154 void *wqe, 1155 void *wqe_end, u32 *bytes_mapped, 1156 u32 *total_wqe_bytes, bool receive_queue) 1157 { 1158 int ret = 0; 1159 u64 io_virt; 1160 __be32 key; 1161 u32 byte_count; 1162 size_t bcnt; 1163 int inline_segment; 1164 1165 if (bytes_mapped) 1166 *bytes_mapped = 0; 1167 if (total_wqe_bytes) 1168 *total_wqe_bytes = 0; 1169 1170 while (wqe < wqe_end) { 1171 struct mlx5_wqe_data_seg *dseg = wqe; 1172 1173 io_virt = be64_to_cpu(dseg->addr); 1174 key = dseg->lkey; 1175 byte_count = be32_to_cpu(dseg->byte_count); 1176 inline_segment = !!(byte_count & MLX5_INLINE_SEG); 1177 bcnt = byte_count & ~MLX5_INLINE_SEG; 1178 1179 if (inline_segment) { 1180 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK; 1181 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, 1182 16); 1183 } else { 1184 wqe += sizeof(*dseg); 1185 } 1186 1187 /* receive WQE end of sg list. */ 1188 if (receive_queue && bcnt == 0 && 1189 key == dev->mkeys.terminate_scatter_list_mkey && 1190 io_virt == 0) 1191 break; 1192 1193 if (!inline_segment && total_wqe_bytes) { 1194 *total_wqe_bytes += bcnt - min_t(size_t, bcnt, 1195 pfault->bytes_committed); 1196 } 1197 1198 /* A zero length data segment designates a length of 2GB. */ 1199 if (bcnt == 0) 1200 bcnt = 1U << 31; 1201 1202 if (inline_segment || bcnt <= pfault->bytes_committed) { 1203 pfault->bytes_committed -= 1204 min_t(size_t, bcnt, 1205 pfault->bytes_committed); 1206 continue; 1207 } 1208 1209 ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key), 1210 io_virt, bcnt, 1211 &pfault->bytes_committed, 1212 bytes_mapped); 1213 if (ret < 0) 1214 break; 1215 } 1216 1217 return ret; 1218 } 1219 1220 /* 1221 * Parse initiator WQE. Advances the wqe pointer to point at the 1222 * scatter-gather list, and set wqe_end to the end of the WQE. 1223 */ 1224 static int mlx5_ib_mr_initiator_pfault_handler( 1225 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, 1226 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) 1227 { 1228 struct mlx5_wqe_ctrl_seg *ctrl = *wqe; 1229 u16 wqe_index = pfault->wqe.wqe_index; 1230 struct mlx5_base_av *av; 1231 unsigned ds, opcode; 1232 u32 qpn = qp->trans_qp.base.mqp.qpn; 1233 1234 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 1235 if (ds * MLX5_WQE_DS_UNITS > wqe_length) { 1236 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n", 1237 ds, wqe_length); 1238 return -EFAULT; 1239 } 1240 1241 if (ds == 0) { 1242 mlx5_ib_err(dev, "Got WQE with zero DS. 
wqe_index=%x, qpn=%x\n", 1243 wqe_index, qpn); 1244 return -EFAULT; 1245 } 1246 1247 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; 1248 *wqe += sizeof(*ctrl); 1249 1250 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & 1251 MLX5_WQE_CTRL_OPCODE_MASK; 1252 1253 if (qp->type == IB_QPT_XRC_INI) 1254 *wqe += sizeof(struct mlx5_wqe_xrc_seg); 1255 1256 if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) { 1257 av = *wqe; 1258 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) 1259 *wqe += sizeof(struct mlx5_av); 1260 else 1261 *wqe += sizeof(struct mlx5_base_av); 1262 } 1263 1264 switch (opcode) { 1265 case MLX5_OPCODE_RDMA_WRITE: 1266 case MLX5_OPCODE_RDMA_WRITE_IMM: 1267 case MLX5_OPCODE_RDMA_READ: 1268 *wqe += sizeof(struct mlx5_wqe_raddr_seg); 1269 break; 1270 case MLX5_OPCODE_ATOMIC_CS: 1271 case MLX5_OPCODE_ATOMIC_FA: 1272 *wqe += sizeof(struct mlx5_wqe_raddr_seg); 1273 *wqe += sizeof(struct mlx5_wqe_atomic_seg); 1274 break; 1275 } 1276 1277 return 0; 1278 } 1279 1280 /* 1281 * Parse responder WQE and set wqe_end to the end of the WQE. 1282 */ 1283 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev, 1284 struct mlx5_ib_srq *srq, 1285 void **wqe, void **wqe_end, 1286 int wqe_length) 1287 { 1288 int wqe_size = 1 << srq->msrq.wqe_shift; 1289 1290 if (wqe_size > wqe_length) { 1291 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n"); 1292 return -EFAULT; 1293 } 1294 1295 *wqe_end = *wqe + wqe_size; 1296 *wqe += sizeof(struct mlx5_wqe_srq_next_seg); 1297 1298 return 0; 1299 } 1300 1301 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, 1302 struct mlx5_ib_qp *qp, 1303 void *wqe, void **wqe_end, 1304 int wqe_length) 1305 { 1306 struct mlx5_ib_wq *wq = &qp->rq; 1307 int wqe_size = 1 << wq->wqe_shift; 1308 1309 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) { 1310 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n"); 1311 return -EFAULT; 1312 } 1313 1314 if (wqe_size > wqe_length) { 1315 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n"); 1316 return -EFAULT; 1317 } 1318 1319 *wqe_end = wqe + wqe_size; 1320 1321 return 0; 1322 } 1323 1324 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev, 1325 u32 wq_num, int pf_type) 1326 { 1327 struct mlx5_core_rsc_common *common = NULL; 1328 struct mlx5_core_srq *srq; 1329 1330 switch (pf_type) { 1331 case MLX5_WQE_PF_TYPE_RMP: 1332 srq = mlx5_cmd_get_srq(dev, wq_num); 1333 if (srq) 1334 common = &srq->common; 1335 break; 1336 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE: 1337 case MLX5_WQE_PF_TYPE_RESP: 1338 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC: 1339 common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP); 1340 break; 1341 default: 1342 break; 1343 } 1344 1345 return common; 1346 } 1347 1348 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res) 1349 { 1350 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res; 1351 1352 return to_mibqp(mqp); 1353 } 1354 1355 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res) 1356 { 1357 struct mlx5_core_srq *msrq = 1358 container_of(res, struct mlx5_core_srq, common); 1359 1360 return to_mibsrq(msrq); 1361 } 1362 1363 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, 1364 struct mlx5_pagefault *pfault) 1365 { 1366 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; 1367 u16 wqe_index = pfault->wqe.wqe_index; 1368 void *wqe, *wqe_start = NULL, *wqe_end = NULL; 1369 u32 bytes_mapped, total_wqe_bytes; 1370 struct mlx5_core_rsc_common *res; 1371 int 
resume_with_error = 1; 1372 struct mlx5_ib_qp *qp; 1373 size_t bytes_copied; 1374 int ret = 0; 1375 1376 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type); 1377 if (!res) { 1378 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num); 1379 return; 1380 } 1381 1382 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ && 1383 res->res != MLX5_RES_XSRQ) { 1384 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", 1385 pfault->type); 1386 goto resolve_page_fault; 1387 } 1388 1389 wqe_start = (void *)__get_free_page(GFP_KERNEL); 1390 if (!wqe_start) { 1391 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); 1392 goto resolve_page_fault; 1393 } 1394 1395 wqe = wqe_start; 1396 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL; 1397 if (qp && sq) { 1398 ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, 1399 &bytes_copied); 1400 if (ret) 1401 goto read_user; 1402 ret = mlx5_ib_mr_initiator_pfault_handler( 1403 dev, pfault, qp, &wqe, &wqe_end, bytes_copied); 1404 } else if (qp && !sq) { 1405 ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE, 1406 &bytes_copied); 1407 if (ret) 1408 goto read_user; 1409 ret = mlx5_ib_mr_responder_pfault_handler_rq( 1410 dev, qp, wqe, &wqe_end, bytes_copied); 1411 } else if (!qp) { 1412 struct mlx5_ib_srq *srq = res_to_srq(res); 1413 1414 ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE, 1415 &bytes_copied); 1416 if (ret) 1417 goto read_user; 1418 ret = mlx5_ib_mr_responder_pfault_handler_srq( 1419 dev, srq, &wqe, &wqe_end, bytes_copied); 1420 } 1421 1422 if (ret < 0 || wqe >= wqe_end) 1423 goto resolve_page_fault; 1424 1425 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped, 1426 &total_wqe_bytes, !sq); 1427 if (ret == -EAGAIN) 1428 goto out; 1429 1430 if (ret < 0 || total_wqe_bytes > bytes_mapped) 1431 goto resolve_page_fault; 1432 1433 out: 1434 ret = 0; 1435 resume_with_error = 0; 1436 1437 read_user: 1438 if (ret) 1439 mlx5_ib_err( 1440 dev, 1441 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %llx\n", 1442 ret, wqe_index, pfault->token); 1443 1444 resolve_page_fault: 1445 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error); 1446 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n", 1447 pfault->wqe.wq_num, resume_with_error, 1448 pfault->type); 1449 mlx5_core_res_put(res); 1450 free_page((unsigned long)wqe_start); 1451 } 1452 1453 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, 1454 struct mlx5_pagefault *pfault) 1455 { 1456 u64 address; 1457 u32 length; 1458 u32 prefetch_len = pfault->bytes_committed; 1459 int prefetch_activated = 0; 1460 u32 rkey = pfault->rdma.r_key; 1461 int ret; 1462 1463 /* The RDMA responder handler handles the page fault in two parts. 1464 * First it brings the necessary pages for the current packet 1465 * (and uses the pfault context), and then (after resuming the QP) 1466 * prefetches more pages. The second operation cannot use the pfault 1467 * context and therefore uses the dummy_pfault context allocated on 1468 * the stack */ 1469 pfault->rdma.rdma_va += pfault->bytes_committed; 1470 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, 1471 pfault->rdma.rdma_op_len); 1472 pfault->bytes_committed = 0; 1473 1474 address = pfault->rdma.rdma_va; 1475 length = pfault->rdma.rdma_op_len; 1476 1477 /* For some operations, the hardware cannot tell the exact message 1478 * length, and in those cases it reports zero. Use prefetch 1479 * logic. 
*/ 1480 if (length == 0) { 1481 prefetch_activated = 1; 1482 length = pfault->rdma.packet_size; 1483 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len); 1484 } 1485 1486 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length, 1487 &pfault->bytes_committed, NULL); 1488 if (ret == -EAGAIN) { 1489 /* We're racing with an invalidation, don't prefetch */ 1490 prefetch_activated = 0; 1491 } else if (ret < 0) { 1492 mlx5_ib_page_fault_resume(dev, pfault, 1); 1493 if (ret != -ENOENT) 1494 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n", 1495 ret, pfault->token, pfault->type); 1496 return; 1497 } 1498 1499 mlx5_ib_page_fault_resume(dev, pfault, 0); 1500 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%llx, type: 0x%x, prefetch_activated: %d\n", 1501 pfault->token, pfault->type, 1502 prefetch_activated); 1503 1504 /* At this point, there might be a new pagefault already arriving in 1505 * the eq, switch to the dummy pagefault for the rest of the 1506 * processing. We're still OK with the objects being alive as the 1507 * work-queue is being fenced. */ 1508 1509 if (prefetch_activated) { 1510 u32 bytes_committed = 0; 1511 1512 ret = pagefault_single_data_segment(dev, NULL, rkey, address, 1513 prefetch_len, 1514 &bytes_committed, NULL); 1515 if (ret < 0 && ret != -EAGAIN) { 1516 mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%llx, address: 0x%.16llx, length = 0x%.16x\n", 1517 ret, pfault->token, address, prefetch_len); 1518 } 1519 } 1520 } 1521 1522 #define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST BIT(7) 1523 static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev, 1524 struct mlx5_pagefault *pfault) 1525 { 1526 u64 prefetch_va = 1527 pfault->memory.va - pfault->memory.prefetch_before_byte_count; 1528 size_t prefetch_size = pfault->memory.prefetch_before_byte_count + 1529 pfault->memory.fault_byte_count + 1530 pfault->memory.prefetch_after_byte_count; 1531 struct mlx5_ib_mkey *mmkey; 1532 struct mlx5_ib_mr *mr, *child_mr; 1533 int ret = 0; 1534 1535 mmkey = find_odp_mkey(dev, pfault->memory.mkey); 1536 if (IS_ERR(mmkey)) 1537 goto err; 1538 1539 switch (mmkey->type) { 1540 case MLX5_MKEY_IMPLICIT_CHILD: 1541 child_mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); 1542 mr = child_mr->parent; 1543 break; 1544 case MLX5_MKEY_NULL: 1545 mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey); 1546 break; 1547 default: 1548 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); 1549 break; 1550 } 1551 1552 /* If prefetch fails, handle only demanded page fault */ 1553 ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true); 1554 if (ret < 0) { 1555 ret = pagefault_mr(mr, pfault->memory.va, 1556 pfault->memory.fault_byte_count, NULL, 0, 1557 true); 1558 if (ret < 0) 1559 goto err; 1560 } 1561 1562 mlx5_update_odp_stats_with_handled(mr, faults, ret); 1563 mlx5r_deref_odp_mkey(mmkey); 1564 1565 if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST) 1566 mlx5_ib_page_fault_resume(dev, pfault, 0); 1567 1568 mlx5_ib_dbg( 1569 dev, 1570 "PAGE FAULT completed %s. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x\n", 1571 pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST ? 1572 "" : 1573 "without resume cmd", 1574 pfault->token, pfault->memory.mkey, pfault->memory.va, 1575 pfault->memory.fault_byte_count); 1576 1577 return; 1578 1579 err: 1580 if (!IS_ERR(mmkey)) 1581 mlx5r_deref_odp_mkey(mmkey); 1582 mlx5_ib_page_fault_resume(dev, pfault, 1); 1583 mlx5_ib_dbg( 1584 dev, 1585 "PAGE FAULT error. 
token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x, err: %d\n", 1586 pfault->token, pfault->memory.mkey, pfault->memory.va, 1587 pfault->memory.fault_byte_count, ret); 1588 } 1589 1590 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault) 1591 { 1592 u8 event_subtype = pfault->event_subtype; 1593 1594 switch (event_subtype) { 1595 case MLX5_PFAULT_SUBTYPE_WQE: 1596 mlx5_ib_mr_wqe_pfault_handler(dev, pfault); 1597 break; 1598 case MLX5_PFAULT_SUBTYPE_RDMA: 1599 mlx5_ib_mr_rdma_pfault_handler(dev, pfault); 1600 break; 1601 case MLX5_PFAULT_SUBTYPE_MEMORY: 1602 mlx5_ib_mr_memory_pfault_handler(dev, pfault); 1603 break; 1604 default: 1605 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n", 1606 event_subtype); 1607 mlx5_ib_page_fault_resume(dev, pfault, 1); 1608 } 1609 } 1610 1611 static void mlx5_ib_eqe_pf_action(struct work_struct *work) 1612 { 1613 struct mlx5_pagefault *pfault = container_of(work, 1614 struct mlx5_pagefault, 1615 work); 1616 struct mlx5_ib_pf_eq *eq = pfault->eq; 1617 1618 mlx5_ib_pfault(eq->dev, pfault); 1619 mempool_free(pfault, eq->pool); 1620 } 1621 1622 #define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096 1623 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) 1624 { 1625 struct mlx5_eqe_page_fault *pf_eqe; 1626 struct mlx5_pagefault *pfault; 1627 struct mlx5_eqe *eqe; 1628 int cc = 0; 1629 1630 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { 1631 pfault = mempool_alloc(eq->pool, GFP_ATOMIC); 1632 if (!pfault) { 1633 schedule_work(&eq->work); 1634 break; 1635 } 1636 1637 pf_eqe = &eqe->data.page_fault; 1638 pfault->event_subtype = eqe->sub_type; 1639 1640 switch (eqe->sub_type) { 1641 case MLX5_PFAULT_SUBTYPE_RDMA: 1642 /* RDMA based event */ 1643 pfault->bytes_committed = 1644 be32_to_cpu(pf_eqe->rdma.bytes_committed); 1645 pfault->type = 1646 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; 1647 pfault->token = 1648 be32_to_cpu(pf_eqe->rdma.pftype_token) & 1649 MLX5_24BIT_MASK; 1650 pfault->rdma.r_key = 1651 be32_to_cpu(pf_eqe->rdma.r_key); 1652 pfault->rdma.packet_size = 1653 be16_to_cpu(pf_eqe->rdma.packet_length); 1654 pfault->rdma.rdma_op_len = 1655 be32_to_cpu(pf_eqe->rdma.rdma_op_len); 1656 pfault->rdma.rdma_va = 1657 be64_to_cpu(pf_eqe->rdma.rdma_va); 1658 mlx5_ib_dbg( 1659 eq->dev, 1660 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, r_key: 0x%08x\n", 1661 eqe->sub_type, pfault->bytes_committed, 1662 pfault->type, pfault->token, 1663 pfault->rdma.r_key); 1664 mlx5_ib_dbg(eq->dev, 1665 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", 1666 pfault->rdma.rdma_op_len, 1667 pfault->rdma.rdma_va); 1668 break; 1669 1670 case MLX5_PFAULT_SUBTYPE_WQE: 1671 /* WQE based event */ 1672 pfault->bytes_committed = 1673 be32_to_cpu(pf_eqe->wqe.bytes_committed); 1674 pfault->type = 1675 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; 1676 pfault->token = 1677 be32_to_cpu(pf_eqe->wqe.token); 1678 pfault->wqe.wq_num = 1679 be32_to_cpu(pf_eqe->wqe.pftype_wq) & 1680 MLX5_24BIT_MASK; 1681 pfault->wqe.wqe_index = 1682 be16_to_cpu(pf_eqe->wqe.wqe_index); 1683 pfault->wqe.packet_size = 1684 be16_to_cpu(pf_eqe->wqe.packet_length); 1685 mlx5_ib_dbg( 1686 eq->dev, 1687 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, wq_num: 0x%06x, wqe_index: 0x%04x\n", 1688 eqe->sub_type, pfault->bytes_committed, 1689 pfault->type, pfault->token, pfault->wqe.wq_num, 1690 pfault->wqe.wqe_index); 1691 break; 1692 1693 case MLX5_PFAULT_SUBTYPE_MEMORY: 1694 /* Memory based event */ 
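			/*
			 * Sizes in the memory scheme EQE are reported in
			 * units of MEMORY_SCHEME_PAGE_FAULT_GRANULARITY
			 * (4KB): the demanded fault size sits in the upper
			 * 20 bits of demand_fault_pages, and the pre/post
			 * prefetch hints are 16-bit page counts. The 48-bit
			 * token is assembled from token31_0 and token47_32.
			 */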
1695 pfault->bytes_committed = 0; 1696 pfault->token = 1697 be32_to_cpu(pf_eqe->memory.token31_0) | 1698 ((u64)be16_to_cpu(pf_eqe->memory.token47_32) 1699 << 32); 1700 pfault->memory.va = be64_to_cpu(pf_eqe->memory.va); 1701 pfault->memory.mkey = be32_to_cpu(pf_eqe->memory.mkey); 1702 pfault->memory.fault_byte_count = (be32_to_cpu( 1703 pf_eqe->memory.demand_fault_pages) >> 12) * 1704 MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; 1705 pfault->memory.prefetch_before_byte_count = 1706 be16_to_cpu( 1707 pf_eqe->memory.pre_demand_fault_pages) * 1708 MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; 1709 pfault->memory.prefetch_after_byte_count = 1710 be16_to_cpu( 1711 pf_eqe->memory.post_demand_fault_pages) * 1712 MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; 1713 pfault->memory.flags = pf_eqe->memory.flags; 1714 mlx5_ib_dbg( 1715 eq->dev, 1716 "PAGE_FAULT: subtype: 0x%02x, token: 0x%06llx, mkey: 0x%06x, fault_byte_count: 0x%06x, va: 0x%016llx, flags: 0x%02x\n", 1717 eqe->sub_type, pfault->token, 1718 pfault->memory.mkey, 1719 pfault->memory.fault_byte_count, 1720 pfault->memory.va, pfault->memory.flags); 1721 mlx5_ib_dbg( 1722 eq->dev, 1723 "PAGE_FAULT: prefetch size: before: 0x%06x, after 0x%06x\n", 1724 pfault->memory.prefetch_before_byte_count, 1725 pfault->memory.prefetch_after_byte_count); 1726 break; 1727 1728 default: 1729 mlx5_ib_warn(eq->dev, 1730 "Unsupported page fault event sub-type: 0x%02hhx\n", 1731 eqe->sub_type); 1732 /* Unsupported page faults should still be 1733 * resolved by the page fault handler 1734 */ 1735 } 1736 1737 pfault->eq = eq; 1738 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action); 1739 queue_work(eq->wq, &pfault->work); 1740 1741 cc = mlx5_eq_update_cc(eq->core, ++cc); 1742 } 1743 1744 mlx5_eq_update_ci(eq->core, cc, 1); 1745 } 1746 1747 static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type, 1748 void *data) 1749 { 1750 struct mlx5_ib_pf_eq *eq = 1751 container_of(nb, struct mlx5_ib_pf_eq, irq_nb); 1752 unsigned long flags; 1753 1754 if (spin_trylock_irqsave(&eq->lock, flags)) { 1755 mlx5_ib_eq_pf_process(eq); 1756 spin_unlock_irqrestore(&eq->lock, flags); 1757 } else { 1758 schedule_work(&eq->work); 1759 } 1760 1761 return IRQ_HANDLED; 1762 } 1763 1764 /* mempool_refill() was proposed but unfortunately wasn't accepted 1765 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html 1766 * Cheap workaround. 
1767 */ 1768 static void mempool_refill(mempool_t *pool) 1769 { 1770 while (pool->curr_nr < pool->min_nr) 1771 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); 1772 } 1773 1774 static void mlx5_ib_eq_pf_action(struct work_struct *work) 1775 { 1776 struct mlx5_ib_pf_eq *eq = 1777 container_of(work, struct mlx5_ib_pf_eq, work); 1778 1779 mempool_refill(eq->pool); 1780 1781 spin_lock_irq(&eq->lock); 1782 mlx5_ib_eq_pf_process(eq); 1783 spin_unlock_irq(&eq->lock); 1784 } 1785 1786 enum { 1787 MLX5_IB_NUM_PF_EQE = 0x1000, 1788 MLX5_IB_NUM_PF_DRAIN = 64, 1789 }; 1790 1791 int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) 1792 { 1793 struct mlx5_eq_param param = {}; 1794 int err = 0; 1795 1796 mutex_lock(&dev->odp_eq_mutex); 1797 if (eq->core) 1798 goto unlock; 1799 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action); 1800 spin_lock_init(&eq->lock); 1801 eq->dev = dev; 1802 1803 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN, 1804 sizeof(struct mlx5_pagefault)); 1805 if (!eq->pool) { 1806 err = -ENOMEM; 1807 goto unlock; 1808 } 1809 1810 eq->wq = alloc_workqueue("mlx5_ib_page_fault", 1811 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1812 MLX5_NUM_CMD_EQE); 1813 if (!eq->wq) { 1814 err = -ENOMEM; 1815 goto err_mempool; 1816 } 1817 1818 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; 1819 param = (struct mlx5_eq_param) { 1820 .nent = MLX5_IB_NUM_PF_EQE, 1821 }; 1822 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; 1823 eq->core = mlx5_eq_create_generic(dev->mdev, ¶m); 1824 if (IS_ERR(eq->core)) { 1825 err = PTR_ERR(eq->core); 1826 goto err_wq; 1827 } 1828 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb); 1829 if (err) { 1830 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err); 1831 goto err_eq; 1832 } 1833 1834 mutex_unlock(&dev->odp_eq_mutex); 1835 return 0; 1836 err_eq: 1837 mlx5_eq_destroy_generic(dev->mdev, eq->core); 1838 err_wq: 1839 eq->core = NULL; 1840 destroy_workqueue(eq->wq); 1841 err_mempool: 1842 mempool_destroy(eq->pool); 1843 unlock: 1844 mutex_unlock(&dev->odp_eq_mutex); 1845 return err; 1846 } 1847 1848 static int 1849 mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) 1850 { 1851 int err; 1852 1853 if (!eq->core) 1854 return 0; 1855 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb); 1856 err = mlx5_eq_destroy_generic(dev->mdev, eq->core); 1857 cancel_work_sync(&eq->work); 1858 destroy_workqueue(eq->wq); 1859 mempool_destroy(eq->pool); 1860 1861 return err; 1862 } 1863 1864 int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev) 1865 { 1866 struct mlx5r_cache_rb_key rb_key = { 1867 .access_mode = MLX5_MKC_ACCESS_MODE_KSM, 1868 .ndescs = mlx5_imr_ksm_entries, 1869 }; 1870 struct mlx5_cache_ent *ent; 1871 1872 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1873 return 0; 1874 1875 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); 1876 if (IS_ERR(ent)) 1877 return PTR_ERR(ent); 1878 1879 return 0; 1880 } 1881 1882 static const struct ib_device_ops mlx5_ib_dev_odp_ops = { 1883 .advise_mr = mlx5_ib_advise_mr, 1884 }; 1885 1886 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev) 1887 { 1888 internal_fill_odp_caps(dev); 1889 1890 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) 1891 return 0; 1892 1893 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1894 1895 mutex_init(&dev->odp_eq_mutex); 1896 return 0; 1897 } 1898 1899 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1900 { 1901 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) 1902 return; 1903 1904 mlx5_ib_odp_destroy_eq(dev, 
				&dev->odp_pf_eq);
}

int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);

	return 0;
}

struct prefetch_mr_work {
	struct work_struct work;
	u32 pf_flags;
	u32 num_sge;
	struct {
		u64 io_virt;
		struct mlx5_ib_mr *mr;
		size_t length;
	} frags[];
};

static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
	u32 i;

	for (i = 0; i < work->num_sge; ++i)
		mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);

	kvfree(work);
}

static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		    u32 lkey)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_ib_mkey *mmkey;

	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
	if (!mmkey || mmkey->key != lkey) {
		mr = ERR_PTR(-ENOENT);
		goto end;
	}
	if (mmkey->type != MLX5_MKEY_MR) {
		mr = ERR_PTR(-EINVAL);
		goto end;
	}

	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

	if (mr->ibmr.pd != pd) {
		mr = ERR_PTR(-EPERM);
		goto end;
	}

	/* prefetch with write-access must be supported by the MR */
	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    !mr->umem->writable) {
		mr = ERR_PTR(-EPERM);
		goto end;
	}

	refcount_inc(&mmkey->usecount);
end:
	xa_unlock(&dev->odp_mkeys);
	return mr;
}

static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{
	struct prefetch_mr_work *work =
		container_of(w, struct prefetch_mr_work, work);
	u32 bytes_mapped = 0;
	int ret;
	u32 i;

	/* We rely on IB/core to execute this work only when num_sge != 0. */
	WARN_ON(!work->num_sge);
	for (i = 0; i < work->num_sge; ++i) {
		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
				   work->frags[i].length, &bytes_mapped,
				   work->pf_flags, false);
		if (ret <= 0)
			continue;
		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
	}

	destroy_prefetch_work(work);
}

static int init_prefetch_work(struct ib_pd *pd,
			      enum ib_uverbs_advise_mr_advice advice,
			      u32 pf_flags, struct prefetch_mr_work *work,
			      struct ib_sge *sg_list, u32 num_sge)
{
	u32 i;

	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
	work->pf_flags = pf_flags;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr)) {
			work->num_sge = i;
			return PTR_ERR(mr);
		}
		work->frags[i].io_virt = sg_list[i].addr;
		work->frags[i].length = sg_list[i].length;
		work->frags[i].mr = mr;
	}
	work->num_sge = num_sge;
	return 0;
}

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
				    enum ib_uverbs_advise_mr_advice advice,
				    u32 pf_flags, struct ib_sge *sg_list,
				    u32 num_sge)
{
	u32 bytes_mapped = 0;
	int ret = 0;
	u32 i;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr))
			return PTR_ERR(mr);
		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
				   &bytes_mapped, pf_flags, false);
		if (ret < 0) {
			mlx5r_deref_odp_mkey(&mr->mmkey);
			return ret;
		}
		mlx5_update_odp_stats(mr, prefetch, ret);
		mlx5r_deref_odp_mkey(&mr->mmkey);
	}

	return 0;
}

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	u32 pf_flags = 0;
	struct prefetch_mr_work *work;
	int rc;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;

	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
		return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
						num_sge);

	work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
	if (rc) {
		destroy_prefetch_work(work);
		return rc;
	}
	queue_work(system_unbound_wq, &work->work);
	return 0;
}