1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2020, Intel Corporation. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 35 #include <linux/kref.h> 36 #include <linux/random.h> 37 #include <linux/debugfs.h> 38 #include <linux/export.h> 39 #include <linux/delay.h> 40 #include <linux/dma-buf.h> 41 #include <linux/dma-resv.h> 42 #include <rdma/ib_umem_odp.h> 43 #include "dm.h" 44 #include "mlx5_ib.h" 45 #include "umr.h" 46 47 enum { 48 MAX_PENDING_REG_MR = 8, 49 }; 50 51 #define MLX5_UMR_ALIGN 2048 52 53 static void 54 create_mkey_callback(int status, struct mlx5_async_work *context); 55 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, 56 u64 iova, int access_flags, 57 unsigned int page_size, bool populate); 58 59 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, 60 struct ib_pd *pd) 61 { 62 struct mlx5_ib_dev *dev = to_mdev(pd->device); 63 64 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); 65 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); 66 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); 67 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); 68 MLX5_SET(mkc, mkc, lr, 1); 69 70 if (acc & IB_ACCESS_RELAXED_ORDERING) { 71 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) 72 MLX5_SET(mkc, mkc, relaxed_ordering_write, 1); 73 74 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || 75 (MLX5_CAP_GEN(dev->mdev, 76 relaxed_ordering_read_pci_enabled) && 77 pcie_relaxed_ordering_enabled(dev->mdev->pdev))) 78 MLX5_SET(mkc, mkc, relaxed_ordering_read, 1); 79 } 80 81 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 82 MLX5_SET(mkc, mkc, qpn, 0xffffff); 83 MLX5_SET64(mkc, mkc, start_addr, start_addr); 84 } 85 86 static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in) 87 { 88 u8 key = atomic_inc_return(&dev->mkey_var); 89 void *mkc; 90 91 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 92 MLX5_SET(mkc, mkc, mkey_7_0, key); 93 *mkey = key; 94 } 95 96 static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, 97 struct mlx5_ib_mkey *mkey, u32 *in, int inlen) 98 { 99 int ret; 100 101 assign_mkey_variant(dev, &mkey->key, in); 102 ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); 103 if (!ret) 104 init_waitqueue_head(&mkey->wait); 105 106 return ret; 107 } 108 109 static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create) 110 { 111 struct mlx5_ib_dev *dev = async_create->ent->dev; 112 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 113 size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out); 114 115 MLX5_SET(create_mkey_in, async_create->in, opcode, 116 MLX5_CMD_OP_CREATE_MKEY); 117 assign_mkey_variant(dev, &async_create->mkey, async_create->in); 118 return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen, 119 async_create->out, outlen, create_mkey_callback, 120 &async_create->cb_work); 121 } 122 123 static int mkey_cache_max_order(struct mlx5_ib_dev *dev); 124 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent); 125 126 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 127 { 128 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); 129 130 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); 131 } 132 133 static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out) 134 { 135 if (status == -ENXIO) /* core driver is not available */ 136 return; 137 138 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); 139 if (status != -EREMOTEIO) /* driver specific failure */ 140 return; 141 142 /* Failed in FW, print cmd out failure details */ 143 mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); 144 } 145 146 static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey) 147 { 148 unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE; 149 struct mlx5_mkeys_page *page; 150 151 lockdep_assert_held(&ent->mkeys_queue.lock); 152 if (ent->mkeys_queue.ci >= 153 ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) { 154 page = kzalloc(sizeof(*page), GFP_ATOMIC); 155 if (!page) 156 return -ENOMEM; 157 ent->mkeys_queue.num_pages++; 158 list_add_tail(&page->list, &ent->mkeys_queue.pages_list); 159 } else { 160 page = list_last_entry(&ent->mkeys_queue.pages_list, 161 struct mlx5_mkeys_page, list); 162 } 163 164 page->mkeys[tmp] = mkey; 165 ent->mkeys_queue.ci++; 166 return 0; 167 } 168 169 static int pop_mkey_locked(struct mlx5_cache_ent *ent) 170 { 171 unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE; 172 struct mlx5_mkeys_page *last_page; 173 u32 mkey; 174 175 lockdep_assert_held(&ent->mkeys_queue.lock); 176 last_page = list_last_entry(&ent->mkeys_queue.pages_list, 177 struct mlx5_mkeys_page, list); 178 mkey = last_page->mkeys[tmp]; 179 last_page->mkeys[tmp] = 0; 180 ent->mkeys_queue.ci--; 181 if (ent->mkeys_queue.num_pages > 1 && !tmp) { 182 list_del(&last_page->list); 183 ent->mkeys_queue.num_pages--; 184 kfree(last_page); 185 } 186 return mkey; 187 } 188 189 static void create_mkey_callback(int status, struct mlx5_async_work *context) 190 { 191 struct mlx5r_async_create_mkey *mkey_out = 192 container_of(context, struct mlx5r_async_create_mkey, cb_work); 193 struct mlx5_cache_ent *ent = mkey_out->ent; 194 struct mlx5_ib_dev *dev = ent->dev; 195 unsigned long flags; 196 197 if (status) { 198 create_mkey_warn(dev, status, mkey_out->out); 199 kfree(mkey_out); 200 spin_lock_irqsave(&ent->mkeys_queue.lock, flags); 201 ent->pending--; 202 WRITE_ONCE(dev->fill_delay, 1); 203 spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); 204 mod_timer(&dev->delay_timer, jiffies + HZ); 205 return; 206 } 207 208 mkey_out->mkey |= mlx5_idx_to_mkey( 209 MLX5_GET(create_mkey_out, mkey_out->out, mkey_index)); 210 WRITE_ONCE(dev->cache.last_add, jiffies); 211 212 spin_lock_irqsave(&ent->mkeys_queue.lock, flags); 213 push_mkey_locked(ent, mkey_out->mkey); 214 /* If we are doing fill_to_high_water then keep going. */ 215 queue_adjust_cache_locked(ent); 216 ent->pending--; 217 spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); 218 kfree(mkey_out); 219 } 220 221 static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs) 222 { 223 int ret = 0; 224 225 switch (access_mode) { 226 case MLX5_MKC_ACCESS_MODE_MTT: 227 ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / 228 sizeof(struct mlx5_mtt)); 229 break; 230 case MLX5_MKC_ACCESS_MODE_KSM: 231 ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / 232 sizeof(struct mlx5_klm)); 233 break; 234 default: 235 WARN_ON(1); 236 } 237 return ret; 238 } 239 240 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) 241 { 242 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, 243 ent->dev->umrc.pd); 244 MLX5_SET(mkc, mkc, free, 1); 245 MLX5_SET(mkc, mkc, umr_en, 1); 246 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); 247 MLX5_SET(mkc, mkc, access_mode_4_2, 248 (ent->rb_key.access_mode >> 2) & 0x7); 249 MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); 250 251 MLX5_SET(mkc, mkc, translations_octword_size, 252 get_mkc_octo_size(ent->rb_key.access_mode, 253 ent->rb_key.ndescs)); 254 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); 255 } 256 257 /* Asynchronously schedule new MRs to be populated in the cache. */ 258 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) 259 { 260 struct mlx5r_async_create_mkey *async_create; 261 void *mkc; 262 int err = 0; 263 int i; 264 265 for (i = 0; i < num; i++) { 266 async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey), 267 GFP_KERNEL); 268 if (!async_create) 269 return -ENOMEM; 270 mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in, 271 memory_key_mkey_entry); 272 set_cache_mkc(ent, mkc); 273 async_create->ent = ent; 274 275 spin_lock_irq(&ent->mkeys_queue.lock); 276 if (ent->pending >= MAX_PENDING_REG_MR) { 277 err = -EAGAIN; 278 goto free_async_create; 279 } 280 ent->pending++; 281 spin_unlock_irq(&ent->mkeys_queue.lock); 282 283 err = mlx5_ib_create_mkey_cb(async_create); 284 if (err) { 285 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); 286 goto err_create_mkey; 287 } 288 } 289 290 return 0; 291 292 err_create_mkey: 293 spin_lock_irq(&ent->mkeys_queue.lock); 294 ent->pending--; 295 free_async_create: 296 spin_unlock_irq(&ent->mkeys_queue.lock); 297 kfree(async_create); 298 return err; 299 } 300 301 /* Synchronously create a MR in the cache */ 302 static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey) 303 { 304 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 305 void *mkc; 306 u32 *in; 307 int err; 308 309 in = kzalloc(inlen, GFP_KERNEL); 310 if (!in) 311 return -ENOMEM; 312 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 313 set_cache_mkc(ent, mkc); 314 315 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); 316 if (err) 317 goto free_in; 318 319 WRITE_ONCE(ent->dev->cache.last_add, jiffies); 320 free_in: 321 kfree(in); 322 return err; 323 } 324 325 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) 326 { 327 u32 mkey; 328 329 lockdep_assert_held(&ent->mkeys_queue.lock); 330 if (!ent->mkeys_queue.ci) 331 return; 332 mkey = pop_mkey_locked(ent); 333 spin_unlock_irq(&ent->mkeys_queue.lock); 334 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); 335 spin_lock_irq(&ent->mkeys_queue.lock); 336 } 337 338 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, 339 bool limit_fill) 340 __acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock) 341 { 342 int err; 343 344 lockdep_assert_held(&ent->mkeys_queue.lock); 345 346 while (true) { 347 if (limit_fill) 348 target = ent->limit * 2; 349 if (target == ent->pending + ent->mkeys_queue.ci) 350 return 0; 351 if (target > ent->pending + ent->mkeys_queue.ci) { 352 u32 todo = target - (ent->pending + ent->mkeys_queue.ci); 353 354 spin_unlock_irq(&ent->mkeys_queue.lock); 355 err = add_keys(ent, todo); 356 if (err == -EAGAIN) 357 usleep_range(3000, 5000); 358 spin_lock_irq(&ent->mkeys_queue.lock); 359 if (err) { 360 if (err != -EAGAIN) 361 return err; 362 } else 363 return 0; 364 } else { 365 remove_cache_mr_locked(ent); 366 } 367 } 368 } 369 370 static ssize_t size_write(struct file *filp, const char __user *buf, 371 size_t count, loff_t *pos) 372 { 373 struct mlx5_cache_ent *ent = filp->private_data; 374 u32 target; 375 int err; 376 377 err = kstrtou32_from_user(buf, count, 0, &target); 378 if (err) 379 return err; 380 381 /* 382 * Target is the new value of total_mrs the user requests, however we 383 * cannot free MRs that are in use. Compute the target value for stored 384 * mkeys. 385 */ 386 spin_lock_irq(&ent->mkeys_queue.lock); 387 if (target < ent->in_use) { 388 err = -EINVAL; 389 goto err_unlock; 390 } 391 target = target - ent->in_use; 392 if (target < ent->limit || target > ent->limit*2) { 393 err = -EINVAL; 394 goto err_unlock; 395 } 396 err = resize_available_mrs(ent, target, false); 397 if (err) 398 goto err_unlock; 399 spin_unlock_irq(&ent->mkeys_queue.lock); 400 401 return count; 402 403 err_unlock: 404 spin_unlock_irq(&ent->mkeys_queue.lock); 405 return err; 406 } 407 408 static ssize_t size_read(struct file *filp, char __user *buf, size_t count, 409 loff_t *pos) 410 { 411 struct mlx5_cache_ent *ent = filp->private_data; 412 char lbuf[20]; 413 int err; 414 415 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", 416 ent->mkeys_queue.ci + ent->in_use); 417 if (err < 0) 418 return err; 419 420 return simple_read_from_buffer(buf, count, pos, lbuf, err); 421 } 422 423 static const struct file_operations size_fops = { 424 .owner = THIS_MODULE, 425 .open = simple_open, 426 .write = size_write, 427 .read = size_read, 428 }; 429 430 static ssize_t limit_write(struct file *filp, const char __user *buf, 431 size_t count, loff_t *pos) 432 { 433 struct mlx5_cache_ent *ent = filp->private_data; 434 u32 var; 435 int err; 436 437 err = kstrtou32_from_user(buf, count, 0, &var); 438 if (err) 439 return err; 440 441 /* 442 * Upon set we immediately fill the cache to high water mark implied by 443 * the limit. 444 */ 445 spin_lock_irq(&ent->mkeys_queue.lock); 446 ent->limit = var; 447 err = resize_available_mrs(ent, 0, true); 448 spin_unlock_irq(&ent->mkeys_queue.lock); 449 if (err) 450 return err; 451 return count; 452 } 453 454 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, 455 loff_t *pos) 456 { 457 struct mlx5_cache_ent *ent = filp->private_data; 458 char lbuf[20]; 459 int err; 460 461 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); 462 if (err < 0) 463 return err; 464 465 return simple_read_from_buffer(buf, count, pos, lbuf, err); 466 } 467 468 static const struct file_operations limit_fops = { 469 .owner = THIS_MODULE, 470 .open = simple_open, 471 .write = limit_write, 472 .read = limit_read, 473 }; 474 475 static bool someone_adding(struct mlx5_mkey_cache *cache) 476 { 477 struct mlx5_cache_ent *ent; 478 struct rb_node *node; 479 bool ret; 480 481 mutex_lock(&cache->rb_lock); 482 for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) { 483 ent = rb_entry(node, struct mlx5_cache_ent, node); 484 spin_lock_irq(&ent->mkeys_queue.lock); 485 ret = ent->mkeys_queue.ci < ent->limit; 486 spin_unlock_irq(&ent->mkeys_queue.lock); 487 if (ret) { 488 mutex_unlock(&cache->rb_lock); 489 return true; 490 } 491 } 492 mutex_unlock(&cache->rb_lock); 493 return false; 494 } 495 496 /* 497 * Check if the bucket is outside the high/low water mark and schedule an async 498 * update. The cache refill has hysteresis, once the low water mark is hit it is 499 * refilled up to the high mark. 500 */ 501 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) 502 { 503 lockdep_assert_held(&ent->mkeys_queue.lock); 504 505 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) 506 return; 507 if (ent->mkeys_queue.ci < ent->limit) { 508 ent->fill_to_high_water = true; 509 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 510 } else if (ent->fill_to_high_water && 511 ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) { 512 /* 513 * Once we start populating due to hitting a low water mark 514 * continue until we pass the high water mark. 515 */ 516 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 517 } else if (ent->mkeys_queue.ci == 2 * ent->limit) { 518 ent->fill_to_high_water = false; 519 } else if (ent->mkeys_queue.ci > 2 * ent->limit) { 520 /* Queue deletion of excess entries */ 521 ent->fill_to_high_water = false; 522 if (ent->pending) 523 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, 524 msecs_to_jiffies(1000)); 525 else 526 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 527 } 528 } 529 530 static void __cache_work_func(struct mlx5_cache_ent *ent) 531 { 532 struct mlx5_ib_dev *dev = ent->dev; 533 struct mlx5_mkey_cache *cache = &dev->cache; 534 int err; 535 536 spin_lock_irq(&ent->mkeys_queue.lock); 537 if (ent->disabled) 538 goto out; 539 540 if (ent->fill_to_high_water && 541 ent->mkeys_queue.ci + ent->pending < 2 * ent->limit && 542 !READ_ONCE(dev->fill_delay)) { 543 spin_unlock_irq(&ent->mkeys_queue.lock); 544 err = add_keys(ent, 1); 545 spin_lock_irq(&ent->mkeys_queue.lock); 546 if (ent->disabled) 547 goto out; 548 if (err) { 549 /* 550 * EAGAIN only happens if there are pending MRs, so we 551 * will be rescheduled when storing them. The only 552 * failure path here is ENOMEM. 553 */ 554 if (err != -EAGAIN) { 555 mlx5_ib_warn( 556 dev, 557 "add keys command failed, err %d\n", 558 err); 559 queue_delayed_work(cache->wq, &ent->dwork, 560 msecs_to_jiffies(1000)); 561 } 562 } 563 } else if (ent->mkeys_queue.ci > 2 * ent->limit) { 564 bool need_delay; 565 566 /* 567 * The remove_cache_mr() logic is performed as garbage 568 * collection task. Such task is intended to be run when no 569 * other active processes are running. 570 * 571 * The need_resched() will return TRUE if there are user tasks 572 * to be activated in near future. 573 * 574 * In such case, we don't execute remove_cache_mr() and postpone 575 * the garbage collection work to try to run in next cycle, in 576 * order to free CPU resources to other tasks. 577 */ 578 spin_unlock_irq(&ent->mkeys_queue.lock); 579 need_delay = need_resched() || someone_adding(cache) || 580 !time_after(jiffies, 581 READ_ONCE(cache->last_add) + 300 * HZ); 582 spin_lock_irq(&ent->mkeys_queue.lock); 583 if (ent->disabled) 584 goto out; 585 if (need_delay) { 586 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); 587 goto out; 588 } 589 remove_cache_mr_locked(ent); 590 queue_adjust_cache_locked(ent); 591 } 592 out: 593 spin_unlock_irq(&ent->mkeys_queue.lock); 594 } 595 596 static void delayed_cache_work_func(struct work_struct *work) 597 { 598 struct mlx5_cache_ent *ent; 599 600 ent = container_of(work, struct mlx5_cache_ent, dwork.work); 601 __cache_work_func(ent); 602 } 603 604 static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1, 605 struct mlx5r_cache_rb_key key2) 606 { 607 int res; 608 609 res = key1.ats - key2.ats; 610 if (res) 611 return res; 612 613 res = key1.access_mode - key2.access_mode; 614 if (res) 615 return res; 616 617 res = key1.access_flags - key2.access_flags; 618 if (res) 619 return res; 620 621 /* 622 * keep ndescs the last in the compare table since the find function 623 * searches for an exact match on all properties and only closest 624 * match in size. 625 */ 626 return key1.ndescs - key2.ndescs; 627 } 628 629 static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache, 630 struct mlx5_cache_ent *ent) 631 { 632 struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL; 633 struct mlx5_cache_ent *cur; 634 int cmp; 635 636 /* Figure out where to put new node */ 637 while (*new) { 638 cur = rb_entry(*new, struct mlx5_cache_ent, node); 639 parent = *new; 640 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); 641 if (cmp > 0) 642 new = &((*new)->rb_left); 643 if (cmp < 0) 644 new = &((*new)->rb_right); 645 if (cmp == 0) 646 return -EEXIST; 647 } 648 649 /* Add new node and rebalance tree. */ 650 rb_link_node(&ent->node, parent, new); 651 rb_insert_color(&ent->node, &cache->rb_root); 652 653 return 0; 654 } 655 656 static struct mlx5_cache_ent * 657 mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, 658 struct mlx5r_cache_rb_key rb_key) 659 { 660 struct rb_node *node = dev->cache.rb_root.rb_node; 661 struct mlx5_cache_ent *cur, *smallest = NULL; 662 int cmp; 663 664 /* 665 * Find the smallest ent with order >= requested_order. 666 */ 667 while (node) { 668 cur = rb_entry(node, struct mlx5_cache_ent, node); 669 cmp = cache_ent_key_cmp(cur->rb_key, rb_key); 670 if (cmp > 0) { 671 smallest = cur; 672 node = node->rb_left; 673 } 674 if (cmp < 0) 675 node = node->rb_right; 676 if (cmp == 0) 677 return cur; 678 } 679 680 return (smallest && 681 smallest->rb_key.access_mode == rb_key.access_mode && 682 smallest->rb_key.access_flags == rb_key.access_flags && 683 smallest->rb_key.ats == rb_key.ats) ? 684 smallest : 685 NULL; 686 } 687 688 static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, 689 struct mlx5_cache_ent *ent, 690 int access_flags) 691 { 692 struct mlx5_ib_mr *mr; 693 int err; 694 695 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 696 if (!mr) 697 return ERR_PTR(-ENOMEM); 698 699 spin_lock_irq(&ent->mkeys_queue.lock); 700 ent->in_use++; 701 702 if (!ent->mkeys_queue.ci) { 703 queue_adjust_cache_locked(ent); 704 ent->miss++; 705 spin_unlock_irq(&ent->mkeys_queue.lock); 706 err = create_cache_mkey(ent, &mr->mmkey.key); 707 if (err) { 708 spin_lock_irq(&ent->mkeys_queue.lock); 709 ent->in_use--; 710 spin_unlock_irq(&ent->mkeys_queue.lock); 711 kfree(mr); 712 return ERR_PTR(err); 713 } 714 } else { 715 mr->mmkey.key = pop_mkey_locked(ent); 716 queue_adjust_cache_locked(ent); 717 spin_unlock_irq(&ent->mkeys_queue.lock); 718 } 719 mr->mmkey.cache_ent = ent; 720 mr->mmkey.type = MLX5_MKEY_MR; 721 mr->mmkey.rb_key = ent->rb_key; 722 mr->mmkey.cacheable = true; 723 init_waitqueue_head(&mr->mmkey.wait); 724 return mr; 725 } 726 727 static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev, 728 int access_flags) 729 { 730 int ret = 0; 731 732 if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) && 733 MLX5_CAP_GEN(dev->mdev, atomic) && 734 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) 735 ret |= IB_ACCESS_REMOTE_ATOMIC; 736 737 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && 738 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && 739 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) 740 ret |= IB_ACCESS_RELAXED_ORDERING; 741 742 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && 743 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || 744 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && 745 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) 746 ret |= IB_ACCESS_RELAXED_ORDERING; 747 748 return ret; 749 } 750 751 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, 752 int access_flags, int access_mode, 753 int ndescs) 754 { 755 struct mlx5r_cache_rb_key rb_key = { 756 .ndescs = ndescs, 757 .access_mode = access_mode, 758 .access_flags = get_unchangeable_access_flags(dev, access_flags) 759 }; 760 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); 761 762 if (!ent) 763 return ERR_PTR(-EOPNOTSUPP); 764 765 return _mlx5_mr_cache_alloc(dev, ent, access_flags); 766 } 767 768 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) 769 { 770 u32 mkey; 771 772 cancel_delayed_work(&ent->dwork); 773 spin_lock_irq(&ent->mkeys_queue.lock); 774 while (ent->mkeys_queue.ci) { 775 mkey = pop_mkey_locked(ent); 776 spin_unlock_irq(&ent->mkeys_queue.lock); 777 mlx5_core_destroy_mkey(dev->mdev, mkey); 778 spin_lock_irq(&ent->mkeys_queue.lock); 779 } 780 spin_unlock_irq(&ent->mkeys_queue.lock); 781 } 782 783 static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) 784 { 785 if (!mlx5_debugfs_root || dev->is_rep) 786 return; 787 788 debugfs_remove_recursive(dev->cache.fs_root); 789 dev->cache.fs_root = NULL; 790 } 791 792 static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev, 793 struct mlx5_cache_ent *ent) 794 { 795 int order = order_base_2(ent->rb_key.ndescs); 796 struct dentry *dir; 797 798 if (!mlx5_debugfs_root || dev->is_rep) 799 return; 800 801 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) 802 order = MLX5_IMR_KSM_CACHE_ENTRY + 2; 803 804 sprintf(ent->name, "%d", order); 805 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); 806 debugfs_create_file("size", 0600, dir, ent, &size_fops); 807 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); 808 debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci); 809 debugfs_create_u32("miss", 0600, dir, &ent->miss); 810 } 811 812 static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev) 813 { 814 struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev); 815 struct mlx5_mkey_cache *cache = &dev->cache; 816 817 if (!mlx5_debugfs_root || dev->is_rep) 818 return; 819 820 cache->fs_root = debugfs_create_dir("mr_cache", dbg_root); 821 } 822 823 static void delay_time_func(struct timer_list *t) 824 { 825 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); 826 827 WRITE_ONCE(dev->fill_delay, 0); 828 } 829 830 static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent) 831 { 832 struct mlx5_mkeys_page *page; 833 834 page = kzalloc(sizeof(*page), GFP_KERNEL); 835 if (!page) 836 return -ENOMEM; 837 INIT_LIST_HEAD(&ent->mkeys_queue.pages_list); 838 spin_lock_init(&ent->mkeys_queue.lock); 839 list_add_tail(&page->list, &ent->mkeys_queue.pages_list); 840 ent->mkeys_queue.num_pages++; 841 return 0; 842 } 843 844 static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent) 845 { 846 struct mlx5_mkeys_page *page; 847 848 WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1); 849 page = list_last_entry(&ent->mkeys_queue.pages_list, 850 struct mlx5_mkeys_page, list); 851 list_del(&page->list); 852 kfree(page); 853 } 854 855 struct mlx5_cache_ent * 856 mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, 857 struct mlx5r_cache_rb_key rb_key, 858 bool persistent_entry) 859 { 860 struct mlx5_cache_ent *ent; 861 int order; 862 int ret; 863 864 ent = kzalloc(sizeof(*ent), GFP_KERNEL); 865 if (!ent) 866 return ERR_PTR(-ENOMEM); 867 868 ret = mlx5r_mkeys_init(ent); 869 if (ret) 870 goto mkeys_err; 871 ent->rb_key = rb_key; 872 ent->dev = dev; 873 ent->is_tmp = !persistent_entry; 874 875 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); 876 877 ret = mlx5_cache_ent_insert(&dev->cache, ent); 878 if (ret) 879 goto ent_insert_err; 880 881 if (persistent_entry) { 882 if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) 883 order = MLX5_IMR_KSM_CACHE_ENTRY; 884 else 885 order = order_base_2(rb_key.ndescs) - 2; 886 887 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && 888 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && 889 mlx5r_umr_can_load_pas(dev, 0)) 890 ent->limit = dev->mdev->profile.mr_cache[order].limit; 891 else 892 ent->limit = 0; 893 894 mlx5_mkey_cache_debugfs_add_ent(dev, ent); 895 } else { 896 mod_delayed_work(ent->dev->cache.wq, 897 &ent->dev->cache.remove_ent_dwork, 898 msecs_to_jiffies(30 * 1000)); 899 } 900 901 return ent; 902 ent_insert_err: 903 mlx5r_mkeys_uninit(ent); 904 mkeys_err: 905 kfree(ent); 906 return ERR_PTR(ret); 907 } 908 909 static void remove_ent_work_func(struct work_struct *work) 910 { 911 struct mlx5_mkey_cache *cache; 912 struct mlx5_cache_ent *ent; 913 struct rb_node *cur; 914 915 cache = container_of(work, struct mlx5_mkey_cache, 916 remove_ent_dwork.work); 917 mutex_lock(&cache->rb_lock); 918 cur = rb_last(&cache->rb_root); 919 while (cur) { 920 ent = rb_entry(cur, struct mlx5_cache_ent, node); 921 cur = rb_prev(cur); 922 mutex_unlock(&cache->rb_lock); 923 924 spin_lock_irq(&ent->mkeys_queue.lock); 925 if (!ent->is_tmp) { 926 spin_unlock_irq(&ent->mkeys_queue.lock); 927 mutex_lock(&cache->rb_lock); 928 continue; 929 } 930 spin_unlock_irq(&ent->mkeys_queue.lock); 931 932 clean_keys(ent->dev, ent); 933 mutex_lock(&cache->rb_lock); 934 } 935 mutex_unlock(&cache->rb_lock); 936 } 937 938 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) 939 { 940 struct mlx5_mkey_cache *cache = &dev->cache; 941 struct rb_root *root = &dev->cache.rb_root; 942 struct mlx5r_cache_rb_key rb_key = { 943 .access_mode = MLX5_MKC_ACCESS_MODE_MTT, 944 }; 945 struct mlx5_cache_ent *ent; 946 struct rb_node *node; 947 int ret; 948 int i; 949 950 mutex_init(&dev->slow_path_mutex); 951 mutex_init(&dev->cache.rb_lock); 952 dev->cache.rb_root = RB_ROOT; 953 INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); 954 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 955 if (!cache->wq) { 956 mlx5_ib_warn(dev, "failed to create work queue\n"); 957 return -ENOMEM; 958 } 959 960 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); 961 timer_setup(&dev->delay_timer, delay_time_func, 0); 962 mlx5_mkey_cache_debugfs_init(dev); 963 mutex_lock(&cache->rb_lock); 964 for (i = 0; i <= mkey_cache_max_order(dev); i++) { 965 rb_key.ndescs = 1 << (i + 2); 966 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); 967 if (IS_ERR(ent)) { 968 ret = PTR_ERR(ent); 969 goto err; 970 } 971 } 972 973 ret = mlx5_odp_init_mkey_cache(dev); 974 if (ret) 975 goto err; 976 977 mutex_unlock(&cache->rb_lock); 978 for (node = rb_first(root); node; node = rb_next(node)) { 979 ent = rb_entry(node, struct mlx5_cache_ent, node); 980 spin_lock_irq(&ent->mkeys_queue.lock); 981 queue_adjust_cache_locked(ent); 982 spin_unlock_irq(&ent->mkeys_queue.lock); 983 } 984 985 return 0; 986 987 err: 988 mutex_unlock(&cache->rb_lock); 989 mlx5_mkey_cache_debugfs_cleanup(dev); 990 mlx5_ib_warn(dev, "failed to create mkey cache entry\n"); 991 return ret; 992 } 993 994 void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) 995 { 996 struct rb_root *root = &dev->cache.rb_root; 997 struct mlx5_cache_ent *ent; 998 struct rb_node *node; 999 1000 if (!dev->cache.wq) 1001 return; 1002 1003 mutex_lock(&dev->cache.rb_lock); 1004 cancel_delayed_work(&dev->cache.remove_ent_dwork); 1005 for (node = rb_first(root); node; node = rb_next(node)) { 1006 ent = rb_entry(node, struct mlx5_cache_ent, node); 1007 spin_lock_irq(&ent->mkeys_queue.lock); 1008 ent->disabled = true; 1009 spin_unlock_irq(&ent->mkeys_queue.lock); 1010 cancel_delayed_work(&ent->dwork); 1011 } 1012 mutex_unlock(&dev->cache.rb_lock); 1013 1014 /* 1015 * After all entries are disabled and will not reschedule on WQ, 1016 * flush it and all async commands. 1017 */ 1018 flush_workqueue(dev->cache.wq); 1019 1020 mlx5_mkey_cache_debugfs_cleanup(dev); 1021 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); 1022 1023 /* At this point all entries are disabled and have no concurrent work. */ 1024 mutex_lock(&dev->cache.rb_lock); 1025 node = rb_first(root); 1026 while (node) { 1027 ent = rb_entry(node, struct mlx5_cache_ent, node); 1028 node = rb_next(node); 1029 clean_keys(dev, ent); 1030 rb_erase(&ent->node, root); 1031 mlx5r_mkeys_uninit(ent); 1032 kfree(ent); 1033 } 1034 mutex_unlock(&dev->cache.rb_lock); 1035 1036 destroy_workqueue(dev->cache.wq); 1037 del_timer_sync(&dev->delay_timer); 1038 } 1039 1040 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 1041 { 1042 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1043 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1044 struct mlx5_ib_mr *mr; 1045 void *mkc; 1046 u32 *in; 1047 int err; 1048 1049 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1050 if (!mr) 1051 return ERR_PTR(-ENOMEM); 1052 1053 in = kzalloc(inlen, GFP_KERNEL); 1054 if (!in) { 1055 err = -ENOMEM; 1056 goto err_free; 1057 } 1058 1059 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1060 1061 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); 1062 MLX5_SET(mkc, mkc, length64, 1); 1063 set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0, 1064 pd); 1065 1066 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1067 if (err) 1068 goto err_in; 1069 1070 kfree(in); 1071 mr->mmkey.type = MLX5_MKEY_MR; 1072 mr->ibmr.lkey = mr->mmkey.key; 1073 mr->ibmr.rkey = mr->mmkey.key; 1074 mr->umem = NULL; 1075 1076 return &mr->ibmr; 1077 1078 err_in: 1079 kfree(in); 1080 1081 err_free: 1082 kfree(mr); 1083 1084 return ERR_PTR(err); 1085 } 1086 1087 static int get_octo_len(u64 addr, u64 len, int page_shift) 1088 { 1089 u64 page_size = 1ULL << page_shift; 1090 u64 offset; 1091 int npages; 1092 1093 offset = addr & (page_size - 1); 1094 npages = ALIGN(len + offset, page_size) >> page_shift; 1095 return (npages + 1) / 2; 1096 } 1097 1098 static int mkey_cache_max_order(struct mlx5_ib_dev *dev) 1099 { 1100 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 1101 return MKEY_CACHE_LAST_STD_ENTRY; 1102 return MLX5_MAX_UMR_SHIFT; 1103 } 1104 1105 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, 1106 u64 length, int access_flags, u64 iova) 1107 { 1108 mr->ibmr.lkey = mr->mmkey.key; 1109 mr->ibmr.rkey = mr->mmkey.key; 1110 mr->ibmr.length = length; 1111 mr->ibmr.device = &dev->ib_dev; 1112 mr->ibmr.iova = iova; 1113 mr->access_flags = access_flags; 1114 } 1115 1116 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, 1117 u64 iova) 1118 { 1119 /* 1120 * The alignment of iova has already been checked upon entering 1121 * UVERBS_METHOD_REG_DMABUF_MR 1122 */ 1123 umem->iova = iova; 1124 return PAGE_SIZE; 1125 } 1126 1127 static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, 1128 struct ib_umem *umem, u64 iova, 1129 int access_flags) 1130 { 1131 struct mlx5r_cache_rb_key rb_key = { 1132 .access_mode = MLX5_MKC_ACCESS_MODE_MTT, 1133 }; 1134 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1135 struct mlx5_cache_ent *ent; 1136 struct mlx5_ib_mr *mr; 1137 unsigned int page_size; 1138 1139 if (umem->is_dmabuf) 1140 page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); 1141 else 1142 page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 1143 0, iova); 1144 if (WARN_ON(!page_size)) 1145 return ERR_PTR(-EINVAL); 1146 1147 rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size); 1148 rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); 1149 rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags); 1150 ent = mkey_cache_ent_from_rb_key(dev, rb_key); 1151 /* 1152 * If the MR can't come from the cache then synchronously create an uncached 1153 * one. 1154 */ 1155 if (!ent) { 1156 mutex_lock(&dev->slow_path_mutex); 1157 mr = reg_create(pd, umem, iova, access_flags, page_size, false); 1158 mutex_unlock(&dev->slow_path_mutex); 1159 if (IS_ERR(mr)) 1160 return mr; 1161 mr->mmkey.rb_key = rb_key; 1162 mr->mmkey.cacheable = true; 1163 return mr; 1164 } 1165 1166 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); 1167 if (IS_ERR(mr)) 1168 return mr; 1169 1170 mr->ibmr.pd = pd; 1171 mr->umem = umem; 1172 mr->page_shift = order_base_2(page_size); 1173 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1174 1175 return mr; 1176 } 1177 1178 /* 1179 * If ibmr is NULL it will be allocated by reg_create. 1180 * Else, the given ibmr will be used. 1181 */ 1182 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, 1183 u64 iova, int access_flags, 1184 unsigned int page_size, bool populate) 1185 { 1186 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1187 struct mlx5_ib_mr *mr; 1188 __be64 *pas; 1189 void *mkc; 1190 int inlen; 1191 u32 *in; 1192 int err; 1193 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); 1194 1195 if (!page_size) 1196 return ERR_PTR(-EINVAL); 1197 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1198 if (!mr) 1199 return ERR_PTR(-ENOMEM); 1200 1201 mr->ibmr.pd = pd; 1202 mr->access_flags = access_flags; 1203 mr->page_shift = order_base_2(page_size); 1204 1205 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1206 if (populate) 1207 inlen += sizeof(*pas) * 1208 roundup(ib_umem_num_dma_blocks(umem, page_size), 2); 1209 in = kvzalloc(inlen, GFP_KERNEL); 1210 if (!in) { 1211 err = -ENOMEM; 1212 goto err_1; 1213 } 1214 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); 1215 if (populate) { 1216 if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) { 1217 err = -EINVAL; 1218 goto err_2; 1219 } 1220 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, 1221 pg_cap ? MLX5_IB_MTT_PRESENT : 0); 1222 } 1223 1224 /* The pg_access bit allows setting the access flags 1225 * in the page list submitted with the command. 1226 */ 1227 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); 1228 1229 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1230 set_mkc_access_pd_addr_fields(mkc, access_flags, iova, 1231 populate ? pd : dev->umrc.pd); 1232 MLX5_SET(mkc, mkc, free, !populate); 1233 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); 1234 MLX5_SET(mkc, mkc, umr_en, 1); 1235 1236 MLX5_SET64(mkc, mkc, len, umem->length); 1237 MLX5_SET(mkc, mkc, bsf_octword_size, 0); 1238 MLX5_SET(mkc, mkc, translations_octword_size, 1239 get_octo_len(iova, umem->length, mr->page_shift)); 1240 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); 1241 if (mlx5_umem_needs_ats(dev, umem, access_flags)) 1242 MLX5_SET(mkc, mkc, ma_translation_mode, 1); 1243 if (populate) { 1244 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 1245 get_octo_len(iova, umem->length, mr->page_shift)); 1246 } 1247 1248 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1249 if (err) { 1250 mlx5_ib_warn(dev, "create mkey failed\n"); 1251 goto err_2; 1252 } 1253 mr->mmkey.type = MLX5_MKEY_MR; 1254 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); 1255 mr->umem = umem; 1256 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1257 kvfree(in); 1258 1259 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); 1260 1261 return mr; 1262 1263 err_2: 1264 kvfree(in); 1265 err_1: 1266 kfree(mr); 1267 return ERR_PTR(err); 1268 } 1269 1270 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, 1271 u64 length, int acc, int mode) 1272 { 1273 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1274 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1275 struct mlx5_ib_mr *mr; 1276 void *mkc; 1277 u32 *in; 1278 int err; 1279 1280 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1281 if (!mr) 1282 return ERR_PTR(-ENOMEM); 1283 1284 in = kzalloc(inlen, GFP_KERNEL); 1285 if (!in) { 1286 err = -ENOMEM; 1287 goto err_free; 1288 } 1289 1290 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1291 1292 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3); 1293 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7); 1294 MLX5_SET64(mkc, mkc, len, length); 1295 set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd); 1296 1297 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1298 if (err) 1299 goto err_in; 1300 1301 kfree(in); 1302 1303 set_mr_fields(dev, mr, length, acc, start_addr); 1304 1305 return &mr->ibmr; 1306 1307 err_in: 1308 kfree(in); 1309 1310 err_free: 1311 kfree(mr); 1312 1313 return ERR_PTR(err); 1314 } 1315 1316 int mlx5_ib_advise_mr(struct ib_pd *pd, 1317 enum ib_uverbs_advise_mr_advice advice, 1318 u32 flags, 1319 struct ib_sge *sg_list, 1320 u32 num_sge, 1321 struct uverbs_attr_bundle *attrs) 1322 { 1323 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH && 1324 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && 1325 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT) 1326 return -EOPNOTSUPP; 1327 1328 return mlx5_ib_advise_mr_prefetch(pd, advice, flags, 1329 sg_list, num_sge); 1330 } 1331 1332 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, 1333 struct ib_dm_mr_attr *attr, 1334 struct uverbs_attr_bundle *attrs) 1335 { 1336 struct mlx5_ib_dm *mdm = to_mdm(dm); 1337 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; 1338 u64 start_addr = mdm->dev_addr + attr->offset; 1339 int mode; 1340 1341 switch (mdm->type) { 1342 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 1343 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) 1344 return ERR_PTR(-EINVAL); 1345 1346 mode = MLX5_MKC_ACCESS_MODE_MEMIC; 1347 start_addr -= pci_resource_start(dev->pdev, 0); 1348 break; 1349 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 1350 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 1351 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM: 1352 case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM: 1353 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) 1354 return ERR_PTR(-EINVAL); 1355 1356 mode = MLX5_MKC_ACCESS_MODE_SW_ICM; 1357 break; 1358 default: 1359 return ERR_PTR(-EINVAL); 1360 } 1361 1362 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, 1363 attr->access_flags, mode); 1364 } 1365 1366 static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, 1367 u64 iova, int access_flags) 1368 { 1369 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1370 struct mlx5_ib_mr *mr = NULL; 1371 bool xlt_with_umr; 1372 int err; 1373 1374 xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length); 1375 if (xlt_with_umr) { 1376 mr = alloc_cacheable_mr(pd, umem, iova, access_flags); 1377 } else { 1378 unsigned int page_size = mlx5_umem_find_best_pgsz( 1379 umem, mkc, log_page_size, 0, iova); 1380 1381 mutex_lock(&dev->slow_path_mutex); 1382 mr = reg_create(pd, umem, iova, access_flags, page_size, true); 1383 mutex_unlock(&dev->slow_path_mutex); 1384 } 1385 if (IS_ERR(mr)) { 1386 ib_umem_release(umem); 1387 return ERR_CAST(mr); 1388 } 1389 1390 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1391 1392 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); 1393 1394 if (xlt_with_umr) { 1395 /* 1396 * If the MR was created with reg_create then it will be 1397 * configured properly but left disabled. It is safe to go ahead 1398 * and configure it again via UMR while enabling it. 1399 */ 1400 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); 1401 if (err) { 1402 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1403 return ERR_PTR(err); 1404 } 1405 } 1406 return &mr->ibmr; 1407 } 1408 1409 static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length, 1410 u64 iova, int access_flags, 1411 struct ib_udata *udata) 1412 { 1413 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1414 struct ib_umem_odp *odp; 1415 struct mlx5_ib_mr *mr; 1416 int err; 1417 1418 if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) 1419 return ERR_PTR(-EOPNOTSUPP); 1420 1421 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); 1422 if (err) 1423 return ERR_PTR(err); 1424 if (!start && length == U64_MAX) { 1425 if (iova != 0) 1426 return ERR_PTR(-EINVAL); 1427 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1428 return ERR_PTR(-EINVAL); 1429 1430 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); 1431 if (IS_ERR(mr)) 1432 return ERR_CAST(mr); 1433 return &mr->ibmr; 1434 } 1435 1436 /* ODP requires xlt update via umr to work. */ 1437 if (!mlx5r_umr_can_load_pas(dev, length)) 1438 return ERR_PTR(-EINVAL); 1439 1440 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, 1441 &mlx5_mn_ops); 1442 if (IS_ERR(odp)) 1443 return ERR_CAST(odp); 1444 1445 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); 1446 if (IS_ERR(mr)) { 1447 ib_umem_release(&odp->umem); 1448 return ERR_CAST(mr); 1449 } 1450 xa_init(&mr->implicit_children); 1451 1452 odp->private = mr; 1453 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); 1454 if (err) 1455 goto err_dereg_mr; 1456 1457 err = mlx5_ib_init_odp_mr(mr); 1458 if (err) 1459 goto err_dereg_mr; 1460 return &mr->ibmr; 1461 1462 err_dereg_mr: 1463 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1464 return ERR_PTR(err); 1465 } 1466 1467 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 1468 u64 iova, int access_flags, 1469 struct ib_udata *udata) 1470 { 1471 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1472 struct ib_umem *umem; 1473 int err; 1474 1475 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1476 return ERR_PTR(-EOPNOTSUPP); 1477 1478 mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", 1479 start, iova, length, access_flags); 1480 1481 err = mlx5r_umr_resource_init(dev); 1482 if (err) 1483 return ERR_PTR(err); 1484 1485 if (access_flags & IB_ACCESS_ON_DEMAND) 1486 return create_user_odp_mr(pd, start, length, iova, access_flags, 1487 udata); 1488 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); 1489 if (IS_ERR(umem)) 1490 return ERR_CAST(umem); 1491 return create_real_mr(pd, umem, iova, access_flags); 1492 } 1493 1494 static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) 1495 { 1496 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; 1497 struct mlx5_ib_mr *mr = umem_dmabuf->private; 1498 1499 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); 1500 1501 if (!umem_dmabuf->sgt) 1502 return; 1503 1504 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); 1505 ib_umem_dmabuf_unmap_pages(umem_dmabuf); 1506 } 1507 1508 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = { 1509 .allow_peer2peer = 1, 1510 .move_notify = mlx5_ib_dmabuf_invalidate_cb, 1511 }; 1512 1513 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, 1514 u64 length, u64 virt_addr, 1515 int fd, int access_flags, 1516 struct ib_udata *udata) 1517 { 1518 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1519 struct mlx5_ib_mr *mr = NULL; 1520 struct ib_umem_dmabuf *umem_dmabuf; 1521 int err; 1522 1523 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || 1524 !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) 1525 return ERR_PTR(-EOPNOTSUPP); 1526 1527 mlx5_ib_dbg(dev, 1528 "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n", 1529 offset, virt_addr, length, fd, access_flags); 1530 1531 err = mlx5r_umr_resource_init(dev); 1532 if (err) 1533 return ERR_PTR(err); 1534 1535 /* dmabuf requires xlt update via umr to work. */ 1536 if (!mlx5r_umr_can_load_pas(dev, length)) 1537 return ERR_PTR(-EINVAL); 1538 1539 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, 1540 access_flags, 1541 &mlx5_ib_dmabuf_attach_ops); 1542 if (IS_ERR(umem_dmabuf)) { 1543 mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n", 1544 PTR_ERR(umem_dmabuf)); 1545 return ERR_CAST(umem_dmabuf); 1546 } 1547 1548 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, 1549 access_flags); 1550 if (IS_ERR(mr)) { 1551 ib_umem_release(&umem_dmabuf->umem); 1552 return ERR_CAST(mr); 1553 } 1554 1555 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1556 1557 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); 1558 umem_dmabuf->private = mr; 1559 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); 1560 if (err) 1561 goto err_dereg_mr; 1562 1563 err = mlx5_ib_init_dmabuf_mr(mr); 1564 if (err) 1565 goto err_dereg_mr; 1566 return &mr->ibmr; 1567 1568 err_dereg_mr: 1569 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1570 return ERR_PTR(err); 1571 } 1572 1573 /* 1574 * True if the change in access flags can be done via UMR, only some access 1575 * flags can be updated. 1576 */ 1577 static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev, 1578 unsigned int current_access_flags, 1579 unsigned int target_access_flags) 1580 { 1581 unsigned int diffs = current_access_flags ^ target_access_flags; 1582 1583 if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 1584 IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING | 1585 IB_ACCESS_REMOTE_ATOMIC)) 1586 return false; 1587 return mlx5r_umr_can_reconfig(dev, current_access_flags, 1588 target_access_flags); 1589 } 1590 1591 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, 1592 struct ib_umem *new_umem, 1593 int new_access_flags, u64 iova, 1594 unsigned long *page_size) 1595 { 1596 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); 1597 1598 /* We only track the allocated sizes of MRs from the cache */ 1599 if (!mr->mmkey.cache_ent) 1600 return false; 1601 if (!mlx5r_umr_can_load_pas(dev, new_umem->length)) 1602 return false; 1603 1604 *page_size = 1605 mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova); 1606 if (WARN_ON(!*page_size)) 1607 return false; 1608 return (mr->mmkey.cache_ent->rb_key.ndescs) >= 1609 ib_umem_num_dma_blocks(new_umem, *page_size); 1610 } 1611 1612 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, 1613 int access_flags, int flags, struct ib_umem *new_umem, 1614 u64 iova, unsigned long page_size) 1615 { 1616 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); 1617 int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE; 1618 struct ib_umem *old_umem = mr->umem; 1619 int err; 1620 1621 /* 1622 * To keep everything simple the MR is revoked before we start to mess 1623 * with it. This ensure the change is atomic relative to any use of the 1624 * MR. 1625 */ 1626 err = mlx5r_umr_revoke_mr(mr); 1627 if (err) 1628 return err; 1629 1630 if (flags & IB_MR_REREG_PD) { 1631 mr->ibmr.pd = pd; 1632 upd_flags |= MLX5_IB_UPD_XLT_PD; 1633 } 1634 if (flags & IB_MR_REREG_ACCESS) { 1635 mr->access_flags = access_flags; 1636 upd_flags |= MLX5_IB_UPD_XLT_ACCESS; 1637 } 1638 1639 mr->ibmr.iova = iova; 1640 mr->ibmr.length = new_umem->length; 1641 mr->page_shift = order_base_2(page_size); 1642 mr->umem = new_umem; 1643 err = mlx5r_umr_update_mr_pas(mr, upd_flags); 1644 if (err) { 1645 /* 1646 * The MR is revoked at this point so there is no issue to free 1647 * new_umem. 1648 */ 1649 mr->umem = old_umem; 1650 return err; 1651 } 1652 1653 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); 1654 ib_umem_release(old_umem); 1655 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); 1656 return 0; 1657 } 1658 1659 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, 1660 u64 length, u64 iova, int new_access_flags, 1661 struct ib_pd *new_pd, 1662 struct ib_udata *udata) 1663 { 1664 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); 1665 struct mlx5_ib_mr *mr = to_mmr(ib_mr); 1666 int err; 1667 1668 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1669 return ERR_PTR(-EOPNOTSUPP); 1670 1671 mlx5_ib_dbg( 1672 dev, 1673 "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", 1674 start, iova, length, new_access_flags); 1675 1676 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) 1677 return ERR_PTR(-EOPNOTSUPP); 1678 1679 if (!(flags & IB_MR_REREG_ACCESS)) 1680 new_access_flags = mr->access_flags; 1681 if (!(flags & IB_MR_REREG_PD)) 1682 new_pd = ib_mr->pd; 1683 1684 if (!(flags & IB_MR_REREG_TRANS)) { 1685 struct ib_umem *umem; 1686 1687 /* Fast path for PD/access change */ 1688 if (can_use_umr_rereg_access(dev, mr->access_flags, 1689 new_access_flags)) { 1690 err = mlx5r_umr_rereg_pd_access(mr, new_pd, 1691 new_access_flags); 1692 if (err) 1693 return ERR_PTR(err); 1694 return NULL; 1695 } 1696 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ 1697 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) 1698 goto recreate; 1699 1700 /* 1701 * Only one active MR can refer to a umem at one time, revoke 1702 * the old MR before assigning the umem to the new one. 1703 */ 1704 err = mlx5r_umr_revoke_mr(mr); 1705 if (err) 1706 return ERR_PTR(err); 1707 umem = mr->umem; 1708 mr->umem = NULL; 1709 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); 1710 1711 return create_real_mr(new_pd, umem, mr->ibmr.iova, 1712 new_access_flags); 1713 } 1714 1715 /* 1716 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does 1717 * but the logic around releasing the umem is different 1718 */ 1719 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) 1720 goto recreate; 1721 1722 if (!(new_access_flags & IB_ACCESS_ON_DEMAND) && 1723 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { 1724 struct ib_umem *new_umem; 1725 unsigned long page_size; 1726 1727 new_umem = ib_umem_get(&dev->ib_dev, start, length, 1728 new_access_flags); 1729 if (IS_ERR(new_umem)) 1730 return ERR_CAST(new_umem); 1731 1732 /* Fast path for PAS change */ 1733 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, 1734 &page_size)) { 1735 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, 1736 new_umem, iova, page_size); 1737 if (err) { 1738 ib_umem_release(new_umem); 1739 return ERR_PTR(err); 1740 } 1741 return NULL; 1742 } 1743 return create_real_mr(new_pd, new_umem, iova, new_access_flags); 1744 } 1745 1746 /* 1747 * Everything else has no state we can preserve, just create a new MR 1748 * from scratch 1749 */ 1750 recreate: 1751 return mlx5_ib_reg_user_mr(new_pd, start, length, iova, 1752 new_access_flags, udata); 1753 } 1754 1755 static int 1756 mlx5_alloc_priv_descs(struct ib_device *device, 1757 struct mlx5_ib_mr *mr, 1758 int ndescs, 1759 int desc_size) 1760 { 1761 struct mlx5_ib_dev *dev = to_mdev(device); 1762 struct device *ddev = &dev->mdev->pdev->dev; 1763 int size = ndescs * desc_size; 1764 int add_size; 1765 int ret; 1766 1767 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); 1768 if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) { 1769 int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size)); 1770 1771 add_size = min_t(int, end - size, add_size); 1772 } 1773 1774 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); 1775 if (!mr->descs_alloc) 1776 return -ENOMEM; 1777 1778 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); 1779 1780 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); 1781 if (dma_mapping_error(ddev, mr->desc_map)) { 1782 ret = -ENOMEM; 1783 goto err; 1784 } 1785 1786 return 0; 1787 err: 1788 kfree(mr->descs_alloc); 1789 1790 return ret; 1791 } 1792 1793 static void 1794 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) 1795 { 1796 if (!mr->umem && mr->descs) { 1797 struct ib_device *device = mr->ibmr.device; 1798 int size = mr->max_descs * mr->desc_size; 1799 struct mlx5_ib_dev *dev = to_mdev(device); 1800 1801 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, 1802 DMA_TO_DEVICE); 1803 kfree(mr->descs_alloc); 1804 mr->descs = NULL; 1805 } 1806 } 1807 1808 static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, 1809 struct mlx5_ib_mr *mr) 1810 { 1811 struct mlx5_mkey_cache *cache = &dev->cache; 1812 struct mlx5_cache_ent *ent; 1813 int ret; 1814 1815 if (mr->mmkey.cache_ent) { 1816 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); 1817 mr->mmkey.cache_ent->in_use--; 1818 goto end; 1819 } 1820 1821 mutex_lock(&cache->rb_lock); 1822 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); 1823 if (ent) { 1824 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { 1825 if (ent->disabled) { 1826 mutex_unlock(&cache->rb_lock); 1827 return -EOPNOTSUPP; 1828 } 1829 mr->mmkey.cache_ent = ent; 1830 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); 1831 mutex_unlock(&cache->rb_lock); 1832 goto end; 1833 } 1834 } 1835 1836 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); 1837 mutex_unlock(&cache->rb_lock); 1838 if (IS_ERR(ent)) 1839 return PTR_ERR(ent); 1840 1841 mr->mmkey.cache_ent = ent; 1842 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); 1843 1844 end: 1845 ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key); 1846 spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); 1847 return ret; 1848 } 1849 1850 static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) 1851 { 1852 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); 1853 struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; 1854 1855 if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) 1856 return 0; 1857 1858 if (ent) { 1859 spin_lock_irq(&ent->mkeys_queue.lock); 1860 ent->in_use--; 1861 mr->mmkey.cache_ent = NULL; 1862 spin_unlock_irq(&ent->mkeys_queue.lock); 1863 } 1864 return destroy_mkey(dev, mr); 1865 } 1866 1867 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) 1868 { 1869 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1870 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); 1871 int rc; 1872 1873 /* 1874 * Any async use of the mr must hold the refcount, once the refcount 1875 * goes to zero no other thread, such as ODP page faults, prefetch, any 1876 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it. 1877 */ 1878 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && 1879 refcount_read(&mr->mmkey.usecount) != 0 && 1880 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) 1881 mlx5r_deref_wait_odp_mkey(&mr->mmkey); 1882 1883 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { 1884 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), 1885 mr->sig, NULL, GFP_KERNEL); 1886 1887 if (mr->mtt_mr) { 1888 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); 1889 if (rc) 1890 return rc; 1891 mr->mtt_mr = NULL; 1892 } 1893 if (mr->klm_mr) { 1894 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); 1895 if (rc) 1896 return rc; 1897 mr->klm_mr = NULL; 1898 } 1899 1900 if (mlx5_core_destroy_psv(dev->mdev, 1901 mr->sig->psv_memory.psv_idx)) 1902 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1903 mr->sig->psv_memory.psv_idx); 1904 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) 1905 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1906 mr->sig->psv_wire.psv_idx); 1907 kfree(mr->sig); 1908 mr->sig = NULL; 1909 } 1910 1911 /* Stop DMA */ 1912 rc = mlx5_revoke_mr(mr); 1913 if (rc) 1914 return rc; 1915 1916 if (mr->umem) { 1917 bool is_odp = is_odp_mr(mr); 1918 1919 if (!is_odp) 1920 atomic_sub(ib_umem_num_pages(mr->umem), 1921 &dev->mdev->priv.reg_pages); 1922 ib_umem_release(mr->umem); 1923 if (is_odp) 1924 mlx5_ib_free_odp_mr(mr); 1925 } 1926 1927 if (!mr->mmkey.cache_ent) 1928 mlx5_free_priv_descs(mr); 1929 1930 kfree(mr); 1931 return 0; 1932 } 1933 1934 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, 1935 int access_mode, int page_shift) 1936 { 1937 void *mkc; 1938 1939 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1940 1941 /* This is only used from the kernel, so setting the PD is OK. */ 1942 set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd); 1943 MLX5_SET(mkc, mkc, free, 1); 1944 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 1945 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); 1946 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); 1947 MLX5_SET(mkc, mkc, umr_en, 1); 1948 MLX5_SET(mkc, mkc, log_page_size, page_shift); 1949 } 1950 1951 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 1952 int ndescs, int desc_size, int page_shift, 1953 int access_mode, u32 *in, int inlen) 1954 { 1955 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1956 int err; 1957 1958 mr->access_mode = access_mode; 1959 mr->desc_size = desc_size; 1960 mr->max_descs = ndescs; 1961 1962 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); 1963 if (err) 1964 return err; 1965 1966 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift); 1967 1968 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1969 if (err) 1970 goto err_free_descs; 1971 1972 mr->mmkey.type = MLX5_MKEY_MR; 1973 mr->ibmr.lkey = mr->mmkey.key; 1974 mr->ibmr.rkey = mr->mmkey.key; 1975 1976 return 0; 1977 1978 err_free_descs: 1979 mlx5_free_priv_descs(mr); 1980 return err; 1981 } 1982 1983 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, 1984 u32 max_num_sg, u32 max_num_meta_sg, 1985 int desc_size, int access_mode) 1986 { 1987 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1988 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4); 1989 int page_shift = 0; 1990 struct mlx5_ib_mr *mr; 1991 u32 *in; 1992 int err; 1993 1994 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1995 if (!mr) 1996 return ERR_PTR(-ENOMEM); 1997 1998 mr->ibmr.pd = pd; 1999 mr->ibmr.device = pd->device; 2000 2001 in = kzalloc(inlen, GFP_KERNEL); 2002 if (!in) { 2003 err = -ENOMEM; 2004 goto err_free; 2005 } 2006 2007 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT) 2008 page_shift = PAGE_SHIFT; 2009 2010 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, 2011 access_mode, in, inlen); 2012 if (err) 2013 goto err_free_in; 2014 2015 mr->umem = NULL; 2016 kfree(in); 2017 2018 return mr; 2019 2020 err_free_in: 2021 kfree(in); 2022 err_free: 2023 kfree(mr); 2024 return ERR_PTR(err); 2025 } 2026 2027 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2028 int ndescs, u32 *in, int inlen) 2029 { 2030 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), 2031 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in, 2032 inlen); 2033 } 2034 2035 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2036 int ndescs, u32 *in, int inlen) 2037 { 2038 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), 2039 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); 2040 } 2041 2042 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2043 int max_num_sg, int max_num_meta_sg, 2044 u32 *in, int inlen) 2045 { 2046 struct mlx5_ib_dev *dev = to_mdev(pd->device); 2047 u32 psv_index[2]; 2048 void *mkc; 2049 int err; 2050 2051 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); 2052 if (!mr->sig) 2053 return -ENOMEM; 2054 2055 /* create mem & wire PSVs */ 2056 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); 2057 if (err) 2058 goto err_free_sig; 2059 2060 mr->sig->psv_memory.psv_idx = psv_index[0]; 2061 mr->sig->psv_wire.psv_idx = psv_index[1]; 2062 2063 mr->sig->sig_status_checked = true; 2064 mr->sig->sig_err_exists = false; 2065 /* Next UMR, Arm SIGERR */ 2066 ++mr->sig->sigerr_count; 2067 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, 2068 sizeof(struct mlx5_klm), 2069 MLX5_MKC_ACCESS_MODE_KLMS); 2070 if (IS_ERR(mr->klm_mr)) { 2071 err = PTR_ERR(mr->klm_mr); 2072 goto err_destroy_psv; 2073 } 2074 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, 2075 sizeof(struct mlx5_mtt), 2076 MLX5_MKC_ACCESS_MODE_MTT); 2077 if (IS_ERR(mr->mtt_mr)) { 2078 err = PTR_ERR(mr->mtt_mr); 2079 goto err_free_klm_mr; 2080 } 2081 2082 /* Set bsf descriptors for mkey */ 2083 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 2084 MLX5_SET(mkc, mkc, bsf_en, 1); 2085 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); 2086 2087 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, 2088 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); 2089 if (err) 2090 goto err_free_mtt_mr; 2091 2092 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), 2093 mr->sig, GFP_KERNEL)); 2094 if (err) 2095 goto err_free_descs; 2096 return 0; 2097 2098 err_free_descs: 2099 destroy_mkey(dev, mr); 2100 mlx5_free_priv_descs(mr); 2101 err_free_mtt_mr: 2102 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); 2103 mr->mtt_mr = NULL; 2104 err_free_klm_mr: 2105 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); 2106 mr->klm_mr = NULL; 2107 err_destroy_psv: 2108 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) 2109 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 2110 mr->sig->psv_memory.psv_idx); 2111 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) 2112 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 2113 mr->sig->psv_wire.psv_idx); 2114 err_free_sig: 2115 kfree(mr->sig); 2116 2117 return err; 2118 } 2119 2120 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, 2121 enum ib_mr_type mr_type, u32 max_num_sg, 2122 u32 max_num_meta_sg) 2123 { 2124 struct mlx5_ib_dev *dev = to_mdev(pd->device); 2125 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 2126 int ndescs = ALIGN(max_num_sg, 4); 2127 struct mlx5_ib_mr *mr; 2128 u32 *in; 2129 int err; 2130 2131 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 2132 if (!mr) 2133 return ERR_PTR(-ENOMEM); 2134 2135 in = kzalloc(inlen, GFP_KERNEL); 2136 if (!in) { 2137 err = -ENOMEM; 2138 goto err_free; 2139 } 2140 2141 mr->ibmr.device = pd->device; 2142 mr->umem = NULL; 2143 2144 switch (mr_type) { 2145 case IB_MR_TYPE_MEM_REG: 2146 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); 2147 break; 2148 case IB_MR_TYPE_SG_GAPS: 2149 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); 2150 break; 2151 case IB_MR_TYPE_INTEGRITY: 2152 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, 2153 max_num_meta_sg, in, inlen); 2154 break; 2155 default: 2156 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); 2157 err = -EINVAL; 2158 } 2159 2160 if (err) 2161 goto err_free_in; 2162 2163 kfree(in); 2164 2165 return &mr->ibmr; 2166 2167 err_free_in: 2168 kfree(in); 2169 err_free: 2170 kfree(mr); 2171 return ERR_PTR(err); 2172 } 2173 2174 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 2175 u32 max_num_sg) 2176 { 2177 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0); 2178 } 2179 2180 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, 2181 u32 max_num_sg, u32 max_num_meta_sg) 2182 { 2183 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg, 2184 max_num_meta_sg); 2185 } 2186 2187 int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) 2188 { 2189 struct mlx5_ib_dev *dev = to_mdev(ibmw->device); 2190 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 2191 struct mlx5_ib_mw *mw = to_mmw(ibmw); 2192 unsigned int ndescs; 2193 u32 *in = NULL; 2194 void *mkc; 2195 int err; 2196 struct mlx5_ib_alloc_mw req = {}; 2197 struct { 2198 __u32 comp_mask; 2199 __u32 response_length; 2200 } resp = {}; 2201 2202 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); 2203 if (err) 2204 return err; 2205 2206 if (req.comp_mask || req.reserved1 || req.reserved2) 2207 return -EOPNOTSUPP; 2208 2209 if (udata->inlen > sizeof(req) && 2210 !ib_is_udata_cleared(udata, sizeof(req), 2211 udata->inlen - sizeof(req))) 2212 return -EOPNOTSUPP; 2213 2214 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); 2215 2216 in = kzalloc(inlen, GFP_KERNEL); 2217 if (!in) 2218 return -ENOMEM; 2219 2220 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 2221 2222 MLX5_SET(mkc, mkc, free, 1); 2223 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 2224 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); 2225 MLX5_SET(mkc, mkc, umr_en, 1); 2226 MLX5_SET(mkc, mkc, lr, 1); 2227 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); 2228 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); 2229 MLX5_SET(mkc, mkc, qpn, 0xffffff); 2230 2231 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); 2232 if (err) 2233 goto free; 2234 2235 mw->mmkey.type = MLX5_MKEY_MW; 2236 ibmw->rkey = mw->mmkey.key; 2237 mw->mmkey.ndescs = ndescs; 2238 2239 resp.response_length = 2240 min(offsetofend(typeof(resp), response_length), udata->outlen); 2241 if (resp.response_length) { 2242 err = ib_copy_to_udata(udata, &resp, resp.response_length); 2243 if (err) 2244 goto free_mkey; 2245 } 2246 2247 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 2248 err = mlx5r_store_odp_mkey(dev, &mw->mmkey); 2249 if (err) 2250 goto free_mkey; 2251 } 2252 2253 kfree(in); 2254 return 0; 2255 2256 free_mkey: 2257 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); 2258 free: 2259 kfree(in); 2260 return err; 2261 } 2262 2263 int mlx5_ib_dealloc_mw(struct ib_mw *mw) 2264 { 2265 struct mlx5_ib_dev *dev = to_mdev(mw->device); 2266 struct mlx5_ib_mw *mmw = to_mmw(mw); 2267 2268 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && 2269 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key))) 2270 /* 2271 * pagefault_single_data_segment() may be accessing mmw 2272 * if the user bound an ODP MR to this MW. 2273 */ 2274 mlx5r_deref_wait_odp_mkey(&mmw->mmkey); 2275 2276 return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); 2277 } 2278 2279 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, 2280 struct ib_mr_status *mr_status) 2281 { 2282 struct mlx5_ib_mr *mmr = to_mmr(ibmr); 2283 int ret = 0; 2284 2285 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) { 2286 pr_err("Invalid status check mask\n"); 2287 ret = -EINVAL; 2288 goto done; 2289 } 2290 2291 mr_status->fail_status = 0; 2292 if (check_mask & IB_MR_CHECK_SIG_STATUS) { 2293 if (!mmr->sig) { 2294 ret = -EINVAL; 2295 pr_err("signature status check requested on a non-signature enabled MR\n"); 2296 goto done; 2297 } 2298 2299 mmr->sig->sig_status_checked = true; 2300 if (!mmr->sig->sig_err_exists) 2301 goto done; 2302 2303 if (ibmr->lkey == mmr->sig->err_item.key) 2304 memcpy(&mr_status->sig_err, &mmr->sig->err_item, 2305 sizeof(mr_status->sig_err)); 2306 else { 2307 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; 2308 mr_status->sig_err.sig_err_offset = 0; 2309 mr_status->sig_err.key = mmr->sig->err_item.key; 2310 } 2311 2312 mmr->sig->sig_err_exists = false; 2313 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; 2314 } 2315 2316 done: 2317 return ret; 2318 } 2319 2320 static int 2321 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2322 int data_sg_nents, unsigned int *data_sg_offset, 2323 struct scatterlist *meta_sg, int meta_sg_nents, 2324 unsigned int *meta_sg_offset) 2325 { 2326 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2327 unsigned int sg_offset = 0; 2328 int n = 0; 2329 2330 mr->meta_length = 0; 2331 if (data_sg_nents == 1) { 2332 n++; 2333 mr->mmkey.ndescs = 1; 2334 if (data_sg_offset) 2335 sg_offset = *data_sg_offset; 2336 mr->data_length = sg_dma_len(data_sg) - sg_offset; 2337 mr->data_iova = sg_dma_address(data_sg) + sg_offset; 2338 if (meta_sg_nents == 1) { 2339 n++; 2340 mr->meta_ndescs = 1; 2341 if (meta_sg_offset) 2342 sg_offset = *meta_sg_offset; 2343 else 2344 sg_offset = 0; 2345 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; 2346 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; 2347 } 2348 ibmr->length = mr->data_length + mr->meta_length; 2349 } 2350 2351 return n; 2352 } 2353 2354 static int 2355 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, 2356 struct scatterlist *sgl, 2357 unsigned short sg_nents, 2358 unsigned int *sg_offset_p, 2359 struct scatterlist *meta_sgl, 2360 unsigned short meta_sg_nents, 2361 unsigned int *meta_sg_offset_p) 2362 { 2363 struct scatterlist *sg = sgl; 2364 struct mlx5_klm *klms = mr->descs; 2365 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 2366 u32 lkey = mr->ibmr.pd->local_dma_lkey; 2367 int i, j = 0; 2368 2369 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 2370 mr->ibmr.length = 0; 2371 2372 for_each_sg(sgl, sg, sg_nents, i) { 2373 if (unlikely(i >= mr->max_descs)) 2374 break; 2375 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 2376 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 2377 klms[i].key = cpu_to_be32(lkey); 2378 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 2379 2380 sg_offset = 0; 2381 } 2382 2383 if (sg_offset_p) 2384 *sg_offset_p = sg_offset; 2385 2386 mr->mmkey.ndescs = i; 2387 mr->data_length = mr->ibmr.length; 2388 2389 if (meta_sg_nents) { 2390 sg = meta_sgl; 2391 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0; 2392 for_each_sg(meta_sgl, sg, meta_sg_nents, j) { 2393 if (unlikely(i + j >= mr->max_descs)) 2394 break; 2395 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) + 2396 sg_offset); 2397 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - 2398 sg_offset); 2399 klms[i + j].key = cpu_to_be32(lkey); 2400 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 2401 2402 sg_offset = 0; 2403 } 2404 if (meta_sg_offset_p) 2405 *meta_sg_offset_p = sg_offset; 2406 2407 mr->meta_ndescs = j; 2408 mr->meta_length = mr->ibmr.length - mr->data_length; 2409 } 2410 2411 return i + j; 2412 } 2413 2414 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) 2415 { 2416 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2417 __be64 *descs; 2418 2419 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) 2420 return -ENOMEM; 2421 2422 descs = mr->descs; 2423 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 2424 2425 return 0; 2426 } 2427 2428 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) 2429 { 2430 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2431 __be64 *descs; 2432 2433 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) 2434 return -ENOMEM; 2435 2436 descs = mr->descs; 2437 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = 2438 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 2439 2440 return 0; 2441 } 2442 2443 static int 2444 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2445 int data_sg_nents, unsigned int *data_sg_offset, 2446 struct scatterlist *meta_sg, int meta_sg_nents, 2447 unsigned int *meta_sg_offset) 2448 { 2449 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2450 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; 2451 int n; 2452 2453 pi_mr->mmkey.ndescs = 0; 2454 pi_mr->meta_ndescs = 0; 2455 pi_mr->meta_length = 0; 2456 2457 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, 2458 pi_mr->desc_size * pi_mr->max_descs, 2459 DMA_TO_DEVICE); 2460 2461 pi_mr->ibmr.page_size = ibmr->page_size; 2462 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, 2463 mlx5_set_page); 2464 if (n != data_sg_nents) 2465 return n; 2466 2467 pi_mr->data_iova = pi_mr->ibmr.iova; 2468 pi_mr->data_length = pi_mr->ibmr.length; 2469 pi_mr->ibmr.length = pi_mr->data_length; 2470 ibmr->length = pi_mr->data_length; 2471 2472 if (meta_sg_nents) { 2473 u64 page_mask = ~((u64)ibmr->page_size - 1); 2474 u64 iova = pi_mr->data_iova; 2475 2476 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, 2477 meta_sg_offset, mlx5_set_page_pi); 2478 2479 pi_mr->meta_length = pi_mr->ibmr.length; 2480 /* 2481 * PI address for the HW is the offset of the metadata address 2482 * relative to the first data page address. 2483 * It equals to first data page address + size of data pages + 2484 * metadata offset at the first metadata page 2485 */ 2486 pi_mr->pi_iova = (iova & page_mask) + 2487 pi_mr->mmkey.ndescs * ibmr->page_size + 2488 (pi_mr->ibmr.iova & ~page_mask); 2489 /* 2490 * In order to use one MTT MR for data and metadata, we register 2491 * also the gaps between the end of the data and the start of 2492 * the metadata (the sig MR will verify that the HW will access 2493 * to right addresses). This mapping is safe because we use 2494 * internal mkey for the registration. 2495 */ 2496 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; 2497 pi_mr->ibmr.iova = iova; 2498 ibmr->length += pi_mr->meta_length; 2499 } 2500 2501 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, 2502 pi_mr->desc_size * pi_mr->max_descs, 2503 DMA_TO_DEVICE); 2504 2505 return n; 2506 } 2507 2508 static int 2509 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2510 int data_sg_nents, unsigned int *data_sg_offset, 2511 struct scatterlist *meta_sg, int meta_sg_nents, 2512 unsigned int *meta_sg_offset) 2513 { 2514 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2515 struct mlx5_ib_mr *pi_mr = mr->klm_mr; 2516 int n; 2517 2518 pi_mr->mmkey.ndescs = 0; 2519 pi_mr->meta_ndescs = 0; 2520 pi_mr->meta_length = 0; 2521 2522 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, 2523 pi_mr->desc_size * pi_mr->max_descs, 2524 DMA_TO_DEVICE); 2525 2526 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset, 2527 meta_sg, meta_sg_nents, meta_sg_offset); 2528 2529 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, 2530 pi_mr->desc_size * pi_mr->max_descs, 2531 DMA_TO_DEVICE); 2532 2533 /* This is zero-based memory region */ 2534 pi_mr->data_iova = 0; 2535 pi_mr->ibmr.iova = 0; 2536 pi_mr->pi_iova = pi_mr->data_length; 2537 ibmr->length = pi_mr->ibmr.length; 2538 2539 return n; 2540 } 2541 2542 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2543 int data_sg_nents, unsigned int *data_sg_offset, 2544 struct scatterlist *meta_sg, int meta_sg_nents, 2545 unsigned int *meta_sg_offset) 2546 { 2547 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2548 struct mlx5_ib_mr *pi_mr = NULL; 2549 int n; 2550 2551 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); 2552 2553 mr->mmkey.ndescs = 0; 2554 mr->data_length = 0; 2555 mr->data_iova = 0; 2556 mr->meta_ndescs = 0; 2557 mr->pi_iova = 0; 2558 /* 2559 * As a performance optimization, if possible, there is no need to 2560 * perform UMR operation to register the data/metadata buffers. 2561 * First try to map the sg lists to PA descriptors with local_dma_lkey. 2562 * Fallback to UMR only in case of a failure. 2563 */ 2564 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2565 data_sg_offset, meta_sg, meta_sg_nents, 2566 meta_sg_offset); 2567 if (n == data_sg_nents + meta_sg_nents) 2568 goto out; 2569 /* 2570 * As a performance optimization, if possible, there is no need to map 2571 * the sg lists to KLM descriptors. First try to map the sg lists to MTT 2572 * descriptors and fallback to KLM only in case of a failure. 2573 * It's more efficient for the HW to work with MTT descriptors 2574 * (especially in high load). 2575 * Use KLM (indirect access) only if it's mandatory. 2576 */ 2577 pi_mr = mr->mtt_mr; 2578 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2579 data_sg_offset, meta_sg, meta_sg_nents, 2580 meta_sg_offset); 2581 if (n == data_sg_nents + meta_sg_nents) 2582 goto out; 2583 2584 pi_mr = mr->klm_mr; 2585 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2586 data_sg_offset, meta_sg, meta_sg_nents, 2587 meta_sg_offset); 2588 if (unlikely(n != data_sg_nents + meta_sg_nents)) 2589 return -ENOMEM; 2590 2591 out: 2592 /* This is zero-based memory region */ 2593 ibmr->iova = 0; 2594 mr->pi_mr = pi_mr; 2595 if (pi_mr) 2596 ibmr->sig_attrs->meta_length = pi_mr->meta_length; 2597 else 2598 ibmr->sig_attrs->meta_length = mr->meta_length; 2599 2600 return 0; 2601 } 2602 2603 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 2604 unsigned int *sg_offset) 2605 { 2606 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2607 int n; 2608 2609 mr->mmkey.ndescs = 0; 2610 2611 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, 2612 mr->desc_size * mr->max_descs, 2613 DMA_TO_DEVICE); 2614 2615 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 2616 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, 2617 NULL); 2618 else 2619 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 2620 mlx5_set_page); 2621 2622 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, 2623 mr->desc_size * mr->max_descs, 2624 DMA_TO_DEVICE); 2625 2626 return n; 2627 } 2628