/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

struct mthca_mtt {
        struct mthca_buddy *buddy;
        int                 order;
        u32                 first_seg;
};
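/*
 * An mthca_mtt tracks 2^order MTT segments starting at first_seg,
 * handed out by the buddy allocator below.  Illustrative example
 * (assuming the usual 64-byte segment, i.e. eight 8-byte MTT entries
 * per segment): a 20-page region needs 20 entries, which rounds up
 * to 32 = 8 << 2, so order = 2 and four contiguous segments are used.
 */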
/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
        __be32 flags;
        __be32 page_size;
        __be32 key;
        __be32 pd;
        __be64 start;
        __be64 length;
        __be32 lkey;
        __be32 window_count;
        __be32 window_count_limit;
        __be64 mtt_seg;
        __be32 mtt_sz;          /* Arbel only */
        u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}
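/*
 * Worked example for the two functions above (illustrative only):
 * with max_order = 4 and a completely free allocator, an order-1
 * request finds the single free order-4 block, splits it on the way
 * down (leaving one free buddy at each of orders 3, 2 and 1) and
 * returns segment 0.  Freeing segment 0 at order 1 then walks back
 * up, coalescing with each free buddy until the order-4 block is
 * whole again.
 */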
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i],
                            1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}

static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
                                 struct mthca_buddy *buddy)
{
        u32 seg = mthca_buddy_alloc(buddy, order);

        if (seg == -1)
                return -1;

        if (mthca_is_memfree(dev))
                if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
                                          seg + (1 << order) - 1)) {
                        mthca_buddy_free(buddy, seg, order);
                        seg = -1;
                }

        return seg;
}

static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
                                           struct mthca_buddy *buddy)
{
        struct mthca_mtt *mtt;
        int i;

        if (size <= 0)
                return ERR_PTR(-EINVAL);

        mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->buddy = buddy;
        mtt->order = 0;
        for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
                ++mtt->order;

        mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
        if (mtt->first_seg == -1) {
                kfree(mtt);
                return ERR_PTR(-ENOMEM);
        }

        return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
        return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
        if (!mtt)
                return;

        mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

        mthca_table_put_range(dev, dev->mr_table.mtt_table,
                              mtt->first_seg,
                              mtt->first_seg + (1 << mtt->order) - 1);

        kfree(mtt);
}

static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                             int start_index, u64 *buffer_list, int list_len)
{
        struct mthca_mailbox *mailbox;
        __be64 *mtt_entry;
        int err = 0;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mtt_entry = mailbox->buf;

        while (list_len > 0) {
                mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
                                           mtt->first_seg * dev->limits.mtt_seg_size +
                                           start_index * 8);
                mtt_entry[1] = 0;
                for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
                        mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
                                                       MTHCA_MTT_FLAG_PRESENT);

                /*
                 * If we have an odd number of entries to write, add
                 * one more dummy entry for firmware efficiency.
                 */
                if (i & 1)
                        mtt_entry[i + 2] = 0;

                err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
                if (err) {
                        mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
                        goto out;
                }

                list_len    -= i;
                start_index += i;
                buffer_list += i;
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

int mthca_write_mtt_size(struct mthca_dev *dev)
{
        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                /*
                 * Be friendly to WRITE_MTT command
                 * and leave two empty slots for the
                 * index and reserved fields of the
                 * mailbox.
                 */
                return PAGE_SIZE / sizeof (u64) - 2;

        /* For Arbel, all MTTs must fit in the same page. */
        return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
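/*
 * The two helpers below are the direct MTT write paths used when FMRs
 * are enabled and share the main MTT buddy: on Tavor the MTT table
 * lives in HCA memory reached through an ioremapped BAR, so entries
 * are written with mthca_write64_raw(); on memfree hardware (Arbel)
 * the table sits in host memory, so entries are written through the
 * pointer returned by mthca_table_find(), bracketed by DMA syncs.
 */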
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        u64 __iomem *mtts;
        int i;

        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
                start_index * sizeof (u64);
        for (i = 0; i < list_len; ++i)
                mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
                                  mtts + i);
}

static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
        int s = start_index * sizeof (u64);

        /* For Arbel, all MTTs must fit in the same page. */
        BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
        /* Require full segments */
        BUG_ON(s % dev->limits.mtt_seg_size);

        mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
                                s / dev->limits.mtt_seg_size, &dma_handle);

        BUG_ON(!mtts);

        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
                                list_len * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
                                   list_len * sizeof (u64), DMA_TO_DEVICE);
}

int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                    int start_index, u64 *buffer_list, int list_len)
{
        int size = mthca_write_mtt_size(dev);
        int chunk;

        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

        while (list_len > 0) {
                chunk = min(size, list_len);
                if (mthca_is_memfree(dev))
                        mthca_arbel_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);
                else
                        mthca_tavor_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);

                list_len    -= chunk;
                start_index += chunk;
                buffer_list += chunk;
        }

        return 0;
}

static inline u32 tavor_hw_index_to_key(u32 ind)
{
        return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
        return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
        if (mthca_is_memfree(dev))
                return arbel_hw_index_to_key(ind);
        else
                return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
        if (mthca_is_memfree(dev))
                return arbel_key_to_hw_index(key);
        else
                return tavor_key_to_hw_index(key);
}

static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                return ((key << 20) & 0x800000) | (key & 0x7fffff);
        else
                return key;
}
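/*
 * Example of the Arbel key arithmetic above (illustrative): the key
 * is the hardware MPT index rotated left by 8 bits, so hw index
 * 0x00123456 becomes key 0x12345600, and arbel_key_to_hw_index()
 * rotates it back.  Tavor uses the index unchanged.
 */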
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
        struct mthca_mailbox *mailbox;
        struct mthca_mpt_entry *mpt_entry;
        u32 key;
        int i;
        int err;

        WARN_ON(buffer_size_shift >= 32);

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_table;
        }
        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);
        if (!mr->mtt)
                mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

        mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        mpt_entry->start     = cpu_to_be64(iova);
        mpt_entry->length    = cpu_to_be64(total_size);

        memset(&mpt_entry->lkey, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

        if (mr->mtt)
                mpt_entry->mtt_seg =
                        cpu_to_be64(dev->mr_table.mtt_base +
                                    mr->mtt->first_seg * dev->limits.mtt_seg_size);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox;
        }

        mthca_free_mailbox(dev, mailbox);
        return err;

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           u32 access, struct mthca_mr *mr)
{
        mr->mtt = NULL;
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (err) {
                mthca_free_mtt(dev, mr->mtt);
                return err;
        }

        err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
                             total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
        mthca_table_put(dev, dev->mr_table.mpt_table,
                        key_to_hw_index(dev, lkey));

        mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
        int err;

        err = mthca_HW2SW_MPT(dev, NULL,
                              key_to_hw_index(dev, mr->ibmr.lkey) &
                              (dev->limits.num_mpts - 1));
        if (err)
                mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);

        mthca_free_region(dev, mr->ibmr.lkey);
        mthca_free_mtt(dev, mr->mtt);
}
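/*
 * Usage sketch for the registration calls above (error handling
 * omitted; this mirrors what mthca_mr_alloc_phys() itself does):
 *
 *      mr->mtt = mthca_alloc_mtt(dev, list_len);
 *      mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
 *      mthca_mr_alloc(dev, pd, buffer_size_shift, iova, total_size,
 *                     access, mr);
 */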
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                    u32 access, struct mthca_fmr *mr)
{
        struct mthca_mpt_entry *mpt_entry;
        struct mthca_mailbox *mailbox;
        u64 mtt_seg;
        u32 key, idx;
        int list_len = mr->attr.max_pages;
        int err = -ENOMEM;
        int i;

        if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
                return -EINVAL;

        /* For Arbel, all MTTs must fit in the same page. */
        if (mthca_is_memfree(dev) &&
            mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
                return -EINVAL;

        mr->maps = 0;

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);

        idx = key & (dev->limits.num_mpts - 1);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;

                mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
                BUG_ON(!mr->mem.arbel.mpt);
        } else
                mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
                        sizeof *(mr->mem.tavor.mpt) * idx;

        mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_out_table;
        }

        mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;

        if (mthca_is_memfree(dev)) {
                mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
                                                      mr->mtt->first_seg,
                                                      &mr->mem.arbel.dma_handle);
                BUG_ON(!mr->mem.arbel.mtts);
        } else
                mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_free_mtt;
        }

        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);

        mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        memset(&mpt_entry->start, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
        mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox_free;
        }

        mthca_free_mailbox(dev, mailbox);
        return 0;

err_out_mailbox_free:
        mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mthca_free_region(dev, fmr->ibmr.lkey);
        mthca_free_mtt(dev, fmr->mtt);

        return 0;
}
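/*
 * FMR fast path: mthca_fmr_alloc() above does the firmware setup
 * (SW2HW_MPT) once and caches pointers to the MPT and MTT entries.
 * The map/unmap calls below then update those entries and bump the
 * key in place, with no firmware command per remap, which is what
 * makes FMRs fast.  mthca_free_fmr() is only legal once no maps are
 * outstanding (fmr->maps == 0).
 */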
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
                                  int list_len, u64 iova)
{
        int i, page_mask;

        if (list_len > fmr->attr.max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->attr.page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < list_len; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->attr.max_maps)
                return -EINVAL;

        return 0;
}

int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        struct mthca_mpt_entry mpt_entry;
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

        for (i = 0; i < list_len; ++i) {
                __be64 mtt_entry = cpu_to_be64(page_list[i] |
                                               MTHCA_MTT_FLAG_PRESENT);
                mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
        }

        mpt_entry.lkey   = cpu_to_be32(key);
        mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        mpt_entry.start  = cpu_to_be64(iova);

        __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
        memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
                    offsetof(struct mthca_mpt_entry, window_count) -
                    offsetof(struct mthca_mpt_entry, start));

        writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

        return 0;
}

int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                key += SINAI_FMR_KEY_INC;
        else
                key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

        wmb();

        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                list_len * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                     MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                   list_len * sizeof(u64), DMA_TO_DEVICE);

        fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
        fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
        fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

        wmb();

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

        wmb();

        return 0;
}

void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
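/*
 * Remap key arithmetic (illustrative): each remap above adds
 * dev->limits.num_mpts (or SINAI_FMR_KEY_INC, i.e. 1 << 24, with the
 * Sinai optimization, where adjust_key() keeps the index below bit
 * 24) to the hardware index.  Either increment leaves
 * key & (num_mpts - 1) unchanged, so the FMR keeps the same MPT
 * entry while previously handed-out keys stop matching it.
 */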
int mthca_init_mr_table(struct mthca_dev *dev)
{
        phys_addr_t addr;
        int mpts, mtts, err, i;

        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
                               dev->limits.num_mpts,
                               ~0, dev->limits.reserved_mrws);
        if (err)
                return err;

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
                dev->limits.fmr_reserved_mtts = 0;
        else
                dev->mthca_flags |= MTHCA_FLAG_FMR;

        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                mthca_dbg(dev, "Memory key throughput optimization activated.\n");

        err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
                               fls(dev->limits.num_mtt_segs - 1));

        if (err)
                goto err_mtt_buddy;

        dev->mr_table.tavor_fmr.mpt_base = NULL;
        dev->mr_table.tavor_fmr.mtt_base = NULL;

        if (dev->limits.fmr_reserved_mtts) {
                i = fls(dev->limits.fmr_reserved_mtts - 1);

                if (i >= 31) {
                        mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
                        err = -EINVAL;
                        goto err_fmr_mpt;
                }
                mpts = mtts = 1 << i;
        } else {
                mtts = dev->limits.num_mtt_segs;
                mpts = dev->limits.num_mpts;
        }

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_FMR)) {

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mpt_base);

                dev->mr_table.tavor_fmr.mpt_base =
                        ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

                if (!dev->mr_table.tavor_fmr.mpt_base) {
                        mthca_warn(dev, "MPT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mpt;
                }

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mtt_base);

                dev->mr_table.tavor_fmr.mtt_base =
                        ioremap(addr, mtts * dev->limits.mtt_seg_size);
                if (!dev->mr_table.tavor_fmr.mtt_base) {
                        mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mtt;
                }
        }

        if (dev->limits.fmr_reserved_mtts) {
                err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_fmr_mtt_buddy;

                /* Prevent regular MRs from using FMR keys */
                err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_reserve_fmr;

                dev->mr_table.fmr_mtt_buddy =
                        &dev->mr_table.tavor_fmr.mtt_buddy;
        } else
                dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

        /* FMR table is always the first, take reserved MTTs out of there */
        if (dev->limits.reserved_mtts) {
                i = fls(dev->limits.reserved_mtts - 1);

                if (mthca_alloc_mtt_range(dev, i,
                                          dev->mr_table.fmr_mtt_buddy) == -1) {
                        mthca_warn(dev, "MTT table of order %d is too small.\n",
                                   dev->mr_table.fmr_mtt_buddy->max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
err_reserve_fmr:
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

        return err;
}
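/*
 * Note on the carve-outs above: fls(n - 1) rounds n up to a power of
 * two, e.g. fmr_reserved_mtts = 100 reserves an order-7 range of 128
 * segments.  The matching mthca_buddy_alloc() from mtt_buddy takes a
 * same-sized range out of the general pool, so regular MRs can never
 * be given MTT segments that alias the FMR region at the start of
 * the table.
 */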
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
        /* XXX check if any MRs are still allocated? */
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}