/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 << 9)
#define MLX4_MPT_FLAG_REGION	    (1 << 8)

#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00

static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
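/*
 * Buddy allocator state used for MTT segments: bits[o] is a bitmap in which
 * each set bit marks a free block of 2^o segments, and num_free[o] counts
 * those blocks.  mlx4_buddy_init() allocates one bitmap per order up to
 * max_order and starts with only the single block of maximal order free;
 * mlx4_buddy_alloc() splits larger blocks on demand, and mlx4_buddy_free()
 * coalesces a freed block with its buddy (seg ^ 1) whenever that buddy is
 * also free.
 */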
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
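/*
 * An MPT memory key is the hardware MPT index rotated by one byte
 * (hw_index_to_key() and key_to_hw_index() are inverses of each other).
 * Commands that address the MPT entry mask the recovered index with
 * dev->caps.num_mpts - 1, so a key can be advanced by a multiple of
 * num_mpts (as mlx4_map_phys_fmr() does on every remap) while still
 * resolving to the same MPT entry.
 */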
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
				 u32 *base_mridx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 mridx;

	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
	if (mridx == -1)
		return -ENOMEM;

	*base_mridx = mridx;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);

static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_release_range);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MR_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mr_reserve(dev);
}

void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_release(dev, index);
}

int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mr_alloc_icm(dev, index);
}

void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MR_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mr->enabled = MLX4_MR_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);

void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
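/*
 * Bringing an MR online is a two step process: mlx4_mr_alloc() only
 * reserves an MPT index and an MTT range, and mlx4_mr_enable() then maps
 * the ICM backing the MPT entry, builds a software copy of the entry in a
 * command mailbox and hands ownership to the device with SW2HW_MPT.  An MR
 * with mtt.order >= 0 and page_shift == 0 is initialised in the FREE state
 * with the fast-register flags so it can later be populated by a fast
 * register work request.
 */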
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}
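/*
 * __mlx4_write_mtt() writes page addresses straight into the ICM-backed
 * MTT table.  mlx4_table_find() only resolves one ICM page at a time, so
 * the page list is written in chunks that never cross a page boundary:
 * the first chunk is trimmed to the space left in the page holding
 * (mtt->offset + start_index), and later chunks are at most
 * PAGE_SIZE / sizeof(u64) entries.
 */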
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
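/*
 * mlx4_init_mr_table() sets up the native (master) bookkeeping: an MPT
 * bitmap of num_mpts entries with the first reserved_mrws entries held
 * back, and an MTT buddy allocator whose maximal order covers
 * num_mtts / (1 << log_mtts_per_seg) segments.  Any firmware-reserved MTTs
 * are then carved out of the buddy up front.  Slaves skip all of this,
 * since their MR/MTT requests are forwarded to the master through the
 * wrapped ALLOC_RES/FREE_RES commands above.
 */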
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}
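/*
 * FMR fast path: remapping bypasses the SW2HW_MPT/HW2SW_MPT commands.
 * The MPT status byte is flipped to "software owned", the MTT entries are
 * rewritten in place through the pointer looked up at allocation time, the
 * key/length/start fields are updated, and the status byte is flipped back
 * to "hardware owned".  The wmb() calls order these stores so the device
 * never sees a hardware-owned MPT with half-written contents.
 */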
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
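/*
 * FMR allocation constraints: page_shift must be at least the smallest
 * page size the device supports, and max_pages is limited so that all of
 * the FMR's MTT entries fit in a single page; that is what lets
 * mlx4_map_phys_fmr() rewrite them through one contiguous mapping
 * (fmr->mtts / fmr->dma_handle).
 */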
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 mtt_offset;
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
				   u32 pd, u32 access, int max_pages,
				   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
				     page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free_reserved(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
		       " failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free_reserved(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);