/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}

static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align, u32 skip_mask)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	while ((start < nbits) && (test_bit(start, bitmap) ||
				   (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}
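/*
 * mlx4_bitmap_alloc_range() extends the single-object allocator above to
 * contiguous, aligned runs of objects.  A hypothetical caller (variable
 * names invented for illustration, error handling omitted) might look like:
 *
 *	u32 base = mlx4_bitmap_alloc_range(&bm, 8, 8, 0);
 *
 *	if (base != (u32)-1) {
 *		... use objects base .. base + 7 ...
 *		mlx4_bitmap_free_range(&bm, base, 8, 0);
 *	}
 *
 * As with mlx4_bitmap_alloc(), a return value of (u32)-1 means no suitable
 * range was found; "skip_mask" rejects any index whose masked bits are set.
 */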
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1 && !skip_mask))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align, skip_mask);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
					 cnt, align, skip_mask);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
{
	return obj & (bitmap->max + bitmap->reserved_top - 1);
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	bitmap->effective_len = bitmap->avail;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

struct mlx4_zone_allocator {
	struct list_head		entries;
	struct list_head		prios;
	u32				last_uid;
	u32				mask;
	/* protect the zone_allocator from concurrent accesses */
	spinlock_t			lock;
	enum mlx4_zone_alloc_flags	flags;
};

struct mlx4_zone_entry {
	struct list_head		list;
	struct list_head		prio_list;
	u32				uid;
	struct mlx4_zone_allocator	*allocator;
	struct mlx4_bitmap		*bitmap;
	int				use_rr;
	int				priority;
	int				offset;
	enum mlx4_zone_flags		flags;
};

struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
{
	struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);

	if (NULL == zones)
		return NULL;

	INIT_LIST_HEAD(&zones->entries);
	INIT_LIST_HEAD(&zones->prios);
	spin_lock_init(&zones->lock);
	zones->last_uid = 0;
	zones->mask = 0;
	zones->flags = flags;

	return zones;
}
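/*
 * A zone allocator groups several mlx4_bitmap instances ("zones") under one
 * lock, each with a priority and an offset, and lets callers allocate
 * through a zone uid with optional spill-over into other zones (see the
 * MLX4_ZONE_* flags).  A hypothetical setup (variable names and sizes
 * invented for illustration, error handling omitted) might be:
 *
 *	struct mlx4_zone_allocator *za;
 *	struct mlx4_bitmap bm;
 *	u32 uid, obj;
 *
 *	za = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
 *	mlx4_bitmap_init(&bm, 256, 255, 0, 0);
 *	mlx4_zone_add_one(za, &bm, MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO,
 *			  0, 0, &uid);
 *	obj = mlx4_zone_alloc_entries(za, uid, 1, 1, 0, NULL);
 */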
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid)
{
	u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
	struct mlx4_zone_entry *it;
	struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);

	if (NULL == zone)
		return -ENOMEM;

	zone->flags = flags;
	zone->bitmap = bitmap;
	zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
	zone->priority = priority;
	zone->offset = offset;

	spin_lock(&zone_alloc->lock);

	zone->uid = zone_alloc->last_uid++;
	zone->allocator = zone_alloc;

	if (zone_alloc->mask < mask)
		zone_alloc->mask = mask;

	list_for_each_entry(it, &zone_alloc->prios, prio_list)
		if (it->priority >= priority)
			break;

	if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
		list_add_tail(&zone->prio_list, &it->prio_list);
	list_add_tail(&zone->list, &it->list);

	spin_unlock(&zone_alloc->lock);

	*puid = zone->uid;

	return 0;
}

/* Should be called under a lock */
static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
	struct mlx4_zone_allocator *zone_alloc = entry->allocator;

	if (!list_empty(&entry->prio_list)) {
		/* Check if we need to add an alternative node to the prio list */
		if (!list_is_last(&entry->list, &zone_alloc->entries)) {
			struct mlx4_zone_entry *next = list_first_entry(&entry->list,
									typeof(*next),
									list);

			if (next->priority == entry->priority)
				list_add_tail(&next->prio_list, &entry->prio_list);
		}

		list_del(&entry->prio_list);
	}

	list_del(&entry->list);

	if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
		u32 mask = 0;
		struct mlx4_zone_entry *it;

		list_for_each_entry(it, &zone_alloc->prios, prio_list) {
			u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);

			if (mask < cur_mask)
				mask = cur_mask;
		}
		zone_alloc->mask = mask;
	}

	return 0;
}

void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
{
	struct mlx4_zone_entry *zone, *tmp;

	spin_lock(&zone_alloc->lock);

	list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
		list_del(&zone->list);
		list_del(&zone->prio_list);
		kfree(zone);
	}

	spin_unlock(&zone_alloc->lock);
	kfree(zone_alloc);
}
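/*
 * __mlx4_alloc_from_zone() first tries the requested zone itself and then,
 * depending on the zone's flags, walks the entry list relative to the
 * zone's priority position: MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO scans
 * backwards through earlier entries, MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO
 * tries the other zones of the same priority, and
 * MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO continues forward from the next
 * priority group.  The returned object already includes the owning zone's
 * offset, and *puid (when non-NULL) reports which zone satisfied the
 * request.
 */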
/* Should be called under a lock */
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
				  int align, u32 skip_mask, u32 *puid)
{
	u32 uid;
	u32 res;
	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
	struct mlx4_zone_entry *curr_node;

	res = mlx4_bitmap_alloc_range(zone->bitmap, count,
				      align, skip_mask);

	if (res != (u32)-1) {
		res += zone->offset;
		uid = zone->uid;
		goto out;
	}

	list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
		if (unlikely(curr_node->priority == zone->priority))
			break;
	}

	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_from(it, &zone_alloc->entries, list) {
			if (unlikely(it == zone))
				continue;

			if (unlikely(it->priority != curr_node->priority))
				break;

			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
		if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
			goto out;

		curr_node = list_first_entry(&curr_node->prio_list,
					     typeof(*curr_node),
					     prio_list);

		list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += curr_node->offset;
				uid = curr_node->uid;
				goto out;
			}
		}
	}

out:
	if (NULL != puid && res != (u32)-1)
		*puid = uid;
	return res;
}

/* Should be called under a lock */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
				  u32 count)
{
	mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
}

/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
		struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;

	list_for_each_entry(zone, &zones->entries, list) {
		if (zone->uid == uid)
			return zone;
	}

	return NULL;
}

struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;
	struct mlx4_bitmap *bitmap;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	bitmap = zone == NULL ? NULL : zone->bitmap;

	spin_unlock(&zones->lock);

	return bitmap;
}

int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;
	int res;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	res = __mlx4_zone_remove_one_entry(zone);

out:
	spin_unlock(&zones->lock);
	kfree(zone);

	return res;
}
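/*
 * The "unique" lookup below backs mlx4_zone_free_entries_unique(), which
 * frees by object value rather than by zone uid and is therefore only valid
 * when the allocator was created with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP.
 */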
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
		struct mlx4_zone_allocator *zones, u32 obj)
{
	struct mlx4_zone_entry *zone, *zone_candidate = NULL;
	u32 dist = (u32)-1;

	/* Search for the smallest zone that this obj could be
	 * allocated from. This is done in order to handle
	 * situations when small bitmaps are allocated from bigger
	 * bitmaps (and the allocated space is marked as reserved in
	 * the bigger bitmap).
	 */
	list_for_each_entry(zone, &zones->entries, list) {
		if (obj >= zone->offset) {
			u32 mobj = (obj - zone->offset) & zones->mask;

			if (mobj < zone->bitmap->max) {
				u32 curr_dist = zone->bitmap->effective_len;

				if (curr_dist < dist) {
					dist = curr_dist;
					zone_candidate = zone;
				}
			}
		}
	}

	return zone_candidate;
}

u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid)
{
	struct mlx4_zone_entry *zone;
	int res = -1;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone)
		goto out;

	res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);

out:
	spin_unlock(&zones->lock);

	return res;
}

u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res = 0;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	__mlx4_free_from_zone(zone, obj, count);

out:
	spin_unlock(&zones->lock);

	return res;
}

u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res;

	if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
		return -EFAULT;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid_unique(zones, obj);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	__mlx4_free_from_zone(zone, obj, count);
	res = 0;

out:
	spin_unlock(&zones->lock);

	return res;
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
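/*
 * A hypothetical caller of mlx4_buf_alloc() (sizes invented for
 * illustration, error handling trimmed) might look like:
 *
 *	struct mlx4_buf buf;
 *
 *	if (mlx4_buf_alloc(dev, 8192, PAGE_SIZE, &buf, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	mlx4_buf_free(dev, 8192, &buf);
 *
 * When size <= max_direct the buffer is a single coherent chunk
 * (buf.direct.buf); otherwise it is split into PAGE_SIZE chunks held in
 * buf.page_list[], and buf.direct.buf is only set on 64-bit kernels, via
 * vmap().
 */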
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf, gfp_t gfp)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs = 1;
		buf->npages = 1;
		buf->page_shift = get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
						     size, &t, gfp);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf = NULL;
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 gfp);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->persist->pdev->dev,
						   PAGE_SIZE,
						   &t, gfp);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->persist->pdev->dev, size,
				  buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->persist->pdev->dev,
						  PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
						 gfp_t gfp)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, gfp);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, gfp);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index = i;
	db->db = pgdir->db_page + db->index;
	db->dma = pgdir->db_dma + db->index * 4;
	db->order = order;

	return 0;
}
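/*
 * Doorbell records are carved out of per-device pages (struct
 * mlx4_db_pgdir) with a small two-level buddy scheme: the order-1 bitmap
 * tracks free pairs of records and the order-0 bitmap tracks free single
 * records left over from split pairs.  mlx4_db_alloc() reuses an existing
 * pgdir when possible and only allocates a new coherent page once every
 * pgdir on the list is full.
 */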
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
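/*
 * mlx4_alloc_hwq_res()/mlx4_free_hwq_res() bundle the pieces above for a
 * hardware work queue: a doorbell record, the queue buffer itself and the
 * MTT entries that map it.  A hypothetical caller (names invented for
 * illustration, error handling omitted) might look like:
 *
 *	struct mlx4_hwq_resources wqres;
 *
 *	if (mlx4_alloc_hwq_res(dev, &wqres, buf_size, 2 * PAGE_SIZE))
 *		goto err;
 *	...
 *	mlx4_free_hwq_res(dev, &wqres, buf_size);
 */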