/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
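 *
 * ICM ("InfiniHost Context Memory") is host memory that mem-free
 * HCAs use for their context tables.  Each chunk allocated here is
 * DMA-mapped and then handed to the firmware with a MAP_ICM command.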
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	}                  page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(chunk->mem[i].page,
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(chunk->mem[i].page),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	mem->page = alloc_pages(gfp_mask, order);
	if (!mem->page)
		return -ENOMEM;

	mem->length = PAGE_SIZE << order;
	mem->offset = 0;
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
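		/* Map the final, partially filled chunk. */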
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned.
			 */
			if (chunk->mem[i].length > offset) {
				page = chunk->mem[i].page;
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
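	/*
	 * If we found the object's page, offset is now the residue
	 * within that page, so the object's kernel virtual address
	 * is lowmem_page_address(page) + offset.
	 */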
	return page ? lowmem_page_address(page) + offset : NULL;
}

int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
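		 * mthca_table_put() therefore never drops the refcount to
		 * zero; the chunk is released only when the whole table is
		 * destroyed by mthca_free_icm_table().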
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	int ret = 0;
	u8 status;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	/* Valid doorbell record indices are 0 .. uarc_size / 8 - 1. */
	if (index < 0 || index >= dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
			     &db_tab->page[i].mem.page, NULL);
	if (ret < 0)
		goto out;

	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret <= 0) {
		/* pci_map_sg() returns 0 on failure, never a negative value */
		put_page(db_tab->page[i].mem.page);
		ret = -ENOMEM;
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(db_tab->page[i].mem.page);
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
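	 * The matching UNMAP_ICM, pci_unmap_sg() and put_page() calls
	 * are done in mthca_cleanup_user_db_tab().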
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(db_tab->page[i].mem.page);
		}
	}

	kfree(db_tab);
}

int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}
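
/*
 * CQ arm and SQ doorbell pages are allocated from the bottom of the
 * UARC area (tracked by max_group1), while CQ set_ci, RQ and SRQ
 * doorbell pages are allocated from the top (tracked by min_group2).
 * Records in a top-group page are handed out back to front, so the
 * used-bitmap index and the doorbell record index within such a page
 * are mirror images of each other.
 */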
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they
	 * become empty (to make mthca_free_db() simpler), we need to
	 * make a sweep through the doorbell pages and free any
	 * leftover pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}