/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

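/*
 * Userspace doorbell pages, tracked per UAR.  Each entry records the
 * user virtual address the page was pinned from, the scatterlist
 * entry used to DMA-map it, and a count of the doorbell records
 * handed out from it.
 */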
struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	}                page[0];
};

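/*
 * Free an ICM area: unmap each chunk's scatterlist from the PCI bus,
 * hand the pages back to the page allocator, then free the chunk
 * descriptors and the ICM structure itself.
 */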
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
{
	struct mthca_icm_chunk *chunk, *tmp;
	int i;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (chunk->nsg > 0)
			pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
				     PCI_DMA_BIDIRECTIONAL);

		for (i = 0; i < chunk->npages; ++i)
			__free_pages(chunk->mem[i].page,
				     get_order(chunk->mem[i].length));

		kfree(chunk);
	}

	kfree(icm);
}

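/*
 * Allocate an ICM area of @npages pages as a chain of chunks, each
 * holding up to MTHCA_ICM_CHUNK_LEN scatterlist entries.  We try the
 * largest page order that fits in MTHCA_ICM_ALLOC_SIZE first and step
 * down the order whenever an allocation fails, so we get physically
 * contiguous runs when memory is plentiful but still make progress
 * when it is fragmented.  A chunk is DMA-mapped as soon as it fills.
 */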
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
		if (chunk->mem[chunk->npages].page) {
			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
			chunk->mem[chunk->npages].offset = 0;

			if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;

				chunk = NULL;
			}

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm);
	return NULL;
}

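/*
 * Pin the ICM chunk backing table object @obj, allocating it and
 * mapping it into the HCA on first use.  Chunks are refcounted, so
 * callers pair this with mthca_table_put(), roughly:
 *
 *	if (mthca_table_get(dev, table, obj))
 *		goto err;
 *	... use entry @obj of the table ...
 *	mthca_table_put(dev, table, obj);
 */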
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i]);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

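/*
 * Drop a reference on the ICM chunk backing table object @obj.  When
 * the last reference goes away the chunk is unmapped from the HCA and
 * its memory freed.
 */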
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i]);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

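/*
 * Return the kernel virtual address of table object @obj by walking
 * the scatterlist of the ICM chunk that holds it.  This only works
 * for tables kept in lowmem, since the pages must have a permanent
 * kernel mapping; for highmem tables we return NULL.
 */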
void *mthca_table_find(struct mthca_icm_table *table, int obj)
{
	int idx, offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			/* the object starts in this entry iff offset < length */
			if (chunk->mem[i].length > offset) {
				page = chunk->mem[i].page;
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

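/*
 * Pin every ICM chunk covering table objects @start through @end
 * inclusive, unwinding all references taken so far if any chunk
 * cannot be allocated.
 */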
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

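/*
 * Create a table of @nobj objects of @obj_size bytes, living at HCA
 * virtual address @virt and carved into MTHCA_TABLE_CHUNK_SIZE
 * chunks.  The chunks covering the first @reserved objects (which
 * belong to the firmware) are allocated and mapped immediately and
 * given an extra reference so that they are never freed.
 */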
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem)
{
	struct mthca_icm_table *table;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i]);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i]);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i]);
		}

	kfree(table);
}

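/*
 * Return the HCA virtual address of doorbell page @page within the
 * UAR context (UARC) region that belongs to @uar.
 */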
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

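/*
 * Map the userspace page holding doorbell record @index into the
 * UARC: pin it with get_user_pages(), DMA-map it and point the HCA at
 * it.  Pages are shared by up to MTHCA_DB_REC_PER_PAGE records and
 * refcounted; we insist on a 4K-aligned address and refuse to remap
 * an already-mapped slot at a different address.
 */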
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	int ret = 0;
	u8 status;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	if (index < 0 || index > dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
			     &db_tab->page[i].mem.page, NULL);
	if (ret < 0)
		goto out;

	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret <= 0) {
		/* pci_map_sg() returns the number of mapped entries; 0 means failure */
		ret = -ENOMEM;
		put_page(db_tab->page[i].mem.page);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(db_tab->page[i].mem.page);
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(db_tab->page[i].mem.page);
		}
	}

	kfree(db_tab);
}

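/*
 * Allocate a doorbell record of the given type and return its index,
 * with *db pointing at the record itself.  Doorbell pages are split
 * into two groups growing toward each other: group 0 (CQ arm and SQ
 * records) grows up from page 0, group 1 (CQ set_ci, RQ and SRQ
 * records) grows down from the last page, and records in a group-1
 * page are handed out from the top of the page down.  We prefer a
 * partly used page, then an unused page already inside the group, and
 * only grow the group as a last resort.
 */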
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

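/*
 * Release doorbell record @db_index: zero the record and clear its
 * bit in the page's used bitmap.  If that leaves the page empty, the
 * page is unmapped and freed unless it sits in the interior of
 * group 0, and the group boundary is pulled in when the freed page
 * was right on it (deeper pages are left mapped; see the XXX below).
 */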
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because mthca_free_db() doesn't always free UARC pages when
	 * they become empty (that keeps its bookkeeping simpler), we
	 * need to sweep through the doorbell pages here and free any
	 * leftover pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}
695