/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

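/*
 * Per-process table of userspace doorbell pages.  Each entry is
 * refcounted: uvirt is the userspace virtual address the page was
 * pinned at, and mem is its single-entry scatterlist for DMA mapping.
 * page[] is a flexible array sized in mthca_init_user_db_tab().
 */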
struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	} page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
	 * cleared, and subtle failures are seen if they aren't.
	 */
	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

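/*
 * Allocate npages worth of ICM.  We try the largest order that still
 * fits the remaining page count and halve it on allocation failure,
 * chaining the results into chunks of up to MTHCA_ICM_CHUNK_LEN
 * scatterlist entries.  For non-coherent ICM, each full chunk is
 * DMA-mapped as soon as it fills up.
 */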
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		/* Don't allocate a higher order than we still need */
		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			/* Allocation failed; retry with a smaller order */
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	/* Map the final, partially filled chunk */
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

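/*
 * Take a reference on the ICM chunk backing table object "obj",
 * allocating and mapping the chunk into the HCA on first use.
 * Callers pair this with mthca_table_put(), roughly (sketch):
 *
 *	if (mthca_table_get(dev, table, obj))
 *		return -ENOMEM;
 *	... program the object ...
 *	mthca_table_put(dev, table, obj);
 */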
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i],
			  table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

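/*
 * Return the kernel virtual address of table object "obj", and
 * optionally its DMA address through *dma_handle, by walking the
 * scatterlists of the chunk that backs it.  Only valid for lowmem
 * tables, since highmem pages have no permanent kernel mapping.
 */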
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

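/*
 * Reference a whole range of table objects at once; on failure, drop
 * the references already taken so the table is left unchanged.
 */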
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	/* Pre-map the chunks that hold the firmware's reserved objects */
	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i],
				  virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev,
					table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

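/*
 * Compute the ICM virtual address of one UAR context (UARC) page:
 * the UARC region starts at uarc_base, each UAR owns uarc_size bytes
 * of it, and "page" indexes MTHCA_ICM_PAGE_SIZE-sized pages within
 * that slice.
 */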
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

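/*
 * Map a userspace doorbell page into the HCA: pin the user page with
 * get_user_pages(), DMA-map it, then point the UARC entry at it with
 * MAP_ICM_page.  The page is refcounted so that multiple doorbell
 * records can share one mapped page.
 */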
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	struct page *pages[1];
	int ret = 0;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	if (index < 0 || index > dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
	if (ret < 0)
		goto out;

	sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
		    uaddr & ~PAGE_MASK);

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(pages[0]);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i));
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(sg_page(&db_tab->page[i].mem));
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
		sg_init_table(&db_tab->page[i].mem, 1);
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(sg_page(&db_tab->page[i].mem));
		}
	}

	kfree(db_tab);
}

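/*
 * Allocate a kernel doorbell record.  The UARC doorbell pages are
 * split between two groups: CQ arm and SQ records are handed out
 * from page 0 upwards (bounded by max_group1), while CQ set_ci, RQ
 * and SRQ records come from the last page downwards (bounded by
 * min_group2).  Returns the doorbell index
 * (page * MTHCA_DB_REC_PER_PAGE + slot) or a negative errno, and
 * stores a pointer to the record in *db.
 */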
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	/* First try a page already in use by this group... */
	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	/* ...then an unused page within the group's current range... */
	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	/* ...and finally try to grow the group's range */
	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i));
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	/* The second group fills its pages from the end backwards */
	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

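/*
 * Free a kernel doorbell record.  Empty pages are unmapped and freed
 * (except interior pages of the first group, which stay mapped to
 * keep the bookkeeping simple), and the group bounds are pulled in
 * when the freed page sits exactly at a boundary.
 */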
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they
	 * become empty (to make mthca_free_db() simpler), we need to
	 * make a sweep through the doorbell pages and free any
	 * leftover pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}