xref: /linux/drivers/infiniband/hw/mthca/mthca_memfree.c (revision 776cfebb430c7b22c208b1b17add97f354d97cab)
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <linux/mm.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
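/*
 * For example, with 4 KB pages 1 << 18 is 256 KB, i.e. 64 pages, so
 * get_order(MTHCA_ICM_ALLOC_SIZE) starts the allocator below at order 6.
 */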
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

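/*
 * Free an ICM area: unmap each chunk's scatterlist from the device
 * (if it was mapped), free the underlying page allocations, and free
 * the chunk and icm structures.  A NULL icm is silently ignored.
 */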
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
{
	struct mthca_icm_chunk *chunk, *tmp;
	int i;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (chunk->nsg > 0)
			pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
				     PCI_DMA_BIDIRECTIONAL);

		for (i = 0; i < chunk->npages; ++i)
			__free_pages(chunk->mem[i].page,
				     get_order(chunk->mem[i].length));

		kfree(chunk);
	}

	kfree(icm);
}

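/*
 * Allocate npages of memory for ICM.  Pages are grabbed in blocks of
 * up to MTHCA_ICM_ALLOC_SIZE, falling back to smaller orders when a
 * high-order allocation fails, and collected into chunks of
 * MTHCA_ICM_CHUNK_LEN scatterlist entries that are DMA-mapped as they
 * fill up.  Returns NULL (and frees any partial allocation) on failure.
 */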
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  unsigned int gfp_mask)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
		if (chunk->mem[chunk->npages].page) {
			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
			chunk->mem[chunk->npages].offset = 0;

			if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;

				chunk = NULL;
			}

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm);
	return NULL;
}

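/*
 * Take a reference on the ICM chunk that backs object 'obj' in a
 * table, allocating the chunk and mapping it into the HCA's ICM
 * virtual space (via the MAP_ICM firmware command) if it is not
 * already present.
 */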
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	down(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i]);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	up(&table->mutex);
	return ret;
}

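/*
 * Drop a reference on the ICM chunk backing object 'obj'.  When the
 * last reference goes away the chunk is unmapped from the HCA and
 * freed.
 */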
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	u8 status;

	down(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
		mthca_free_icm(dev, table->icm[i]);
		table->icm[i] = NULL;
	}

	up(&table->mutex);
}

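/*
 * Return the kernel virtual address of object 'obj' by walking the
 * scatterlist of the ICM chunk that holds it.  Only works for tables
 * kept in lowmem; returns NULL otherwise, or if the backing chunk is
 * not currently allocated.
 */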
void *mthca_table_find(struct mthca_icm_table *table, int obj)
{
	int idx, offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	down(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (chunk->mem[i].length > offset) {
				page = chunk->mem[i].page;
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	up(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

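/*
 * Take references on every table chunk needed to cover objects
 * 'start' through 'end' inclusive, backing out the references already
 * taken if any chunk cannot be allocated.
 */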
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

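/*
 * Drop the references taken by mthca_table_get_range().
 */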
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

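/*
 * Allocate the bookkeeping for a context table of 'nobj' objects of
 * 'obj_size' bytes mapped at ICM virtual address 'virt'.  The chunks
 * covering the first 'reserved' objects (owned by the firmware) are
 * allocated and mapped up front, and pinned with an extra reference
 * so they are never freed by mthca_table_put().
 */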
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem)
{
	struct mthca_icm_table *table;
	int num_icm;
	int i;
	u8 status;

	num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE;

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	init_MUTEX(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i]);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
			mthca_free_icm(dev, table->icm[i]);
		}

	kfree(table);

	return NULL;
}

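/*
 * Unmap and free any ICM chunks still held by the table (including
 * the pinned reserved chunks) and free the table itself.
 */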
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
			mthca_free_icm(dev, table->icm[i]);
		}

	kfree(table);
}

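/*
 * ICM virtual address of doorbell page 'page' within the UAR context
 * reserved for the driver's kernel UAR.
 */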
static u64 mthca_uarc_virt(struct mthca_dev *dev, int page)
{
	return dev->uar_table.uarc_base +
		dev->driver_uar.index * dev->uar_table.uarc_size +
		page * 4096;
}

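/*
 * Allocate a doorbell record of the given type.  CQ arm and SQ
 * doorbells come from pages at the low end of the UARC doorbell
 * region (tracked by max_group1); CQ set_ci, RQ and SRQ doorbells
 * come from pages at the high end (tracked by min_group2), with slots
 * within those pages handed out from the top down.  A new page is
 * allocated, zeroed and mapped into the UARC when no existing page of
 * the right group has a free slot.  Returns the doorbell index (or a
 * negative errno) and sets *db to point at the record.
 */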
int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	down(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	page = dev->db_tab->page + end;
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, 4096);

	ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, 4096,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (u32 *) &page->db_rec[j];

out:
	up(&dev->db_tab->mutex);

	return ret;
}

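/*
 * Release a doorbell record: the record is zeroed and its slot
 * cleared.  If the page becomes completely empty (and is not a
 * group 1 page below the top of its group), it is unmapped from the
 * UARC, freed, and the group bounds are adjusted.
 */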
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	down(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, 4096,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	up(&dev->db_tab->mutex);
}

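/*
 * Allocate the doorbell page table for a mem-free HCA: one entry per
 * 4 KB page of UAR context, with all pages initially unallocated.
 * Does nothing on devices that are not mem-free.
 */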
int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	init_MUTEX(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / 4096;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

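/*
 * Tear down the doorbell page table, unmapping and freeing any
 * doorbell pages that are still allocated.
 */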
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they
	 * become empty (to keep mthca_free_db() simple), we need to
	 * sweep through the doorbell pages and free any leftover
	 * pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, 4096,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}
535