/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

struct mthca_mtt {
	struct mthca_buddy *buddy;
	int                 order;
	u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
	__be32 flags;
	__be32 page_size;
	__be32 key;
	__be32 pd;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 window_count;
	__be32 window_count_limit;
	__be64 mtt_seg;
	__be32 mtt_sz;		/* Arbel only */
	u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

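/*
 * Return the index of the first MTT segment of a free block of
 * 2^order segments, or -1 on failure.  bits[o] tracks free blocks of
 * order o, so e.g. with max_order = 4 and a full free tree, an
 * order-1 request finds the lone free order-4 block, takes it, and
 * marks the split-off "buddy" halves free at orders 3, 2 and 1 on the
 * way down; the block index is then shifted left by order to yield a
 * segment index.
 */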
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		m = 1 << (buddy->max_order - o);
		seg = find_first_bit(buddy->bits[o], m);
		if (seg < m)
			goto found;
	}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

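/*
 * Free a block of 2^order segments starting at seg, merging with its
 * buddy at each order as long as the buddy is also free: e.g. freeing
 * order-0 block 3 while block 2 is free clears bit 2 at order 0 and
 * instead marks block 1 free at order 1.
 */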
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);

	spin_unlock(&buddy->lock);
}

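/*
 * Set up a buddy allocator covering 2^max_order segments: one bitmap
 * per order (the order-i bitmap needs 2^(max_order - i) bits), with
 * only the single top-order block initially marked free.
 */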
static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	if (!buddy->bits)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i],
			    1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);

err_out:
	return -ENOMEM;
}

static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
}

static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
				 struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}

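/*
 * Allocate an MTT with room for "size" 8-byte entries.  The order is
 * however many doublings of one segment's worth of entries
 * (MTHCA_MTT_SEG_SIZE / 8) it takes to cover "size"; e.g. assuming
 * 64-byte segments (8 entries each), size = 20 rounds up to order 2,
 * i.e. 4 segments holding 32 entries.
 */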
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
					   struct mthca_buddy *buddy)
{
	struct mthca_mtt *mtt;
	int i;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->buddy = buddy;
	mtt->order = 0;
	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
		++mtt->order;

	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
	if (mtt->first_seg == -1) {
		kfree(mtt);
		return ERR_PTR(-ENOMEM);
	}

	return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
	return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
	if (!mtt)
		return;

	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

	mthca_table_put_range(dev, dev->mr_table.mtt_table,
			      mtt->first_seg,
			      mtt->first_seg + (1 << mtt->order) - 1);

	kfree(mtt);
}

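/*
 * Write list_len MTT entries for "mtt", starting at MTT index
 * start_index, from the addresses in buffer_list, pushing them to the
 * device in mailbox-sized chunks via the WRITE_MTT firmware command.
 * A typical caller looks like this sketch (assuming dma_list[] holds
 * npages page-aligned DMA addresses):
 *
 *	mtt = mthca_alloc_mtt(dev, npages);
 *	if (IS_ERR(mtt))
 *		return PTR_ERR(mtt);
 *	err = mthca_write_mtt(dev, mtt, 0, dma_list, npages);
 */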
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
		    int start_index, u64 *buffer_list, int list_len)
{
	struct mthca_mailbox *mailbox;
	__be64 *mtt_entry;
	int err = 0;
	u8 status;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mtt_entry = mailbox->buf;

	while (list_len > 0) {
		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
					   start_index * 8);
		mtt_entry[1] = 0;
		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
						       MTHCA_MTT_FLAG_PRESENT);

		/*
		 * If we have an odd number of entries to write, add
		 * one more dummy entry for firmware efficiency.
		 */
		if (i & 1)
			mtt_entry[i + 2] = 0;

		err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
		if (err) {
			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
			goto out;
		}
		if (status) {
			mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
				   status);
			err = -EINVAL;
			goto out;
		}

		list_len    -= i;
		start_index += i;
		buffer_list += i;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

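/*
 * Tavor uses the MPT index directly as the memory key.  Arbel rotates
 * the index left by 8 bits so the low byte of the key can act as a
 * "key cycle" counter: hw index 0x5 becomes key 0x500, and advancing
 * the index by num_mpts on each FMR remap changes the visible key
 * while still selecting the same MPT entry.
 */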
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
	if (mthca_is_memfree(dev))
		return arbel_hw_index_to_key(ind);
	else
		return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
	if (mthca_is_memfree(dev))
		return arbel_key_to_hw_index(key);
	else
		return tavor_key_to_hw_index(key);
}

static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		return ((key << 20) & 0x800000) | (key & 0x7fffff);
	else
		return key;
}

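/*
 * Allocate and program an MPT entry.  mr->mtt must already be set up
 * (or be NULL for a physical, untranslated region); buffer_size_shift
 * is log2 of the page size the MTT entries describe.  The entry is
 * handed to the hardware with SW2HW_MPT.
 */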
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
	struct mthca_mailbox *mailbox;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int i;
	int err;
	u8 status;

	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_table;
	}
	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);
	if (!mr->mtt)
		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = cpu_to_be64(iova);
	mpt_entry->length    = cpu_to_be64(total_size);

	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

	if (mr->mtt)
		mpt_entry->mtt_seg =
			cpu_to_be64(dev->mr_table.mtt_base +
				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox;
	} else if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox;
	}

	mthca_free_mailbox(dev, mailbox);
	return err;

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	mr->mtt = NULL;
	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

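/*
 * Register a region described by an explicit page list: allocate an
 * MTT, fill it via mthca_write_mtt(), then program the MPT.  A minimal
 * sketch of a call, assuming pages[] holds list_len DMA addresses
 * aligned to 1 << shift and pd_num is a valid PD number:
 *
 *	err = mthca_mr_alloc_phys(dev, pd_num, pages, shift, list_len,
 *				  iova, total_size, access, &mr);
 *
 * where access is a mask of MTHCA_MPT_FLAG_* access bits.
 */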
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err) {
		mthca_free_mtt(dev, mr->mtt);
		return err;
	}

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
	mthca_table_put(dev, dev->mr_table.mpt_table,
			key_to_hw_index(dev, lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;
	u8 status;

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1),
			      &status);
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
			   status);

	mthca_free_region(dev, mr->ibmr.lkey);
	mthca_free_mtt(dev, mr->mtt);
}

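/*
 * Set up a fast memory region.  Besides the usual MPT/MTT allocation,
 * keep pointers through which the MPT and MTT entries can be
 * rewritten directly on each map/unmap: the ioremapped DDR tables on
 * Tavor, or mthca_table_find() pointers into ICM on mem-free HCAs.
 */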
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
		    u32 access, struct mthca_fmr *mr)
{
	struct mthca_mpt_entry *mpt_entry;
	struct mthca_mailbox *mailbox;
	u64 mtt_seg;
	u32 key, idx;
	u8 status;
	int list_len = mr->attr.max_pages;
	int err = -ENOMEM;
	int i;

	if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
		return -EINVAL;

	/* For Arbel, all MTTs must fit in the same page. */
	if (mthca_is_memfree(dev) &&
	    mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
		return -EINVAL;

	mr->maps = 0;

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);

	idx = key & (dev->limits.num_mpts - 1);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;

		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
		BUG_ON(!mr->mem.arbel.mpt);
	} else
		mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
			sizeof *(mr->mem.tavor.mpt) * idx;

	mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_out_table;
	}

	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;

	if (mthca_is_memfree(dev)) {
		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
						      mr->mtt->first_seg);
		BUG_ON(!mr->mem.arbel.mtts);
	} else
		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_free_mtt;
	}

	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);

	mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	memset(&mpt_entry->start, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
	mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox_free;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	mthca_free_mailbox(dev, mailbox);
	return 0;

err_out_mailbox_free:
	mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	/* Free the MPT index itself, not the lkey derived from it. */
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mthca_free_region(dev, fmr->ibmr.lkey);
	mthca_free_mtt(dev, fmr->mtt);

	return 0;
}

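/*
 * Validate a map request: the page list must fit in the FMR, the iova
 * must be aligned to the FMR's page size, and the region must not
 * have been remapped more than max_maps times since its last unmap.
 */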
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
				  int list_len, u64 iova)
{
	int i, page_mask;

	if (list_len > fmr->attr.max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->attr.page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < list_len; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->attr.max_maps)
		return -EINVAL;

	return 0;
}

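/*
 * Remap a Tavor FMR without a firmware command: flip the MPT to
 * software ownership, rewrite the MTT entries and the MPT key, start
 * and length through the ioremapped tables, then flip ownership back
 * to hardware.  The key advances by num_mpts on every map so stale
 * keys from before the remap no longer match.
 */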
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	struct mthca_mpt_entry mpt_entry;
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

	for (i = 0; i < list_len; ++i) {
		__be64 mtt_entry = cpu_to_be64(page_list[i] |
					       MTHCA_MTT_FLAG_PRESENT);
		mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
	}

	mpt_entry.lkey   = cpu_to_be32(key);
	mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	mpt_entry.start  = cpu_to_be64(iova);

	__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
	memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
		    offsetof(struct mthca_mpt_entry, window_count) -
		    offsetof(struct mthca_mpt_entry, start));

	writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

	return 0;
}

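/*
 * Arbel/Sinai counterpart of the above: here the MPT and MTT entries
 * live in ICM, so they are updated through ordinary memory writes,
 * with wmb() ordering the ownership flips around the updates.
 */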
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		key += SINAI_FMR_KEY_INC;
	else
		key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

	wmb();

	for (i = 0; i < list_len; ++i)
		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
						     MTHCA_MTT_FLAG_PRESENT);

	fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
	fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
	fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

	wmb();

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

	wmb();

	return 0;
}

void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	fmr->maps = 0;

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	fmr->maps = 0;

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}

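/*
 * Set up the MR tables: the MPT index allocator, the main MTT buddy
 * allocator, and, when FMRs are usable, ioremapped views of the MPT
 * and MTT tables plus a dedicated FMR MTT buddy.  The FMR MTTs are
 * carved out of the very start of MTT space, so the reserving
 * mthca_buddy_alloc() call below is expected to return segment 0;
 * any other value (including the -1 failure return) is an error.
 */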
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
	int err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mthca_dbg(dev, "Memory key throughput optimization activated.\n");

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));

	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(dev->mr_table.mpt_base,
				(1 << i) * sizeof (struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(dev->mr_table.mtt_base,
				(1 << i) * MTHCA_MTT_SEG_SIZE);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}

		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i);
		if (err)
			goto err_fmr_mtt_buddy;

		/* Prevent regular MRs from using FMR keys */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i);
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* FMR table is always the first, take reserved MTTs out of there */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt_range(dev, i,
					  dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				  dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}

void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}
884