/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */

#include <ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"

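/*
 * Query device attributes.  The firmware version and resource limits
 * come from driver state probed at init time; the vendor ID, part ID,
 * hardware revision and GUIDs are read from the NodeInfo attribute,
 * fetched from the firmware with a MAD_IFC command.
 */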
static int mthca_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver              = mdev->fw_ver;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version       = 1;
	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version      = 1;
	in_mad->method             = IB_MGMT_METHOD_GET;
	in_mad->attr_id            = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((u32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((u16 *) (out_mad->data + 30));
	props->hw_ver              = be16_to_cpup((u16 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
	memcpy(&props->node_guid,      out_mad->data + 12, 8);

	props->max_mr_size         = ~0ull;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = 0xffff;
	props->max_sge             = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = 0xffff;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

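/*
 * Query port attributes.  Per-port state is read from the PortInfo
 * attribute via MAD_IFC; the byte offsets into the 64-byte SMP data
 * area follow the IBA PortInfo layout.  Table lengths come from the
 * limits probed at init time.
 */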
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version       = 1;
	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version      = 1;
	in_mad->method             = IB_MGMT_METHOD_GET;
	in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod           = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->lid               = be16_to_cpup((u16 *) (out_mad->data + 16));
	props->lmc               = out_mad->data[34] & 0x7;
	props->sm_lid            = be16_to_cpup((u16 *) (out_mad->data + 18));
	props->sm_sl             = out_mad->data[36] & 0xf;
	props->state             = out_mad->data[32] & 0xf;
	props->phys_state        = out_mad->data[33] >> 4;
	props->port_cap_flags    = be32_to_cpup((u32 *) (out_mad->data + 20));
	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
	props->qkey_viol_cntr    = be16_to_cpup((u16 *) (out_mad->data + 48));
	props->active_width      = out_mad->data[31] & 0xf;
	props->active_speed      = out_mad->data[35] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

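/*
 * Modify port attributes.  Only the port capability mask is changed
 * here, with a read-modify-write through the SET_IB firmware command;
 * cap_mask_mutex serializes concurrent updates.
 */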
static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;
	u8 status;

	if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

out:
	up(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

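/*
 * Look up one P_Key.  Each PKeyTable MAD block carries 32 16-bit
 * entries, so attr_mod selects block index / 32 and the entry is
 * picked out of the returned block with index % 32.
 */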
static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version       = 1;
	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version      = 1;
	in_mad->method             = IB_MGMT_METHOD_GET;
	in_mad->attr_id            = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod           = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	*pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

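/*
 * Build a GID from two queries: the upper 8 bytes are the subnet
 * prefix from PortInfo, the lower 8 bytes are the port GUID from the
 * GUIDInfo attribute.  Each GUIDInfo block holds 8 GUIDs of 8 bytes
 * each, so attr_mod selects block index / 8 and the copy below picks
 * GUID index % 8 within the block.
 */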
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version       = 1;
	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version      = 1;
	in_mad->method             = IB_MGMT_METHOD_GET;
	in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod           = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw, out_mad->data + 8, 8);

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version       = 1;
	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version      = 1;
	in_mad->method             = IB_MGMT_METHOD_GET;
	in_mad->attr_id            = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod           = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	/* GUIDs are 8 bytes each, not 16: index by (index % 8) * 8. */
	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

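/* Protection domains: thin wrappers around mthca_pd_alloc()/mthca_pd_free(). */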
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}

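/*
 * Address handles are allocated with GFP_ATOMIC, presumably so that
 * callers running in contexts that cannot sleep can still create an AH.
 */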
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}

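/*
 * Create a QP.  RC/UC/UD QPs use a plain struct mthca_qp and get a
 * firmware-assigned QP number.  The special QPs (SMI/GSI) allocate
 * the larger struct mthca_sqp instead; its first member is a struct
 * mthca_qp, which is why the result can be handled through the same
 * qp pointer.  Their QP numbers are fixed: 0 for SMI, 1 for GSI.
 */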
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr)
{
	struct mthca_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     qp);
		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_inline_data = 0;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

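/*
 * Create a CQ.  The entry count is rounded up to the smallest power
 * of two strictly greater than the request before it is handed to
 * mthca_init_cq().  On failure the ERR_PTR is returned through
 * &cq->ibcq, which only works because ibcq is the first member of
 * struct mthca_cq (offset zero).
 */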
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries)
{
	struct mthca_cq *cq;
	int nent;
	int err;

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, cq);
	if (err) {
		kfree(cq);
		cq = ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

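/* Translate IB access flags into MPT flags; local read is always allowed. */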
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

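/*
 * A DMA MR has no translation table: mthca_mr_alloc_notrans() creates
 * an MPT entry in no-translation mode, usable with DMA addresses
 * directly.
 */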
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}

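/*
 * Register a physical buffer list.  After checking that the buffers
 * are sufficiently aligned, find the largest page size that covers
 * every buffer, flatten the list into an array of page addresses, and
 * hand it to mthca_mr_alloc_phys().
 */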
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
				       struct ib_phys_buf *buffer_list,
				       int                 num_phys_buf,
				       int                 acc,
				       u64                *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size;
	u64 mask;
	int shift;
	int npages;
	int err;
	int i, j, n;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	mask = 0;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return ERR_PTR(-EINVAL);
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return ERR_PTR(-EINVAL);

		total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
	}

	/* Find largest page shift we can use to cover buffers */
	for (shift = PAGE_SHIFT; shift < 31; ++shift)
		if (num_phys_buf > 1) {
			if ((1ULL << shift) & mask)
				break;
		} else {
			if (1ULL << shift >=
			    buffer_list[0].size +
			    (buffer_list[0].addr & ((1ULL << shift) - 1)))
				break;
		}

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	if (!npages)
		return &mr->ibmr;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);

	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	return &mr->ibmr;
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);
	mthca_free_mr(to_mdev(mr->device), mmr);
	kfree(mmr);
	return 0;
}

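/*
 * Fast memory regions (FMRs): allocated once, then remapped cheaply on
 * the fast path.  The requested attributes are cached in the mthca_fmr
 * so the map path can validate against them later.
 */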
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			     convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

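/*
 * Unmap a list of FMRs, which must all belong to the same device.
 * Each FMR's MPT entry is invalidated; on memfree (Arbel) hardware the
 * entries live in host memory, so a wmb() orders those writes before
 * the SYNC_TPT command that tells the firmware to flush its TPT
 * caches.
 */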
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	u8 status;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev, &status);
	if (err)
		return err;
	if (status)
		return -EINVAL;
	return 0;
}

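/* sysfs attributes exposing hardware revision, firmware version and HCA type. */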
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type
};

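/*
 * Fill in the ib_device method table and register with the IB core.
 * FMR methods are only wired up when the device supports FMRs, and the
 * datapath entry points (CQ arming, post_send/post_recv) are chosen
 * per hardware generation: Arbel (memfree) vs. Tavor.
 */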
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_type            = IB_NODE_CA;
	dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
	dev->ib_dev.dma_device           = &dev->pdev->dev;
	dev->ib_dev.class_dev.dev        = &dev->pdev->dev;
	dev->ib_dev.query_device         = mthca_query_device;
	dev->ib_dev.query_port           = mthca_query_port;
	dev->ib_dev.modify_port          = mthca_modify_port;
	dev->ib_dev.query_pkey           = mthca_query_pkey;
	dev->ib_dev.query_gid            = mthca_query_gid;
	dev->ib_dev.alloc_pd             = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
	dev->ib_dev.create_ah            = mthca_ah_create;
	dev->ib_dev.destroy_ah           = mthca_ah_destroy;
	dev->ib_dev.create_qp            = mthca_create_qp;
	dev->ib_dev.modify_qp            = mthca_modify_qp;
	dev->ib_dev.destroy_qp           = mthca_destroy_qp;
	dev->ib_dev.create_cq            = mthca_create_cq;
	dev->ib_dev.destroy_cq           = mthca_destroy_cq;
	dev->ib_dev.poll_cq              = mthca_poll_cq;
	dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
	dev->ib_dev.dereg_mr             = mthca_dereg_mr;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast         = mthca_multicast_attach;
	dev->ib_dev.detach_mcast         = mthca_multicast_detach;
	dev->ib_dev.process_mad          = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send     = mthca_arbel_post_send;
		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send     = mthca_tavor_post_send;
		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
	}

	init_MUTEX(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ib_dev.class_dev,
					       mthca_class_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}