1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <net/addrconf.h>
9 #include <rdma/uverbs_ioctl.h>
10
11 #include "rxe.h"
12 #include "rxe_queue.h"
13 #include "rxe_hw_counters.h"
14
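/* post_one_recv() is shared by rxe_post_recv() and rxe_post_srq_recv(),
 * so it is forward declared here and defined with the recv verbs below.
 */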
15 static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
16
17 /* dev */
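/* no driver-private data is defined for query_device, so any inbound or
 * outbound udata from the user provider is rejected as malformed.
 */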
18 static int rxe_query_device(struct ib_device *ibdev,
19 struct ib_device_attr *attr,
20 struct ib_udata *udata)
21 {
22 struct rxe_dev *rxe = to_rdev(ibdev);
23 int err;
24
25 if (udata->inlen || udata->outlen) {
26 rxe_dbg_dev(rxe, "malformed udata\n");
27 err = -EINVAL;
28 goto err_out;
29 }
30
31 memcpy(attr, &rxe->attr, sizeof(*attr));
32
33 return 0;
34
35 err_out:
36 rxe_err_dev(rxe, "returned err = %d\n", err);
37 return err;
38 }
39
40 static int rxe_query_port(struct ib_device *ibdev,
41 u32 port_num, struct ib_port_attr *attr)
42 {
43 struct rxe_dev *rxe = to_rdev(ibdev);
44 struct net_device *ndev;
45 int err, ret;
46
47 if (port_num != 1) {
48 err = -EINVAL;
49 rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
50 goto err_out;
51 }
52
53 ndev = rxe_ib_device_get_netdev(ibdev);
54 if (!ndev) {
55 err = -ENODEV;
56 goto err_out;
57 }
58
59 memcpy(attr, &rxe->port.attr, sizeof(*attr));
60
61 mutex_lock(&rxe->usdev_lock);
62 ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
63 &attr->active_width);
64
65 attr->state = ib_get_curr_port_state(ndev);
66 if (attr->state == IB_PORT_ACTIVE)
67 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
68 else if (dev_get_flags(ndev) & IFF_UP)
69 attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
70 else
71 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
72
73 mutex_unlock(&rxe->usdev_lock);
74
75 dev_put(ndev);
76 return ret;
77
78 err_out:
79 rxe_err_dev(rxe, "returned err = %d\n", err);
80 return err;
81 }
82
83 static int rxe_query_pkey(struct ib_device *ibdev,
84 u32 port_num, u16 index, u16 *pkey)
85 {
86 struct rxe_dev *rxe = to_rdev(ibdev);
87 int err;
88
89 if (index != 0) {
90 err = -EINVAL;
91 rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
92 goto err_out;
93 }
94
95 *pkey = IB_DEFAULT_PKEY_FULL;
96 return 0;
97
98 err_out:
99 rxe_err_dev(rxe, "returned err = %d\n", err);
100 return err;
101 }
102
103 static int rxe_modify_device(struct ib_device *ibdev,
104 int mask, struct ib_device_modify *attr)
105 {
106 struct rxe_dev *rxe = to_rdev(ibdev);
107 int err;
108
109 if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
110 IB_DEVICE_MODIFY_NODE_DESC)) {
111 err = -EOPNOTSUPP;
112 rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
113 goto err_out;
114 }
115
116 if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
117 rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
118
119 if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
120 memcpy(rxe->ib_dev.node_desc,
121 attr->node_desc, sizeof(rxe->ib_dev.node_desc));
122 }
123
124 return 0;
125
126 err_out:
127 rxe_err_dev(rxe, "returned err = %d\n", err);
128 return err;
129 }
130
131 static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
132 int mask, struct ib_port_modify *attr)
133 {
134 struct rxe_dev *rxe = to_rdev(ibdev);
135 struct rxe_port *port;
136 int err;
137
138 if (port_num != 1) {
139 err = -EINVAL;
140 rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
141 goto err_out;
142 }
143
144 /* TODO: is shutdown useful? */
145 if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
146 err = -EOPNOTSUPP;
147 rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
148 goto err_out;
149 }
150
151 port = &rxe->port;
152 port->attr.port_cap_flags |= attr->set_port_cap_mask;
153 port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
154
155 if (mask & IB_PORT_RESET_QKEY_CNTR)
156 port->attr.qkey_viol_cntr = 0;
157
158 return 0;
159
160 err_out:
161 rxe_err_dev(rxe, "returned err = %d\n", err);
162 return err;
163 }
164
165 static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
166 u32 port_num)
167 {
168 struct rxe_dev *rxe = to_rdev(ibdev);
169 int err;
170
171 if (port_num != 1) {
172 err = -EINVAL;
173 rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
174 goto err_out;
175 }
176
177 return IB_LINK_LAYER_ETHERNET;
178
179 err_out:
180 rxe_err_dev(rxe, "returned err = %d\n", err);
181 return err;
182 }
183
184 static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
185 struct ib_port_immutable *immutable)
186 {
187 struct rxe_dev *rxe = to_rdev(ibdev);
188 struct ib_port_attr attr = {};
189 int err;
190
191 if (port_num != 1) {
192 err = -EINVAL;
193 rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
194 goto err_out;
195 }
196
197 err = ib_query_port(ibdev, port_num, &attr);
198 if (err)
199 goto err_out;
200
201 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
202 immutable->pkey_tbl_len = attr.pkey_tbl_len;
203 immutable->gid_tbl_len = attr.gid_tbl_len;
204 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
205
206 return 0;
207
208 err_out:
209 rxe_err_dev(rxe, "returned err = %d\n", err);
210 return err;
211 }
212
213 /* uc */
214 static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
215 {
216 struct rxe_dev *rxe = to_rdev(ibuc->device);
217 struct rxe_ucontext *uc = to_ruc(ibuc);
218 int err;
219
220 err = rxe_add_to_pool(&rxe->uc_pool, uc);
221 if (err)
222 rxe_err_dev(rxe, "unable to create uc\n");
223
224 return err;
225 }
226
227 static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
228 {
229 struct rxe_ucontext *uc = to_ruc(ibuc);
230 int err;
231
232 err = rxe_cleanup(uc);
233 if (err)
234 rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
235 }
236
237 /* pd */
238 static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
239 {
240 struct rxe_dev *rxe = to_rdev(ibpd->device);
241 struct rxe_pd *pd = to_rpd(ibpd);
242 int err;
243
244 err = rxe_add_to_pool(&rxe->pd_pool, pd);
245 if (err) {
246 rxe_dbg_dev(rxe, "unable to alloc pd\n");
247 goto err_out;
248 }
249
250 return 0;
251
252 err_out:
253 rxe_err_dev(rxe, "returned err = %d\n", err);
254 return err;
255 }
256
257 static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
258 {
259 struct rxe_pd *pd = to_rpd(ibpd);
260 int err;
261
262 err = rxe_cleanup(pd);
263 if (err)
264 rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
265
266 return 0;
267 }
268
269 /* ah */
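/* two user ABIs are handled when creating an AH: a newer provider passes
 * a response buffer for rxe_create_ah_resp and receives the AH index in
 * ah_num; an older provider does not, and ah_num is cleared again
 * (presumably so the rest of the driver falls back to the AV carried in
 * each WQE).
 */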
270 static int rxe_create_ah(struct ib_ah *ibah,
271 struct rdma_ah_init_attr *init_attr,
272 struct ib_udata *udata)
273 {
274 struct rxe_dev *rxe = to_rdev(ibah->device);
275 struct rxe_ah *ah = to_rah(ibah);
276 struct rxe_create_ah_resp __user *uresp = NULL;
277 int err, cleanup_err;
278
279 if (udata) {
280 /* test if new user provider */
281 if (udata->outlen >= sizeof(*uresp))
282 uresp = udata->outbuf;
283 ah->is_user = true;
284 } else {
285 ah->is_user = false;
286 }
287
288 err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
289 init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
290 if (err) {
291 rxe_dbg_dev(rxe, "unable to create ah\n");
292 goto err_out;
293 }
294
295 /* the pool index is always > 0; use it as the AH number */
296 ah->ah_num = ah->elem.index;
297
298 err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
299 if (err) {
300 rxe_dbg_ah(ah, "bad attr\n");
301 goto err_cleanup;
302 }
303
304 if (uresp) {
305 /* only if new user provider */
306 err = copy_to_user(&uresp->ah_num, &ah->ah_num,
307 sizeof(uresp->ah_num));
308 if (err) {
309 err = -EFAULT;
310 rxe_dbg_ah(ah, "unable to copy to user\n");
311 goto err_cleanup;
312 }
313 } else if (ah->is_user) {
314 /* only if old user provider */
315 ah->ah_num = 0;
316 }
317
318 rxe_init_av(init_attr->ah_attr, &ah->av);
319 rxe_finalize(ah);
320
321 return 0;
322
323 err_cleanup:
324 cleanup_err = rxe_cleanup(ah);
325 if (cleanup_err)
326 rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
327 err_out:
328 rxe_err_ah(ah, "returned err = %d\n", err);
329 return err;
330 }
331
332 static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
333 {
334 struct rxe_ah *ah = to_rah(ibah);
335 int err;
336
337 err = rxe_ah_chk_attr(ah, attr);
338 if (err) {
339 rxe_dbg_ah(ah, "bad attr\n");
340 goto err_out;
341 }
342
343 rxe_init_av(attr, &ah->av);
344
345 return 0;
346
347 err_out:
348 rxe_err_ah(ah, "returned err = %d\n", err);
349 return err;
350 }
351
352 static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
353 {
354 struct rxe_ah *ah = to_rah(ibah);
355
356 memset(attr, 0, sizeof(*attr));
357 attr->type = ibah->type;
358 rxe_av_to_attr(&ah->av, attr);
359
360 return 0;
361 }
362
363 static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
364 {
365 struct rxe_ah *ah = to_rah(ibah);
366 int err;
367
368 err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
369 if (err)
370 rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
371
372 return 0;
373 }
374
375 /* srq */
376 static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
377 struct ib_udata *udata)
378 {
379 struct rxe_dev *rxe = to_rdev(ibsrq->device);
380 struct rxe_pd *pd = to_rpd(ibsrq->pd);
381 struct rxe_srq *srq = to_rsrq(ibsrq);
382 struct rxe_create_srq_resp __user *uresp = NULL;
383 int err, cleanup_err;
384
385 if (udata) {
386 if (udata->outlen < sizeof(*uresp)) {
387 err = -EINVAL;
388 rxe_err_dev(rxe, "malformed udata\n");
389 goto err_out;
390 }
391 uresp = udata->outbuf;
392 }
393
394 if (init->srq_type != IB_SRQT_BASIC) {
395 err = -EOPNOTSUPP;
396 rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
397 init->srq_type);
398 goto err_out;
399 }
400
401 err = rxe_srq_chk_init(rxe, init);
402 if (err) {
403 rxe_dbg_dev(rxe, "invalid init attributes\n");
404 goto err_out;
405 }
406
407 err = rxe_add_to_pool(&rxe->srq_pool, srq);
408 if (err) {
409 rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
410 goto err_out;
411 }
412
413 rxe_get(pd);
414 srq->pd = pd;
415
416 err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
417 if (err) {
418 rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
419 goto err_cleanup;
420 }
421
422 return 0;
423
424 err_cleanup:
425 cleanup_err = rxe_cleanup(srq);
426 if (cleanup_err)
427 rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
428 err_out:
429 rxe_err_dev(rxe, "returned err = %d\n", err);
430 return err;
431 }
432
433 static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
434 enum ib_srq_attr_mask mask,
435 struct ib_udata *udata)
436 {
437 struct rxe_srq *srq = to_rsrq(ibsrq);
438 struct rxe_dev *rxe = to_rdev(ibsrq->device);
439 struct rxe_modify_srq_cmd cmd = {};
440 int err;
441
442 if (udata) {
443 if (udata->inlen < sizeof(cmd)) {
444 err = -EINVAL;
445 rxe_dbg_srq(srq, "malformed udata\n");
446 goto err_out;
447 }
448
449 err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
450 if (err) {
451 err = -EFAULT;
452 rxe_dbg_srq(srq, "unable to read udata\n");
453 goto err_out;
454 }
455 }
456
457 err = rxe_srq_chk_attr(rxe, srq, attr, mask);
458 if (err) {
459 rxe_dbg_srq(srq, "bad init attributes\n");
460 goto err_out;
461 }
462
463 err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
464 if (err) {
465 rxe_dbg_srq(srq, "bad attr\n");
466 goto err_out;
467 }
468
469 return 0;
470
471 err_out:
472 rxe_err_srq(srq, "returned err = %d\n", err);
473 return err;
474 }
475
476 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
477 {
478 struct rxe_srq *srq = to_rsrq(ibsrq);
479 int err;
480
481 if (srq->error) {
482 err = -EINVAL;
483 rxe_dbg_srq(srq, "srq in error state\n");
484 goto err_out;
485 }
486
487 attr->max_wr = srq->rq.queue->buf->index_mask;
488 attr->max_sge = srq->rq.max_sge;
489 attr->srq_limit = srq->limit;
490 return 0;
491
492 err_out:
493 rxe_err_srq(srq, "returned err = %d\n", err);
494 return err;
495 }
496
497 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
498 const struct ib_recv_wr **bad_wr)
499 {
500 int err = 0;
501 struct rxe_srq *srq = to_rsrq(ibsrq);
502 unsigned long flags;
503
504 spin_lock_irqsave(&srq->rq.producer_lock, flags);
505
506 while (wr) {
507 err = post_one_recv(&srq->rq, wr);
508 if (unlikely(err))
509 break;
510 wr = wr->next;
511 }
512
513 spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
514
515 if (err) {
516 *bad_wr = wr;
517 rxe_err_srq(srq, "returned err = %d\n", err);
518 }
519
520 return err;
521 }
522
523 static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
524 {
525 struct rxe_srq *srq = to_rsrq(ibsrq);
526 int err;
527
528 err = rxe_cleanup(srq);
529 if (err)
530 rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
531
532 return 0;
533 }
534
535 /* qp */
536 static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
537 struct ib_udata *udata)
538 {
539 struct rxe_dev *rxe = to_rdev(ibqp->device);
540 struct rxe_pd *pd = to_rpd(ibqp->pd);
541 struct rxe_qp *qp = to_rqp(ibqp);
542 struct rxe_create_qp_resp __user *uresp = NULL;
543 int err, cleanup_err;
544
545 if (udata) {
546 if (udata->inlen) {
547 err = -EINVAL;
548 rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
549 goto err_out;
550 }
551
552 if (udata->outlen < sizeof(*uresp)) {
553 err = -EINVAL;
554 rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
555 goto err_out;
556 }
557
558 qp->is_user = true;
559 uresp = udata->outbuf;
560 } else {
561 qp->is_user = false;
562 }
563
564 if (init->create_flags) {
565 err = -EOPNOTSUPP;
566 rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
567 goto err_out;
568 }
569
570 err = rxe_qp_chk_init(rxe, init);
571 if (err) {
572 rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
573 goto err_out;
574 }
575
576 err = rxe_add_to_pool(&rxe->qp_pool, qp);
577 if (err) {
578 rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
579 goto err_out;
580 }
581
582 err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
583 if (err) {
584 rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
585 goto err_cleanup;
586 }
587
588 rxe_finalize(qp);
589 return 0;
590
591 err_cleanup:
592 cleanup_err = rxe_cleanup(qp);
593 if (cleanup_err)
594 rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
595 err_out:
596 rxe_err_dev(rxe, "returned err = %d\n", err);
597 return err;
598 }
599
600 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
601 int mask, struct ib_udata *udata)
602 {
603 struct rxe_dev *rxe = to_rdev(ibqp->device);
604 struct rxe_qp *qp = to_rqp(ibqp);
605 int err;
606
607 if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
608 err = -EOPNOTSUPP;
609 rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
610 mask, err);
611 goto err_out;
612 }
613
614 err = rxe_qp_chk_attr(rxe, qp, attr, mask);
615 if (err) {
616 rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
617 goto err_out;
618 }
619
620 err = rxe_qp_from_attr(qp, attr, mask, udata);
621 if (err) {
622 rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
623 goto err_out;
624 }
625
626 if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
627 qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
628 qp->ibqp.qp_num,
629 qp->attr.dest_qp_num);
630
631 return 0;
632
633 err_out:
634 rxe_err_qp(qp, "returned err = %d\n", err);
635 return err;
636 }
637
638 static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
639 int mask, struct ib_qp_init_attr *init)
640 {
641 struct rxe_qp *qp = to_rqp(ibqp);
642
643 rxe_qp_to_init(qp, init);
644 rxe_qp_to_attr(qp, attr, mask);
645
646 return 0;
647 }
648
649 static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
650 {
651 struct rxe_qp *qp = to_rqp(ibqp);
652 int err;
653
654 err = rxe_qp_chk_destroy(qp);
655 if (err) {
656 rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
657 goto err_out;
658 }
659
660 err = rxe_cleanup(qp);
661 if (err)
662 rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
663
664 return 0;
665
666 err_out:
667 rxe_err_qp(qp, "returned err = %d\n", err);
668 return err;
669 }
670
671 /* send wr */
672
673 /* sanity check incoming send work request */
674 static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
675 unsigned int *maskp, unsigned int *lengthp)
676 {
677 int num_sge = ibwr->num_sge;
678 struct rxe_sq *sq = &qp->sq;
679 unsigned int mask = 0;
680 unsigned long length = 0;
681 int err = -EINVAL;
682 int i;
683
684 do {
685 mask = wr_opcode_mask(ibwr->opcode, qp);
686 if (!mask) {
687 rxe_err_qp(qp, "bad wr opcode for qp type\n");
688 break;
689 }
690
691 if (num_sge > sq->max_sge) {
692 rxe_err_qp(qp, "num_sge > max_sge\n");
693 break;
694 }
695
696 length = 0;
697 for (i = 0; i < ibwr->num_sge; i++)
698 length += ibwr->sg_list[i].length;
699
700 if (length > RXE_PORT_MAX_MSG_SZ) {
701 rxe_err_qp(qp, "message length too long\n");
702 break;
703 }
704
705 if (mask & WR_ATOMIC_MASK) {
706 if (length != 8) {
707 rxe_err_qp(qp, "atomic length != 8\n");
708 break;
709 }
710 if (atomic_wr(ibwr)->remote_addr & 0x7) {
711 rxe_err_qp(qp, "misaligned atomic address\n");
712 break;
713 }
714 }
715 if (ibwr->send_flags & IB_SEND_INLINE) {
716 if (!(mask & WR_INLINE_MASK)) {
717 rxe_err_qp(qp, "opcode doesn't support inline data\n");
718 break;
719 }
720 if (length > sq->max_inline) {
721 rxe_err_qp(qp, "inline length too big\n");
722 break;
723 }
724 }
725
726 err = 0;
727 } while (0);
728
729 *maskp = mask;
730 *lengthp = (int)length;
731
732 return err;
733 }
734
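/* translate a ULP ib_send_wr into the driver's rxe_send_wr format held in
 * the send WQE; UD/GSI QPs carry the AH number, remote QPN and Q_Key,
 * while connected QPs carry the RDMA, atomic or memory registration
 * parameters for the opcode.
 */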
735 static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
736 const struct ib_send_wr *ibwr)
737 {
738 wr->wr_id = ibwr->wr_id;
739 wr->opcode = ibwr->opcode;
740 wr->send_flags = ibwr->send_flags;
741
742 if (qp_type(qp) == IB_QPT_UD ||
743 qp_type(qp) == IB_QPT_GSI) {
744 struct ib_ah *ibah = ud_wr(ibwr)->ah;
745
746 wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
747 wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
748 wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
749 if (qp_type(qp) == IB_QPT_GSI)
750 wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
751
752 switch (wr->opcode) {
753 case IB_WR_SEND_WITH_IMM:
754 wr->ex.imm_data = ibwr->ex.imm_data;
755 break;
756 case IB_WR_SEND:
757 break;
758 default:
759 rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
760 wr->opcode);
761 return -EINVAL;
762 }
763 } else {
764 switch (wr->opcode) {
765 case IB_WR_RDMA_WRITE_WITH_IMM:
766 wr->ex.imm_data = ibwr->ex.imm_data;
767 fallthrough;
768 case IB_WR_RDMA_READ:
769 case IB_WR_RDMA_WRITE:
770 wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
771 wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
772 break;
773 case IB_WR_SEND_WITH_IMM:
774 wr->ex.imm_data = ibwr->ex.imm_data;
775 break;
776 case IB_WR_SEND_WITH_INV:
777 wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
778 break;
779 case IB_WR_RDMA_READ_WITH_INV:
780 wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
781 wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
782 wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
783 break;
784 case IB_WR_ATOMIC_CMP_AND_SWP:
785 case IB_WR_ATOMIC_FETCH_AND_ADD:
786 wr->wr.atomic.remote_addr =
787 atomic_wr(ibwr)->remote_addr;
788 wr->wr.atomic.compare_add =
789 atomic_wr(ibwr)->compare_add;
790 wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
791 wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
792 break;
793 case IB_WR_LOCAL_INV:
794 wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
795 break;
796 case IB_WR_REG_MR:
797 wr->wr.reg.mr = reg_wr(ibwr)->mr;
798 wr->wr.reg.key = reg_wr(ibwr)->key;
799 wr->wr.reg.access = reg_wr(ibwr)->access;
800 break;
801 case IB_WR_SEND:
802 case IB_WR_BIND_MW:
803 case IB_WR_FLUSH:
804 case IB_WR_ATOMIC_WRITE:
805 break;
806 default:
807 rxe_err_qp(qp, "unsupported wr opcode %d\n",
808 wr->opcode);
809 return -EINVAL;
810 }
811 }
812
813 return 0;
814 }
815
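/* rxe is a software device, so the "DMA" addresses in the SGEs are kernel
 * virtual addresses; convert them back to pointers and copy the data
 * straight into the WQE's inline buffer.
 */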
816 static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
817 const struct ib_send_wr *ibwr)
818 {
819 struct ib_sge *sge = ibwr->sg_list;
820 u8 *p = wqe->dma.inline_data;
821 int i;
822
823 for (i = 0; i < ibwr->num_sge; i++, sge++) {
824 memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
825 p += sge->length;
826 }
827 }
828
829 static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
830 unsigned int mask, unsigned int length,
831 struct rxe_send_wqe *wqe)
832 {
833 int num_sge = ibwr->num_sge;
834 int err;
835
836 err = init_send_wr(qp, &wqe->wr, ibwr);
837 if (err)
838 return err;
839
840 /* local operation */
841 if (unlikely(mask & WR_LOCAL_OP_MASK)) {
842 wqe->mask = mask;
843 wqe->state = wqe_state_posted;
844 return 0;
845 }
846
847 if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
848 copy_inline_data_to_wqe(wqe, ibwr);
849 else
850 memcpy(wqe->dma.sge, ibwr->sg_list,
851 num_sge * sizeof(struct ib_sge));
852
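/* for RDMA read/write and atomic operations iova holds the remote address;
 * other opcodes do not use it
 */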
853 wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
854 mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
855 wqe->mask = mask;
856 wqe->dma.length = length;
857 wqe->dma.resid = length;
858 wqe->dma.num_sge = num_sge;
859 wqe->dma.cur_sge = 0;
860 wqe->dma.sge_offset = 0;
861 wqe->state = wqe_state_posted;
862 wqe->ssn = atomic_add_return(1, &qp->ssn);
863
864 return 0;
865 }
866
867 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
868 {
869 int err;
870 struct rxe_sq *sq = &qp->sq;
871 struct rxe_send_wqe *send_wqe;
872 unsigned int mask;
873 unsigned int length;
874 int full;
875
876 err = validate_send_wr(qp, ibwr, &mask, &length);
877 if (err)
878 return err;
879
880 full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
881 if (unlikely(full)) {
882 rxe_err_qp(qp, "send queue full\n");
883 return -ENOMEM;
884 }
885
886 send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
887 err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
888 if (!err)
889 queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
890
891 return err;
892 }
893
894 static int rxe_post_send_kernel(struct rxe_qp *qp,
895 const struct ib_send_wr *ibwr,
896 const struct ib_send_wr **bad_wr)
897 {
898 int err = 0;
899 unsigned long flags;
900 int good = 0;
901
902 spin_lock_irqsave(&qp->sq.sq_lock, flags);
903 while (ibwr) {
904 err = post_one_send(qp, ibwr);
905 if (err) {
906 *bad_wr = ibwr;
907 break;
908 } else {
909 good++;
910 }
911 ibwr = ibwr->next;
912 }
913 spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
914
915 /* kickoff processing of any posted wqes */
916 if (good)
917 rxe_sched_task(&qp->send_task);
918
919 return err;
920 }
921
922 static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
923 const struct ib_send_wr **bad_wr)
924 {
925 struct rxe_qp *qp = to_rqp(ibqp);
926 int err;
927 unsigned long flags;
928
929 spin_lock_irqsave(&qp->state_lock, flags);
930 /* caller has already called destroy_qp */
931 if (WARN_ON_ONCE(!qp->valid)) {
932 spin_unlock_irqrestore(&qp->state_lock, flags);
933 rxe_err_qp(qp, "qp has been destroyed\n");
934 return -EINVAL;
935 }
936
937 if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
938 spin_unlock_irqrestore(&qp->state_lock, flags);
939 *bad_wr = wr;
940 rxe_err_qp(qp, "qp not ready to send\n");
941 return -EINVAL;
942 }
943 spin_unlock_irqrestore(&qp->state_lock, flags);
944
945 if (qp->is_user) {
946 /* the user provider has already written the WQEs into the shared send queue; just kick off protocol processing from process context */
947 rxe_sched_task(&qp->send_task);
948 } else {
949 err = rxe_post_send_kernel(qp, wr, bad_wr);
950 if (err)
951 return err;
952 }
953
954 return 0;
955 }
956
957 /* recv wr */
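/* post a single receive WQE; the caller (rxe_post_recv or
 * rxe_post_srq_recv) must hold the queue's producer_lock.
 */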
958 static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
959 {
960 int i;
961 unsigned long length;
962 struct rxe_recv_wqe *recv_wqe;
963 int num_sge = ibwr->num_sge;
964 int full;
965 int err;
966
967 full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
968 if (unlikely(full)) {
969 err = -ENOMEM;
970 rxe_dbg("queue full\n");
971 goto err_out;
972 }
973
974 if (unlikely(num_sge > rq->max_sge)) {
975 err = -EINVAL;
976 rxe_dbg("bad num_sge > max_sge\n");
977 goto err_out;
978 }
979
980 length = 0;
981 for (i = 0; i < num_sge; i++)
982 length += ibwr->sg_list[i].length;
983
984 if (length > RXE_PORT_MAX_MSG_SZ) {
985 err = -EINVAL;
986 rxe_dbg("message length too long\n");
987 goto err_out;
988 }
989
990 recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
991
992 recv_wqe->wr_id = ibwr->wr_id;
993 recv_wqe->dma.length = length;
994 recv_wqe->dma.resid = length;
995 recv_wqe->dma.num_sge = num_sge;
996 recv_wqe->dma.cur_sge = 0;
997 recv_wqe->dma.sge_offset = 0;
998 memcpy(recv_wqe->dma.sge, ibwr->sg_list,
999 num_sge * sizeof(struct ib_sge));
1000
1001 queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
1002
1003 return 0;
1004
1005 err_out:
1006 rxe_dbg("returned err = %d\n", err);
1007 return err;
1008 }
1009
1010 static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1011 const struct ib_recv_wr **bad_wr)
1012 {
1013 int err = 0;
1014 struct rxe_qp *qp = to_rqp(ibqp);
1015 struct rxe_rq *rq = &qp->rq;
1016 unsigned long flags;
1017
1018 spin_lock_irqsave(&qp->state_lock, flags);
1019 /* caller has already called destroy_qp */
1020 if (WARN_ON_ONCE(!qp->valid)) {
1021 spin_unlock_irqrestore(&qp->state_lock, flags);
1022 rxe_err_qp(qp, "qp has been destroyed\n");
1023 return -EINVAL;
1024 }
1025
1026 /* see C10-97.2.1 */
1027 if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
1028 spin_unlock_irqrestore(&qp->state_lock, flags);
1029 *bad_wr = wr;
1030 rxe_dbg_qp(qp, "qp not ready to post recv\n");
1031 return -EINVAL;
1032 }
1033 spin_unlock_irqrestore(&qp->state_lock, flags);
1034
1035 if (unlikely(qp->srq)) {
1036 *bad_wr = wr;
1037 rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
1038 return -EINVAL;
1039 }
1040
1041 spin_lock_irqsave(&rq->producer_lock, flags);
1042
1043 while (wr) {
1044 err = post_one_recv(rq, wr);
1045 if (unlikely(err)) {
1046 *bad_wr = wr;
1047 break;
1048 }
1049 wr = wr->next;
1050 }
1051
1052 spin_unlock_irqrestore(&rq->producer_lock, flags);
1053
1054 spin_lock_irqsave(&qp->state_lock, flags);
1055 if (qp_state(qp) == IB_QPS_ERR)
1056 rxe_sched_task(&qp->recv_task);
1057 spin_unlock_irqrestore(&qp->state_lock, flags);
1058
1059 return err;
1060 }
1061
1062 /* cq */
1063 static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1064 struct uverbs_attr_bundle *attrs)
1065 {
1066 struct ib_udata *udata = &attrs->driver_udata;
1067 struct ib_device *dev = ibcq->device;
1068 struct rxe_dev *rxe = to_rdev(dev);
1069 struct rxe_cq *cq = to_rcq(ibcq);
1070 struct rxe_create_cq_resp __user *uresp = NULL;
1071 int err, cleanup_err;
1072
1073 if (udata) {
1074 if (udata->outlen < sizeof(*uresp)) {
1075 err = -EINVAL;
1076 rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
1077 goto err_out;
1078 }
1079 uresp = udata->outbuf;
1080 }
1081
1082 if (attr->flags) {
1083 err = -EOPNOTSUPP;
1084 rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
1085 goto err_out;
1086 }
1087
1088 err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
1089 if (err) {
1090 rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
1091 goto err_out;
1092 }
1093
1094 err = rxe_add_to_pool(&rxe->cq_pool, cq);
1095 if (err) {
1096 rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
1097 goto err_out;
1098 }
1099
1100 err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
1101 uresp);
1102 if (err) {
1103 rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
1104 goto err_cleanup;
1105 }
1106
1107 return 0;
1108
1109 err_cleanup:
1110 cleanup_err = rxe_cleanup(cq);
1111 if (cleanup_err)
1112 rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
1113 err_out:
1114 rxe_err_dev(rxe, "returned err = %d\n", err);
1115 return err;
1116 }
1117
1118 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
1119 {
1120 struct rxe_cq *cq = to_rcq(ibcq);
1121 struct rxe_dev *rxe = to_rdev(ibcq->device);
1122 struct rxe_resize_cq_resp __user *uresp = NULL;
1123 int err;
1124
1125 if (udata) {
1126 if (udata->outlen < sizeof(*uresp)) {
1127 err = -EINVAL;
1128 rxe_dbg_cq(cq, "malformed udata\n");
1129 goto err_out;
1130 }
1131 uresp = udata->outbuf;
1132 }
1133
1134 err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
1135 if (err) {
1136 rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
1137 goto err_out;
1138 }
1139
1140 err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
1141 if (err) {
1142 rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
1143 goto err_out;
1144 }
1145
1146 return 0;
1147
1148 err_out:
1149 rxe_err_cq(cq, "returned err = %d\n", err);
1150 return err;
1151 }
1152
1153 static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
1154 {
1155 int i;
1156 struct rxe_cq *cq = to_rcq(ibcq);
1157 struct rxe_cqe *cqe;
1158 unsigned long flags;
1159
1160 spin_lock_irqsave(&cq->cq_lock, flags);
1161 for (i = 0; i < num_entries; i++) {
1162 cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
1163 if (!cqe)
1164 break; /* queue empty */
1165
1166 memcpy(wc++, &cqe->ibwc, sizeof(*wc));
1167 queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
1168 }
1169 spin_unlock_irqrestore(&cq->cq_lock, flags);
1170
1171 return i;
1172 }
1173
1174 static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
1175 {
1176 struct rxe_cq *cq = to_rcq(ibcq);
1177 int count;
1178
1179 count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
1180
1181 return (count > wc_cnt) ? wc_cnt : count;
1182 }
1183
1184 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
1185 {
1186 struct rxe_cq *cq = to_rcq(ibcq);
1187 int ret = 0;
1188 int empty;
1189 unsigned long irq_flags;
1190
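/* record the requested notification type; if completions are already
 * queued and the caller asked about missed events, return 1 so it polls
 * again instead of waiting for the next completion event.
 */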
1191 spin_lock_irqsave(&cq->cq_lock, irq_flags);
1192 cq->notify |= flags & IB_CQ_SOLICITED_MASK;
1193 empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
1194
1195 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
1196 ret = 1;
1197
1198 spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
1199
1200 return ret;
1201 }
1202
1203 static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1204 {
1205 struct rxe_cq *cq = to_rcq(ibcq);
1206 int err;
1207
1208 /* See IBA C11-17: The CI shall return an error if this Verb is
1209 * invoked while a Work Queue is still associated with the CQ.
1210 */
1211 if (atomic_read(&cq->num_wq)) {
1212 err = -EINVAL;
1213 rxe_dbg_cq(cq, "still in use\n");
1214 goto err_out;
1215 }
1216
1217 err = rxe_cleanup(cq);
1218 if (err)
1219 rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
1220
1221 return 0;
1222
1223 err_out:
1224 rxe_err_cq(cq, "returned err = %d\n", err);
1225 return err;
1226 }
1227
1228 /* mr */
1229 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
1230 {
1231 struct rxe_dev *rxe = to_rdev(ibpd->device);
1232 struct rxe_pd *pd = to_rpd(ibpd);
1233 struct rxe_mr *mr;
1234 int err;
1235
1236 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1237 if (!mr)
1238 return ERR_PTR(-ENOMEM);
1239
1240 err = rxe_add_to_pool(&rxe->mr_pool, mr);
1241 if (err) {
1242 rxe_dbg_dev(rxe, "unable to create mr\n");
1243 goto err_free;
1244 }
1245
1246 rxe_get(pd);
1247 mr->ibmr.pd = ibpd;
1248 mr->ibmr.device = ibpd->device;
1249
1250 rxe_mr_init_dma(access, mr);
1251 rxe_finalize(mr);
1252 return &mr->ibmr;
1253
1254 err_free:
1255 kfree(mr);
1256 rxe_err_pd(pd, "returned err = %d\n", err);
1257 return ERR_PTR(err);
1258 }
1259
1260 static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
1261 u64 length, u64 iova, int access,
1262 struct ib_udata *udata)
1263 {
1264 struct rxe_dev *rxe = to_rdev(ibpd->device);
1265 struct rxe_pd *pd = to_rpd(ibpd);
1266 struct rxe_mr *mr;
1267 int err, cleanup_err;
1268
1269 if (access & ~RXE_ACCESS_SUPPORTED_MR) {
1270 rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
1271 RXE_ACCESS_SUPPORTED_MR);
1272 return ERR_PTR(-EOPNOTSUPP);
1273 }
1274
1275 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1276 if (!mr)
1277 return ERR_PTR(-ENOMEM);
1278
1279 err = rxe_add_to_pool(&rxe->mr_pool, mr);
1280 if (err) {
1281 rxe_dbg_pd(pd, "unable to create mr\n");
1282 goto err_free;
1283 }
1284
1285 rxe_get(pd);
1286 mr->ibmr.pd = ibpd;
1287 mr->ibmr.device = ibpd->device;
1288
1289 err = rxe_mr_init_user(rxe, start, length, access, mr);
1290 if (err) {
1291 rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
1292 goto err_cleanup;
1293 }
1294
1295 rxe_finalize(mr);
1296 return &mr->ibmr;
1297
1298 err_cleanup:
1299 cleanup_err = rxe_cleanup(mr);
1300 if (cleanup_err)
1301 rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
1302 err_free:
1303 kfree(mr);
1304 rxe_err_pd(pd, "returned err = %d\n", err);
1305 return ERR_PTR(err);
1306 }
1307
1308 static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
1309 u64 start, u64 length, u64 iova,
1310 int access, struct ib_pd *ibpd,
1311 struct ib_udata *udata)
1312 {
1313 struct rxe_mr *mr = to_rmr(ibmr);
1314 struct rxe_pd *old_pd = to_rpd(ibmr->pd);
1315 struct rxe_pd *pd = to_rpd(ibpd);
1316
1317 /* for now only support the two easy cases:
1318 * rereg_pd and rereg_access
1319 */
1320 if (flags & ~RXE_MR_REREG_SUPPORTED) {
1321 rxe_err_mr(mr, "flags = %#x not supported\n", flags);
1322 return ERR_PTR(-EOPNOTSUPP);
1323 }
1324
1325 if (flags & IB_MR_REREG_PD) {
1326 rxe_put(old_pd);
1327 rxe_get(pd);
1328 mr->ibmr.pd = ibpd;
1329 }
1330
1331 if (flags & IB_MR_REREG_ACCESS) {
1332 if (access & ~RXE_ACCESS_SUPPORTED_MR) {
1333 rxe_err_mr(mr, "access = %#x not supported\n", access);
1334 return ERR_PTR(-EOPNOTSUPP);
1335 }
1336 mr->access = access;
1337 }
1338
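/* returning NULL rather than a new ib_mr tells the core that the
 * existing MR was updated in place.
 */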
1339 return NULL;
1340 }
1341
1342 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
1343 u32 max_num_sg)
1344 {
1345 struct rxe_dev *rxe = to_rdev(ibpd->device);
1346 struct rxe_pd *pd = to_rpd(ibpd);
1347 struct rxe_mr *mr;
1348 int err, cleanup_err;
1349
1350 if (mr_type != IB_MR_TYPE_MEM_REG) {
1351 err = -EINVAL;
1352 rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
1353 mr_type, err);
1354 goto err_out;
1355 }
1356
1357 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1358 if (!mr)
1359 return ERR_PTR(-ENOMEM);
1360
1361 err = rxe_add_to_pool(&rxe->mr_pool, mr);
1362 if (err)
1363 goto err_free;
1364
1365 rxe_get(pd);
1366 mr->ibmr.pd = ibpd;
1367 mr->ibmr.device = ibpd->device;
1368
1369 err = rxe_mr_init_fast(max_num_sg, mr);
1370 if (err) {
1371 rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
1372 goto err_cleanup;
1373 }
1374
1375 rxe_finalize(mr);
1376 return &mr->ibmr;
1377
1378 err_cleanup:
1379 cleanup_err = rxe_cleanup(mr);
1380 if (cleanup_err)
1381 rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
1382 err_free:
1383 kfree(mr);
1384 err_out:
1385 rxe_err_pd(pd, "returned err = %d\n", err);
1386 return ERR_PTR(err);
1387 }
1388
1389 static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1390 {
1391 struct rxe_mr *mr = to_rmr(ibmr);
1392 int err, cleanup_err;
1393
1394 /* See IBA 10.6.7.2.6 */
1395 if (atomic_read(&mr->num_mw) > 0) {
1396 err = -EINVAL;
1397 rxe_dbg_mr(mr, "mr has mw's bound\n");
1398 goto err_out;
1399 }
1400
1401 cleanup_err = rxe_cleanup(mr);
1402 if (cleanup_err)
1403 rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
1404
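/* the MR may still be reachable through RCU-protected pool lookups, so
 * defer the final free past a grace period.
 */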
1405 kfree_rcu_mightsleep(mr);
1406 return 0;
1407
1408 err_out:
1409 rxe_err_mr(mr, "returned err = %d\n", err);
1410 return err;
1411 }
1412
1413 static ssize_t parent_show(struct device *device,
1414 struct device_attribute *attr, char *buf)
1415 {
1416 struct rxe_dev *rxe =
1417 rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
1418
1419 return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
1420 }
1421
1422 static DEVICE_ATTR_RO(parent);
1423
1424 static struct attribute *rxe_dev_attributes[] = {
1425 &dev_attr_parent.attr,
1426 NULL
1427 };
1428
1429 static const struct attribute_group rxe_attr_group = {
1430 .attrs = rxe_dev_attributes,
1431 };
1432
1433 static int rxe_enable_driver(struct ib_device *ib_dev)
1434 {
1435 struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
1436 struct net_device *ndev;
1437
1438 ndev = rxe_ib_device_get_netdev(ib_dev);
1439 if (!ndev)
1440 return -ENODEV;
1441
1442 rxe_set_port_state(rxe);
1443 dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
1444
1445 dev_put(ndev);
1446 return 0;
1447 }
1448
1449 static const struct ib_device_ops rxe_dev_ops = {
1450 .owner = THIS_MODULE,
1451 .driver_id = RDMA_DRIVER_RXE,
1452 .uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
1453
1454 .alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
1455 .alloc_mr = rxe_alloc_mr,
1456 .alloc_mw = rxe_alloc_mw,
1457 .alloc_pd = rxe_alloc_pd,
1458 .alloc_ucontext = rxe_alloc_ucontext,
1459 .attach_mcast = rxe_attach_mcast,
1460 .create_ah = rxe_create_ah,
1461 .create_cq = rxe_create_cq,
1462 .create_qp = rxe_create_qp,
1463 .create_srq = rxe_create_srq,
1464 .create_user_ah = rxe_create_ah,
1465 .dealloc_driver = rxe_dealloc,
1466 .dealloc_mw = rxe_dealloc_mw,
1467 .dealloc_pd = rxe_dealloc_pd,
1468 .dealloc_ucontext = rxe_dealloc_ucontext,
1469 .dereg_mr = rxe_dereg_mr,
1470 .destroy_ah = rxe_destroy_ah,
1471 .destroy_cq = rxe_destroy_cq,
1472 .destroy_qp = rxe_destroy_qp,
1473 .destroy_srq = rxe_destroy_srq,
1474 .detach_mcast = rxe_detach_mcast,
1475 .device_group = &rxe_attr_group,
1476 .enable_driver = rxe_enable_driver,
1477 .get_dma_mr = rxe_get_dma_mr,
1478 .get_hw_stats = rxe_ib_get_hw_stats,
1479 .get_link_layer = rxe_get_link_layer,
1480 .get_port_immutable = rxe_port_immutable,
1481 .map_mr_sg = rxe_map_mr_sg,
1482 .mmap = rxe_mmap,
1483 .modify_ah = rxe_modify_ah,
1484 .modify_device = rxe_modify_device,
1485 .modify_port = rxe_modify_port,
1486 .modify_qp = rxe_modify_qp,
1487 .modify_srq = rxe_modify_srq,
1488 .peek_cq = rxe_peek_cq,
1489 .poll_cq = rxe_poll_cq,
1490 .post_recv = rxe_post_recv,
1491 .post_send = rxe_post_send,
1492 .post_srq_recv = rxe_post_srq_recv,
1493 .query_ah = rxe_query_ah,
1494 .query_device = rxe_query_device,
1495 .query_pkey = rxe_query_pkey,
1496 .query_port = rxe_query_port,
1497 .query_qp = rxe_query_qp,
1498 .query_srq = rxe_query_srq,
1499 .reg_user_mr = rxe_reg_user_mr,
1500 .req_notify_cq = rxe_req_notify_cq,
1501 .rereg_user_mr = rxe_rereg_user_mr,
1502 .resize_cq = rxe_resize_cq,
1503
1504 INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
1505 INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
1506 INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
1507 INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
1508 INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
1509 INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
1510 INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
1511 };
1512
1513 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
1514 struct net_device *ndev)
1515 {
1516 int err;
1517 struct ib_device *dev = &rxe->ib_dev;
1518
1519 strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
1520
1521 dev->node_type = RDMA_NODE_IB_CA;
1522 dev->phys_port_cnt = 1;
1523 dev->num_comp_vectors = num_possible_cpus();
1524 dev->local_dma_lkey = 0;
1525 addrconf_addr_eui48((unsigned char *)&dev->node_guid,
1526 ndev->dev_addr);
1527
1528 dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
1529 BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
1530
1531 ib_set_device_ops(dev, &rxe_dev_ops);
1532 err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
1533 if (err)
1534 return err;
1535
1536 err = rxe_icrc_init(rxe);
1537 if (err)
1538 return err;
1539
1540 err = ib_register_device(dev, ibdev_name, NULL);
1541 if (err)
1542 rxe_dbg_dev(rxe, "failed with error %d\n", err);
1543
1544 /*
1545 * Note that rxe may be invalid at this point if another thread
1546 * unregistered it.
1547 */
1548 return err;
1549 }
1550