1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6 #include <linux/dma-buf.h>
7 #include <linux/dma-resv.h>
8 #include <linux/vmalloc.h>
9 #include <linux/log2.h>
10
11 #include <rdma/ib_addr.h>
12 #include <rdma/ib_umem.h>
13 #include <rdma/ib_user_verbs.h>
14 #include <rdma/ib_verbs.h>
15 #include <rdma/uverbs_ioctl.h>
16 #define UVERBS_MODULE_NAME efa_ib
17 #include <rdma/uverbs_named_ioctl.h>
18 #include <rdma/ib_user_ioctl_cmds.h>
19
20 #include "efa.h"
21 #include "efa_io_defs.h"
22
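/*
 * Values stored in efa_user_mmap_entry.mmap_flag: DMA_PAGE entries expose
 * driver-allocated host memory, while IO_WC/IO_NC entries expose device BAR
 * pages mapped write-combining or non-cached, respectively.
 */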
23 enum {
24 EFA_MMAP_DMA_PAGE = 0,
25 EFA_MMAP_IO_WC,
26 EFA_MMAP_IO_NC,
27 };
28
29 struct efa_user_mmap_entry {
30 struct rdma_user_mmap_entry rdma_entry;
31 u64 address;
32 u8 mmap_flag;
33 };
34
35 #define EFA_DEFINE_DEVICE_STATS(op) \
36 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
37 op(EFA_COMPLETED_CMDS, "completed_cmds") \
38 op(EFA_CMDS_ERR, "cmds_err") \
39 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
40 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
41 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
42 op(EFA_CREATE_QP_ERR, "create_qp_err") \
43 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
44 op(EFA_REG_MR_ERR, "reg_mr_err") \
45 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
46 op(EFA_CREATE_AH_ERR, "create_ah_err") \
47 op(EFA_MMAP_ERR, "mmap_err")
48
49 #define EFA_DEFINE_PORT_STATS(op) \
50 op(EFA_TX_BYTES, "tx_bytes") \
51 op(EFA_TX_PKTS, "tx_pkts") \
52 op(EFA_RX_BYTES, "rx_bytes") \
53 op(EFA_RX_PKTS, "rx_pkts") \
54 op(EFA_RX_DROPS, "rx_drops") \
55 op(EFA_SEND_BYTES, "send_bytes") \
56 op(EFA_SEND_WRS, "send_wrs") \
57 op(EFA_RECV_BYTES, "recv_bytes") \
58 op(EFA_RECV_WRS, "recv_wrs") \
59 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
60 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
61 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
62 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
63 op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
64 op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
65 op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
66 op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
67
68 #define EFA_STATS_ENUM(ename, name) ename,
69 #define EFA_STATS_STR(ename, nam) \
70 [ename].name = nam,
71
72 enum efa_hw_device_stats {
73 EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
74 };
75
76 static const struct rdma_stat_desc efa_device_stats_descs[] = {
77 EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
78 };
79
80 enum efa_hw_port_stats {
81 EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
82 };
83
84 static const struct rdma_stat_desc efa_port_stats_descs[] = {
85 EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
86 };
87
88 #define EFA_CHUNK_PAYLOAD_SHIFT 12
89 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
90 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
91
92 #define EFA_CHUNK_SHIFT 12
93 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
94 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
95
96 #define EFA_PTRS_PER_CHUNK \
97 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
98
99 #define EFA_CHUNK_USED_SIZE \
100 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
101
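/*
 * Indirect PBL layout: each 4KB chunk carries EFA_PTRS_PER_CHUNK 8-byte page
 * addresses followed by one efa_com_ctrl_buff_info that links to the next
 * chunk (see pbl_chunk_list_create() below).
 */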
102 struct pbl_chunk {
103 dma_addr_t dma_addr;
104 u64 *buf;
105 u32 length;
106 };
107
108 struct pbl_chunk_list {
109 struct pbl_chunk *chunks;
110 unsigned int size;
111 };
112
113 struct pbl_context {
114 union {
115 struct {
116 dma_addr_t dma_addr;
117 } continuous;
118 struct {
119 u32 pbl_buf_size_in_pages;
120 struct scatterlist *sgl;
121 int sg_dma_cnt;
122 struct pbl_chunk_list chunk_list;
123 } indirect;
124 } phys;
125 u64 *pbl_buf;
126 u32 pbl_buf_size_in_bytes;
127 u8 physically_continuous;
128 };
129
130 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
131 {
132 return container_of(ibdev, struct efa_dev, ibdev);
133 }
134
135 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
136 {
137 return container_of(ibucontext, struct efa_ucontext, ibucontext);
138 }
139
140 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
141 {
142 return container_of(ibpd, struct efa_pd, ibpd);
143 }
144
145 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
146 {
147 return container_of(ibmr, struct efa_mr, ibmr);
148 }
149
150 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
151 {
152 return container_of(ibqp, struct efa_qp, ibqp);
153 }
154
155 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
156 {
157 return container_of(ibcq, struct efa_cq, ibcq);
158 }
159
160 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
161 {
162 return container_of(ibah, struct efa_ah, ibah);
163 }
164
165 static inline struct efa_user_mmap_entry *
166 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
167 {
168 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
169 }
170
171 #define EFA_DEV_CAP(dev, cap) \
172 ((dev)->dev_attr.device_caps & \
173 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
174
175 #define is_reserved_cleared(reserved) \
176 !memchr_inv(reserved, 0, sizeof(reserved))
177
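/* Allocate a zeroed, physically contiguous buffer and DMA-map it; undone by efa_free_mapped(). */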
178 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
179 size_t size, enum dma_data_direction dir)
180 {
181 void *addr;
182
183 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
184 if (!addr)
185 return NULL;
186
187 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
188 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
189 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
190 free_pages_exact(addr, size);
191 return NULL;
192 }
193
194 return addr;
195 }
196
197 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
198 dma_addr_t dma_addr,
199 size_t size, enum dma_data_direction dir)
200 {
201 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
202 free_pages_exact(cpu_addr, size);
203 }
204
205 int efa_query_device(struct ib_device *ibdev,
206 struct ib_device_attr *props,
207 struct ib_udata *udata)
208 {
209 struct efa_com_get_device_attr_result *dev_attr;
210 struct efa_ibv_ex_query_device_resp resp = {};
211 struct efa_dev *dev = to_edev(ibdev);
212 int err;
213
214 if (udata && udata->inlen &&
215 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
216 ibdev_dbg(ibdev,
217 "Incompatible ABI params, udata not cleared\n");
218 return -EINVAL;
219 }
220
221 dev_attr = &dev->dev_attr;
222
223 memset(props, 0, sizeof(*props));
224 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
225 props->page_size_cap = dev_attr->page_size_cap;
226 props->vendor_id = dev->pdev->vendor;
227 props->vendor_part_id = dev->pdev->device;
228 props->hw_ver = dev->pdev->subsystem_device;
229 props->max_qp = dev_attr->max_qp;
230 props->max_cq = dev_attr->max_cq;
231 props->max_pd = dev_attr->max_pd;
232 props->max_mr = dev_attr->max_mr;
233 props->max_ah = dev_attr->max_ah;
234 props->max_cqe = dev_attr->max_cq_depth;
235 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
236 dev_attr->max_rq_depth);
237 props->max_send_sge = dev_attr->max_sq_sge;
238 props->max_recv_sge = dev_attr->max_rq_sge;
239 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
240 props->max_pkeys = 1;
241
242 if (udata && udata->outlen) {
243 resp.max_sq_sge = dev_attr->max_sq_sge;
244 resp.max_rq_sge = dev_attr->max_rq_sge;
245 resp.max_sq_wr = dev_attr->max_sq_depth;
246 resp.max_rq_wr = dev_attr->max_rq_depth;
247 resp.max_rdma_size = dev_attr->max_rdma_size;
248
249 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
250 if (EFA_DEV_CAP(dev, RDMA_READ))
251 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
252
253 if (EFA_DEV_CAP(dev, RNR_RETRY))
254 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
255
256 if (EFA_DEV_CAP(dev, DATA_POLLING_128))
257 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
258
259 if (EFA_DEV_CAP(dev, RDMA_WRITE))
260 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
261
262 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
263 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
264
265 if (dev->neqs)
266 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
267
268 err = ib_copy_to_udata(udata, &resp,
269 min(sizeof(resp), udata->outlen));
270 if (err) {
271 ibdev_dbg(ibdev,
272 "Failed to copy udata for query_device\n");
273 return err;
274 }
275 }
276
277 return 0;
278 }
279
280 int efa_query_port(struct ib_device *ibdev, u32 port,
281 struct ib_port_attr *props)
282 {
283 struct efa_dev *dev = to_edev(ibdev);
284
285 props->lmc = 1;
286
287 props->state = IB_PORT_ACTIVE;
288 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
289 props->gid_tbl_len = 1;
290 props->pkey_tbl_len = 1;
291 props->active_speed = IB_SPEED_EDR;
292 props->active_width = IB_WIDTH_4X;
293 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
294 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
295 props->max_msg_sz = dev->dev_attr.mtu;
296 props->max_vl_num = 1;
297
298 return 0;
299 }
300
301 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
302 int qp_attr_mask,
303 struct ib_qp_init_attr *qp_init_attr)
304 {
305 struct efa_dev *dev = to_edev(ibqp->device);
306 struct efa_com_query_qp_params params = {};
307 struct efa_com_query_qp_result result;
308 struct efa_qp *qp = to_eqp(ibqp);
309 int err;
310
311 #define EFA_QUERY_QP_SUPP_MASK \
312 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
313 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
314
315 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
316 ibdev_dbg(&dev->ibdev,
317 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
318 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
319 return -EOPNOTSUPP;
320 }
321
322 memset(qp_attr, 0, sizeof(*qp_attr));
323 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
324
325 params.qp_handle = qp->qp_handle;
326 err = efa_com_query_qp(&dev->edev, &params, &result);
327 if (err)
328 return err;
329
330 qp_attr->qp_state = result.qp_state;
331 qp_attr->qkey = result.qkey;
332 qp_attr->sq_psn = result.sq_psn;
333 qp_attr->sq_draining = result.sq_draining;
334 qp_attr->port_num = 1;
335 qp_attr->rnr_retry = result.rnr_retry;
336
337 qp_attr->cap.max_send_wr = qp->max_send_wr;
338 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
339 qp_attr->cap.max_send_sge = qp->max_send_sge;
340 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
341 qp_attr->cap.max_inline_data = qp->max_inline_data;
342
343 qp_init_attr->qp_type = ibqp->qp_type;
344 qp_init_attr->recv_cq = ibqp->recv_cq;
345 qp_init_attr->send_cq = ibqp->send_cq;
346 qp_init_attr->qp_context = ibqp->qp_context;
347 qp_init_attr->cap = qp_attr->cap;
348
349 return 0;
350 }
351
352 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
353 union ib_gid *gid)
354 {
355 struct efa_dev *dev = to_edev(ibdev);
356
357 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
358
359 return 0;
360 }
361
362 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
363 u16 *pkey)
364 {
365 if (index > 0)
366 return -EINVAL;
367
368 *pkey = 0xffff;
369 return 0;
370 }
371
372 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
373 {
374 struct efa_com_dealloc_pd_params params = {
375 .pdn = pdn,
376 };
377
378 return efa_com_dealloc_pd(&dev->edev, &params);
379 }
380
381 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
382 {
383 struct efa_dev *dev = to_edev(ibpd->device);
384 struct efa_ibv_alloc_pd_resp resp = {};
385 struct efa_com_alloc_pd_result result;
386 struct efa_pd *pd = to_epd(ibpd);
387 int err;
388
389 if (udata->inlen &&
390 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
391 ibdev_dbg(&dev->ibdev,
392 "Incompatible ABI params, udata not cleared\n");
393 err = -EINVAL;
394 goto err_out;
395 }
396
397 err = efa_com_alloc_pd(&dev->edev, &result);
398 if (err)
399 goto err_out;
400
401 pd->pdn = result.pdn;
402 resp.pdn = result.pdn;
403
404 if (udata->outlen) {
405 err = ib_copy_to_udata(udata, &resp,
406 min(sizeof(resp), udata->outlen));
407 if (err) {
408 ibdev_dbg(&dev->ibdev,
409 "Failed to copy udata for alloc_pd\n");
410 goto err_dealloc_pd;
411 }
412 }
413
414 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
415
416 return 0;
417
418 err_dealloc_pd:
419 efa_pd_dealloc(dev, result.pdn);
420 err_out:
421 atomic64_inc(&dev->stats.alloc_pd_err);
422 return err;
423 }
424
425 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
426 {
427 struct efa_dev *dev = to_edev(ibpd->device);
428 struct efa_pd *pd = to_epd(ibpd);
429
430 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
431 efa_pd_dealloc(dev, pd->pdn);
432 return 0;
433 }
434
435 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
436 {
437 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
438
439 return efa_com_destroy_qp(&dev->edev, &params);
440 }
441
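/* NULL entries (never inserted) are tolerated by rdma_user_mmap_entry_remove(). */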
442 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
443 {
444 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
445 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
446 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
447 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
448 }
449
450 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
451 {
452 struct efa_dev *dev = to_edev(ibqp->pd->device);
453 struct efa_qp *qp = to_eqp(ibqp);
454 int err;
455
456 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
457
458 err = efa_destroy_qp_handle(dev, qp->qp_handle);
459 if (err)
460 return err;
461
462 efa_qp_user_mmap_entries_remove(qp);
463
464 if (qp->rq_cpu_addr) {
465 ibdev_dbg(&dev->ibdev,
466 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
467 qp->rq_cpu_addr, qp->rq_size,
468 &qp->rq_dma_addr);
469 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
470 qp->rq_size, DMA_TO_DEVICE);
471 }
472
473 return 0;
474 }
475
476 static struct rdma_user_mmap_entry*
477 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
478 u64 address, size_t length,
479 u8 mmap_flag, u64 *offset)
480 {
481 struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
482 int err;
483
484 if (!entry)
485 return NULL;
486
487 entry->address = address;
488 entry->mmap_flag = mmap_flag;
489
490 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
491 length);
492 if (err) {
493 kfree(entry);
494 return NULL;
495 }
496 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
497
498 return &entry->rdma_entry;
499 }
500
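/*
 * Create the user mmap entries for a QP: SQ doorbell (non-cached) and LLQ
 * descriptor ring (write-combining) always, plus RQ doorbell and RQ buffer
 * when the QP has a receive queue. The resulting keys are returned to
 * userspace in the create_qp response.
 */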
501 static int qp_mmap_entries_setup(struct efa_qp *qp,
502 struct efa_dev *dev,
503 struct efa_ucontext *ucontext,
504 struct efa_com_create_qp_params *params,
505 struct efa_ibv_create_qp_resp *resp)
506 {
507 size_t length;
508 u64 address;
509
510 address = dev->db_bar_addr + resp->sq_db_offset;
511 qp->sq_db_mmap_entry =
512 efa_user_mmap_entry_insert(&ucontext->ibucontext,
513 address,
514 PAGE_SIZE, EFA_MMAP_IO_NC,
515 &resp->sq_db_mmap_key);
516 if (!qp->sq_db_mmap_entry)
517 return -ENOMEM;
518
519 resp->sq_db_offset &= ~PAGE_MASK;
520
521 address = dev->mem_bar_addr + resp->llq_desc_offset;
522 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
523 offset_in_page(resp->llq_desc_offset));
524
525 qp->llq_desc_mmap_entry =
526 efa_user_mmap_entry_insert(&ucontext->ibucontext,
527 address, length,
528 EFA_MMAP_IO_WC,
529 &resp->llq_desc_mmap_key);
530 if (!qp->llq_desc_mmap_entry)
531 goto err_remove_mmap;
532
533 resp->llq_desc_offset &= ~PAGE_MASK;
534
535 if (qp->rq_size) {
536 address = dev->db_bar_addr + resp->rq_db_offset;
537
538 qp->rq_db_mmap_entry =
539 efa_user_mmap_entry_insert(&ucontext->ibucontext,
540 address, PAGE_SIZE,
541 EFA_MMAP_IO_NC,
542 &resp->rq_db_mmap_key);
543 if (!qp->rq_db_mmap_entry)
544 goto err_remove_mmap;
545
546 resp->rq_db_offset &= ~PAGE_MASK;
547
548 address = virt_to_phys(qp->rq_cpu_addr);
549 qp->rq_mmap_entry =
550 efa_user_mmap_entry_insert(&ucontext->ibucontext,
551 address, qp->rq_size,
552 EFA_MMAP_DMA_PAGE,
553 &resp->rq_mmap_key);
554 if (!qp->rq_mmap_entry)
555 goto err_remove_mmap;
556
557 resp->rq_mmap_size = qp->rq_size;
558 }
559
560 return 0;
561
562 err_remove_mmap:
563 efa_qp_user_mmap_entries_remove(qp);
564
565 return -ENOMEM;
566 }
567
568 static int efa_qp_validate_cap(struct efa_dev *dev,
569 struct ib_qp_init_attr *init_attr)
570 {
571 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
572 ibdev_dbg(&dev->ibdev,
573 "qp: requested send wr[%u] exceeds the max[%u]\n",
574 init_attr->cap.max_send_wr,
575 dev->dev_attr.max_sq_depth);
576 return -EINVAL;
577 }
578 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
579 ibdev_dbg(&dev->ibdev,
580 "qp: requested receive wr[%u] exceeds the max[%u]\n",
581 init_attr->cap.max_recv_wr,
582 dev->dev_attr.max_rq_depth);
583 return -EINVAL;
584 }
585 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
586 ibdev_dbg(&dev->ibdev,
587 "qp: requested sge send[%u] exceeds the max[%u]\n",
588 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
589 return -EINVAL;
590 }
591 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
592 ibdev_dbg(&dev->ibdev,
593 "qp: requested sge recv[%u] exceeds the max[%u]\n",
594 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
595 return -EINVAL;
596 }
597 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
598 ibdev_dbg(&dev->ibdev,
599 "qp: requested inline data[%u] exceeds the max[%u]\n",
600 init_attr->cap.max_inline_data,
601 dev->dev_attr.inline_buf_size);
602 return -EINVAL;
603 }
604
605 return 0;
606 }
607
608 static int efa_qp_validate_attr(struct efa_dev *dev,
609 struct ib_qp_init_attr *init_attr)
610 {
611 if (init_attr->qp_type != IB_QPT_DRIVER &&
612 init_attr->qp_type != IB_QPT_UD) {
613 ibdev_dbg(&dev->ibdev,
614 "Unsupported qp type %d\n", init_attr->qp_type);
615 return -EOPNOTSUPP;
616 }
617
618 if (init_attr->srq) {
619 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
620 return -EOPNOTSUPP;
621 }
622
623 if (init_attr->create_flags) {
624 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
625 return -EOPNOTSUPP;
626 }
627
628 return 0;
629 }
630
631 int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
632 struct ib_udata *udata)
633 {
634 struct efa_com_create_qp_params create_qp_params = {};
635 struct efa_com_create_qp_result create_qp_resp;
636 struct efa_dev *dev = to_edev(ibqp->device);
637 struct efa_ibv_create_qp_resp resp = {};
638 struct efa_ibv_create_qp cmd = {};
639 struct efa_qp *qp = to_eqp(ibqp);
640 struct efa_ucontext *ucontext;
641 u16 supported_efa_flags = 0;
642 int err;
643
644 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
645 ibucontext);
646
647 err = efa_qp_validate_cap(dev, init_attr);
648 if (err)
649 goto err_out;
650
651 err = efa_qp_validate_attr(dev, init_attr);
652 if (err)
653 goto err_out;
654
655 if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
656 ibdev_dbg(&dev->ibdev,
657 "Incompatible ABI params, no input udata\n");
658 err = -EINVAL;
659 goto err_out;
660 }
661
662 if (udata->inlen > sizeof(cmd) &&
663 !ib_is_udata_cleared(udata, sizeof(cmd),
664 udata->inlen - sizeof(cmd))) {
665 ibdev_dbg(&dev->ibdev,
666 "Incompatible ABI params, unknown fields in udata\n");
667 err = -EINVAL;
668 goto err_out;
669 }
670
671 err = ib_copy_from_udata(&cmd, udata,
672 min(sizeof(cmd), udata->inlen));
673 if (err) {
674 ibdev_dbg(&dev->ibdev,
675 "Cannot copy udata for create_qp\n");
676 goto err_out;
677 }
678
679 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_90)) {
680 ibdev_dbg(&dev->ibdev,
681 "Incompatible ABI params, unknown fields in udata\n");
682 err = -EINVAL;
683 goto err_out;
684 }
685
686 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
687 supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
688
689 if (cmd.flags & ~supported_efa_flags) {
690 ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
691 cmd.flags, supported_efa_flags);
692 err = -EOPNOTSUPP;
693 goto err_out;
694 }
695
696 create_qp_params.uarn = ucontext->uarn;
697 create_qp_params.pd = to_epd(ibqp->pd)->pdn;
698
699 if (init_attr->qp_type == IB_QPT_UD) {
700 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
701 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
702 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
703 } else {
704 ibdev_dbg(&dev->ibdev,
705 "Unsupported qp type %d driver qp type %d\n",
706 init_attr->qp_type, cmd.driver_qp_type);
707 err = -EOPNOTSUPP;
708 goto err_out;
709 }
710
711 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
712 init_attr->qp_type, cmd.driver_qp_type);
713 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
714 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
715 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
716 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
717
718 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
719 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
720 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
721 if (qp->rq_size) {
722 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
723 qp->rq_size, DMA_TO_DEVICE);
724 if (!qp->rq_cpu_addr) {
725 err = -ENOMEM;
726 goto err_out;
727 }
728
729 ibdev_dbg(&dev->ibdev,
730 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
731 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
732 create_qp_params.rq_base_addr = qp->rq_dma_addr;
733 }
734
735 if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
736 create_qp_params.unsolicited_write_recv = true;
737
738 err = efa_com_create_qp(&dev->edev, &create_qp_params,
739 &create_qp_resp);
740 if (err)
741 goto err_free_mapped;
742
743 resp.sq_db_offset = create_qp_resp.sq_db_offset;
744 resp.rq_db_offset = create_qp_resp.rq_db_offset;
745 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
746 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
747 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
748
749 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
750 &resp);
751 if (err)
752 goto err_destroy_qp;
753
754 qp->qp_handle = create_qp_resp.qp_handle;
755 qp->ibqp.qp_num = create_qp_resp.qp_num;
756 qp->max_send_wr = init_attr->cap.max_send_wr;
757 qp->max_recv_wr = init_attr->cap.max_recv_wr;
758 qp->max_send_sge = init_attr->cap.max_send_sge;
759 qp->max_recv_sge = init_attr->cap.max_recv_sge;
760 qp->max_inline_data = init_attr->cap.max_inline_data;
761
762 if (udata->outlen) {
763 err = ib_copy_to_udata(udata, &resp,
764 min(sizeof(resp), udata->outlen));
765 if (err) {
766 ibdev_dbg(&dev->ibdev,
767 "Failed to copy udata for qp[%u]\n",
768 create_qp_resp.qp_num);
769 goto err_remove_mmap_entries;
770 }
771 }
772
773 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
774
775 return 0;
776
777 err_remove_mmap_entries:
778 efa_qp_user_mmap_entries_remove(qp);
779 err_destroy_qp:
780 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
781 err_free_mapped:
782 if (qp->rq_size)
783 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
784 qp->rq_size, DMA_TO_DEVICE);
785 err_out:
786 atomic64_inc(&dev->stats.create_qp_err);
787 return err;
788 }
789
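/*
 * Valid state transitions and their required/optional attribute masks for
 * SRD (driver type) QPs; UD QPs go through the generic ib_modify_qp_is_ok().
 */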
790 static const struct {
791 int valid;
792 enum ib_qp_attr_mask req_param;
793 enum ib_qp_attr_mask opt_param;
794 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
795 [IB_QPS_RESET] = {
796 [IB_QPS_RESET] = { .valid = 1 },
797 [IB_QPS_INIT] = {
798 .valid = 1,
799 .req_param = IB_QP_PKEY_INDEX |
800 IB_QP_PORT |
801 IB_QP_QKEY,
802 },
803 },
804 [IB_QPS_INIT] = {
805 [IB_QPS_RESET] = { .valid = 1 },
806 [IB_QPS_ERR] = { .valid = 1 },
807 [IB_QPS_INIT] = {
808 .valid = 1,
809 .opt_param = IB_QP_PKEY_INDEX |
810 IB_QP_PORT |
811 IB_QP_QKEY,
812 },
813 [IB_QPS_RTR] = {
814 .valid = 1,
815 .opt_param = IB_QP_PKEY_INDEX |
816 IB_QP_QKEY,
817 },
818 },
819 [IB_QPS_RTR] = {
820 [IB_QPS_RESET] = { .valid = 1 },
821 [IB_QPS_ERR] = { .valid = 1 },
822 [IB_QPS_RTS] = {
823 .valid = 1,
824 .req_param = IB_QP_SQ_PSN,
825 .opt_param = IB_QP_CUR_STATE |
826 IB_QP_QKEY |
827 IB_QP_RNR_RETRY,
828
829 }
830 },
831 [IB_QPS_RTS] = {
832 [IB_QPS_RESET] = { .valid = 1 },
833 [IB_QPS_ERR] = { .valid = 1 },
834 [IB_QPS_RTS] = {
835 .valid = 1,
836 .opt_param = IB_QP_CUR_STATE |
837 IB_QP_QKEY,
838 },
839 [IB_QPS_SQD] = {
840 .valid = 1,
841 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
842 },
843 },
844 [IB_QPS_SQD] = {
845 [IB_QPS_RESET] = { .valid = 1 },
846 [IB_QPS_ERR] = { .valid = 1 },
847 [IB_QPS_RTS] = {
848 .valid = 1,
849 .opt_param = IB_QP_CUR_STATE |
850 IB_QP_QKEY,
851 },
852 [IB_QPS_SQD] = {
853 .valid = 1,
854 .opt_param = IB_QP_PKEY_INDEX |
855 IB_QP_QKEY,
856 }
857 },
858 [IB_QPS_SQE] = {
859 [IB_QPS_RESET] = { .valid = 1 },
860 [IB_QPS_ERR] = { .valid = 1 },
861 [IB_QPS_RTS] = {
862 .valid = 1,
863 .opt_param = IB_QP_CUR_STATE |
864 IB_QP_QKEY,
865 }
866 },
867 [IB_QPS_ERR] = {
868 [IB_QPS_RESET] = { .valid = 1 },
869 [IB_QPS_ERR] = { .valid = 1 },
870 }
871 };
872
873 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
874 enum ib_qp_state next_state,
875 enum ib_qp_attr_mask mask)
876 {
877 enum ib_qp_attr_mask req_param, opt_param;
878
879 if (mask & IB_QP_CUR_STATE &&
880 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
881 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
882 return false;
883
884 if (!srd_qp_state_table[cur_state][next_state].valid)
885 return false;
886
887 req_param = srd_qp_state_table[cur_state][next_state].req_param;
888 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
889
890 if ((mask & req_param) != req_param)
891 return false;
892
893 if (mask & ~(req_param | opt_param | IB_QP_STATE))
894 return false;
895
896 return true;
897 }
898
899 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
900 struct ib_qp_attr *qp_attr, int qp_attr_mask,
901 enum ib_qp_state cur_state,
902 enum ib_qp_state new_state)
903 {
904 int err;
905
906 #define EFA_MODIFY_QP_SUPP_MASK \
907 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
908 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
909 IB_QP_RNR_RETRY)
910
911 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
912 ibdev_dbg(&dev->ibdev,
913 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
914 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
915 return -EOPNOTSUPP;
916 }
917
918 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
919 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
920 qp_attr_mask);
921 else
922 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
923 qp_attr_mask);
924
925 if (err) {
926 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
927 return -EINVAL;
928 }
929
930 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
931 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
932 return -EOPNOTSUPP;
933 }
934
935 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
936 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
937 return -EOPNOTSUPP;
938 }
939
940 return 0;
941 }
942
943 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
944 int qp_attr_mask, struct ib_udata *udata)
945 {
946 struct efa_dev *dev = to_edev(ibqp->device);
947 struct efa_com_modify_qp_params params = {};
948 struct efa_qp *qp = to_eqp(ibqp);
949 enum ib_qp_state cur_state;
950 enum ib_qp_state new_state;
951 int err;
952
953 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
954 return -EOPNOTSUPP;
955
956 if (udata->inlen &&
957 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
958 ibdev_dbg(&dev->ibdev,
959 "Incompatible ABI params, udata not cleared\n");
960 return -EINVAL;
961 }
962
963 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
964 qp->state;
965 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
966
967 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
968 new_state);
969 if (err)
970 return err;
971
972 params.qp_handle = qp->qp_handle;
973
974 if (qp_attr_mask & IB_QP_STATE) {
975 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
976 1);
977 EFA_SET(&params.modify_mask,
978 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
979 params.cur_qp_state = cur_state;
980 params.qp_state = new_state;
981 }
982
983 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
984 EFA_SET(&params.modify_mask,
985 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
986 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
987 }
988
989 if (qp_attr_mask & IB_QP_QKEY) {
990 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
991 params.qkey = qp_attr->qkey;
992 }
993
994 if (qp_attr_mask & IB_QP_SQ_PSN) {
995 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
996 params.sq_psn = qp_attr->sq_psn;
997 }
998
999 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1000 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
1001 1);
1002 params.rnr_retry = qp_attr->rnr_retry;
1003 }
1004
1005 err = efa_com_modify_qp(&dev->edev, &params);
1006 if (err)
1007 return err;
1008
1009 qp->state = new_state;
1010
1011 return 0;
1012 }
1013
1014 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
1015 {
1016 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
1017
1018 return efa_com_destroy_cq(&dev->edev, &params);
1019 }
1020
1021 static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
1022 {
1023 rdma_user_mmap_entry_remove(cq->db_mmap_entry);
1024 rdma_user_mmap_entry_remove(cq->mmap_entry);
1025 }
1026
1027 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1028 {
1029 struct efa_dev *dev = to_edev(ibcq->device);
1030 struct efa_cq *cq = to_ecq(ibcq);
1031
1032 ibdev_dbg(&dev->ibdev,
1033 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
1034 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
1035
1036 efa_destroy_cq_idx(dev, cq->cq_idx);
1037 efa_cq_user_mmap_entries_remove(cq);
1038 if (cq->eq) {
1039 xa_erase(&dev->cqs_xa, cq->cq_idx);
1040 synchronize_irq(cq->eq->irq.irqn);
1041 }
1042 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1043 DMA_FROM_DEVICE);
1044 return 0;
1045 }
1046
1047 static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
1048 {
1049 return &dev->eqs[vec];
1050 }
1051
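/*
 * Expose the CQ buffer to userspace, and the CQ doorbell page as well when
 * the device reported a valid doorbell offset.
 */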
1052 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1053 struct efa_ibv_create_cq_resp *resp,
1054 bool db_valid)
1055 {
1056 resp->q_mmap_size = cq->size;
1057 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1058 virt_to_phys(cq->cpu_addr),
1059 cq->size, EFA_MMAP_DMA_PAGE,
1060 &resp->q_mmap_key);
1061 if (!cq->mmap_entry)
1062 return -ENOMEM;
1063
1064 if (db_valid) {
1065 cq->db_mmap_entry =
1066 efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1067 dev->db_bar_addr + resp->db_off,
1068 PAGE_SIZE, EFA_MMAP_IO_NC,
1069 &resp->db_mmap_key);
1070 if (!cq->db_mmap_entry) {
1071 rdma_user_mmap_entry_remove(cq->mmap_entry);
1072 return -ENOMEM;
1073 }
1074
1075 resp->db_off &= ~PAGE_MASK;
1076 resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
1077 }
1078
1079 return 0;
1080 }
1081
1082 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1083 struct uverbs_attr_bundle *attrs)
1084 {
1085 struct ib_udata *udata = &attrs->driver_udata;
1086 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1087 udata, struct efa_ucontext, ibucontext);
1088 struct efa_com_create_cq_params params = {};
1089 struct efa_ibv_create_cq_resp resp = {};
1090 struct efa_com_create_cq_result result;
1091 struct ib_device *ibdev = ibcq->device;
1092 struct efa_dev *dev = to_edev(ibdev);
1093 struct efa_ibv_create_cq cmd = {};
1094 struct efa_cq *cq = to_ecq(ibcq);
1095 int entries = attr->cqe;
1096 bool set_src_addr;
1097 int err;
1098
1099 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1100
1101 if (attr->flags)
1102 return -EOPNOTSUPP;
1103
1104 if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1105 ibdev_dbg(ibdev,
1106 "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1107 entries, dev->dev_attr.max_cq_depth);
1108 err = -EINVAL;
1109 goto err_out;
1110 }
1111
1112 if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1113 ibdev_dbg(ibdev,
1114 "Incompatible ABI params, no input udata\n");
1115 err = -EINVAL;
1116 goto err_out;
1117 }
1118
1119 if (udata->inlen > sizeof(cmd) &&
1120 !ib_is_udata_cleared(udata, sizeof(cmd),
1121 udata->inlen - sizeof(cmd))) {
1122 ibdev_dbg(ibdev,
1123 "Incompatible ABI params, unknown fields in udata\n");
1124 err = -EINVAL;
1125 goto err_out;
1126 }
1127
1128 err = ib_copy_from_udata(&cmd, udata,
1129 min(sizeof(cmd), udata->inlen));
1130 if (err) {
1131 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1132 goto err_out;
1133 }
1134
1135 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
1136 ibdev_dbg(ibdev,
1137 "Incompatible ABI params, unknown fields in udata\n");
1138 err = -EINVAL;
1139 goto err_out;
1140 }
1141
1142 set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
1143 if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
1144 (set_src_addr ||
1145 cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
1146 ibdev_dbg(ibdev,
1147 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1148 err = -EINVAL;
1149 goto err_out;
1150 }
1151
1152 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1153 ibdev_dbg(ibdev,
1154 "Invalid number of sub cqs[%u] expected[%u]\n",
1155 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1156 err = -EINVAL;
1157 goto err_out;
1158 }
1159
1160 cq->ucontext = ucontext;
1161 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1162 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1163 DMA_FROM_DEVICE);
1164 if (!cq->cpu_addr) {
1165 err = -ENOMEM;
1166 goto err_out;
1167 }
1168
1169 params.uarn = cq->ucontext->uarn;
1170 params.cq_depth = entries;
1171 params.dma_addr = cq->dma_addr;
1172 params.entry_size_in_bytes = cmd.cq_entry_size;
1173 params.num_sub_cqs = cmd.num_sub_cqs;
1174 params.set_src_addr = set_src_addr;
1175 if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
1176 cq->eq = efa_vec2eq(dev, attr->comp_vector);
1177 params.eqn = cq->eq->eeq.eqn;
1178 params.interrupt_mode_enabled = true;
1179 }
1180
1181 err = efa_com_create_cq(&dev->edev, &params, &result);
1182 if (err)
1183 goto err_free_mapped;
1184
1185 resp.db_off = result.db_off;
1186 resp.cq_idx = result.cq_idx;
1187 cq->cq_idx = result.cq_idx;
1188 cq->ibcq.cqe = result.actual_depth;
1189 WARN_ON_ONCE(entries != result.actual_depth);
1190
1191 err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
1192 if (err) {
1193 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1194 cq->cq_idx);
1195 goto err_destroy_cq;
1196 }
1197
1198 if (cq->eq) {
1199 err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
1200 if (err) {
1201 ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
1202 cq->cq_idx);
1203 goto err_remove_mmap;
1204 }
1205 }
1206
1207 if (udata->outlen) {
1208 err = ib_copy_to_udata(udata, &resp,
1209 min(sizeof(resp), udata->outlen));
1210 if (err) {
1211 ibdev_dbg(ibdev,
1212 "Failed to copy udata for create_cq\n");
1213 goto err_xa_erase;
1214 }
1215 }
1216
1217 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1218 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1219
1220 return 0;
1221
1222 err_xa_erase:
1223 if (cq->eq)
1224 xa_erase(&dev->cqs_xa, cq->cq_idx);
1225 err_remove_mmap:
1226 efa_cq_user_mmap_entries_remove(cq);
1227 err_destroy_cq:
1228 efa_destroy_cq_idx(dev, cq->cq_idx);
1229 err_free_mapped:
1230 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1231 DMA_FROM_DEVICE);
1232
1233 err_out:
1234 atomic64_inc(&dev->stats.create_cq_err);
1235 return err;
1236 }
1237
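/* Fill page_list with the DMA address of every hp_shift-sized block of the umem. */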
1238 static int umem_to_page_list(struct efa_dev *dev,
1239 struct ib_umem *umem,
1240 u64 *page_list,
1241 u32 hp_cnt,
1242 u8 hp_shift)
1243 {
1244 u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1245 struct ib_block_iter biter;
1246 unsigned int hp_idx = 0;
1247
1248 ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1249 hp_cnt, pages_in_hp);
1250
1251 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1252 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1253
1254 return 0;
1255 }
1256
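/*
 * Build a scatterlist over the physical pages backing a vmalloc'ed buffer so
 * that an indirect PBL (which is only virtually contiguous) can be DMA-mapped.
 */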
1257 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1258 {
1259 struct scatterlist *sglist;
1260 struct page *pg;
1261 int i;
1262
1263 sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1264 if (!sglist)
1265 return NULL;
1266 sg_init_table(sglist, page_cnt);
1267 for (i = 0; i < page_cnt; i++) {
1268 pg = vmalloc_to_page(buf);
1269 if (!pg)
1270 goto err;
1271 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1272 buf += PAGE_SIZE / sizeof(*buf);
1273 }
1274 return sglist;
1275
1276 err:
1277 kfree(sglist);
1278 return NULL;
1279 }
1280
1281 /*
1282 * create a chunk list of the DMA addresses of the physical pages from the
1283 * supplied scatter-gather list
1284 */
1285 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1286 {
1287 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1288 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1289 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1290 unsigned int chunk_list_size, chunk_idx, payload_idx;
1291 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1292 struct efa_com_ctrl_buff_info *ctrl_buf;
1293 u64 *cur_chunk_buf, *prev_chunk_buf;
1294 struct ib_block_iter biter;
1295 dma_addr_t dma_addr;
1296 int i;
1297
1298 /* allocate a chunk list that consists of 4KB chunks */
1299 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1300
1301 chunk_list->size = chunk_list_size;
1302 chunk_list->chunks = kcalloc(chunk_list_size,
1303 sizeof(*chunk_list->chunks),
1304 GFP_KERNEL);
1305 if (!chunk_list->chunks)
1306 return -ENOMEM;
1307
1308 ibdev_dbg(&dev->ibdev,
1309 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1310 page_cnt);
1311
1312 /* allocate chunk buffers: */
1313 for (i = 0; i < chunk_list_size; i++) {
1314 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1315 if (!chunk_list->chunks[i].buf)
1316 goto chunk_list_dealloc;
1317
1318 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1319 }
1320 chunk_list->chunks[chunk_list_size - 1].length =
1321 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1322 EFA_CHUNK_PTR_SIZE;
1323
1324 /* fill the dma addresses of sg list pages to chunks: */
1325 chunk_idx = 0;
1326 payload_idx = 0;
1327 cur_chunk_buf = chunk_list->chunks[0].buf;
1328 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1329 EFA_CHUNK_PAYLOAD_SIZE) {
1330 cur_chunk_buf[payload_idx++] =
1331 rdma_block_iter_dma_address(&biter);
1332
1333 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1334 chunk_idx++;
1335 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1336 payload_idx = 0;
1337 }
1338 }
1339
1340 /* map chunks to dma and fill chunks next ptrs */
1341 for (i = chunk_list_size - 1; i >= 0; i--) {
1342 dma_addr = dma_map_single(&dev->pdev->dev,
1343 chunk_list->chunks[i].buf,
1344 chunk_list->chunks[i].length,
1345 DMA_TO_DEVICE);
1346 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1347 ibdev_err(&dev->ibdev,
1348 "chunk[%u] dma_map_failed\n", i);
1349 goto chunk_list_unmap;
1350 }
1351
1352 chunk_list->chunks[i].dma_addr = dma_addr;
1353 ibdev_dbg(&dev->ibdev,
1354 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1355
1356 if (!i)
1357 break;
1358
1359 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1360
1361 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1362 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1363 ctrl_buf->length = chunk_list->chunks[i].length;
1364
1365 efa_com_set_dma_addr(dma_addr,
1366 &ctrl_buf->address.mem_addr_high,
1367 &ctrl_buf->address.mem_addr_low);
1368 }
1369
1370 return 0;
1371
1372 chunk_list_unmap:
1373 for (; i < chunk_list_size; i++) {
1374 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1375 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1376 }
1377 chunk_list_dealloc:
1378 for (i = 0; i < chunk_list_size; i++)
1379 kfree(chunk_list->chunks[i].buf);
1380
1381 kfree(chunk_list->chunks);
1382 return -ENOMEM;
1383 }
1384
1385 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1386 {
1387 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1388 int i;
1389
1390 for (i = 0; i < chunk_list->size; i++) {
1391 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1392 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1393 kfree(chunk_list->chunks[i].buf);
1394 }
1395
1396 kfree(chunk_list->chunks);
1397 }
1398
1399 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1400 static int pbl_continuous_initialize(struct efa_dev *dev,
1401 struct pbl_context *pbl)
1402 {
1403 dma_addr_t dma_addr;
1404
1405 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1406 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1407 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1408 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1409 return -ENOMEM;
1410 }
1411
1412 pbl->phys.continuous.dma_addr = dma_addr;
1413 ibdev_dbg(&dev->ibdev,
1414 "pbl continuous - dma_addr = %pad, size[%u]\n",
1415 &dma_addr, pbl->pbl_buf_size_in_bytes);
1416
1417 return 0;
1418 }
1419
1420 /*
1421 * initialize pbl indirect mode:
1422 * create a chunk list out of the dma addresses of the physical pages of
1423 * pbl buffer.
1424 */
1425 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1426 {
1427 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
1428 struct scatterlist *sgl;
1429 int sg_dma_cnt, err;
1430
1431 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1432 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1433 if (!sgl)
1434 return -ENOMEM;
1435
1436 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1437 if (!sg_dma_cnt) {
1438 err = -EINVAL;
1439 goto err_map;
1440 }
1441
1442 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1443 pbl->phys.indirect.sgl = sgl;
1444 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1445 err = pbl_chunk_list_create(dev, pbl);
1446 if (err) {
1447 ibdev_dbg(&dev->ibdev,
1448 "chunk_list creation failed[%d]\n", err);
1449 goto err_chunk;
1450 }
1451
1452 ibdev_dbg(&dev->ibdev,
1453 "pbl indirect - size[%u], chunks[%u]\n",
1454 pbl->pbl_buf_size_in_bytes,
1455 pbl->phys.indirect.chunk_list.size);
1456
1457 return 0;
1458
1459 err_chunk:
1460 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1461 err_map:
1462 kfree(sgl);
1463 return err;
1464 }
1465
1466 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1467 {
1468 pbl_chunk_list_destroy(dev, pbl);
1469 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1470 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1471 kfree(pbl->phys.indirect.sgl);
1472 }
1473
1474 /* create a page buffer list from a mapped user memory region */
1475 static int pbl_create(struct efa_dev *dev,
1476 struct pbl_context *pbl,
1477 struct ib_umem *umem,
1478 int hp_cnt,
1479 u8 hp_shift)
1480 {
1481 int err;
1482
1483 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1484 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1485 if (!pbl->pbl_buf)
1486 return -ENOMEM;
1487
1488 if (is_vmalloc_addr(pbl->pbl_buf)) {
1489 pbl->physically_continuous = 0;
1490 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1491 hp_shift);
1492 if (err)
1493 goto err_free;
1494
1495 err = pbl_indirect_initialize(dev, pbl);
1496 if (err)
1497 goto err_free;
1498 } else {
1499 pbl->physically_continuous = 1;
1500 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1501 hp_shift);
1502 if (err)
1503 goto err_free;
1504
1505 err = pbl_continuous_initialize(dev, pbl);
1506 if (err)
1507 goto err_free;
1508 }
1509
1510 ibdev_dbg(&dev->ibdev,
1511 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1512 hp_cnt, pbl->physically_continuous);
1513
1514 return 0;
1515
1516 err_free:
1517 kvfree(pbl->pbl_buf);
1518 return err;
1519 }
1520
1521 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1522 {
1523 if (pbl->physically_continuous)
1524 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1525 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1526 else
1527 pbl_indirect_terminate(dev, pbl);
1528
1529 kvfree(pbl->pbl_buf);
1530 }
1531
1532 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1533 struct efa_com_reg_mr_params *params)
1534 {
1535 int err;
1536
1537 params->inline_pbl = 1;
1538 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1539 params->page_num, params->page_shift);
1540 if (err)
1541 return err;
1542
1543 ibdev_dbg(&dev->ibdev,
1544 "inline_pbl_array - pages[%u]\n", params->page_num);
1545
1546 return 0;
1547 }
1548
1549 static int efa_create_pbl(struct efa_dev *dev,
1550 struct pbl_context *pbl,
1551 struct efa_mr *mr,
1552 struct efa_com_reg_mr_params *params)
1553 {
1554 int err;
1555
1556 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1557 params->page_shift);
1558 if (err) {
1559 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1560 return err;
1561 }
1562
1563 params->inline_pbl = 0;
1564 params->indirect = !pbl->physically_continuous;
1565 if (pbl->physically_continuous) {
1566 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1567
1568 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1569 &params->pbl.pbl.address.mem_addr_high,
1570 &params->pbl.pbl.address.mem_addr_low);
1571 } else {
1572 params->pbl.pbl.length =
1573 pbl->phys.indirect.chunk_list.chunks[0].length;
1574
1575 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1576 &params->pbl.pbl.address.mem_addr_high,
1577 &params->pbl.pbl.address.mem_addr_low);
1578 }
1579
1580 return 0;
1581 }
1582
1583 static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
1584 struct ib_udata *udata)
1585 {
1586 struct efa_dev *dev = to_edev(ibpd->device);
1587 int supp_access_flags;
1588 struct efa_mr *mr;
1589
1590 if (udata && udata->inlen &&
1591 !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
1592 ibdev_dbg(&dev->ibdev,
1593 "Incompatible ABI params, udata not cleared\n");
1594 return ERR_PTR(-EINVAL);
1595 }
1596
1597 supp_access_flags =
1598 IB_ACCESS_LOCAL_WRITE |
1599 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
1600 (EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
1601
1602 access_flags &= ~IB_ACCESS_OPTIONAL;
1603 if (access_flags & ~supp_access_flags) {
1604 ibdev_dbg(&dev->ibdev,
1605 "Unsupported access flags[%#x], supported[%#x]\n",
1606 access_flags, supp_access_flags);
1607 return ERR_PTR(-EOPNOTSUPP);
1608 }
1609
1610 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1611 if (!mr)
1612 return ERR_PTR(-ENOMEM);
1613
1614 return mr;
1615 }
1616
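/*
 * Common registration path for efa_reg_mr() and efa_reg_user_mr_dmabuf():
 * pick the best page size allowed by page_size_cap, then use an inline PBL
 * when the page list fits in the admin command, otherwise build an external
 * (continuous or chunked) PBL for the registration.
 */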
1617 static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
1618 u64 length, u64 virt_addr, int access_flags)
1619 {
1620 struct efa_dev *dev = to_edev(ibpd->device);
1621 struct efa_com_reg_mr_params params = {};
1622 struct efa_com_reg_mr_result result = {};
1623 struct pbl_context pbl;
1624 unsigned int pg_sz;
1625 int inline_size;
1626 int err;
1627
1628 params.pd = to_epd(ibpd)->pdn;
1629 params.iova = virt_addr;
1630 params.mr_length_in_bytes = length;
1631 params.permissions = access_flags;
1632
1633 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1634 dev->dev_attr.page_size_cap,
1635 virt_addr);
1636 if (!pg_sz) {
1637 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1638 dev->dev_attr.page_size_cap);
1639 return -EOPNOTSUPP;
1640 }
1641
1642 params.page_shift = order_base_2(pg_sz);
1643 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1644
1645 ibdev_dbg(&dev->ibdev,
1646 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1647 start, length, params.page_shift, params.page_num);
1648
1649 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1650 if (params.page_num <= inline_size) {
1651 err = efa_create_inline_pbl(dev, mr, &params);
1652 if (err)
1653 return err;
1654
1655 err = efa_com_register_mr(&dev->edev, &params, &result);
1656 if (err)
1657 return err;
1658 } else {
1659 err = efa_create_pbl(dev, &pbl, mr, &params);
1660 if (err)
1661 return err;
1662
1663 err = efa_com_register_mr(&dev->edev, &params, &result);
1664 pbl_destroy(dev, &pbl);
1665
1666 if (err)
1667 return err;
1668 }
1669
1670 mr->ibmr.lkey = result.l_key;
1671 mr->ibmr.rkey = result.r_key;
1672 mr->ibmr.length = length;
1673 mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
1674 mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
1675 mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
1676 mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
1677 mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
1678 mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
1679 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1680
1681 return 0;
1682 }
1683
1684 struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
1685 u64 length, u64 virt_addr,
1686 int fd, int access_flags,
1687 struct uverbs_attr_bundle *attrs)
1688 {
1689 struct efa_dev *dev = to_edev(ibpd->device);
1690 struct ib_umem_dmabuf *umem_dmabuf;
1691 struct efa_mr *mr;
1692 int err;
1693
1694 mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
1695 if (IS_ERR(mr)) {
1696 err = PTR_ERR(mr);
1697 goto err_out;
1698 }
1699
1700 umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
1701 access_flags);
1702 if (IS_ERR(umem_dmabuf)) {
1703 err = PTR_ERR(umem_dmabuf);
1704 ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
1705 goto err_free;
1706 }
1707
1708 mr->umem = &umem_dmabuf->umem;
1709 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1710 if (err)
1711 goto err_release;
1712
1713 return &mr->ibmr;
1714
1715 err_release:
1716 ib_umem_release(mr->umem);
1717 err_free:
1718 kfree(mr);
1719 err_out:
1720 atomic64_inc(&dev->stats.reg_mr_err);
1721 return ERR_PTR(err);
1722 }
1723
1724 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1725 u64 virt_addr, int access_flags,
1726 struct ib_udata *udata)
1727 {
1728 struct efa_dev *dev = to_edev(ibpd->device);
1729 struct efa_mr *mr;
1730 int err;
1731
1732 mr = efa_alloc_mr(ibpd, access_flags, udata);
1733 if (IS_ERR(mr)) {
1734 err = PTR_ERR(mr);
1735 goto err_out;
1736 }
1737
1738 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1739 if (IS_ERR(mr->umem)) {
1740 err = PTR_ERR(mr->umem);
1741 ibdev_dbg(&dev->ibdev,
1742 "Failed to pin and map user space memory[%d]\n", err);
1743 goto err_free;
1744 }
1745
1746 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1747 if (err)
1748 goto err_release;
1749
1750 return &mr->ibmr;
1751
1752 err_release:
1753 ib_umem_release(mr->umem);
1754 err_free:
1755 kfree(mr);
1756 err_out:
1757 atomic64_inc(&dev->stats.reg_mr_err);
1758 return ERR_PTR(err);
1759 }
1760
1761 static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
1762 {
1763 struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
1764 struct efa_mr *mr = to_emr(ibmr);
1765 u16 ic_id_validity = 0;
1766 int ret;
1767
1768 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
1769 &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
1770 if (ret)
1771 return ret;
1772
1773 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
1774 &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
1775 if (ret)
1776 return ret;
1777
1778 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
1779 &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
1780 if (ret)
1781 return ret;
1782
1783 if (mr->ic_info.recv_ic_id_valid)
1784 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
1785 if (mr->ic_info.rdma_read_ic_id_valid)
1786 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
1787 if (mr->ic_info.rdma_recv_ic_id_valid)
1788 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
1789
1790 return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
1791 &ic_id_validity, sizeof(ic_id_validity));
1792 }
1793
1794 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1795 {
1796 struct efa_dev *dev = to_edev(ibmr->device);
1797 struct efa_com_dereg_mr_params params;
1798 struct efa_mr *mr = to_emr(ibmr);
1799 int err;
1800
1801 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1802
1803 params.l_key = mr->ibmr.lkey;
1804 err = efa_com_dereg_mr(&dev->edev, &params);
1805 if (err)
1806 return err;
1807
1808 ib_umem_release(mr->umem);
1809 kfree(mr);
1810
1811 return 0;
1812 }
1813
1814 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1815 struct ib_port_immutable *immutable)
1816 {
1817 struct ib_port_attr attr;
1818 int err;
1819
1820 err = ib_query_port(ibdev, port_num, &attr);
1821 if (err) {
1822 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1823 return err;
1824 }
1825
1826 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1827 immutable->gid_tbl_len = attr.gid_tbl_len;
1828
1829 return 0;
1830 }
1831
efa_dealloc_uar(struct efa_dev * dev,u16 uarn)1832 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1833 {
1834 struct efa_com_dealloc_uar_params params = {
1835 .uarn = uarn,
1836 };
1837
1838 return efa_com_dealloc_uar(&dev->edev, ¶ms);
1839 }
1840
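/*
 * Evaluates to NULL when the device does not expose the attribute or when
 * userspace acknowledged it in comp_mask; otherwise evaluates to the
 * attribute name, which is used in the handshake failure message below.
 */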
#define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
	(_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
		     NULL : #_attr)

static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
{
	struct efa_dev *dev = to_edev(ibucontext->device);
	char *attr_str;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
				EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
		goto err;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
				EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
				attr_str))
		goto err;

	return 0;

err:
	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
		  attr_str);
	return -EOPNOTSUPP;
}

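/*
 * Allocate a user context: verify the userspace handshake, allocate a UAR for
 * the context and report the supported commands and device limits back to
 * userspace through udata.
 */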
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_ibv_alloc_ucontext_cmd cmd = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * it's fine if the driver does not know all request fields,
	 * we will ack input fields in our response.
	 */

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for alloc_ucontext\n");
		goto err_out;
	}

	err = efa_user_comp_handshake(ibucontext, &cmd);
	if (err)
		goto err_out;

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;
	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
	resp.min_sq_wr = dev->dev_attr.min_sq_depth;

	err = ib_copy_to_udata(udata, &resp,
			       min(sizeof(resp), udata->outlen));
	if (err)
		goto err_dealloc_uar;

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

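/*
 * Map a previously inserted rdma_user_mmap_entry into the caller's VMA.
 * I/O entries are mapped non-cached or write-combined via rdma_user_mmap_io();
 * DMA pages are inserted one page at a time with vm_insert_page().
 */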
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

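/*
 * Create an address handle for the destination GID. AH creation issues a
 * device admin command, so it is rejected when the caller cannot sleep
 * (RDMA_CREATE_AH_SLEEPABLE not set).
 */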
int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
					      u32 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
					  ARRAY_SIZE(efa_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
	return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
					  ARRAY_SIZE(efa_device_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int efa_fill_device_stats(struct efa_dev *dev,
				 struct rdma_hw_stats *stats)
{
	struct efa_com_stats_admin *as = &dev->edev.aq.stats;
	struct efa_stats *s = &dev->stats;

	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_device_stats_descs);
}

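/*
 * Query the per-port counters from the device, one admin command per
 * statistics type. RDMA write counters are only queried when the device
 * advertises the RDMA_WRITE capability.
 */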
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
			       u32 port_num)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_com_rdma_write_stats *rws;
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_basic_stats *bs;
	int err;

	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
		params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
		err = efa_com_get_stats(&dev->edev, &params, &result);
		if (err)
			return err;

		rws = &result.rdma_write_stats;
		stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
		stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
		stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
		stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
	}

	return ARRAY_SIZE(efa_port_stats_descs);
}

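/* A port_num of 0 selects the device-wide (software) counters. */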
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u32 port_num, int index)
{
	if (port_num)
		return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
	else
		return efa_fill_device_stats(to_edev(ibdev), stats);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u32 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}

DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
			    UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
					    UVERBS_OBJECT_MR,
					    UVERBS_ACCESS_READ,
					    UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY));

ADD_UVERBS_METHODS(efa_mr,
		   UVERBS_OBJECT_MR,
		   &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));

const struct uapi_definition efa_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
				&efa_mr),
	{},
};