1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright 2018-2026 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6 #include <linux/dma-buf.h>
7 #include <linux/dma-resv.h>
8 #include <linux/vmalloc.h>
9 #include <linux/log2.h>
10
11 #include <rdma/ib_addr.h>
12 #include <rdma/ib_user_verbs.h>
13 #include <rdma/ib_verbs.h>
14 #include <rdma/iter.h>
15 #include <rdma/uverbs_ioctl.h>
16 #define UVERBS_MODULE_NAME efa_ib
17 #include <rdma/uverbs_named_ioctl.h>
18 #include <rdma/ib_user_ioctl_cmds.h>
19
20 #include "efa.h"
21 #include "efa_io_defs.h"
22
23 enum {
24 EFA_MMAP_DMA_PAGE = 0,
25 EFA_MMAP_IO_WC,
26 EFA_MMAP_IO_NC,
27 };
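/*
 * mmap types handed to userspace (see qp_mmap_entries_setup() and
 * cq_mmap_entries_setup() below): DMA_PAGE covers kernel-allocated queue
 * memory (RQ/CQ rings), IO_WC covers the LLQ descriptor window in the
 * memory BAR, and IO_NC covers doorbell pages in the doorbell BAR.
 */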
28
29 struct efa_user_mmap_entry {
30 struct rdma_user_mmap_entry rdma_entry;
31 u64 address;
32 u8 mmap_flag;
33 };
34
35 #define EFA_DEFINE_DEVICE_STATS(op) \
36 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
37 op(EFA_COMPLETED_CMDS, "completed_cmds") \
38 op(EFA_CMDS_ERR, "cmds_err") \
39 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
40 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
41 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
42 op(EFA_CREATE_QP_ERR, "create_qp_err") \
43 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
44 op(EFA_REG_MR_ERR, "reg_mr_err") \
45 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
46 op(EFA_CREATE_AH_ERR, "create_ah_err") \
47 op(EFA_MMAP_ERR, "mmap_err")
48
49 #define EFA_DEFINE_PORT_STATS(op) \
50 op(EFA_TX_BYTES, "tx_bytes") \
51 op(EFA_TX_PKTS, "tx_pkts") \
52 op(EFA_RX_BYTES, "rx_bytes") \
53 op(EFA_RX_PKTS, "rx_pkts") \
54 op(EFA_RX_DROPS, "rx_drops") \
55 op(EFA_SEND_BYTES, "send_bytes") \
56 op(EFA_SEND_WRS, "send_wrs") \
57 op(EFA_RECV_BYTES, "recv_bytes") \
58 op(EFA_RECV_WRS, "recv_wrs") \
59 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
60 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
61 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
62 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
63 op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
64 op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
65 op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
66 op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
67 op(EFA_RETRANS_BYTES, "retrans_bytes") \
68 op(EFA_RETRANS_PKTS, "retrans_pkts") \
69 op(EFA_RETRANS_TIMEOUT_EVENTS, "retrans_timeout_events") \
70 op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
71 op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
72
73 #define EFA_STATS_ENUM(ename, name) ename,
74 #define EFA_STATS_STR(ename, nam) \
75 [ename].name = nam,
76
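/*
 * X-macro pattern: each stats list above is expanded twice, once with
 * EFA_STATS_ENUM to generate the enum values and once with EFA_STATS_STR
 * to generate the matching rdma_stat_desc name table, keeping both in sync.
 */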
77 enum efa_hw_device_stats {
78 EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
79 };
80
81 static const struct rdma_stat_desc efa_device_stats_descs[] = {
82 EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
83 };
84
85 enum efa_hw_port_stats {
86 EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
87 };
88
89 static const struct rdma_stat_desc efa_port_stats_descs[] = {
90 EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
91 };
92
93 #define EFA_DEFAULT_LINK_SPEED_GBPS 100
94
95 #define EFA_CHUNK_PAYLOAD_SHIFT 12
96 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
97 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
98
99 #define EFA_CHUNK_SHIFT 12
100 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
101 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
102
103 #define EFA_PTRS_PER_CHUNK \
104 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
105
106 #define EFA_CHUNK_USED_SIZE \
107 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
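/*
 * Indirect PBL chunk layout: each EFA_CHUNK_SIZE chunk carries
 * EFA_PTRS_PER_CHUNK page addresses of EFA_CHUNK_PAYLOAD_PTR_SIZE bytes
 * each, followed by one efa_com_ctrl_buff_info that links to the next
 * chunk; EFA_CHUNK_USED_SIZE is the number of bytes actually consumed by
 * that payload plus the link record.
 */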
108
109 struct pbl_chunk {
110 dma_addr_t dma_addr;
111 u64 *buf;
112 u32 length;
113 };
114
115 struct pbl_chunk_list {
116 struct pbl_chunk *chunks;
117 unsigned int size;
118 };
119
120 struct pbl_context {
121 union {
122 struct {
123 dma_addr_t dma_addr;
124 } continuous;
125 struct {
126 u32 pbl_buf_size_in_pages;
127 struct scatterlist *sgl;
128 int sg_dma_cnt;
129 struct pbl_chunk_list chunk_list;
130 } indirect;
131 } phys;
132 u64 *pbl_buf;
133 u32 pbl_buf_size_in_bytes;
134 u8 physically_continuous;
135 };
136
137 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
138 {
139 return container_of(ibdev, struct efa_dev, ibdev);
140 }
141
142 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
143 {
144 return container_of(ibucontext, struct efa_ucontext, ibucontext);
145 }
146
147 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
148 {
149 return container_of(ibpd, struct efa_pd, ibpd);
150 }
151
152 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
153 {
154 return container_of(ibmr, struct efa_mr, ibmr);
155 }
156
157 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
158 {
159 return container_of(ibqp, struct efa_qp, ibqp);
160 }
161
162 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
163 {
164 return container_of(ibcq, struct efa_cq, ibcq);
165 }
166
167 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
168 {
169 return container_of(ibah, struct efa_ah, ibah);
170 }
171
172 static inline struct efa_user_mmap_entry *
173 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
174 {
175 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
176 }
177
178 #define EFA_DEV_CAP(dev, cap) \
179 ((dev)->dev_attr.device_caps & \
180 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
181
182 #define is_reserved_cleared(reserved) \
183 !memchr_inv(reserved, 0, sizeof(reserved))
184
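/*
 * Allocate zeroed, physically contiguous pages and map them for DMA.
 * Used for queue memory (e.g. the RQ and CQ rings) that may later be
 * exposed to userspace through an EFA_MMAP_DMA_PAGE mmap entry.
 */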
185 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
186 size_t size, enum dma_data_direction dir)
187 {
188 void *addr;
189
190 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
191 if (!addr)
192 return NULL;
193
194 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
195 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
196 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
197 free_pages_exact(addr, size);
198 return NULL;
199 }
200
201 return addr;
202 }
203
204 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
205 dma_addr_t dma_addr,
206 size_t size, enum dma_data_direction dir)
207 {
208 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
209 free_pages_exact(cpu_addr, size);
210 }
211
212 int efa_query_device(struct ib_device *ibdev,
213 struct ib_device_attr *props,
214 struct ib_udata *udata)
215 {
216 struct efa_com_get_device_attr_result *dev_attr;
217 struct efa_ibv_ex_query_device_resp resp = {};
218 struct efa_dev *dev = to_edev(ibdev);
219 int err;
220
221 if (udata && udata->inlen &&
222 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
223 ibdev_dbg(ibdev,
224 "Incompatible ABI params, udata not cleared\n");
225 return -EINVAL;
226 }
227
228 dev_attr = &dev->dev_attr;
229
230 memset(props, 0, sizeof(*props));
231 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
232 props->page_size_cap = dev_attr->page_size_cap;
233 props->vendor_id = dev->pdev->vendor;
234 props->vendor_part_id = dev->pdev->device;
235 props->hw_ver = dev->pdev->subsystem_device;
236 props->max_qp = dev_attr->max_qp;
237 props->max_cq = dev_attr->max_cq;
238 props->max_pd = dev_attr->max_pd;
239 props->max_mr = dev_attr->max_mr;
240 props->max_ah = dev_attr->max_ah;
241 props->max_cqe = dev_attr->max_cq_depth;
242 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
243 dev_attr->max_rq_depth);
244 props->max_send_sge = dev_attr->max_sq_sge;
245 props->max_recv_sge = dev_attr->max_rq_sge;
246 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
247 props->max_pkeys = 1;
248
249 if (udata && udata->outlen) {
250 resp.max_sq_sge = dev_attr->max_sq_sge;
251 resp.max_rq_sge = dev_attr->max_rq_sge;
252 resp.max_sq_wr = dev_attr->max_sq_depth;
253 resp.max_rq_wr = dev_attr->max_rq_depth;
254 resp.max_rdma_size = dev_attr->max_rdma_size;
255
256 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
257 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
258 if (EFA_DEV_CAP(dev, RDMA_READ))
259 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
260
261 if (EFA_DEV_CAP(dev, RNR_RETRY))
262 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
263
264 if (EFA_DEV_CAP(dev, DATA_POLLING_128))
265 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
266
267 if (EFA_DEV_CAP(dev, RDMA_WRITE))
268 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
269
270 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
271 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
272
273 if (dev->neqs)
274 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
275
276 err = ib_copy_to_udata(udata, &resp,
277 min(sizeof(resp), udata->outlen));
278 if (err) {
279 ibdev_dbg(ibdev,
280 "Failed to copy udata for query_device\n");
281 return err;
282 }
283 }
284
285 return 0;
286 }
287
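/*
 * Translate a raw link rate into an (ib_port_width, ib_port_speed) pair
 * whose product approximates that rate, e.g. 100 Gbps is reported as
 * 4X EDR (4 lanes x ~25 Gbps). Rates below 30 Gbps fall back to 1X EDR.
 */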
288 static void efa_link_gbps_to_speed_and_width(u16 gbps,
289 enum ib_port_speed *speed,
290 enum ib_port_width *width)
291 {
292 if (gbps >= 400) {
293 *width = IB_WIDTH_8X;
294 *speed = IB_SPEED_HDR;
295 } else if (gbps >= 200) {
296 *width = IB_WIDTH_4X;
297 *speed = IB_SPEED_HDR;
298 } else if (gbps >= 120) {
299 *width = IB_WIDTH_12X;
300 *speed = IB_SPEED_FDR10;
301 } else if (gbps >= 100) {
302 *width = IB_WIDTH_4X;
303 *speed = IB_SPEED_EDR;
304 } else if (gbps >= 60) {
305 *width = IB_WIDTH_12X;
306 *speed = IB_SPEED_DDR;
307 } else if (gbps >= 50) {
308 *width = IB_WIDTH_1X;
309 *speed = IB_SPEED_HDR;
310 } else if (gbps >= 40) {
311 *width = IB_WIDTH_4X;
312 *speed = IB_SPEED_FDR10;
313 } else if (gbps >= 30) {
314 *width = IB_WIDTH_12X;
315 *speed = IB_SPEED_SDR;
316 } else {
317 *width = IB_WIDTH_1X;
318 *speed = IB_SPEED_EDR;
319 }
320 }
321
322 int efa_query_port(struct ib_device *ibdev, u32 port,
323 struct ib_port_attr *props)
324 {
325 struct efa_dev *dev = to_edev(ibdev);
326 enum ib_port_speed link_speed;
327 enum ib_port_width link_width;
328 u16 link_gbps;
329
330 props->lmc = 1;
331
332 props->state = IB_PORT_ACTIVE;
333 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
334 props->gid_tbl_len = 1;
335 props->pkey_tbl_len = 1;
336 link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS;
337 efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width);
338 props->active_speed = link_speed;
339 props->active_width = link_width;
340 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
341 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
342 props->max_msg_sz = dev->dev_attr.mtu;
343 props->max_vl_num = 1;
344
345 return 0;
346 }
347
348 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
349 int qp_attr_mask,
350 struct ib_qp_init_attr *qp_init_attr)
351 {
352 struct efa_dev *dev = to_edev(ibqp->device);
353 struct efa_com_query_qp_params params = {};
354 struct efa_com_query_qp_result result;
355 struct efa_qp *qp = to_eqp(ibqp);
356 int err;
357
358 #define EFA_QUERY_QP_SUPP_MASK \
359 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
360 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
361
362 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
363 ibdev_dbg(&dev->ibdev,
364 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
365 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
366 return -EOPNOTSUPP;
367 }
368
369 memset(qp_attr, 0, sizeof(*qp_attr));
370 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
371
372 params.qp_handle = qp->qp_handle;
373 err = efa_com_query_qp(&dev->edev, ¶ms, &result);
374 if (err)
375 return err;
376
377 qp_attr->qp_state = result.qp_state;
378 qp_attr->qkey = result.qkey;
379 qp_attr->sq_psn = result.sq_psn;
380 qp_attr->sq_draining = result.sq_draining;
381 qp_attr->port_num = 1;
382 qp_attr->rnr_retry = result.rnr_retry;
383
384 qp_attr->cap.max_send_wr = qp->max_send_wr;
385 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
386 qp_attr->cap.max_send_sge = qp->max_send_sge;
387 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
388 qp_attr->cap.max_inline_data = qp->max_inline_data;
389
390 qp_init_attr->qp_type = ibqp->qp_type;
391 qp_init_attr->recv_cq = ibqp->recv_cq;
392 qp_init_attr->send_cq = ibqp->send_cq;
393 qp_init_attr->qp_context = ibqp->qp_context;
394 qp_init_attr->cap = qp_attr->cap;
395
396 return 0;
397 }
398
399 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
400 union ib_gid *gid)
401 {
402 struct efa_dev *dev = to_edev(ibdev);
403
404 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
405
406 return 0;
407 }
408
409 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
410 u16 *pkey)
411 {
412 if (index > 0)
413 return -EINVAL;
414
415 *pkey = 0xffff;
416 return 0;
417 }
418
419 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
420 {
421 struct efa_com_dealloc_pd_params params = {
422 .pdn = pdn,
423 };
424
425 return efa_com_dealloc_pd(&dev->edev, ¶ms);
426 }
427
428 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
429 {
430 struct efa_dev *dev = to_edev(ibpd->device);
431 struct efa_ibv_alloc_pd_resp resp = {};
432 struct efa_com_alloc_pd_result result;
433 struct efa_pd *pd = to_epd(ibpd);
434 int err;
435
436 if (udata->inlen &&
437 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
438 ibdev_dbg(&dev->ibdev,
439 "Incompatible ABI params, udata not cleared\n");
440 err = -EINVAL;
441 goto err_out;
442 }
443
444 err = efa_com_alloc_pd(&dev->edev, &result);
445 if (err)
446 goto err_out;
447
448 pd->pdn = result.pdn;
449 resp.pdn = result.pdn;
450
451 if (udata->outlen) {
452 err = ib_copy_to_udata(udata, &resp,
453 min(sizeof(resp), udata->outlen));
454 if (err) {
455 ibdev_dbg(&dev->ibdev,
456 "Failed to copy udata for alloc_pd\n");
457 goto err_dealloc_pd;
458 }
459 }
460
461 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
462
463 return 0;
464
465 err_dealloc_pd:
466 efa_pd_dealloc(dev, result.pdn);
467 err_out:
468 atomic64_inc(&dev->stats.alloc_pd_err);
469 return err;
470 }
471
472 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
473 {
474 struct efa_dev *dev = to_edev(ibpd->device);
475 struct efa_pd *pd = to_epd(ibpd);
476
477 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
478 efa_pd_dealloc(dev, pd->pdn);
479 return 0;
480 }
481
482 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
483 {
484 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
485
486 return efa_com_destroy_qp(&dev->edev, ¶ms);
487 }
488
489 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
490 {
491 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
492 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
493 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
494 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
495 }
496
497 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
498 {
499 struct efa_dev *dev = to_edev(ibqp->pd->device);
500 struct efa_qp *qp = to_eqp(ibqp);
501 int err;
502
503 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
504
505 err = efa_destroy_qp_handle(dev, qp->qp_handle);
506 if (err)
507 return err;
508
509 efa_qp_user_mmap_entries_remove(qp);
510
511 if (qp->rq_cpu_addr) {
512 ibdev_dbg(&dev->ibdev,
513 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
514 qp->rq_cpu_addr, qp->rq_size,
515 &qp->rq_dma_addr);
516 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
517 qp->rq_size, DMA_TO_DEVICE);
518 }
519
520 return 0;
521 }
522
523 static struct rdma_user_mmap_entry*
524 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
525 u64 address, size_t length,
526 u8 mmap_flag, u64 *offset)
527 {
528 struct efa_user_mmap_entry *entry = kzalloc_obj(*entry);
529 int err;
530
531 if (!entry)
532 return NULL;
533
534 entry->address = address;
535 entry->mmap_flag = mmap_flag;
536
537 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
538 length);
539 if (err) {
540 kfree(entry);
541 return NULL;
542 }
543 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
544
545 return &entry->rdma_entry;
546 }
547
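/*
 * A QP exposes up to four regions to userspace via mmap keys returned in
 * the create_qp response: the SQ doorbell page and the LLQ descriptor
 * window (device BARs), and, when a kernel-allocated RQ exists, the RQ
 * doorbell page and the RQ ring itself.
 */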
548 static int qp_mmap_entries_setup(struct efa_qp *qp,
549 struct efa_dev *dev,
550 struct efa_ucontext *ucontext,
551 struct efa_com_create_qp_params *params,
552 struct efa_ibv_create_qp_resp *resp)
553 {
554 size_t length;
555 u64 address;
556
557 address = dev->db_bar_addr + resp->sq_db_offset;
558 qp->sq_db_mmap_entry =
559 efa_user_mmap_entry_insert(&ucontext->ibucontext,
560 address,
561 PAGE_SIZE, EFA_MMAP_IO_NC,
562 &resp->sq_db_mmap_key);
563 if (!qp->sq_db_mmap_entry)
564 return -ENOMEM;
565
566 resp->sq_db_offset &= ~PAGE_MASK;
567
568 address = dev->mem_bar_addr + resp->llq_desc_offset;
569 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
570 offset_in_page(resp->llq_desc_offset));
571
572 qp->llq_desc_mmap_entry =
573 efa_user_mmap_entry_insert(&ucontext->ibucontext,
574 address, length,
575 EFA_MMAP_IO_WC,
576 &resp->llq_desc_mmap_key);
577 if (!qp->llq_desc_mmap_entry)
578 goto err_remove_mmap;
579
580 resp->llq_desc_offset &= ~PAGE_MASK;
581
582 if (qp->rq_cpu_addr) {
583 address = dev->db_bar_addr + resp->rq_db_offset;
584
585 qp->rq_db_mmap_entry =
586 efa_user_mmap_entry_insert(&ucontext->ibucontext,
587 address, PAGE_SIZE,
588 EFA_MMAP_IO_NC,
589 &resp->rq_db_mmap_key);
590 if (!qp->rq_db_mmap_entry)
591 goto err_remove_mmap;
592
593 resp->rq_db_offset &= ~PAGE_MASK;
594
595 address = virt_to_phys(qp->rq_cpu_addr);
596 qp->rq_mmap_entry =
597 efa_user_mmap_entry_insert(&ucontext->ibucontext,
598 address, qp->rq_size,
599 EFA_MMAP_DMA_PAGE,
600 &resp->rq_mmap_key);
601 if (!qp->rq_mmap_entry)
602 goto err_remove_mmap;
603
604 resp->rq_mmap_size = qp->rq_size;
605 }
606
607 return 0;
608
609 err_remove_mmap:
610 efa_qp_user_mmap_entries_remove(qp);
611
612 return -ENOMEM;
613 }
614
615 static int efa_qp_validate_cap(struct efa_dev *dev,
616 struct ib_qp_init_attr *init_attr)
617 {
618 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
619 ibdev_dbg(&dev->ibdev,
620 "qp: requested send wr[%u] exceeds the max[%u]\n",
621 init_attr->cap.max_send_wr,
622 dev->dev_attr.max_sq_depth);
623 return -EINVAL;
624 }
625 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
626 ibdev_dbg(&dev->ibdev,
627 "qp: requested receive wr[%u] exceeds the max[%u]\n",
628 init_attr->cap.max_recv_wr,
629 dev->dev_attr.max_rq_depth);
630 return -EINVAL;
631 }
632 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
633 ibdev_dbg(&dev->ibdev,
634 "qp: requested sge send[%u] exceeds the max[%u]\n",
635 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
636 return -EINVAL;
637 }
638 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
639 ibdev_dbg(&dev->ibdev,
640 "qp: requested sge recv[%u] exceeds the max[%u]\n",
641 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
642 return -EINVAL;
643 }
644 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size_ex) {
645 ibdev_dbg(&dev->ibdev,
646 "qp: requested inline data[%u] exceeds the max[%u]\n",
647 init_attr->cap.max_inline_data,
648 dev->dev_attr.inline_buf_size_ex);
649 return -EINVAL;
650 }
651
652 return 0;
653 }
654
655 static int efa_qp_validate_attr(struct efa_dev *dev,
656 struct ib_qp_init_attr *init_attr)
657 {
658 if (init_attr->qp_type != IB_QPT_DRIVER &&
659 init_attr->qp_type != IB_QPT_UD) {
660 ibdev_dbg(&dev->ibdev,
661 "Unsupported qp type %d\n", init_attr->qp_type);
662 return -EOPNOTSUPP;
663 }
664
665 if (init_attr->srq) {
666 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
667 return -EOPNOTSUPP;
668 }
669
670 if (init_attr->create_flags) {
671 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
672 return -EOPNOTSUPP;
673 }
674
675 return 0;
676 }
677
678 int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
679 struct ib_udata *udata)
680 {
681 struct efa_com_create_qp_params create_qp_params = {};
682 struct efa_com_create_qp_result create_qp_resp;
683 struct efa_dev *dev = to_edev(ibqp->device);
684 struct efa_ibv_create_qp_resp resp = {};
685 struct efa_ibv_create_qp cmd;
686 struct efa_qp *qp = to_eqp(ibqp);
687 struct efa_ucontext *ucontext;
688 u16 supported_efa_flags = 0;
689 int err;
690
691 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
692 ibucontext);
693
694 err = efa_qp_validate_cap(dev, init_attr);
695 if (err)
696 goto err_out;
697
698 err = efa_qp_validate_attr(dev, init_attr);
699 if (err)
700 goto err_out;
701
702 err = ib_copy_validate_udata_in_cm(udata, cmd, driver_qp_type, 0);
703 if (err)
704 goto err_out;
705
706 if (!is_reserved_cleared(cmd.reserved_98)) {
707 ibdev_dbg(&dev->ibdev,
708 "Incompatible ABI params, unknown fields in udata\n");
709 err = -EINVAL;
710 goto err_out;
711 }
712
713 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
714 supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
715
716 if (cmd.flags & ~supported_efa_flags) {
717 ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
718 cmd.flags, supported_efa_flags);
719 err = -EOPNOTSUPP;
720 goto err_out;
721 }
722
723 create_qp_params.uarn = ucontext->uarn;
724 create_qp_params.pd = to_epd(ibqp->pd)->pdn;
725
726 if (init_attr->qp_type == IB_QPT_UD) {
727 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
728 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
729 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
730 } else {
731 ibdev_dbg(&dev->ibdev,
732 "Unsupported qp type %d driver qp type %d\n",
733 init_attr->qp_type, cmd.driver_qp_type);
734 err = -EOPNOTSUPP;
735 goto err_out;
736 }
737
738 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
739 init_attr->qp_type, cmd.driver_qp_type);
740 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
741 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
742 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
743 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
744
745 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
746 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
747 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
748 if (qp->rq_size) {
749 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
750 qp->rq_size, DMA_TO_DEVICE);
751 if (!qp->rq_cpu_addr) {
752 err = -ENOMEM;
753 goto err_out;
754 }
755
756 ibdev_dbg(&dev->ibdev,
757 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
758 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
759 create_qp_params.rq_base_addr = qp->rq_dma_addr;
760 }
761
762 create_qp_params.sl = cmd.sl;
763
764 if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
765 create_qp_params.unsolicited_write_recv = true;
766
767 err = efa_com_create_qp(&dev->edev, &create_qp_params,
768 &create_qp_resp);
769 if (err)
770 goto err_free_mapped;
771
772 resp.sq_db_offset = create_qp_resp.sq_db_offset;
773 resp.rq_db_offset = create_qp_resp.rq_db_offset;
774 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
775 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
776 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
777
778 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
779 &resp);
780 if (err)
781 goto err_destroy_qp;
782
783 qp->qp_handle = create_qp_resp.qp_handle;
784 qp->ibqp.qp_num = create_qp_resp.qp_num;
785 qp->max_send_wr = init_attr->cap.max_send_wr;
786 qp->max_recv_wr = init_attr->cap.max_recv_wr;
787 qp->max_send_sge = init_attr->cap.max_send_sge;
788 qp->max_recv_sge = init_attr->cap.max_recv_sge;
789 qp->max_inline_data = init_attr->cap.max_inline_data;
790
791 if (udata->outlen) {
792 err = ib_copy_to_udata(udata, &resp,
793 min(sizeof(resp), udata->outlen));
794 if (err) {
795 ibdev_dbg(&dev->ibdev,
796 "Failed to copy udata for qp[%u]\n",
797 create_qp_resp.qp_num);
798 goto err_remove_mmap_entries;
799 }
800 }
801
802 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
803
804 return 0;
805
806 err_remove_mmap_entries:
807 efa_qp_user_mmap_entries_remove(qp);
808 err_destroy_qp:
809 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
810 err_free_mapped:
811 if (qp->rq_cpu_addr)
812 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
813 qp->rq_size, DMA_TO_DEVICE);
814 err_out:
815 atomic64_inc(&dev->stats.create_qp_err);
816 return err;
817 }
818
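/*
 * Valid state transitions and required/optional attribute masks for SRD
 * (driver-type) QPs, checked by efa_modify_srd_qp_is_ok() below; UD QPs
 * use the core ib_modify_qp_is_ok() instead (see efa_modify_qp_validate()).
 */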
819 static const struct {
820 int valid;
821 enum ib_qp_attr_mask req_param;
822 enum ib_qp_attr_mask opt_param;
823 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
824 [IB_QPS_RESET] = {
825 [IB_QPS_RESET] = { .valid = 1 },
826 [IB_QPS_INIT] = {
827 .valid = 1,
828 .req_param = IB_QP_PKEY_INDEX |
829 IB_QP_PORT |
830 IB_QP_QKEY,
831 },
832 },
833 [IB_QPS_INIT] = {
834 [IB_QPS_RESET] = { .valid = 1 },
835 [IB_QPS_ERR] = { .valid = 1 },
836 [IB_QPS_INIT] = {
837 .valid = 1,
838 .opt_param = IB_QP_PKEY_INDEX |
839 IB_QP_PORT |
840 IB_QP_QKEY,
841 },
842 [IB_QPS_RTR] = {
843 .valid = 1,
844 .opt_param = IB_QP_PKEY_INDEX |
845 IB_QP_QKEY,
846 },
847 },
848 [IB_QPS_RTR] = {
849 [IB_QPS_RESET] = { .valid = 1 },
850 [IB_QPS_ERR] = { .valid = 1 },
851 [IB_QPS_RTS] = {
852 .valid = 1,
853 .req_param = IB_QP_SQ_PSN,
854 .opt_param = IB_QP_CUR_STATE |
855 IB_QP_QKEY |
856 IB_QP_RNR_RETRY,
857
858 }
859 },
860 [IB_QPS_RTS] = {
861 [IB_QPS_RESET] = { .valid = 1 },
862 [IB_QPS_ERR] = { .valid = 1 },
863 [IB_QPS_RTS] = {
864 .valid = 1,
865 .opt_param = IB_QP_CUR_STATE |
866 IB_QP_QKEY,
867 },
868 [IB_QPS_SQD] = {
869 .valid = 1,
870 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
871 },
872 },
873 [IB_QPS_SQD] = {
874 [IB_QPS_RESET] = { .valid = 1 },
875 [IB_QPS_ERR] = { .valid = 1 },
876 [IB_QPS_RTS] = {
877 .valid = 1,
878 .opt_param = IB_QP_CUR_STATE |
879 IB_QP_QKEY,
880 },
881 [IB_QPS_SQD] = {
882 .valid = 1,
883 .opt_param = IB_QP_PKEY_INDEX |
884 IB_QP_QKEY,
885 }
886 },
887 [IB_QPS_SQE] = {
888 [IB_QPS_RESET] = { .valid = 1 },
889 [IB_QPS_ERR] = { .valid = 1 },
890 [IB_QPS_RTS] = {
891 .valid = 1,
892 .opt_param = IB_QP_CUR_STATE |
893 IB_QP_QKEY,
894 }
895 },
896 [IB_QPS_ERR] = {
897 [IB_QPS_RESET] = { .valid = 1 },
898 [IB_QPS_ERR] = { .valid = 1 },
899 }
900 };
901
902 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
903 enum ib_qp_state next_state,
904 enum ib_qp_attr_mask mask)
905 {
906 enum ib_qp_attr_mask req_param, opt_param;
907
908 if (mask & IB_QP_CUR_STATE &&
909 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
910 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
911 return false;
912
913 if (!srd_qp_state_table[cur_state][next_state].valid)
914 return false;
915
916 req_param = srd_qp_state_table[cur_state][next_state].req_param;
917 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
918
919 if ((mask & req_param) != req_param)
920 return false;
921
922 if (mask & ~(req_param | opt_param | IB_QP_STATE))
923 return false;
924
925 return true;
926 }
927
928 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
929 struct ib_qp_attr *qp_attr, int qp_attr_mask,
930 enum ib_qp_state cur_state,
931 enum ib_qp_state new_state)
932 {
933 int err;
934
935 #define EFA_MODIFY_QP_SUPP_MASK \
936 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
937 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
938 IB_QP_RNR_RETRY)
939
940 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
941 ibdev_dbg(&dev->ibdev,
942 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
943 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
944 return -EOPNOTSUPP;
945 }
946
947 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
948 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
949 qp_attr_mask);
950 else
951 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
952 qp_attr_mask);
953
954 if (err) {
955 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
956 return -EINVAL;
957 }
958
959 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
960 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
961 return -EOPNOTSUPP;
962 }
963
964 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
965 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
966 return -EOPNOTSUPP;
967 }
968
969 return 0;
970 }
971
972 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
973 int qp_attr_mask, struct ib_udata *udata)
974 {
975 struct efa_dev *dev = to_edev(ibqp->device);
976 struct efa_com_modify_qp_params params = {};
977 struct efa_qp *qp = to_eqp(ibqp);
978 enum ib_qp_state cur_state;
979 enum ib_qp_state new_state;
980 int err;
981
982 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
983 return -EOPNOTSUPP;
984
985 if (udata->inlen &&
986 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
987 ibdev_dbg(&dev->ibdev,
988 "Incompatible ABI params, udata not cleared\n");
989 return -EINVAL;
990 }
991
992 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
993 qp->state;
994 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
995
996 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
997 new_state);
998 if (err)
999 return err;
1000
1001 params.qp_handle = qp->qp_handle;
1002
1003 if (qp_attr_mask & IB_QP_STATE) {
1004 EFA_SET(¶ms.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
1005 1);
1006 EFA_SET(¶ms.modify_mask,
1007 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
1008 params.cur_qp_state = cur_state;
1009 params.qp_state = new_state;
1010 }
1011
1012 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1013 EFA_SET(¶ms.modify_mask,
1014 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
1015 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
1016 }
1017
1018 if (qp_attr_mask & IB_QP_QKEY) {
1019 EFA_SET(¶ms.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
1020 params.qkey = qp_attr->qkey;
1021 }
1022
1023 if (qp_attr_mask & IB_QP_SQ_PSN) {
1024 EFA_SET(¶ms.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
1025 params.sq_psn = qp_attr->sq_psn;
1026 }
1027
1028 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1029 EFA_SET(¶ms.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
1030 1);
1031 params.rnr_retry = qp_attr->rnr_retry;
1032 }
1033
1034 err = efa_com_modify_qp(&dev->edev, ¶ms);
1035 if (err)
1036 return err;
1037
1038 qp->state = new_state;
1039
1040 return 0;
1041 }
1042
1043 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
1044 {
1045 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
1046
1047 return efa_com_destroy_cq(&dev->edev, ¶ms);
1048 }
1049
1050 static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
1051 {
1052 rdma_user_mmap_entry_remove(cq->db_mmap_entry);
1053 rdma_user_mmap_entry_remove(cq->mmap_entry);
1054 }
1055
1056 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1057 {
1058 struct efa_dev *dev = to_edev(ibcq->device);
1059 struct efa_cq *cq = to_ecq(ibcq);
1060
1061 ibdev_dbg(&dev->ibdev,
1062 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
1063 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
1064
1065 efa_destroy_cq_idx(dev, cq->cq_idx);
1066 if (cq->cpu_addr)
1067 efa_cq_user_mmap_entries_remove(cq);
1068 if (cq->eq) {
1069 xa_erase(&dev->cqs_xa, cq->cq_idx);
1070 synchronize_irq(cq->eq->irq.irqn);
1071 }
1072
1073 if (cq->cpu_addr)
1074 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
1075 return 0;
1076 }
1077
1078 static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
1079 {
1080 return &dev->eqs[vec];
1081 }
1082
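/*
 * Expose the CQ ring (and, when the device reports a doorbell offset, the
 * matching doorbell page) to userspace through mmap entries; the keys are
 * returned in the create_cq response.
 */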
1083 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1084 struct efa_ibv_create_cq_resp *resp,
1085 bool db_valid)
1086 {
1087 resp->q_mmap_size = cq->size;
1088 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1089 virt_to_phys(cq->cpu_addr),
1090 cq->size, EFA_MMAP_DMA_PAGE,
1091 &resp->q_mmap_key);
1092 if (!cq->mmap_entry)
1093 return -ENOMEM;
1094
1095 if (db_valid) {
1096 cq->db_mmap_entry =
1097 efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1098 dev->db_bar_addr + resp->db_off,
1099 PAGE_SIZE, EFA_MMAP_IO_NC,
1100 &resp->db_mmap_key);
1101 if (!cq->db_mmap_entry) {
1102 rdma_user_mmap_entry_remove(cq->mmap_entry);
1103 return -ENOMEM;
1104 }
1105
1106 resp->db_off &= ~PAGE_MASK;
1107 resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
1108 }
1109
1110 return 0;
1111 }
1112
1113 int efa_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1114 struct uverbs_attr_bundle *attrs)
1115 {
1116 struct ib_udata *udata = &attrs->driver_udata;
1117 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1118 udata, struct efa_ucontext, ibucontext);
1119 struct efa_com_create_cq_params params = {};
1120 struct efa_ibv_create_cq_resp resp = {};
1121 struct efa_com_create_cq_result result;
1122 struct ib_device *ibdev = ibcq->device;
1123 struct efa_dev *dev = to_edev(ibdev);
1124 struct efa_ibv_create_cq cmd;
1125 struct efa_cq *cq = to_ecq(ibcq);
1126 int entries = attr->cqe;
1127 bool set_src_addr;
1128 int err;
1129
1130 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1131
1132 if (attr->flags)
1133 return -EOPNOTSUPP;
1134
1135 if (entries > dev->dev_attr.max_cq_depth) {
1136 ibdev_dbg(ibdev,
1137 "cq: requested entries[%u] greater than max[%u]\n",
1138 entries, dev->dev_attr.max_cq_depth);
1139 err = -EINVAL;
1140 goto err_out;
1141 }
1142
1143 err = ib_copy_validate_udata_in_cm(udata, cmd, num_sub_cqs, 0);
1144 if (err)
1145 goto err_out;
1146
1147 if (!is_reserved_cleared(cmd.reserved_58)) {
1148 ibdev_dbg(ibdev,
1149 "Incompatible ABI params, unknown fields in udata\n");
1150 err = -EINVAL;
1151 goto err_out;
1152 }
1153
1154 set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
1155 if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
1156 (set_src_addr ||
1157 cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
1158 ibdev_dbg(ibdev,
1159 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1160 err = -EINVAL;
1161 goto err_out;
1162 }
1163
1164 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1165 ibdev_dbg(ibdev,
1166 "Invalid number of sub cqs[%u] expected[%u]\n",
1167 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1168 err = -EINVAL;
1169 goto err_out;
1170 }
1171
1172 cq->ucontext = ucontext;
1173 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1174
1175 if (ibcq->umem) {
1176 if (ibcq->umem->length < cq->size) {
1177 ibdev_dbg(&dev->ibdev, "External memory too small\n");
1178 err = -EINVAL;
1179 goto err_out;
1180 }
1181
1182 if (!ib_umem_is_contiguous(ibcq->umem)) {
1183 ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
1184 err = -EINVAL;
1185 goto err_out;
1186 }
1187
1188 cq->dma_addr = ib_umem_start_dma_addr(ibcq->umem);
1189 } else {
1190 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1191 DMA_FROM_DEVICE);
1192 if (!cq->cpu_addr) {
1193 err = -ENOMEM;
1194 goto err_out;
1195 }
1196 }
1197
1198 params.uarn = cq->ucontext->uarn;
1199 params.sub_cq_depth = entries;
1200 params.dma_addr = cq->dma_addr;
1201 params.entry_size_in_bytes = cmd.cq_entry_size;
1202 params.num_sub_cqs = cmd.num_sub_cqs;
1203 params.set_src_addr = set_src_addr;
1204 if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
1205 cq->eq = efa_vec2eq(dev, attr->comp_vector);
1206 params.eqn = cq->eq->eeq.eqn;
1207 params.interrupt_mode_enabled = true;
1208 }
1209
1210 err = efa_com_create_cq(&dev->edev, ¶ms, &result);
1211 if (err)
1212 goto err_free_mapped;
1213
1214 resp.db_off = result.db_off;
1215 resp.cq_idx = result.cq_idx;
1216 cq->cq_idx = result.cq_idx;
1217 cq->ibcq.cqe = result.actual_depth;
1218 WARN_ON_ONCE(entries != result.actual_depth);
1219
1220 if (cq->cpu_addr)
1221 err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
1222
1223 if (err) {
1224 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1225 cq->cq_idx);
1226 goto err_destroy_cq;
1227 }
1228
1229 if (cq->eq) {
1230 err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
1231 if (err) {
1232 ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
1233 cq->cq_idx);
1234 goto err_remove_mmap;
1235 }
1236 }
1237
1238 if (udata->outlen) {
1239 err = ib_copy_to_udata(udata, &resp,
1240 min(sizeof(resp), udata->outlen));
1241 if (err) {
1242 ibdev_dbg(ibdev,
1243 "Failed to copy udata for create_cq\n");
1244 goto err_xa_erase;
1245 }
1246 }
1247
1248 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1249 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1250
1251 return 0;
1252
1253 err_xa_erase:
1254 if (cq->eq)
1255 xa_erase(&dev->cqs_xa, cq->cq_idx);
1256 err_remove_mmap:
1257 if (cq->cpu_addr)
1258 efa_cq_user_mmap_entries_remove(cq);
1259 err_destroy_cq:
1260 efa_destroy_cq_idx(dev, cq->cq_idx);
1261 err_free_mapped:
1262 if (cq->cpu_addr)
1263 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1264 DMA_FROM_DEVICE);
1265 err_out:
1266 atomic64_inc(&dev->stats.create_cq_err);
1267 return err;
1268 }
1269
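/*
 * Flatten a user memory region into an array of device page addresses,
 * one entry per 2^hp_shift-byte block, for use as a page buffer list (PBL).
 */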
1270 static int umem_to_page_list(struct efa_dev *dev,
1271 struct ib_umem *umem,
1272 u64 *page_list,
1273 u32 hp_cnt,
1274 u8 hp_shift)
1275 {
1276 struct ib_block_iter biter;
1277 unsigned int hp_idx = 0;
1278
1279 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1280 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1281
1282 return 0;
1283 }
1284
1285 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1286 {
1287 struct scatterlist *sglist;
1288 struct page *pg;
1289 int i;
1290
1291 sglist = kmalloc_objs(*sglist, page_cnt);
1292 if (!sglist)
1293 return NULL;
1294 sg_init_table(sglist, page_cnt);
1295 for (i = 0; i < page_cnt; i++) {
1296 pg = vmalloc_to_page(buf);
1297 if (!pg)
1298 goto err;
1299 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1300 buf += PAGE_SIZE / sizeof(*buf);
1301 }
1302 return sglist;
1303
1304 err:
1305 kfree(sglist);
1306 return NULL;
1307 }
1308
1309 /*
1310 * create a chunk list of the physical pages' DMA addresses from the supplied
1311 * scatter gather list
1312 */
1313 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1314 {
1315 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1316 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1317 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1318 unsigned int chunk_list_size, chunk_idx, payload_idx;
1319 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1320 struct efa_com_ctrl_buff_info *ctrl_buf;
1321 u64 *cur_chunk_buf, *prev_chunk_buf;
1322 struct ib_block_iter biter;
1323 dma_addr_t dma_addr;
1324 int i;
1325
1326 /* allocate a chunk list that consists of 4KB chunks */
1327 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1328
1329 chunk_list->size = chunk_list_size;
1330 chunk_list->chunks = kzalloc_objs(*chunk_list->chunks, chunk_list_size);
1331 if (!chunk_list->chunks)
1332 return -ENOMEM;
1333
1334 ibdev_dbg(&dev->ibdev,
1335 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1336 page_cnt);
1337
1338 /* allocate chunk buffers: */
1339 for (i = 0; i < chunk_list_size; i++) {
1340 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1341 if (!chunk_list->chunks[i].buf)
1342 goto chunk_list_dealloc;
1343
1344 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1345 }
1346 chunk_list->chunks[chunk_list_size - 1].length =
1347 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1348 EFA_CHUNK_PTR_SIZE;
1349
1350 /* fill the chunks with the DMA addresses of the SG list pages: */
1351 chunk_idx = 0;
1352 payload_idx = 0;
1353 cur_chunk_buf = chunk_list->chunks[0].buf;
1354 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1355 EFA_CHUNK_PAYLOAD_SIZE) {
1356 cur_chunk_buf[payload_idx++] =
1357 rdma_block_iter_dma_address(&biter);
1358
1359 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1360 chunk_idx++;
1361 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1362 payload_idx = 0;
1363 }
1364 }
1365
1366 /* map chunks to dma and fill chunks next ptrs */
1367 for (i = chunk_list_size - 1; i >= 0; i--) {
1368 dma_addr = dma_map_single(&dev->pdev->dev,
1369 chunk_list->chunks[i].buf,
1370 chunk_list->chunks[i].length,
1371 DMA_TO_DEVICE);
1372 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1373 ibdev_err(&dev->ibdev,
1374 "chunk[%u] dma_map_failed\n", i);
1375 goto chunk_list_unmap;
1376 }
1377
1378 chunk_list->chunks[i].dma_addr = dma_addr;
1379 ibdev_dbg(&dev->ibdev,
1380 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1381
1382 if (!i)
1383 break;
1384
1385 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1386
1387 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1388 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1389 ctrl_buf->length = chunk_list->chunks[i].length;
1390
1391 efa_com_set_dma_addr(dma_addr,
1392 &ctrl_buf->address.mem_addr_high,
1393 &ctrl_buf->address.mem_addr_low);
1394 }
1395
1396 return 0;
1397
1398 chunk_list_unmap:
1399 for (; i < chunk_list_size; i++) {
1400 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1401 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1402 }
1403 chunk_list_dealloc:
1404 for (i = 0; i < chunk_list_size; i++)
1405 kfree(chunk_list->chunks[i].buf);
1406
1407 kfree(chunk_list->chunks);
1408 return -ENOMEM;
1409 }
1410
1411 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1412 {
1413 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1414 int i;
1415
1416 for (i = 0; i < chunk_list->size; i++) {
1417 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1418 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1419 kfree(chunk_list->chunks[i].buf);
1420 }
1421
1422 kfree(chunk_list->chunks);
1423 }
1424
1425 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1426 static int pbl_continuous_initialize(struct efa_dev *dev,
1427 struct pbl_context *pbl)
1428 {
1429 dma_addr_t dma_addr;
1430
1431 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1432 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1433 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1434 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1435 return -ENOMEM;
1436 }
1437
1438 pbl->phys.continuous.dma_addr = dma_addr;
1439 ibdev_dbg(&dev->ibdev,
1440 "pbl continuous - dma_addr = %pad, size[%u]\n",
1441 &dma_addr, pbl->pbl_buf_size_in_bytes);
1442
1443 return 0;
1444 }
1445
1446 /*
1447 * initialize pbl indirect mode:
1448 * create a chunk list out of the dma addresses of the physical pages of
1449 * pbl buffer.
1450 */
1451 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1452 {
1453 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
1454 struct scatterlist *sgl;
1455 int sg_dma_cnt, err;
1456
1457 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1458 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1459 if (!sgl)
1460 return -ENOMEM;
1461
1462 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1463 if (!sg_dma_cnt) {
1464 err = -EINVAL;
1465 goto err_map;
1466 }
1467
1468 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1469 pbl->phys.indirect.sgl = sgl;
1470 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1471 err = pbl_chunk_list_create(dev, pbl);
1472 if (err) {
1473 ibdev_dbg(&dev->ibdev,
1474 "chunk_list creation failed[%d]\n", err);
1475 goto err_chunk;
1476 }
1477
1478 ibdev_dbg(&dev->ibdev,
1479 "pbl indirect - size[%u], chunks[%u]\n",
1480 pbl->pbl_buf_size_in_bytes,
1481 pbl->phys.indirect.chunk_list.size);
1482
1483 return 0;
1484
1485 err_chunk:
1486 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1487 err_map:
1488 kfree(sgl);
1489 return err;
1490 }
1491
1492 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1493 {
1494 pbl_chunk_list_destroy(dev, pbl);
1495 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1496 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1497 kfree(pbl->phys.indirect.sgl);
1498 }
1499
1500 /* create a page buffer list from a mapped user memory region */
1501 static int pbl_create(struct efa_dev *dev,
1502 struct pbl_context *pbl,
1503 struct ib_umem *umem,
1504 int hp_cnt,
1505 u8 hp_shift)
1506 {
1507 int err;
1508
1509 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1510 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1511 if (!pbl->pbl_buf)
1512 return -ENOMEM;
1513
1514 if (is_vmalloc_addr(pbl->pbl_buf)) {
1515 pbl->physically_continuous = 0;
1516 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1517 hp_shift);
1518 if (err)
1519 goto err_free;
1520
1521 err = pbl_indirect_initialize(dev, pbl);
1522 if (err)
1523 goto err_free;
1524 } else {
1525 pbl->physically_continuous = 1;
1526 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1527 hp_shift);
1528 if (err)
1529 goto err_free;
1530
1531 err = pbl_continuous_initialize(dev, pbl);
1532 if (err)
1533 goto err_free;
1534 }
1535
1536 ibdev_dbg(&dev->ibdev,
1537 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1538 hp_cnt, pbl->physically_continuous);
1539
1540 return 0;
1541
1542 err_free:
1543 kvfree(pbl->pbl_buf);
1544 return err;
1545 }
1546
1547 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1548 {
1549 if (pbl->physically_continuous)
1550 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1551 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1552 else
1553 pbl_indirect_terminate(dev, pbl);
1554
1555 kvfree(pbl->pbl_buf);
1556 }
1557
1558 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1559 struct efa_com_reg_mr_params *params)
1560 {
1561 int err;
1562
1563 params->inline_pbl = 1;
1564 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1565 params->page_num, params->page_shift);
1566 if (err)
1567 return err;
1568
1569 ibdev_dbg(&dev->ibdev,
1570 "inline_pbl_array - pages[%u]\n", params->page_num);
1571
1572 return 0;
1573 }
1574
1575 static int efa_create_pbl(struct efa_dev *dev,
1576 struct pbl_context *pbl,
1577 struct efa_mr *mr,
1578 struct efa_com_reg_mr_params *params)
1579 {
1580 int err;
1581
1582 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1583 params->page_shift);
1584 if (err) {
1585 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1586 return err;
1587 }
1588
1589 params->inline_pbl = 0;
1590 params->indirect = !pbl->physically_continuous;
1591 if (pbl->physically_continuous) {
1592 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1593
1594 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1595 ¶ms->pbl.pbl.address.mem_addr_high,
1596 ¶ms->pbl.pbl.address.mem_addr_low);
1597 } else {
1598 params->pbl.pbl.length =
1599 pbl->phys.indirect.chunk_list.chunks[0].length;
1600
1601 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1602 ¶ms->pbl.pbl.address.mem_addr_high,
1603 ¶ms->pbl.pbl.address.mem_addr_low);
1604 }
1605
1606 return 0;
1607 }
1608
1609 static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
1610 struct ib_udata *udata)
1611 {
1612 struct efa_dev *dev = to_edev(ibpd->device);
1613 int supp_access_flags;
1614 struct efa_mr *mr;
1615
1616 if (udata && udata->inlen &&
1617 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1618 ibdev_dbg(&dev->ibdev,
1619 "Incompatible ABI params, udata not cleared\n");
1620 return ERR_PTR(-EINVAL);
1621 }
1622
1623 supp_access_flags =
1624 IB_ACCESS_LOCAL_WRITE |
1625 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
1626 (EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
1627
1628 access_flags &= ~IB_ACCESS_OPTIONAL;
1629 if (access_flags & ~supp_access_flags) {
1630 ibdev_dbg(&dev->ibdev,
1631 "Unsupported access flags[%#x], supported[%#x]\n",
1632 access_flags, supp_access_flags);
1633 return ERR_PTR(-EOPNOTSUPP);
1634 }
1635
1636 mr = kzalloc_obj(*mr);
1637 if (!mr)
1638 return ERR_PTR(-ENOMEM);
1639
1640 return mr;
1641 }
1642
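/*
 * Register the MR with the device. Small regions pass the page list inline
 * in the admin command (efa_create_inline_pbl()); larger ones build a PBL,
 * either physically continuous (single DMA mapping) or indirect (a chained
 * chunk list), and pass only its base address (efa_create_pbl()).
 */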
1643 static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
1644 u64 length, u64 virt_addr, int access_flags)
1645 {
1646 struct efa_dev *dev = to_edev(ibpd->device);
1647 struct efa_com_reg_mr_params params = {};
1648 struct efa_com_reg_mr_result result = {};
1649 struct pbl_context pbl;
1650 unsigned int pg_sz;
1651 int inline_size;
1652 int err;
1653
1654 params.pd = to_epd(ibpd)->pdn;
1655 params.iova = virt_addr;
1656 params.mr_length_in_bytes = length;
1657 params.permissions = access_flags;
1658
1659 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1660 dev->dev_attr.page_size_cap,
1661 virt_addr);
1662 if (!pg_sz) {
1663 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1664 dev->dev_attr.page_size_cap);
1665 return -EOPNOTSUPP;
1666 }
1667
1668 params.page_shift = order_base_2(pg_sz);
1669 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1670
1671 ibdev_dbg(&dev->ibdev,
1672 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1673 start, length, params.page_shift, params.page_num);
1674
1675 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1676 if (params.page_num <= inline_size) {
1677 err = efa_create_inline_pbl(dev, mr, ¶ms);
1678 if (err)
1679 return err;
1680
1681 err = efa_com_register_mr(&dev->edev, ¶ms, &result);
1682 if (err)
1683 return err;
1684 } else {
1685 err = efa_create_pbl(dev, &pbl, mr, ¶ms);
1686 if (err)
1687 return err;
1688
1689 err = efa_com_register_mr(&dev->edev, ¶ms, &result);
1690 pbl_destroy(dev, &pbl);
1691
1692 if (err)
1693 return err;
1694 }
1695
1696 mr->ibmr.lkey = result.l_key;
1697 mr->ibmr.rkey = result.r_key;
1698 mr->ibmr.length = length;
1699 mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
1700 mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
1701 mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
1702 mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
1703 mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
1704 mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
1705 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1706
1707 return 0;
1708 }
1709
1710 struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
1711 u64 length, u64 virt_addr,
1712 int fd, int access_flags,
1713 struct ib_dmah *dmah,
1714 struct uverbs_attr_bundle *attrs)
1715 {
1716 struct efa_dev *dev = to_edev(ibpd->device);
1717 struct ib_umem_dmabuf *umem_dmabuf;
1718 struct efa_mr *mr;
1719 int err;
1720
1721 if (dmah) {
1722 err = -EOPNOTSUPP;
1723 goto err_out;
1724 }
1725
1726 mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
1727 if (IS_ERR(mr)) {
1728 err = PTR_ERR(mr);
1729 goto err_out;
1730 }
1731
1732 umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
1733 access_flags);
1734 if (IS_ERR(umem_dmabuf)) {
1735 err = PTR_ERR(umem_dmabuf);
1736 ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
1737 umem_dmabuf);
1738 goto err_free;
1739 }
1740
1741 mr->umem = &umem_dmabuf->umem;
1742 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1743 if (err)
1744 goto err_release;
1745
1746 return &mr->ibmr;
1747
1748 err_release:
1749 ib_umem_release(mr->umem);
1750 err_free:
1751 kfree(mr);
1752 err_out:
1753 atomic64_inc(&dev->stats.reg_mr_err);
1754 return ERR_PTR(err);
1755 }
1756
1757 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1758 u64 virt_addr, int access_flags,
1759 struct ib_dmah *dmah,
1760 struct ib_udata *udata)
1761 {
1762 struct efa_dev *dev = to_edev(ibpd->device);
1763 struct efa_mr *mr;
1764 int err;
1765
1766 if (dmah) {
1767 err = -EOPNOTSUPP;
1768 goto err_out;
1769 }
1770
1771 mr = efa_alloc_mr(ibpd, access_flags, udata);
1772 if (IS_ERR(mr)) {
1773 err = PTR_ERR(mr);
1774 goto err_out;
1775 }
1776
1777 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1778 if (IS_ERR(mr->umem)) {
1779 err = PTR_ERR(mr->umem);
1780 ibdev_dbg(&dev->ibdev,
1781 "Failed to pin and map user space memory[%pe]\n",
1782 mr->umem);
1783 goto err_free;
1784 }
1785
1786 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1787 if (err)
1788 goto err_release;
1789
1790 return &mr->ibmr;
1791
1792 err_release:
1793 ib_umem_release(mr->umem);
1794 err_free:
1795 kfree(mr);
1796 err_out:
1797 atomic64_inc(&dev->stats.reg_mr_err);
1798 return ERR_PTR(err);
1799 }
1800
1801 static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
1802 {
1803 struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
1804 struct efa_mr *mr = to_emr(ibmr);
1805 u16 ic_id_validity = 0;
1806 int ret;
1807
1808 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
1809 &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
1810 if (ret)
1811 return ret;
1812
1813 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
1814 &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
1815 if (ret)
1816 return ret;
1817
1818 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
1819 &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
1820 if (ret)
1821 return ret;
1822
1823 if (mr->ic_info.recv_ic_id_valid)
1824 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
1825 if (mr->ic_info.rdma_read_ic_id_valid)
1826 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
1827 if (mr->ic_info.rdma_recv_ic_id_valid)
1828 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
1829
1830 return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
1831 &ic_id_validity, sizeof(ic_id_validity));
1832 }
1833
efa_dereg_mr(struct ib_mr * ibmr,struct ib_udata * udata)1834 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1835 {
1836 struct efa_dev *dev = to_edev(ibmr->device);
1837 struct efa_com_dereg_mr_params params;
1838 struct efa_mr *mr = to_emr(ibmr);
1839 int err;
1840
1841 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1842
1843 params.l_key = mr->ibmr.lkey;
1844 err = efa_com_dereg_mr(&dev->edev, ¶ms);
1845 if (err)
1846 return err;
1847
1848 ib_umem_release(mr->umem);
1849 kfree(mr);
1850
1851 return 0;
1852 }
1853
efa_get_port_immutable(struct ib_device * ibdev,u32 port_num,struct ib_port_immutable * immutable)1854 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1855 struct ib_port_immutable *immutable)
1856 {
1857 struct ib_port_attr attr;
1858 int err;
1859
1860 err = ib_query_port(ibdev, port_num, &attr);
1861 if (err) {
1862 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1863 return err;
1864 }
1865
1866 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1867 immutable->gid_tbl_len = attr.gid_tbl_len;
1868
1869 return 0;
1870 }
1871
efa_dealloc_uar(struct efa_dev * dev,u16 uarn)1872 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1873 {
1874 struct efa_com_dealloc_uar_params params = {
1875 .uarn = uarn,
1876 };
1877
1878 return efa_com_dealloc_uar(&dev->edev, ¶ms);
1879 }
1880
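/*
 * Evaluates to NULL (handshake OK) when the device does not expose the
 * attribute or userspace advertised support for it via the corresponding
 * capability bit; otherwise evaluates to the attribute name, which the
 * caller logs before failing the handshake.
 */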
#define EFA_CHECK_USER_SUPP(_dev, _supported_caps, _attr, _mask, _attr_str) \
	(_attr_str = (!(_dev)->dev_attr._attr || ((_supported_caps) & (_mask))) ? \
		     NULL : #_attr)

static int efa_user_supp_handshake(const struct ib_ucontext *ibucontext,
				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
{
	struct efa_dev *dev = to_edev(ibucontext->device);
	char *attr_str;

	if (EFA_CHECK_USER_SUPP(dev, cmd->supported_caps, max_tx_batch,
				EFA_ALLOC_UCONTEXT_CMD_SUPP_CAPS_TX_BATCH,
				attr_str))
		goto err;

	if (EFA_CHECK_USER_SUPP(dev, cmd->supported_caps, min_sq_depth,
				EFA_ALLOC_UCONTEXT_CMD_SUPP_CAPS_MIN_SQ_WR,
				attr_str))
		goto err;

	return 0;

err:
	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
		  attr_str);
	return -EOPNOTSUPP;
}

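/*
 * Allocate a user context: verify the userspace capability handshake,
 * allocate a UAR on the device and report the supported command mask and
 * device limits back to userspace.
 */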
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_ibv_alloc_ucontext_cmd cmd = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * It's fine if the driver does not know all request fields;
	 * we will ack input fields in our response.
	 */

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for alloc_ucontext\n");
		goto err_out;
	}

	err = efa_user_supp_handshake(ibucontext, &cmd);
	if (err)
		goto err_out;

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.inline_buf_size_ex = dev->dev_attr.inline_buf_size_ex;
	resp.max_llq_size = dev->dev_attr.max_llq_size;
	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
	resp.min_sq_wr = dev->dev_attr.min_sq_depth;

	err = ib_copy_to_udata(udata, &resp,
			       min(sizeof(resp), udata->outlen));
	if (err)
		goto err_dealloc_uar;

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

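/*
 * Map a previously registered mmap entry into the caller's address space:
 * EFA_MMAP_DMA_PAGE entries are kernel-allocated pages inserted one page at a
 * time, while the IO flavours remap device addresses with either non-cached
 * or write-combined attributes.
 */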
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
					      u32 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
					  ARRAY_SIZE(efa_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
	return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
					  ARRAY_SIZE(efa_device_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

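/* Device-wide stats come from software counters; no device command is issued. */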
static int efa_fill_device_stats(struct efa_dev *dev,
				 struct rdma_hw_stats *stats)
{
	struct efa_com_stats_admin *as = &dev->edev.aq.stats;
	struct efa_stats *s = &dev->stats;

	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_device_stats_descs);
}

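/*
 * Port stats are gathered with a series of get-stats admin commands; RDMA
 * write counters are only queried when the device advertises the RDMA_WRITE
 * capability.
 */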
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
			       u32 port_num)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_com_rdma_write_stats *rws;
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_network_stats *ns;
	struct efa_com_basic_stats *bs;
	int err;

	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
		params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
		err = efa_com_get_stats(&dev->edev, &params, &result);
		if (err)
			return err;

		rws = &result.rdma_write_stats;
		stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
		stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
		stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
		stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
	}

	params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ns = &result.network_stats;
	stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
	stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
	stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
	stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
	stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;

	return ARRAY_SIZE(efa_port_stats_descs);
}

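/* port_num == 0 selects device-wide counters, otherwise per-port counters. */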
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u32 port_num, int index)
{
	if (port_num)
		return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
	else
		return efa_fill_device_stats(to_edev(ibdev), stats);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u32 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}

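/*
 * Direct-verbs method and attribute definitions for the MR query extension,
 * chained onto the core uverbs MR object tree.
 */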
DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
			    UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
					    UVERBS_OBJECT_MR,
					    UVERBS_ACCESS_READ,
					    UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY));

ADD_UVERBS_METHODS(efa_mr,
		   UVERBS_OBJECT_MR,
		   &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));

const struct uapi_definition efa_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
				&efa_mr),
	{},
};