// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
#define UVERBS_MODULE_NAME efa_ib
#include <rdma/uverbs_named_ioctl.h>
#include <rdma/ib_user_ioctl_cmds.h>

#include "efa.h"
#include "efa_io_defs.h"

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_DEVICE_STATS(op) \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_CMDS_ERR, "cmds_err") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err") \
	op(EFA_MMAP_ERR, "mmap_err")

#define EFA_DEFINE_PORT_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SEND_BYTES, "send_bytes") \
	op(EFA_SEND_WRS, "send_wrs") \
	op(EFA_RECV_BYTES, "recv_bytes") \
	op(EFA_RECV_WRS, "recv_wrs") \
	op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
	op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
	op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
	op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
	op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
	op(EFA_RETRANS_BYTES, "retrans_bytes") \
	op(EFA_RETRANS_PKTS, "retrans_pkts") \
	op(EFA_RETRANS_TIMEOUT_EVENTS, "retrans_timeout_events") \
	op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
	op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
	[ename].name = nam,

enum efa_hw_device_stats {
	EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};

static const struct rdma_stat_desc efa_device_stats_descs[] = {
	EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
};

enum efa_hw_port_stats {
	EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
};

static const struct rdma_stat_desc efa_port_stats_descs[] = {
	EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
};

#define EFA_DEFAULT_LINK_SPEED_GBPS 100

#define EFA_CHUNK_PAYLOAD_SHIFT 12
#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8

#define EFA_CHUNK_SHIFT 12
#define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

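/*
 * Rough layout sketch (our reading of the definitions above, not taken from
 * a hardware spec): each 4 KiB chunk carries EFA_PTRS_PER_CHUNK 8-byte page
 * pointers followed by one struct efa_com_ctrl_buff_info linking to the
 * next chunk. Assuming that struct is 12 bytes (a u32 length plus a 64-bit
 * address split into two u32s):
 *
 *   EFA_PTRS_PER_CHUNK  = (4096 - 12) / 8 = 510
 *   EFA_CHUNK_USED_SIZE = 510 * 8 + 12   = 4092
 *
 * i.e. the last 4 bytes of each chunk are unused.
 */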
struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

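/*
 * Host-side description of an MR's page list. If the kvzalloc()'d pbl_buf
 * came from the physically continuous kmalloc() path it is handed to the
 * device with a single DMA address ("continuous" mode); if it came from
 * vmalloc() it is instead described page by page through the chunk list
 * built in pbl_chunk_list_create() ("indirect" mode).
 */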
struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

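/* Test a device capability bit reported through the admin queue, e.g.
 * EFA_DEV_CAP(dev, RDMA_READ).
 */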
#define EFA_DEV_CAP(dev, cap) \
	((dev)->dev_attr.device_caps & \
	 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)

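/* memchr_inv() returns NULL when every byte equals the given value, so this
 * is true iff the reserved field is entirely zeroed.
 */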
#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

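/*
 * Allocate a zeroed buffer that is mapped for device access.
 * alloc_pages_exact() hands back physically continuous memory, which is why
 * a single dma_map_single() call can cover the whole region.
 */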
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
	free_pages_exact(cpu_addr, size);
}

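/*
 * ABI convention used by all verbs in this file: any input udata bytes the
 * kernel does not recognize must be zero (checked via ib_is_udata_cleared()),
 * so that requests relying on flags this kernel does not implement fail with
 * -EINVAL instead of being silently misinterpreted.
 */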
int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;
	props->max_pkeys = 1;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
		if (EFA_DEV_CAP(dev, RDMA_READ))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		if (EFA_DEV_CAP(dev, RNR_RETRY))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;

		if (EFA_DEV_CAP(dev, DATA_POLLING_128))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;

		if (EFA_DEV_CAP(dev, RDMA_WRITE))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;

		if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;

		if (dev->neqs)
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

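/*
 * Advertise the raw link speed as an approximate IB (width, per-lane speed)
 * pair, e.g. 100 Gbps is reported as 4X EDR (4 lanes x 25 Gbps) and anything
 * below 30 Gbps falls back to 1X EDR.
 */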
static void efa_link_gbps_to_speed_and_width(u16 gbps,
					     enum ib_port_speed *speed,
					     enum ib_port_width *width)
{
	if (gbps >= 400) {
		*width = IB_WIDTH_8X;
		*speed = IB_SPEED_HDR;
	} else if (gbps >= 200) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_HDR;
	} else if (gbps >= 120) {
		*width = IB_WIDTH_12X;
		*speed = IB_SPEED_FDR10;
	} else if (gbps >= 100) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	} else if (gbps >= 60) {
		*width = IB_WIDTH_12X;
		*speed = IB_SPEED_DDR;
	} else if (gbps >= 50) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_HDR;
	} else if (gbps >= 40) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else if (gbps >= 30) {
		*width = IB_WIDTH_12X;
		*speed = IB_SPEED_SDR;
	} else {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	}
}

int efa_query_port(struct ib_device *ibdev, u32 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);
	enum ib_port_speed link_speed;
	enum ib_port_width link_width;
	u16 link_gbps;

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS;
	efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width);
	props->active_speed = link_speed;
	props->active_width = link_width;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;
	qp_attr->rnr_retry = result.rnr_retry;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.alloc_pd_err);
	return err;
}

int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
	return 0;
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	efa_qp_user_mmap_entries_remove(qp);

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
	}

	return 0;
}

static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

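/*
 * Expose the QP's doorbells, LLQ descriptor space and RQ buffer to userspace
 * through rdma_user_mmap entries. The keys placed in the response are opaque
 * offsets that userspace later feeds back to mmap(); a rough userspace-side
 * sketch (hypothetical, for illustration only):
 *
 *   sq_db = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                cmd_fd, resp.sq_db_mmap_key);
 *
 * Entries are inserted page aligned, so the sub-page offsets (the
 * "resp->* &= ~PAGE_MASK" lines below) are reported separately.
 */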
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    offset_in_page(resp->llq_desc_offset));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_qp *qp = to_eqp(ibqp);
	struct efa_ucontext *ucontext;
	u16 supported_efa_flags = 0;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_98)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
		supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;

	if (cmd.flags & ~supported_efa_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
			  cmd.flags, supported_efa_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibqp->pd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_out;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	create_qp_params.sl = cmd.sl;

	if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
		create_qp_params.unsolicited_write_recv = true;

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return 0;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
err_out:
	atomic64_inc(&dev->stats.create_qp_err);
	return err;
}

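/*
 * SRD QP state machine, analogous to the core ib_modify_qp_is_ok() tables:
 * a transition is allowed only when .valid is set, every bit of .req_param
 * is supplied, and no attribute outside req_param | opt_param | IB_QP_STATE
 * is. For example, RESET->INIT requires PKEY_INDEX, PORT and QKEY, while
 * INIT->RTR merely permits PKEY_INDEX and QKEY.
 */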
static const struct {
	int valid;
	enum ib_qp_attr_mask req_param;
	enum ib_qp_attr_mask opt_param;
} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.req_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
		[IB_QPS_RTR] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.req_param = IB_QP_SQ_PSN,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY |
				     IB_QP_RNR_RETRY,

		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
	}
};

static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
				    enum ib_qp_state next_state,
				    enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!srd_qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = srd_qp_state_table[cur_state][next_state].req_param;
	opt_param = srd_qp_state_table[cur_state][next_state].opt_param;

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
	int err;

#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
	 IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (qp->ibqp.qp_type == IB_QPT_DRIVER)
		err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
					       qp_attr_mask);
	else
		err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
					  qp_attr_mask);

	if (err) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
			1);
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
		params.cur_qp_state = cur_state;
		params.qp_state = new_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
		params.sq_psn = qp_attr->sq_psn;
	}

	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
			1);
		params.rnr_retry = qp_attr->rnr_retry;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
{
	rdma_user_mmap_entry_remove(cq->db_mmap_entry);
	rdma_user_mmap_entry_remove(cq->mmap_entry);
}

int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	efa_destroy_cq_idx(dev, cq->cq_idx);
	efa_cq_user_mmap_entries_remove(cq);
	if (cq->eq) {
		xa_erase(&dev->cqs_xa, cq->cq_idx);
		synchronize_irq(cq->eq->irq.irqn);
	}

	if (cq->umem)
		ib_umem_release(cq->umem);
	else
		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
				DMA_FROM_DEVICE);
	return 0;
}

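/* Completion vectors are assumed to map 1:1 onto the EQs set up at probe. */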
static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
{
	return &dev->eqs[vec];
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp,
				 bool db_valid)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	if (db_valid) {
		cq->db_mmap_entry =
			efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						   dev->db_bar_addr + resp->db_off,
						   PAGE_SIZE, EFA_MMAP_IO_NC,
						   &resp->db_mmap_key);
		if (!cq->db_mmap_entry) {
			rdma_user_mmap_entry_remove(cq->mmap_entry);
			return -ENOMEM;
		}

		resp->db_off &= ~PAGE_MASK;
		resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
	}

	return 0;
}

int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		       struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_com_create_cq_params params = {};
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	bool set_src_addr;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
	if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
	    (set_src_addr ||
	     cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);

	if (umem) {
		if (umem->length < cq->size) {
			ibdev_dbg(&dev->ibdev, "External memory too small\n");
			err = -EINVAL;
			goto err_out;
		}

		if (!ib_umem_is_contiguous(umem)) {
			ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
			err = -EINVAL;
			goto err_out;
		}

		cq->cpu_addr = NULL;
		cq->dma_addr = ib_umem_start_dma_addr(umem);
		cq->umem = umem;
	} else {
		cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
						 DMA_FROM_DEVICE);
		if (!cq->cpu_addr) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	params.uarn = cq->ucontext->uarn;
	params.sub_cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	params.set_src_addr = set_src_addr;
	if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
		cq->eq = efa_vec2eq(dev, attr->comp_vector);
		params.eqn = cq->eq->eeq.eqn;
		params.interrupt_mode_enabled = true;
	}

	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.db_off = result.db_off;
	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	if (!umem)
		err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);

	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (cq->eq) {
		err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
		if (err) {
			ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
				  cq->cq_idx);
			goto err_remove_mmap;
		}
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_xa_erase;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_xa_erase:
	if (cq->eq)
		xa_erase(&dev->cqs_xa, cq->cq_idx);
err_remove_mmap:
	efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	if (!umem)
		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
				DMA_FROM_DEVICE);
err_out:
	atomic64_inc(&dev->stats.create_cq_err);
	return err;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct uverbs_attr_bundle *attrs)
{
	return efa_create_cq_umem(ibcq, attr, NULL, attrs);
}

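/*
 * Flatten a umem into an array of DMA addresses of BIT(hp_shift)-byte
 * blocks; rdma_umem_for_each_dma_block() walks the umem's scatterlist in
 * aligned blocks of exactly that size.
 */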
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * create a chunk list of physical pages dma addresses from the supplied
 * scatter gather list
 */
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size, sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
		EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/*
	 * Map chunks to DMA and fill each chunk's next pointer. Iterate
	 * backwards so that when chunk i-1's trailing ctrl_buf is written,
	 * chunk i is already mapped and its DMA address is known.
	 */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	for (; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes,
					 EFA_CHUNK_PAYLOAD_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/*
 * Create a page buffer list from a mapped user memory region. kvzalloc()
 * falls back from kmalloc() to vmalloc() for large buffers, so
 * is_vmalloc_addr() below decides whether the PBL itself can be DMA mapped
 * in one piece or must be described by an indirect chunk list.
 */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
				   struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	int supp_access_flags;
	struct efa_mr *mr;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return ERR_PTR(-EINVAL);
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
		(EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	return mr;
}

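/*
 * Register the pinned memory with the device. ib_umem_find_best_pgsz()
 * picks the largest device-supported page size covering the umem; if the
 * resulting page count fits in the admin command's inline_pbl_array the
 * pages are passed inline, otherwise a (continuous or chunked) PBL is
 * built.
 */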
efa_register_mr(struct ib_pd * ibpd,struct efa_mr * mr,u64 start,u64 length,u64 virt_addr,int access_flags)1690 static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
1691 u64 length, u64 virt_addr, int access_flags)
1692 {
1693 struct efa_dev *dev = to_edev(ibpd->device);
1694 struct efa_com_reg_mr_params params = {};
1695 struct efa_com_reg_mr_result result = {};
1696 struct pbl_context pbl;
1697 unsigned int pg_sz;
1698 int inline_size;
1699 int err;
1700
1701 params.pd = to_epd(ibpd)->pdn;
1702 params.iova = virt_addr;
1703 params.mr_length_in_bytes = length;
1704 params.permissions = access_flags;
1705
1706 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1707 dev->dev_attr.page_size_cap,
1708 virt_addr);
1709 if (!pg_sz) {
1710 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1711 dev->dev_attr.page_size_cap);
1712 return -EOPNOTSUPP;
1713 }
1714
1715 params.page_shift = order_base_2(pg_sz);
1716 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1717
1718 ibdev_dbg(&dev->ibdev,
1719 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1720 start, length, params.page_shift, params.page_num);
1721
1722 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1723 if (params.page_num <= inline_size) {
1724 err = efa_create_inline_pbl(dev, mr, ¶ms);
1725 if (err)
1726 return err;
1727
1728 err = efa_com_register_mr(&dev->edev, ¶ms, &result);
1729 if (err)
1730 return err;
1731 } else {
1732 err = efa_create_pbl(dev, &pbl, mr, ¶ms);
1733 if (err)
1734 return err;
1735
1736 err = efa_com_register_mr(&dev->edev, ¶ms, &result);
1737 pbl_destroy(dev, &pbl);
1738
1739 if (err)
1740 return err;
1741 }
1742
1743 mr->ibmr.lkey = result.l_key;
1744 mr->ibmr.rkey = result.r_key;
1745 mr->ibmr.length = length;
1746 mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
1747 mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
1748 mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
1749 mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
1750 mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
1751 mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
1752 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1753
1754 return 0;
1755 }
1756
efa_reg_user_mr_dmabuf(struct ib_pd * ibpd,u64 start,u64 length,u64 virt_addr,int fd,int access_flags,struct ib_dmah * dmah,struct uverbs_attr_bundle * attrs)1757 struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
1758 u64 length, u64 virt_addr,
1759 int fd, int access_flags,
1760 struct ib_dmah *dmah,
1761 struct uverbs_attr_bundle *attrs)
1762 {
1763 struct efa_dev *dev = to_edev(ibpd->device);
1764 struct ib_umem_dmabuf *umem_dmabuf;
1765 struct efa_mr *mr;
1766 int err;
1767
1768 if (dmah) {
1769 err = -EOPNOTSUPP;
1770 goto err_out;
1771 }
1772
1773 mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
1774 if (IS_ERR(mr)) {
1775 err = PTR_ERR(mr);
1776 goto err_out;
1777 }
1778
1779 umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
1780 access_flags);
1781 if (IS_ERR(umem_dmabuf)) {
1782 err = PTR_ERR(umem_dmabuf);
1783 ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
1784 umem_dmabuf);
1785 goto err_free;
1786 }
1787
1788 mr->umem = &umem_dmabuf->umem;
1789 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1790 if (err)
1791 goto err_release;
1792
1793 return &mr->ibmr;
1794
1795 err_release:
1796 ib_umem_release(mr->umem);
1797 err_free:
1798 kfree(mr);
1799 err_out:
1800 atomic64_inc(&dev->stats.reg_mr_err);
1801 return ERR_PTR(err);
1802 }
1803
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_dmah *dmah,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_mr *mr;
	int err;

	if (dmah) {
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = efa_alloc_mr(ibpd, access_flags, udata);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%pe]\n",
			  mr->umem);
		goto err_free;
	}

	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
	if (err)
		goto err_release;

	return &mr->ibmr;

err_release:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

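/*
 * Direct-verbs handler for EFA_IB_METHOD_MR_QUERY: copies the per-MR
 * interconnect IDs, plus a bitmask saying which of them are valid, back to
 * userspace one response attribute at a time.
 */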
static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
{
	struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
	struct efa_mr *mr = to_emr(ibmr);
	u16 ic_id_validity = 0;
	int ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
			     &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
			     &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
			     &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
	if (ret)
		return ret;

	if (mr->ic_info.recv_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
	if (mr->ic_info.rdma_read_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
	if (mr->ic_info.rdma_recv_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;

	return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
			      &ic_id_validity, sizeof(ic_id_validity));
}

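/*
 * Deregister the MR with the device first; the umem and the MR itself are
 * only released once the device no longer references the l_key.
 */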
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

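/* Report the immutable port attributes (pkey/gid table sizes) from ib_query_port(). */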
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

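/*
 * Sets _attr_str to NULL (success) when either the device does not expose
 * the attribute or userspace acknowledged it via the matching comp_mask bit;
 * otherwise sets it to the attribute name so the caller can log which field
 * the userspace library failed to acknowledge.
 */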
#define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
	(_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
		     NULL : #_attr)

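/*
 * Fail ucontext allocation if the device exposes attributes (max TX batch,
 * minimum SQ depth) that the requesting userspace library does not
 * acknowledge in its comp_mask.
 */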
static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
{
	struct efa_dev *dev = to_edev(ibucontext->device);
	char *attr_str;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
				EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
		goto err;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
				EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
				attr_str))
		goto err;

	return 0;

err:
	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
		  attr_str);
	return -EOPNOTSUPP;
}

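/*
 * Allocate a user context: validate the userspace handshake, allocate a UAR
 * for the context, and report the device capabilities userspace needs
 * (sub-CQ count, inline buffer size, LLQ limits).
 */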
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_ibv_alloc_ucontext_cmd cmd = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * It's fine if the driver does not know all request fields;
	 * we will ack input fields in our response.
	 */

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for alloc_ucontext\n");
		goto err_out;
	}

	err = efa_user_comp_handshake(ibucontext, &cmd);
	if (err)
		goto err_out;

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;
	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
	resp.min_sq_wr = dev->dev_attr.min_sq_depth;

	err = ib_copy_to_udata(udata, &resp,
			       min(sizeof(resp), udata->outlen));
	if (err)
		goto err_dealloc_uar;

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

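/* Called by the rdma core once the last reference on a mmap entry is dropped. */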
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

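/*
 * Back a user mmap request with the region described by the matching mmap
 * entry: device BARs are mapped as non-cached or write-combined I/O, while
 * kernel-allocated DMA pages are inserted page by page.
 */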
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

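/*
 * Create an address handle for the destination GID. The device returns an AH
 * number that userspace needs as well, so it is echoed back through the
 * response udata.
 */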
int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

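/*
 * rdma_hw_stats allocation callbacks; the actual values are filled in by
 * efa_get_hw_stats() below.
 */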
struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
					      u32 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
					  ARRAY_SIZE(efa_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
	return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
					  ARRAY_SIZE(efa_device_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

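/*
 * Device-wide stats are software counters: admin command bookkeeping plus
 * the per-verb error counters this driver increments on failure paths.
 */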
static int efa_fill_device_stats(struct efa_dev *dev,
				 struct rdma_hw_stats *stats)
{
	struct efa_com_stats_admin *as = &dev->edev.aq.stats;
	struct efa_stats *s = &dev->stats;

	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_device_stats_descs);
}

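/*
 * Port stats are queried from the device, one admin command per stats type;
 * RDMA write stats are only queried when the device advertises RDMA write
 * support.
 */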
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
			       u32 port_num)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_com_rdma_write_stats *rws;
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_network_stats *ns;
	struct efa_com_basic_stats *bs;
	int err;

	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
		params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
		err = efa_com_get_stats(&dev->edev, &params, &result);
		if (err)
			return err;

		rws = &result.rdma_write_stats;
		stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
		stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
		stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
		stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
	}

	params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ns = &result.network_stats;
	stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
	stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
	stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
	stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
	stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;

	return ARRAY_SIZE(efa_port_stats_descs);
}

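/*
 * rdma core entry point for hw stats; port_num 0 means the caller wants the
 * device-wide (driver software) counters rather than per-port HW stats.
 */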
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u32 port_num, int index)
{
	if (port_num)
		return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
	else
		return efa_fill_device_stats(to_edev(ibdev), stats);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u32 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}

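/*
 * Direct-verbs (uverbs ioctl) method table: exposes EFA_IB_METHOD_MR_QUERY
 * on the MR object so userspace can retrieve the interconnect IDs recorded
 * at registration time.
 */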
DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
			    UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
					    UVERBS_OBJECT_MR,
					    UVERBS_ACCESS_READ,
					    UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY));

ADD_UVERBS_METHODS(efa_mr,
		   UVERBS_OBJECT_MR,
		   &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));

const struct uapi_definition efa_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
				&efa_mr),
	{},
};