/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

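/*
 * Return the CQE at index n if software owns it, i.e. the hardware has
 * written a valid entry whose ownership bit matches the current pass
 * over the ring; return NULL when there is nothing new to poll.
 */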
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

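/*
 * Translate the opcode of a successful send completion (taken from the
 * top byte of sop_drop_qpn in the CQE) into the matching ib_wc opcode
 * and flags.
 */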
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE	   = 2,
};

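/*
 * Fill in a work completion for a receive-side CQE: recover the wr_id
 * from the QP's receive queue or from the (X)SRQ wqe counter, then
 * decode the opcode, immediate data, pkey index, and (for RoCE) the
 * VLAN and network header type fields.
 */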
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq = NULL;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			if (msrq)
				srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (is_qp1(qp->type)) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe,
		     struct ib_wc *wc, const char *level)
{
	mlx5_ib_log(level, dev, "WC error: %d, Message: %s\n", wc->status,
		    ib_wc_status_msg(wc->status));
	print_hex_dump(level, "cqe_dump: ", DUMP_PREFIX_OFFSET, 16, 1,
		       cqe, sizeof(*cqe), false);
}

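/*
 * Map a hardware error-CQE syndrome onto the corresponding ib_wc status
 * code.  Noisy-but-expected syndromes (flush, retry exceeded) skip the
 * CQE hex dump; protection/access/remote-op errors are dumped at debug
 * level, everything else at warning level.
 */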
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	const char *dump = KERN_WARNING;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = NULL;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		dump = NULL;
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		dump = NULL;
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe, wc, dump);
}

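/*
 * Advance the send queue's last_poll pointer past the work request that
 * just completed: walk the sq.w_list chain from @tail until reaching
 * @head (the index of the completed WQE) and record its successor.
 */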
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

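/*
 * Generate software flush completions for up to @num_entries outstanding
 * WQEs on one queue (send or receive) of @qp.  Used when the device is
 * in internal error and the hardware can no longer produce CQEs.
 */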
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find unfinished WQEs belonging to this CQ and generate flush
	 * completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}

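/*
 * Poll a single CQE from @cq.  Returns 0 and fills @wc on success, or
 * -EAGAIN when the CQ is empty.  *cur_qp caches the QP of the previous
 * CQE so the QP table lookup is skipped when consecutive completions
 * belong to the same QP.  Signature-error and resize CQEs are consumed
 * internally and polling restarts.
 */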
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (wc->status != IB_WC_WR_FLUSH_ERR &&
		    (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
			dev->umrc.state = MLX5_UMR_STATE_RECOVER;

		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		goto repoll;
	}
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

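/*
 * Standard ib_poll_cq entry point.  Software-generated completions on
 * cq->wc_list are drained first; the remaining budget is filled from
 * hardware CQEs.  When the device is in internal error, only software
 * flush completions are reported.
 */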
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

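/*
 * Arm the CQ for the next completion event.  Returns 1 (missed event)
 * when IB_CQ_REPORT_MISSED_EVENTS is requested and software completions
 * are already queued, so the caller knows to poll again before sleeping.
 */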
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

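/*
 * Build the CREATE_CQ command for a userspace CQ: copy and validate the
 * user command, pin the CQ buffer and doorbell record, pick the best
 * page size for the umem, and encode CQE compression/padding options.
 * The UAR index may come from a uverbs attribute, from the ucmd, or
 * default to the context's first system page.
 */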
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen,
			  struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_create_cq ucmd = {};
	unsigned long page_size;
	unsigned int page_offset_quantized;
	size_t ucmdlen;
	__be64 *pas;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
			    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
	mlx5_ib_dbg(
		dev,
		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
		ucmd.buf_addr, entries * ucmd.cqe_size,
		ib_umem_num_pages(cq->buf.umem), page_size, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX)) {
		err = uverbs_copy_from(index, attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX);
		if (err)
			goto err_cqb;
	} else if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(&cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

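/*
 * ib_create_cq entry point.  Validates the requested depth against
 * log_max_cq_sz, rounds entries up to a power of two (plus one slot),
 * delegates buffer setup to create_cq_user()/create_cq_kernel(), and
 * issues the CREATE_CQ firmware command.
 */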
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen, attrs);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}

int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int ret;

	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (ret)
		return ret;

	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

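/*
 * Remove all CQEs belonging to QP/SRQ number @rsn from the CQ,
 * compacting the remaining entries.  Callers must hold the CQ lock;
 * mlx5_ib_cq_clean() below is the locked wrapper.
 */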
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

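/* Locked wrapper around __mlx5_ib_cq_clean(). */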
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

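/* Tune event moderation: raise a completion event only after cq_count
 * CQEs or cq_period microseconds, whichever comes first.  Requires the
 * cq_moderation capability.
 */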
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

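/* Pin the userspace buffer that will back the resized CQ.  The umem is
 * parked in cq->resize_umem until mlx5_ib_resize_cq() either commits
 * it or releases it on failure.
 */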
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata,
		       int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

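/* Allocate a kernel fragment buffer for the resized CQ and hand every
 * CQE to HW ownership.  It stays in cq->resize_buf until
 * copy_resize_cqes() migrates the unpolled entries into it.
 */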
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	cq->resize_buf = NULL;	/* don't leave a dangling pointer behind */
	return err;
}

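/* Firmware marks the end of the old buffer's valid entries with a
 * MLX5_CQE_RESIZE_CQ CQE.  Copy every still-unpolled CQE from the
 * consumer index up to that marker into the new buffer, fixing up the
 * software ownership bit for the new ring size.  Called with cq->lock
 * held.
 */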
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

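/* Resize a CQ to hold at least @entries CQEs.  The requested depth is
 * validated against log_max_cq_sz and rounded up to a power of two; a
 * new buffer is staged, firmware switches over via
 * MODIFY_CQ(MLX5_CQ_OPMOD_RESIZE), and the buffers are then swapped.
 */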
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	unsigned int page_offset_quantized = 0;
	unsigned int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

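	/* resize_mutex serializes resize operations.  Stage the new
	 * buffer first: a pinned user umem or a kernel fragment buffer.
	 */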
	mutex_lock(&cq->resize_mutex);
	if (udata) {
		unsigned long page_size;

		err = resize_user(dev, cq, entries, udata, &cqe_size);
		if (err)
			goto ex;

		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
			cq->resize_umem, cqc, log_page_size,
			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
			&page_offset_quantized);
		if (!page_size) {
			err = -EINVAL;
			goto ex_resize;
		}
		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
		page_shift = order_base_2(page_size);
	} else {
		struct mlx5_frag_buf *frag_buf;

		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (err)
			goto ex;
		frag_buf = &cq->resize_buf->frag_buf;
		npas = frag_buf->npages;
		page_shift = frag_buf->page_shift;
	}

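	/* Build the MODIFY_CQ command: the PAS list of the new buffer
	 * plus the updated page size/offset and log_cq_size in the CQ
	 * context.
	 */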
	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
				     0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

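	/* Firmware accepted the resize; commit it.  A user CQ only needs
	 * the new umem swapped in, while a kernel CQ must migrate its
	 * unpolled CQEs under cq->lock before the old buffer is freed.
	 */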
	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

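/* Report the CQE size of @ibcq; callers that pass a NULL CQ get the
 * 128-byte default.
 */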
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

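/* Queue a software-generated work completion on the CQ.  The WC is
 * added to cq->wc_list and consumed by the next poll; if the CQ is
 * armed for the next completion, or the WC carries an error status,
 * notify_work is scheduled to raise a completion event.
 */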
/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}

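/* Vendor extension of UVERBS_METHOD_CQ_CREATE: an optional u32 input
 * attribute carrying the UAR index to use for this CQ.
 */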
ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_cq_create,
	UVERBS_OBJECT_CQ,
	UVERBS_METHOD_CQ_CREATE,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));

const struct uapi_definition mlx5_ib_create_cq_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_CQ, &mlx5_ib_cq_create),
	{},
};