/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}
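/*
 * Note on CQE ownership: the CQ is a power-of-two ring, so
 * cq->ibcq.cqe + 1 is the ring size.  get_sw_cqe() treats a CQE as
 * software-owned when its opcode is not MLX5_CQE_INVALID and its
 * owner bit matches the wrap parity of the index, i.e. the value of
 * (n & ring_size).  For example, with a 256-entry ring, index 260
 * has wrapped once, so a valid CQE at slot 4 must carry owner bit 1.
 * When 128-byte CQEs are in use, the 64-byte reporting format sits
 * in the second half of the entry, hence the "cqe + 64" adjustment
 * above.
 */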
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE    = 2,
};
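/*
 * Responder-side completion processing: recover the wr_id from the
 * SRQ (for SRQ- or XRC-attached QPs) or from the receive queue tail,
 * then decode the opcode, immediate data, pkey and, for RoCE ports,
 * the VLAN and L3 header type information carried in the CQE.
 */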
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	vlan_present = cqe_has_vlan(cqe);
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}
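/*
 * dump_cqe() above is deliberately skipped (dump = 0) for syndromes
 * that are an expected part of QP teardown or retry exhaustion --
 * flush errors and transport/RNR retry-exceeded -- so that routine
 * error flows do not flood the log; all other syndromes dump the raw
 * CQE to aid debugging.
 */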
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}
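/*
 * sw_send_comp() above and sw_recv_comp() below synthesize
 * IB_WC_WR_FLUSH_ERR completions entirely in software for every
 * outstanding WQE.  They are only used once the device has entered
 * the internal-error state, when no further hardware CQEs can be
 * expected (see mlx5_ib_poll_sw_comp()).
 */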
static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated (flush) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}
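/*
 * Poll a single CQE.  Returns 0 when a work completion was filled in
 * and -EAGAIN when no software-owned CQE is available.  The function
 * may consume CQEs without producing a completion (resize and
 * signature-error CQEs), in which case it loops via "repoll".
 */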
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mr *mmkey;
	struct mlx5_ib_mr *mr;
	unsigned long flags;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		spin_lock_irqsave(&dev->mdev->priv.mr_table.lock, flags);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     (long long)mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}
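/*
 * Re-arming and the missed-event race: a consumer typically drains
 * the CQ, re-arms it, and polls once more to pick up completions
 * that arrived in between.  Illustrative caller-side sketch (names
 * other than the verbs calls are hypothetical):
 *
 *	while (ib_poll_cq(cq, batch, wc) > 0)
 *		process(wc);
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto poll_again;
 *
 * mlx5_ib_arm_cq() below returns 1 for exactly that case: the caller
 * asked for IB_CQ_REPORT_MISSED_EVENTS and events may have been
 * missed (here, pending entries on the soft wc_list).
 */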
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page,
		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
		    to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
			     2 * PAGE_SIZE, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd;
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	/*
	 * Accept the short command layout, without the trailing
	 * "reserved" field, presumably from older userspace
	 * libraries that predate it.
	 */
	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    (long long)ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = to_mucontext(context)->uuari.uars[0].index;

	return 0;

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}
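/*
 * init_cq_buf() stamps every entry with MLX5_CQE_INVALID so that
 * get_sw_cqe() treats the freshly allocated ring as empty until the
 * hardware starts writing CQEs into it.
 */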
static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
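/*
 * The requested CQ depth is rounded up to a power of two with one
 * extra slot reserved, and ibcq->cqe reports entries - 1 back to the
 * consumer.  E.g. a request for 100 entries allocates a 128-entry
 * ring (roundup_pow_of_two(101)) and reports 127 usable CQEs.
 */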
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
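/*
 * __mlx5_ib_cq_clean() is expected to run with the CQ lock already
 * held (mlx5_ib_cq_clean() below is the locking wrapper).  It is
 * used when a QP or SRQ is being destroyed: completions that still
 * reference the doomed resource are squashed out of the ring by
 * copying the surviving entries over them.
 */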
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}
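/*
 * For a kernel-owned CQ, resizing copies every CQE that is still in
 * software ownership from the old ring into the new one, recomputing
 * the ownership bit for the destination ring size, until the
 * hardware-generated MLX5_CQE_RESIZE_CQ marker is reached.
 */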
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}
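/*
 * Resize protocol: allocate the new buffer (user or kernel), issue
 * MODIFY_CQ with MLX5_CQ_OPMOD_RESIZE, then -- for kernel CQs --
 * copy the still-pending CQEs across under the CQ lock once the
 * firmware has switched buffers.  User CQs simply swap the umem.
 */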
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}