/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

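	/*
	 * A CQE belongs to software when its opcode is valid (not
	 * MLX5_CQE_INVALID) and its ownership bit matches the parity of the
	 * current pass over the CQ ring (cq->ibcq.cqe + 1 is the ring size).
	 */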
	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE    = 2,
};

static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	vlan_present = cqe_has_vlan(cqe);
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * flush-error completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}

static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mr *mmkey;
	struct mlx5_ib_mr *mr;
	unsigned long flags;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		spin_lock_irqsave(&dev->mdev->priv.mr_table.lock, flags);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     (long long)mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
	unsigned long irq_flags;
	int ret = 0;

	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -1;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

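	/*
	 * When the caller asks for IB_CQ_REPORT_MISSED_EVENTS, return 1 if
	 * software-generated completions are already queued on wc_list so it
	 * knows to poll the CQ again instead of waiting for the next event.
	 */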
	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page,
		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
		    cq->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
			     2 * PAGE_SIZE, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd;
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    (long long)ucmd.buf_addr, entries * ucmd.cqe_size, npages,
		    page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = to_mucontext(context)->uuari.uars[0].index;

	return 0;

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
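		/*
		 * Mark the CQE invalid so get_sw_cqe() treats the ring as
		 * empty until hardware writes a real completion here.
		 */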
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
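			/*
			 * Copy the older CQE on top of this slot while keeping
			 * the slot's original hardware ownership bit intact.
			 */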
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
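		/*
		 * Kernel-owned CQ: copy CQEs still pending in the old buffer
		 * into the resized buffer under the CQ lock, swap buffers,
		 * then free the old one once the lock is dropped.
		 */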
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}