1 /*- 2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 3 * 4 * Copyright (c) 2015 - 2021 Intel Corporation 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenFabrics.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 /*$FreeBSD$*/ 35 36 #include "irdma_main.h" 37 38 static struct irdma_rsrc_limits rsrc_limits_table[] = { 39 [0] = { 40 .qplimit = SZ_128, 41 }, 42 [1] = { 43 .qplimit = SZ_1K, 44 }, 45 [2] = { 46 .qplimit = SZ_2K, 47 }, 48 [3] = { 49 .qplimit = SZ_4K, 50 }, 51 [4] = { 52 .qplimit = SZ_16K, 53 }, 54 [5] = { 55 .qplimit = SZ_64K, 56 }, 57 [6] = { 58 .qplimit = SZ_128K, 59 }, 60 [7] = { 61 .qplimit = SZ_256K, 62 }, 63 }; 64 65 /* types of hmc objects */ 66 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = { 67 IRDMA_HMC_IW_QP, 68 IRDMA_HMC_IW_CQ, 69 IRDMA_HMC_IW_HTE, 70 IRDMA_HMC_IW_ARP, 71 IRDMA_HMC_IW_APBVT_ENTRY, 72 IRDMA_HMC_IW_MR, 73 IRDMA_HMC_IW_XF, 74 IRDMA_HMC_IW_XFFL, 75 IRDMA_HMC_IW_Q1, 76 IRDMA_HMC_IW_Q1FL, 77 IRDMA_HMC_IW_TIMER, 78 IRDMA_HMC_IW_FSIMC, 79 IRDMA_HMC_IW_FSIAV, 80 IRDMA_HMC_IW_RRF, 81 IRDMA_HMC_IW_RRFFL, 82 IRDMA_HMC_IW_HDR, 83 IRDMA_HMC_IW_MD, 84 IRDMA_HMC_IW_OOISC, 85 IRDMA_HMC_IW_OOISCFFL, 86 }; 87 88 /** 89 * irdma_iwarp_ce_handler - handle iwarp completions 90 * @iwcq: iwarp cq receiving event 91 */ 92 static void 93 irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq) 94 { 95 struct irdma_cq *cq = iwcq->back_cq; 96 97 if (!cq->user_mode) 98 cq->armed = false; 99 if (cq->ibcq.comp_handler) 100 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 101 } 102 103 /** 104 * irdma_puda_ce_handler - handle puda completion events 105 * @rf: RDMA PCI function 106 * @cq: puda completion q for event 107 */ 108 static void 109 irdma_puda_ce_handler(struct irdma_pci_f *rf, 110 struct irdma_sc_cq *cq) 111 { 112 struct irdma_sc_dev *dev = &rf->sc_dev; 113 u32 compl_error; 114 int status; 115 116 do { 117 status = irdma_puda_poll_cmpl(dev, cq, &compl_error); 118 if (status == -ENOENT) 119 break; 120 if (status) { 121 irdma_debug(dev, IRDMA_DEBUG_ERR, "puda status = %d\n", 122 status); 123 break; 124 } 125 if (compl_error) { 126 irdma_debug(dev, IRDMA_DEBUG_ERR, 127 "puda compl_err =0x%x\n", compl_error); 128 break; 129 } 130 } while (1); 131 132 irdma_sc_ccq_arm(cq); 133 } 134 135 /** 136 * irdma_process_ceq - handle ceq for 
completions 137 * @rf: RDMA PCI function 138 * @ceq: ceq having cq for completion 139 */ 140 static void 141 irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) 142 { 143 struct irdma_sc_dev *dev = &rf->sc_dev; 144 struct irdma_sc_ceq *sc_ceq; 145 struct irdma_sc_cq *cq; 146 unsigned long flags; 147 148 sc_ceq = &ceq->sc_ceq; 149 do { 150 spin_lock_irqsave(&ceq->ce_lock, flags); 151 cq = irdma_sc_process_ceq(dev, sc_ceq); 152 if (!cq) { 153 spin_unlock_irqrestore(&ceq->ce_lock, flags); 154 break; 155 } 156 157 if (cq->cq_type == IRDMA_CQ_TYPE_IWARP) 158 irdma_iwarp_ce_handler(cq); 159 160 spin_unlock_irqrestore(&ceq->ce_lock, flags); 161 162 if (cq->cq_type == IRDMA_CQ_TYPE_CQP) 163 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); 164 else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ || 165 cq->cq_type == IRDMA_CQ_TYPE_IEQ) 166 irdma_puda_ce_handler(rf, cq); 167 } while (1); 168 } 169 170 static void 171 irdma_set_flush_fields(struct irdma_sc_qp *qp, 172 struct irdma_aeqe_info *info) 173 { 174 qp->sq_flush_code = info->sq; 175 qp->rq_flush_code = info->rq; 176 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 177 178 switch (info->ae_id) { 179 case IRDMA_AE_AMP_BOUNDS_VIOLATION: 180 case IRDMA_AE_AMP_INVALID_STAG: 181 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 182 /* fallthrough */ 183 case IRDMA_AE_UDA_XMIT_BAD_PD: 184 qp->flush_code = FLUSH_PROT_ERR; 185 break; 186 case IRDMA_AE_AMP_UNALLOCATED_STAG: 187 case IRDMA_AE_AMP_BAD_PD: 188 qp->flush_code = FLUSH_PROT_ERR; 189 break; 190 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: 191 case IRDMA_AE_AMP_BAD_QP: 192 case IRDMA_AE_WQE_UNEXPECTED_OPCODE: 193 qp->flush_code = FLUSH_LOC_QP_OP_ERR; 194 break; 195 case IRDMA_AE_AMP_BAD_STAG_KEY: 196 case IRDMA_AE_AMP_BAD_STAG_INDEX: 197 case IRDMA_AE_AMP_TO_WRAP: 198 case IRDMA_AE_AMP_RIGHTS_VIOLATION: 199 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: 200 case IRDMA_AE_PRIV_OPERATION_DENIED: 201 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: 202 case IRDMA_AE_IB_REMOTE_OP_ERROR: 203 qp->flush_code = FLUSH_REM_ACCESS_ERR; 204 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 205 break; 206 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 207 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 208 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: 209 case IRDMA_AE_UDA_L4LEN_INVALID: 210 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: 211 qp->flush_code = FLUSH_LOC_LEN_ERR; 212 break; 213 case IRDMA_AE_LCE_QP_CATASTROPHIC: 214 qp->flush_code = FLUSH_FATAL_ERR; 215 break; 216 case IRDMA_AE_DDP_UBE_INVALID_MO: 217 case IRDMA_AE_IB_RREQ_AND_Q1_FULL: 218 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 219 qp->flush_code = FLUSH_GENERAL_ERR; 220 break; 221 case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: 222 case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: 223 case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: 224 qp->flush_code = FLUSH_MW_BIND_ERR; 225 break; 226 case IRDMA_AE_LLP_TOO_MANY_RETRIES: 227 qp->flush_code = FLUSH_RETRY_EXC_ERR; 228 break; 229 case IRDMA_AE_IB_INVALID_REQUEST: 230 qp->flush_code = FLUSH_REM_INV_REQ_ERR; 231 break; 232 default: 233 qp->flush_code = FLUSH_FATAL_ERR; 234 break; 235 } 236 } 237 238 /** 239 * irdma_process_aeq - handle aeq events 240 * @rf: RDMA PCI function 241 */ 242 static void 243 irdma_process_aeq(struct irdma_pci_f *rf) 244 { 245 struct irdma_sc_dev *dev = &rf->sc_dev; 246 struct irdma_aeq *aeq = &rf->aeq; 247 struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq; 248 struct irdma_aeqe_info aeinfo; 249 struct irdma_aeqe_info *info = &aeinfo; 250 int ret; 251 struct irdma_qp *iwqp = NULL; 252 struct irdma_cq *iwcq = NULL; 253 struct 
irdma_sc_qp *qp = NULL; 254 struct irdma_qp_host_ctx_info *ctx_info = NULL; 255 unsigned long flags; 256 257 u32 aeqcnt = 0; 258 259 if (!sc_aeq->size) 260 return; 261 262 do { 263 memset(info, 0, sizeof(*info)); 264 ret = irdma_sc_get_next_aeqe(sc_aeq, info); 265 if (ret) 266 break; 267 268 aeqcnt++; 269 irdma_debug(dev, IRDMA_DEBUG_AEQ, 270 "ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n", 271 info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, info->iwarp_state, info->ae_src); 272 273 if (info->qp) { 274 spin_lock_irqsave(&rf->qptable_lock, flags); 275 iwqp = rf->qp_table[info->qp_cq_id]; 276 if (!iwqp) { 277 spin_unlock_irqrestore(&rf->qptable_lock, 278 flags); 279 if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) { 280 struct irdma_device *iwdev = rf->iwdev; 281 282 atomic_dec(&iwdev->vsi.qp_suspend_reqs); 283 wake_up(&iwdev->suspend_wq); 284 continue; 285 } 286 irdma_debug(dev, IRDMA_DEBUG_AEQ, 287 "qp_id %d is already freed\n", 288 info->qp_cq_id); 289 continue; 290 } 291 irdma_qp_add_ref(&iwqp->ibqp); 292 spin_unlock_irqrestore(&rf->qptable_lock, flags); 293 qp = &iwqp->sc_qp; 294 spin_lock_irqsave(&iwqp->lock, flags); 295 iwqp->hw_tcp_state = info->tcp_state; 296 iwqp->hw_iwarp_state = info->iwarp_state; 297 if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE) 298 iwqp->last_aeq = info->ae_id; 299 spin_unlock_irqrestore(&iwqp->lock, flags); 300 ctx_info = &iwqp->ctx_info; 301 } else { 302 if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR) 303 continue; 304 } 305 306 switch (info->ae_id) { 307 struct irdma_cm_node *cm_node; 308 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: 309 cm_node = iwqp->cm_node; 310 if (cm_node->accept_pend) { 311 atomic_dec(&cm_node->listener->pend_accepts_cnt); 312 cm_node->accept_pend = 0; 313 } 314 iwqp->rts_ae_rcvd = 1; 315 wake_up_interruptible(&iwqp->waitq); 316 break; 317 case IRDMA_AE_LLP_FIN_RECEIVED: 318 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: 319 if (qp->term_flags) 320 break; 321 if (atomic_inc_return(&iwqp->close_timer_started) == 1) { 322 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT; 323 if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT && 324 iwqp->ibqp_state == IB_QPS_RTS) { 325 irdma_next_iw_state(iwqp, 326 IRDMA_QP_STATE_CLOSING, 327 0, 0, 0); 328 irdma_cm_disconn(iwqp); 329 } 330 irdma_schedule_cm_timer(iwqp->cm_node, 331 (struct irdma_puda_buf *)iwqp, 332 IRDMA_TIMER_TYPE_CLOSE, 333 1, 0); 334 } 335 break; 336 case IRDMA_AE_LLP_CLOSE_COMPLETE: 337 if (qp->term_flags) 338 irdma_terminate_done(qp, 0); 339 else 340 irdma_cm_disconn(iwqp); 341 break; 342 case IRDMA_AE_BAD_CLOSE: 343 case IRDMA_AE_RESET_SENT: 344 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 345 0); 346 irdma_cm_disconn(iwqp); 347 break; 348 case IRDMA_AE_LLP_CONNECTION_RESET: 349 if (atomic_read(&iwqp->close_timer_started)) 350 break; 351 irdma_cm_disconn(iwqp); 352 break; 353 case IRDMA_AE_QP_SUSPEND_COMPLETE: 354 if (iwqp->iwdev->vsi.tc_change_pending) { 355 atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs); 356 wake_up(&iwqp->iwdev->suspend_wq); 357 } 358 break; 359 case IRDMA_AE_TERMINATE_SENT: 360 irdma_terminate_send_fin(qp); 361 break; 362 case IRDMA_AE_LLP_TERMINATE_RECEIVED: 363 irdma_terminate_received(qp, info); 364 break; 365 case IRDMA_AE_CQ_OPERATION_ERROR: 366 irdma_dev_err(dev, 367 "Processing CQ[0x%x] op error, AE 0x%04X\n", 368 info->qp_cq_id, info->ae_id); 369 spin_lock_irqsave(&rf->cqtable_lock, flags); 370 iwcq = rf->cq_table[info->qp_cq_id]; 371 if (!iwcq) { 372 spin_unlock_irqrestore(&rf->cqtable_lock, 373 flags); 374 
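				/*
				 * The CQ was freed before this AE was
				 * processed; just log it and drop the event.
				 */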
irdma_debug(dev, IRDMA_DEBUG_AEQ, 375 "cq_id %d is already freed\n", 376 info->qp_cq_id); 377 continue; 378 } 379 irdma_cq_add_ref(&iwcq->ibcq); 380 spin_unlock_irqrestore(&rf->cqtable_lock, flags); 381 if (iwcq->ibcq.event_handler) { 382 struct ib_event ibevent; 383 384 ibevent.device = iwcq->ibcq.device; 385 ibevent.event = IB_EVENT_CQ_ERR; 386 ibevent.element.cq = &iwcq->ibcq; 387 iwcq->ibcq.event_handler(&ibevent, 388 iwcq->ibcq.cq_context); 389 } 390 irdma_cq_rem_ref(&iwcq->ibcq); 391 break; 392 case IRDMA_AE_RESET_NOT_SENT: 393 case IRDMA_AE_LLP_DOUBT_REACHABILITY: 394 case IRDMA_AE_RESOURCE_EXHAUSTION: 395 break; 396 case IRDMA_AE_PRIV_OPERATION_DENIED: 397 case IRDMA_AE_STAG_ZERO_INVALID: 398 case IRDMA_AE_IB_RREQ_AND_Q1_FULL: 399 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 400 case IRDMA_AE_DDP_UBE_INVALID_MO: 401 case IRDMA_AE_DDP_UBE_INVALID_QN: 402 case IRDMA_AE_DDP_NO_L_BIT: 403 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 404 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: 405 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: 406 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: 407 case IRDMA_AE_INVALID_ARP_ENTRY: 408 case IRDMA_AE_INVALID_TCP_OPTION_RCVD: 409 case IRDMA_AE_STALE_ARP_ENTRY: 410 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 411 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 412 case IRDMA_AE_LLP_SYN_RECEIVED: 413 case IRDMA_AE_LLP_TOO_MANY_RETRIES: 414 case IRDMA_AE_LCE_QP_CATASTROPHIC: 415 case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: 416 case IRDMA_AE_LCE_CQ_CATASTROPHIC: 417 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: 418 default: 419 irdma_dev_err(dev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_source=%d\n", 420 info->ae_id, info->qp, info->qp_cq_id, info->ae_src); 421 if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) { 422 ctx_info->roce_info->err_rq_idx_valid = info->rq; 423 if (info->rq) { 424 ctx_info->roce_info->err_rq_idx = info->wqe_idx; 425 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, 426 ctx_info); 427 } 428 irdma_set_flush_fields(qp, info); 429 irdma_cm_disconn(iwqp); 430 break; 431 } 432 ctx_info->iwarp_info->err_rq_idx_valid = info->rq; 433 if (info->rq) { 434 ctx_info->iwarp_info->err_rq_idx = info->wqe_idx; 435 ctx_info->tcp_info_valid = false; 436 ctx_info->iwarp_info_valid = true; 437 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, 438 ctx_info); 439 } 440 if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS && 441 iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) { 442 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0); 443 irdma_cm_disconn(iwqp); 444 } else { 445 iwqp->sc_qp.term_flags = 1; 446 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 447 0); 448 irdma_cm_disconn(iwqp); 449 } 450 break; 451 } 452 if (info->qp) 453 irdma_qp_rem_ref(&iwqp->ibqp); 454 } while (1); 455 456 if (aeqcnt) 457 irdma_sc_repost_aeq_entries(dev, aeqcnt); 458 } 459 460 /** 461 * irdma_ena_intr - set up device interrupts 462 * @dev: hardware control device structure 463 * @msix_id: id of the interrupt to be enabled 464 */ 465 static void 466 irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id) 467 { 468 dev->irq_ops->irdma_en_irq(dev, msix_id); 469 } 470 471 /** 472 * irdma_dpc - tasklet for aeq and ceq 0 473 * @t: tasklet_struct ptr 474 */ 475 static void 476 irdma_dpc(struct tasklet_struct *t) 477 { 478 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); 479 480 if (rf->msix_shared) 481 irdma_process_ceq(rf, rf->ceqlist); 482 irdma_process_aeq(rf); 483 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); 484 } 485 486 /** 487 * irdma_ceq_dpc - dpc handler 
for CEQ 488 * @t: tasklet_struct ptr 489 */ 490 static void 491 irdma_ceq_dpc(struct tasklet_struct *t) 492 { 493 struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet); 494 struct irdma_pci_f *rf = iwceq->rf; 495 496 irdma_process_ceq(rf, iwceq); 497 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); 498 } 499 500 /** 501 * irdma_save_msix_info - copy msix vector information to iwarp device 502 * @rf: RDMA PCI function 503 * 504 * Allocate iwdev msix table and copy the msix info to the table 505 * Return 0 if successful, otherwise return error 506 */ 507 static int 508 irdma_save_msix_info(struct irdma_pci_f *rf) 509 { 510 struct irdma_qvlist_info *iw_qvlist; 511 struct irdma_qv_info *iw_qvinfo; 512 u32 ceq_idx; 513 u32 i; 514 u32 size; 515 516 if (!rf->msix_count) 517 return -EINVAL; 518 519 size = sizeof(struct irdma_msix_vector) * rf->msix_count; 520 size += sizeof(struct irdma_qvlist_info); 521 size += sizeof(struct irdma_qv_info) * rf->msix_count - 1; 522 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); 523 if (!rf->iw_msixtbl) 524 return -ENOMEM; 525 526 rf->iw_qvlist = (struct irdma_qvlist_info *) 527 (&rf->iw_msixtbl[rf->msix_count]); 528 iw_qvlist = rf->iw_qvlist; 529 iw_qvinfo = iw_qvlist->qv_info; 530 iw_qvlist->num_vectors = rf->msix_count; 531 if (rf->msix_count <= num_online_cpus()) 532 rf->msix_shared = true; 533 else if (rf->msix_count > num_online_cpus() + 1) 534 rf->msix_count = num_online_cpus() + 1; 535 536 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { 537 rf->iw_msixtbl[i].idx = rf->msix_info.entry + i; 538 rf->iw_msixtbl[i].cpu_affinity = ceq_idx; 539 if (!i) { 540 iw_qvinfo->aeq_idx = 0; 541 if (rf->msix_shared) 542 iw_qvinfo->ceq_idx = ceq_idx++; 543 else 544 iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX; 545 } else { 546 iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX; 547 iw_qvinfo->ceq_idx = ceq_idx++; 548 } 549 iw_qvinfo->itr_idx = 3; 550 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; 551 } 552 553 return 0; 554 } 555 556 /** 557 * irdma_irq_handler - interrupt handler for aeq and ceq0 558 * @data: RDMA PCI function 559 */ 560 static void 561 irdma_irq_handler(void *data) 562 { 563 struct irdma_pci_f *rf = data; 564 565 tasklet_schedule(&rf->dpc_tasklet); 566 } 567 568 /** 569 * irdma_ceq_handler - interrupt handler for ceq 570 * @data: ceq pointer 571 */ 572 static void 573 irdma_ceq_handler(void *data) 574 { 575 struct irdma_ceq *iwceq = data; 576 577 tasklet_schedule(&iwceq->dpc_tasklet); 578 } 579 580 /** 581 * irdma_free_irq - free device interrupts in FreeBSD manner 582 * @rf: RDMA PCI function 583 * @msix_vec: msix vector to disable irq 584 * 585 * The function is called when destroying irq. It tearsdown 586 * the interrupt and release resources. 
587 */ 588 static void 589 irdma_free_irq(struct irdma_pci_f *rf, struct irdma_msix_vector *msix_vec) 590 { 591 if (msix_vec->tag) { 592 bus_teardown_intr(rf->dev_ctx.dev, msix_vec->res, 593 msix_vec->tag); 594 msix_vec->tag = NULL; 595 } 596 if (msix_vec->res) { 597 bus_release_resource(rf->dev_ctx.dev, SYS_RES_IRQ, 598 msix_vec->idx + 1, 599 msix_vec->res); 600 msix_vec->res = NULL; 601 } 602 } 603 604 /** 605 * irdma_destroy_irq - destroy device interrupts 606 * @rf: RDMA PCI function 607 * @msix_vec: msix vector to disable irq 608 * @dev_id: parameter to pass to free_irq (used during irq setup) 609 * 610 * The function is called when destroying aeq/ceq 611 */ 612 static void 613 irdma_destroy_irq(struct irdma_pci_f *rf, 614 struct irdma_msix_vector *msix_vec, void *dev_id) 615 { 616 struct irdma_sc_dev *dev = &rf->sc_dev; 617 618 dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); 619 irdma_free_irq(rf, msix_vec); 620 } 621 622 /** 623 * irdma_destroy_cqp - destroy control qp 624 * @rf: RDMA PCI function 625 * @free_hwcqp: 1 if hw cqp should be freed 626 * 627 * Issue destroy cqp request and 628 * free the resources associated with the cqp 629 */ 630 static void 631 irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp) 632 { 633 struct irdma_sc_dev *dev = &rf->sc_dev; 634 struct irdma_cqp *cqp = &rf->cqp; 635 int status = 0; 636 637 if (rf->cqp_cmpl_wq) 638 destroy_workqueue(rf->cqp_cmpl_wq); 639 if (free_hwcqp) 640 status = irdma_sc_cqp_destroy(dev->cqp); 641 if (status) 642 irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n", 643 status); 644 645 irdma_cleanup_pending_cqp_op(rf); 646 irdma_free_dma_mem(dev->hw, &cqp->sq); 647 kfree(cqp->scratch_array); 648 cqp->scratch_array = NULL; 649 kfree(cqp->cqp_requests); 650 cqp->cqp_requests = NULL; 651 } 652 653 static void 654 irdma_destroy_virt_aeq(struct irdma_pci_f *rf) 655 { 656 struct irdma_aeq *aeq = &rf->aeq; 657 u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); 658 dma_addr_t *pg_arr = (dma_addr_t *) aeq->palloc.level1.addr; 659 660 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); 661 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); 662 vfree(aeq->mem.va); 663 } 664 665 /** 666 * irdma_destroy_aeq - destroy aeq 667 * @rf: RDMA PCI function 668 * 669 * Issue a destroy aeq request and 670 * free the resources associated with the aeq 671 * The function is called during driver unload 672 */ 673 static void 674 irdma_destroy_aeq(struct irdma_pci_f *rf) 675 { 676 struct irdma_sc_dev *dev = &rf->sc_dev; 677 struct irdma_aeq *aeq = &rf->aeq; 678 int status = -EBUSY; 679 680 if (!rf->msix_shared) { 681 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); 682 irdma_destroy_irq(rf, rf->iw_msixtbl, rf); 683 } 684 if (rf->reset) 685 goto exit; 686 687 aeq->sc_aeq.size = 0; 688 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY); 689 if (status) 690 irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy AEQ failed %d\n", 691 status); 692 693 exit: 694 if (aeq->virtual_map) 695 irdma_destroy_virt_aeq(rf); 696 else 697 irdma_free_dma_mem(dev->hw, &aeq->mem); 698 } 699 700 /** 701 * irdma_destroy_ceq - destroy ceq 702 * @rf: RDMA PCI function 703 * @iwceq: ceq to be destroyed 704 * 705 * Issue a destroy ceq request and 706 * free the resources associated with the ceq 707 */ 708 static void 709 irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) 710 { 711 struct irdma_sc_dev *dev = &rf->sc_dev; 712 int status; 713 714 if (rf->reset) 715 goto exit; 716 717 status = 
irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1); 718 if (status) { 719 irdma_debug(dev, IRDMA_DEBUG_ERR, 720 "CEQ destroy command failed %d\n", status); 721 goto exit; 722 } 723 724 status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq); 725 if (status) 726 irdma_debug(dev, IRDMA_DEBUG_ERR, 727 "CEQ destroy completion failed %d\n", status); 728 exit: 729 spin_lock_destroy(&iwceq->ce_lock); 730 spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock); 731 kfree(iwceq->sc_ceq.reg_cq); 732 irdma_free_dma_mem(dev->hw, &iwceq->mem); 733 } 734 735 /** 736 * irdma_del_ceq_0 - destroy ceq 0 737 * @rf: RDMA PCI function 738 * 739 * Disable the ceq 0 interrupt and destroy the ceq 0 740 */ 741 static void 742 irdma_del_ceq_0(struct irdma_pci_f *rf) 743 { 744 struct irdma_ceq *iwceq = rf->ceqlist; 745 struct irdma_msix_vector *msix_vec; 746 747 if (rf->msix_shared) { 748 msix_vec = &rf->iw_msixtbl[0]; 749 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, 750 msix_vec->ceq_id, 751 msix_vec->idx, false); 752 irdma_destroy_irq(rf, msix_vec, rf); 753 } else { 754 msix_vec = &rf->iw_msixtbl[1]; 755 irdma_destroy_irq(rf, msix_vec, iwceq); 756 } 757 758 irdma_destroy_ceq(rf, iwceq); 759 rf->sc_dev.ceq_valid = false; 760 rf->ceqs_count = 0; 761 } 762 763 /** 764 * irdma_del_ceqs - destroy all ceq's except CEQ 0 765 * @rf: RDMA PCI function 766 * 767 * Go through all of the device ceq's, except 0, and for each 768 * ceq disable the ceq interrupt and destroy the ceq 769 */ 770 static void 771 irdma_del_ceqs(struct irdma_pci_f *rf) 772 { 773 struct irdma_ceq *iwceq = &rf->ceqlist[1]; 774 struct irdma_msix_vector *msix_vec; 775 u32 i = 0; 776 777 if (rf->msix_shared) 778 msix_vec = &rf->iw_msixtbl[1]; 779 else 780 msix_vec = &rf->iw_msixtbl[2]; 781 782 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { 783 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, 784 msix_vec->idx, false); 785 irdma_destroy_irq(rf, msix_vec, iwceq); 786 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, 787 IRDMA_OP_CEQ_DESTROY); 788 spin_lock_destroy(&iwceq->ce_lock); 789 spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock); 790 kfree(iwceq->sc_ceq.reg_cq); 791 irdma_free_dma_mem(rf->sc_dev.hw, &iwceq->mem); 792 } 793 rf->ceqs_count = 1; 794 } 795 796 /** 797 * irdma_destroy_ccq - destroy control cq 798 * @rf: RDMA PCI function 799 * 800 * Issue destroy ccq request and 801 * free the resources associated with the ccq 802 */ 803 static void 804 irdma_destroy_ccq(struct irdma_pci_f *rf) 805 { 806 struct irdma_sc_dev *dev = &rf->sc_dev; 807 struct irdma_ccq *ccq = &rf->ccq; 808 int status = 0; 809 810 if (!rf->reset) 811 status = irdma_sc_ccq_destroy(dev->ccq, 0, true); 812 if (status) 813 irdma_debug(dev, IRDMA_DEBUG_ERR, "CCQ destroy failed %d\n", 814 status); 815 irdma_free_dma_mem(dev->hw, &ccq->mem_cq); 816 } 817 818 /** 819 * irdma_close_hmc_objects_type - delete hmc objects of a given type 820 * @dev: iwarp device 821 * @obj_type: the hmc object type to be deleted 822 * @hmc_info: host memory info struct 823 * @privileged: permission to close HMC objects 824 * @reset: true if called before reset 825 */ 826 static void 827 irdma_close_hmc_objects_type(struct irdma_sc_dev *dev, 828 enum irdma_hmc_rsrc_type obj_type, 829 struct irdma_hmc_info *hmc_info, 830 bool privileged, bool reset) 831 { 832 struct irdma_hmc_del_obj_info info = {0}; 833 834 info.hmc_info = hmc_info; 835 info.rsrc_type = obj_type; 836 info.count = hmc_info->hmc_obj[obj_type].cnt; 837 info.privileged = privileged; 838 if (irdma_sc_del_hmc_obj(dev, &info, reset)) 839 
irdma_debug(dev, IRDMA_DEBUG_ERR, 840 "del HMC obj of type %d failed\n", obj_type); 841 } 842 843 /** 844 * irdma_del_hmc_objects - remove all device hmc objects 845 * @dev: iwarp device 846 * @hmc_info: hmc_info to free 847 * @privileged: permission to delete HMC objects 848 * @reset: true if called before reset 849 * @vers: hardware version 850 */ 851 void 852 irdma_del_hmc_objects(struct irdma_sc_dev *dev, 853 struct irdma_hmc_info *hmc_info, bool privileged, 854 bool reset, enum irdma_vers vers) 855 { 856 unsigned int i; 857 858 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { 859 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) 860 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], 861 hmc_info, privileged, reset); 862 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) 863 break; 864 } 865 } 866 867 /** 868 * irdma_create_hmc_obj_type - create hmc object of a given type 869 * @dev: hardware control device structure 870 * @info: information for the hmc object to create 871 */ 872 static int 873 irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, 874 struct irdma_hmc_create_obj_info *info) 875 { 876 return irdma_sc_create_hmc_obj(dev, info); 877 } 878 879 /** 880 * irdma_create_hmc_objs - create all hmc objects for the device 881 * @rf: RDMA PCI function 882 * @privileged: permission to create HMC objects 883 * @vers: HW version 884 * 885 * Create the device hmc objects and allocate hmc pages 886 * Return 0 if successful, otherwise clean up and return error 887 */ 888 static int 889 irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, 890 enum irdma_vers vers) 891 { 892 struct irdma_sc_dev *dev = &rf->sc_dev; 893 struct irdma_hmc_create_obj_info info = {0}; 894 int i, status = 0; 895 896 info.hmc_info = dev->hmc_info; 897 info.privileged = privileged; 898 info.entry_type = rf->sd_type; 899 900 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { 901 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) { 902 info.rsrc_type = iw_hmc_obj_types[i]; 903 info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; 904 info.add_sd_cnt = 0; 905 status = irdma_create_hmc_obj_type(dev, &info); 906 if (status) { 907 irdma_debug(dev, IRDMA_DEBUG_ERR, 908 "create obj type %d status = %d\n", 909 iw_hmc_obj_types[i], status); 910 break; 911 } 912 } 913 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) 914 break; 915 } 916 917 if (!status) 918 return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id, 919 true, true); 920 921 while (i) { 922 i--; 923 /* destroy the hmc objects of a given type */ 924 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) 925 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], 926 dev->hmc_info, privileged, 927 false); 928 } 929 930 return status; 931 } 932 933 /** 934 * irdma_obj_aligned_mem - get aligned memory from device allocated memory 935 * @rf: RDMA PCI function 936 * @memptr: points to the memory addresses 937 * @size: size of memory needed 938 * @mask: mask for the aligned memory 939 * 940 * Get aligned memory of the requested size and 941 * update the memptr to point to the new aligned memory 942 * Return 0 if successful, otherwise return no memory error 943 */ 944 static int 945 irdma_obj_aligned_mem(struct irdma_pci_f *rf, 946 struct irdma_dma_mem *memptr, u32 size, 947 u32 mask) 948 { 949 unsigned long va, newva; 950 unsigned long extra; 951 952 va = (unsigned long)rf->obj_next.va; 953 newva = va; 954 if (mask) 955 newva = ALIGN(va, (unsigned long)mask + 1ULL); 956 extra = newva - va; 957 memptr->va = (u8 *)va + extra; 958 memptr->pa = 
rf->obj_next.pa + extra;
	memptr->size = size;
	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
		return -ENOMEM;

	rf->obj_next.va = (u8 *)memptr->va + size;
	rf->obj_next.pa = memptr->pa + size;

	return 0;
}

/**
 * irdma_create_cqp - create control qp
 * @rf: RDMA PCI function
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_cqp(struct irdma_pci_f *rf)
{
	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
	struct irdma_dma_mem mem;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp_init_info cqp_init_info = {0};
	struct irdma_cqp *cqp = &rf->cqp;
	u16 maj_err, min_err;
	int i, status;

	/* kcalloc() already zeroes the allocation; check for failure before use */
	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return -ENOMEM;

	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return -ENOMEM;
	}

	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	cqp->sq.size = sizeof(struct irdma_cqp_sq_wqe) * sqsize;
	cqp->sq.va = irdma_allocate_dma_mem(dev->hw, &cqp->sq, cqp->sq.size,
					    IRDMA_CQP_ALIGNMENT);
	if (!cqp->sq.va) {
		kfree(cqp->scratch_array);
		kfree(cqp->cqp_requests);
		return -ENOMEM;
	}

	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
				       IRDMA_HOST_CTX_ALIGNMENT_M);
	if (status)
		goto exit;

	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = rf->rsrc_profile;
	cqp_init_info.scratch_array = cqp->scratch_array;
	cqp_init_info.protocol_used = rf->protocol_used;
	cqp_init_info.en_rem_endpoint_trk = rf->en_rem_endpoint_trk;
	memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params,
	       sizeof(cqp_init_info.dcqcn_params));

	switch (rf->rdma_ver) {
	case IRDMA_GEN_1:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
		break;
	case IRDMA_GEN_2:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
		break;
	}
	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n",
			    status);
		goto exit;
	}

	spin_lock_init(&cqp->req_lock);
	spin_lock_init(&cqp->compl_lock);

	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		irdma_debug(dev, IRDMA_DEBUG_ERR,
			    "cqp create failed - status %d maj_err %d min_err %d\n",
			    status, maj_err, min_err);
		goto exit;
	}

	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);

	/* init the waitqueue of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	init_waitqueue_head(&cqp->remove_wq);
	return 0;

exit:
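	/*
	 * All failure paths after the SQ was allocated land here. The
	 * hardware CQP was not successfully created on these paths, so
	 * irdma_destroy_cqp() is called with free_hwcqp = false and only
	 * releases the host-side resources.
	 */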
1069 irdma_destroy_cqp(rf, false); 1070 1071 return status; 1072 } 1073 1074 /** 1075 * irdma_create_ccq - create control cq 1076 * @rf: RDMA PCI function 1077 * 1078 * Return 0, if the ccq and the resources associated with it 1079 * are successfully created, otherwise return error 1080 */ 1081 static int 1082 irdma_create_ccq(struct irdma_pci_f *rf) 1083 { 1084 struct irdma_sc_dev *dev = &rf->sc_dev; 1085 struct irdma_ccq_init_info info = {0}; 1086 struct irdma_ccq *ccq = &rf->ccq; 1087 int status; 1088 1089 dev->ccq = &ccq->sc_cq; 1090 dev->ccq->dev = dev; 1091 info.dev = dev; 1092 ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area); 1093 ccq->mem_cq.size = sizeof(struct irdma_cqe) * IW_CCQ_SIZE; 1094 ccq->mem_cq.va = irdma_allocate_dma_mem(dev->hw, &ccq->mem_cq, 1095 ccq->mem_cq.size, 1096 IRDMA_CQ0_ALIGNMENT); 1097 if (!ccq->mem_cq.va) 1098 return -ENOMEM; 1099 1100 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, 1101 ccq->shadow_area.size, 1102 IRDMA_SHADOWAREA_M); 1103 if (status) 1104 goto exit; 1105 1106 ccq->sc_cq.back_cq = ccq; 1107 /* populate the ccq init info */ 1108 info.cq_base = ccq->mem_cq.va; 1109 info.cq_pa = ccq->mem_cq.pa; 1110 info.num_elem = IW_CCQ_SIZE; 1111 info.shadow_area = ccq->shadow_area.va; 1112 info.shadow_area_pa = ccq->shadow_area.pa; 1113 info.ceqe_mask = false; 1114 info.ceq_id_valid = true; 1115 info.shadow_read_threshold = 16; 1116 info.vsi = &rf->default_vsi; 1117 status = irdma_sc_ccq_init(dev->ccq, &info); 1118 if (!status) 1119 status = irdma_sc_ccq_create(dev->ccq, 0, true, true); 1120 exit: 1121 if (status) 1122 irdma_free_dma_mem(dev->hw, &ccq->mem_cq); 1123 1124 return status; 1125 } 1126 1127 /** 1128 * irdma_alloc_set_mac - set up a mac address table entry 1129 * @iwdev: irdma device 1130 * 1131 * Allocate a mac ip entry and add it to the hw table Return 0 1132 * if successful, otherwise return error 1133 */ 1134 static int 1135 irdma_alloc_set_mac(struct irdma_device *iwdev) 1136 { 1137 int status; 1138 1139 status = irdma_alloc_local_mac_entry(iwdev->rf, 1140 &iwdev->mac_ip_table_idx); 1141 if (!status) { 1142 status = irdma_add_local_mac_entry(iwdev->rf, 1143 (u8 *)IF_LLADDR(iwdev->netdev), 1144 (u8)iwdev->mac_ip_table_idx); 1145 if (status) 1146 irdma_del_local_mac_entry(iwdev->rf, 1147 (u8)iwdev->mac_ip_table_idx); 1148 } 1149 return status; 1150 } 1151 1152 /** 1153 * irdma_irq_request - set up the msix interrupt vector 1154 * @rf: RDMA PCI function 1155 * @msix_vec: interrupt vector information 1156 * @handler: function pointer to associate with interrupt 1157 * @argument: argument passed to the handler 1158 * 1159 * Allocate interrupt resources and setup interrupt 1160 * Return 0 if successful, otherwise return error 1161 * Note that after this function bus_describe_intr shall 1162 * be called. 
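 * On FreeBSD the vector is backed by a SYS_RES_IRQ resource allocated at
 * rid msix_vec->idx + 1; irdma_free_irq() releases that same rid.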
1163 */ 1164 static int 1165 irdma_irq_request(struct irdma_pci_f *rf, 1166 struct irdma_msix_vector *msix_vec, 1167 driver_intr_t handler, void *argument) 1168 { 1169 device_t dev = rf->dev_ctx.dev; 1170 int rid = msix_vec->idx + 1; 1171 int err, status; 1172 1173 msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 1174 if (!msix_vec->res) { 1175 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, 1176 "Unable to allocate bus resource int[%d]\n", rid); 1177 return -EINVAL; 1178 } 1179 err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE, 1180 NULL, handler, argument, &msix_vec->tag); 1181 if (err) { 1182 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, 1183 "Unable to register handler with %x status\n", err); 1184 status = -EINVAL; 1185 goto fail_intr; 1186 } 1187 return 0; 1188 1189 fail_intr: 1190 bus_release_resource(dev, SYS_RES_IRQ, rid, msix_vec->res); 1191 msix_vec->res = NULL; 1192 1193 return status; 1194 } 1195 1196 /** 1197 * irdma_cfg_ceq_vector - set up the msix interrupt vector for 1198 * ceq 1199 * @rf: RDMA PCI function 1200 * @iwceq: ceq associated with the vector 1201 * @ceq_id: the id number of the iwceq 1202 * @msix_vec: interrupt vector information 1203 * 1204 * Allocate interrupt resources and enable irq handling 1205 * Return 0 if successful, otherwise return error 1206 */ 1207 static int 1208 irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, 1209 u32 ceq_id, struct irdma_msix_vector *msix_vec) 1210 { 1211 int status; 1212 1213 if (rf->msix_shared && !ceq_id) { 1214 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); 1215 status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf); 1216 if (status) 1217 return status; 1218 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "AEQCEQ"); 1219 } else { 1220 tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc); 1221 1222 status = irdma_irq_request(rf, msix_vec, irdma_ceq_handler, iwceq); 1223 if (status) 1224 return status; 1225 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "CEQ"); 1226 } 1227 status = bus_bind_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->cpu_affinity); 1228 if (status) { 1229 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, 1230 "ceq irq config fail\n"); 1231 return status; 1232 } 1233 1234 msix_vec->ceq_id = ceq_id; 1235 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); 1236 1237 return 0; 1238 } 1239 1240 /** 1241 * irdma_cfg_aeq_vector - set up the msix vector for aeq 1242 * @rf: RDMA PCI function 1243 * 1244 * Allocate interrupt resources and enable irq handling 1245 * Return 0 if successful, otherwise return error 1246 */ 1247 static int 1248 irdma_cfg_aeq_vector(struct irdma_pci_f *rf) 1249 { 1250 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; 1251 u32 ret = 0; 1252 1253 if (!rf->msix_shared) { 1254 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); 1255 ret = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf); 1256 if (ret) 1257 return ret; 1258 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "irdma"); 1259 } 1260 if (ret) { 1261 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, 1262 "aeq irq config fail\n"); 1263 return -EINVAL; 1264 } 1265 1266 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); 1267 1268 return 0; 1269 } 1270 1271 /** 1272 * irdma_create_ceq - create completion event queue 1273 * @rf: RDMA PCI function 1274 * @iwceq: pointer to the ceq resources to be created 1275 * @ceq_id: the id number of the iwceq 1276 * @vsi: SC vsi struct 1277 * 1278 * 
Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
		 u32 ceq_id, struct irdma_sc_vsi *vsi)
{
	int status;
	struct irdma_ceq_init_info info = {0};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u64 scratch;
	u32 ceq_size;

	info.ceq_id = ceq_id;
	iwceq->rf = rf;
	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
		       dev->hw_attrs.max_hw_ceq_size);
	iwceq->mem.size = sizeof(struct irdma_ceqe) * ceq_size;
	iwceq->mem.va = irdma_allocate_dma_mem(dev->hw, &iwceq->mem,
					       iwceq->mem.size,
					       IRDMA_CEQ_ALIGNMENT);
	if (!iwceq->mem.va)
		return -ENOMEM;

	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;
	info.elem_cnt = ceq_size;
	/* guard against allocation failure before handing reg_cq to the ceq */
	info.reg_cq = kzalloc(sizeof(struct irdma_sc_cq *) * info.elem_cnt, GFP_KERNEL);
	if (!info.reg_cq) {
		irdma_free_dma_mem(dev->hw, &iwceq->mem);
		return -ENOMEM;
	}

	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	info.vsi = vsi;
	scratch = (uintptr_t)&rf->cqp.sc_cqp;
	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
	if (!status) {
		if (dev->ceq_valid)
			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
						   IRDMA_OP_CEQ_CREATE);
		else
			status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
	}

	if (status) {
		kfree(info.reg_cq);
		irdma_free_dma_mem(dev->hw, &iwceq->mem);
	}

	return status;
}

/**
 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
 * @rf: RDMA PCI function
 *
 * Allocate a list for all device completion event queues
 * Create the ceq 0 and configure its msix interrupt vector
 * Return 0, if successfully set up, otherwise return error
 */
static int
irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	u32 i;
	int status = 0;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	/* kcalloc() already zeroes the list; check for failure before use */
	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
	if (!rf->ceqlist) {
		status = -ENOMEM;
		goto exit;
	}

	iwceq = &rf->ceqlist[0];
	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
	if (status) {
		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
			    "create ceq status = %d\n", status);
		goto exit;
	}

	spin_lock_init(&iwceq->ce_lock);
	i = rf->msix_shared ?
0 : 1; 1364 msix_vec = &rf->iw_msixtbl[i]; 1365 iwceq->irq = msix_vec->irq; 1366 iwceq->msix_idx = msix_vec->idx; 1367 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); 1368 if (status) { 1369 irdma_destroy_ceq(rf, iwceq); 1370 goto exit; 1371 } 1372 1373 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); 1374 rf->ceqs_count++; 1375 1376 exit: 1377 if (status && !rf->ceqs_count) { 1378 kfree(rf->ceqlist); 1379 rf->ceqlist = NULL; 1380 return status; 1381 } 1382 rf->sc_dev.ceq_valid = true; 1383 1384 return 0; 1385 } 1386 1387 /** 1388 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources 1389 * @rf: RDMA PCI function 1390 * @vsi: VSI structure for this CEQ 1391 * 1392 * Allocate a list for all device completion event queues 1393 * Create the ceq's and configure their msix interrupt vectors 1394 * Return 0, if ceqs are successfully set up, otherwise return error 1395 */ 1396 static int 1397 irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi) 1398 { 1399 u32 i; 1400 u32 ceq_id; 1401 struct irdma_ceq *iwceq; 1402 struct irdma_msix_vector *msix_vec; 1403 int status; 1404 u32 num_ceqs; 1405 1406 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); 1407 i = (rf->msix_shared) ? 1 : 2; 1408 for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) { 1409 iwceq = &rf->ceqlist[ceq_id]; 1410 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); 1411 if (status) { 1412 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, 1413 "create ceq status = %d\n", status); 1414 goto del_ceqs; 1415 } 1416 spin_lock_init(&iwceq->ce_lock); 1417 msix_vec = &rf->iw_msixtbl[i]; 1418 iwceq->irq = msix_vec->irq; 1419 iwceq->msix_idx = msix_vec->idx; 1420 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); 1421 if (status) { 1422 irdma_destroy_ceq(rf, iwceq); 1423 goto del_ceqs; 1424 } 1425 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); 1426 rf->ceqs_count++; 1427 } 1428 1429 return 0; 1430 1431 del_ceqs: 1432 irdma_del_ceqs(rf); 1433 1434 return status; 1435 } 1436 1437 static int 1438 irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) 1439 { 1440 struct irdma_aeq *aeq = &rf->aeq; 1441 dma_addr_t *pg_arr; 1442 u32 pg_cnt; 1443 int status; 1444 1445 if (rf->rdma_ver < IRDMA_GEN_2) 1446 return -EOPNOTSUPP; 1447 1448 aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size; 1449 aeq->mem.va = vzalloc(aeq->mem.size); 1450 1451 if (!aeq->mem.va) 1452 return -ENOMEM; 1453 1454 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); 1455 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); 1456 if (status) { 1457 vfree(aeq->mem.va); 1458 return status; 1459 } 1460 1461 pg_arr = (dma_addr_t *) aeq->palloc.level1.addr; 1462 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); 1463 if (status) { 1464 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); 1465 vfree(aeq->mem.va); 1466 return status; 1467 } 1468 1469 return 0; 1470 } 1471 1472 /** 1473 * irdma_create_aeq - create async event queue 1474 * @rf: RDMA PCI function 1475 * 1476 * Return 0, if the aeq and the resources associated with it 1477 * are successfully created, otherwise return error 1478 */ 1479 static int 1480 irdma_create_aeq(struct irdma_pci_f *rf) 1481 { 1482 struct irdma_aeq_init_info info = {0}; 1483 struct irdma_sc_dev *dev = &rf->sc_dev; 1484 struct irdma_aeq *aeq = &rf->aeq; 1485 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; 1486 u32 aeq_size; 1487 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 
2 : 1; 1488 int status; 1489 1490 aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt + 1491 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; 1492 aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); 1493 1494 aeq->mem.size = sizeof(struct irdma_sc_aeqe) * aeq_size; 1495 aeq->mem.va = irdma_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size, 1496 IRDMA_AEQ_ALIGNMENT); 1497 if (aeq->mem.va) 1498 goto skip_virt_aeq; 1499 1500 /* physically mapped aeq failed. setup virtual aeq */ 1501 status = irdma_create_virt_aeq(rf, aeq_size); 1502 if (status) 1503 return status; 1504 1505 info.virtual_map = true; 1506 aeq->virtual_map = info.virtual_map; 1507 info.pbl_chunk_size = 1; 1508 info.first_pm_pbl_idx = aeq->palloc.level1.idx; 1509 1510 skip_virt_aeq: 1511 info.aeqe_base = aeq->mem.va; 1512 info.aeq_elem_pa = aeq->mem.pa; 1513 info.elem_cnt = aeq_size; 1514 info.dev = dev; 1515 info.msix_idx = rf->iw_msixtbl->idx; 1516 status = irdma_sc_aeq_init(&aeq->sc_aeq, &info); 1517 if (status) 1518 goto err; 1519 1520 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE); 1521 if (status) 1522 goto err; 1523 1524 return 0; 1525 1526 err: 1527 if (aeq->virtual_map) 1528 irdma_destroy_virt_aeq(rf); 1529 else 1530 irdma_free_dma_mem(dev->hw, &aeq->mem); 1531 1532 return status; 1533 } 1534 1535 /** 1536 * irdma_setup_aeq - set up the device aeq 1537 * @rf: RDMA PCI function 1538 * 1539 * Create the aeq and configure its msix interrupt vector 1540 * Return 0 if successful, otherwise return error 1541 */ 1542 static int 1543 irdma_setup_aeq(struct irdma_pci_f *rf) 1544 { 1545 struct irdma_sc_dev *dev = &rf->sc_dev; 1546 int status; 1547 1548 status = irdma_create_aeq(rf); 1549 if (status) 1550 return status; 1551 1552 status = irdma_cfg_aeq_vector(rf); 1553 if (status) { 1554 irdma_destroy_aeq(rf); 1555 return status; 1556 } 1557 1558 if (!rf->msix_shared) 1559 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); 1560 1561 return 0; 1562 } 1563 1564 /** 1565 * irdma_initialize_ilq - create iwarp local queue for cm 1566 * @iwdev: irdma device 1567 * 1568 * Return 0 if successful, otherwise return error 1569 */ 1570 static int 1571 irdma_initialize_ilq(struct irdma_device *iwdev) 1572 { 1573 struct irdma_puda_rsrc_info info = {0}; 1574 int status; 1575 1576 info.type = IRDMA_PUDA_RSRC_TYPE_ILQ; 1577 info.cq_id = 1; 1578 info.qp_id = 1; 1579 info.count = 1; 1580 info.pd_id = 1; 1581 info.abi_ver = IRDMA_ABI_VER; 1582 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); 1583 info.rq_size = info.sq_size; 1584 info.buf_size = 1024; 1585 info.tx_buf_cnt = 2 * info.sq_size; 1586 info.receive = irdma_receive_ilq; 1587 info.xmit_complete = irdma_free_sqbuf; 1588 status = irdma_puda_create_rsrc(&iwdev->vsi, &info); 1589 if (status) 1590 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, 1591 "ilq create fail\n"); 1592 1593 return status; 1594 } 1595 1596 /** 1597 * irdma_initialize_ieq - create iwarp exception queue 1598 * @iwdev: irdma device 1599 * 1600 * Return 0 if successful, otherwise return error 1601 */ 1602 static int 1603 irdma_initialize_ieq(struct irdma_device *iwdev) 1604 { 1605 struct irdma_puda_rsrc_info info = {0}; 1606 int status; 1607 1608 info.type = IRDMA_PUDA_RSRC_TYPE_IEQ; 1609 info.cq_id = 2; 1610 info.qp_id = iwdev->vsi.exception_lan_q; 1611 info.count = 1; 1612 info.pd_id = 2; 1613 info.abi_ver = IRDMA_ABI_VER; 1614 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); 1615 info.rq_size = info.sq_size; 1616 info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD; 1617 info.tx_buf_cnt = 4096; 
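	/*
	 * Unlike the fixed 1024-byte ILQ buffers, each IEQ buffer is sized
	 * to hold a full frame: the VSI MTU plus IRDMA_IPV4_PAD (buf_size
	 * above).
	 */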
1618 status = irdma_puda_create_rsrc(&iwdev->vsi, &info); 1619 if (status) 1620 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, 1621 "ieq create fail\n"); 1622 1623 return status; 1624 } 1625 1626 /** 1627 * irdma_reinitialize_ieq - destroy and re-create ieq 1628 * @vsi: VSI structure 1629 */ 1630 void 1631 irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi) 1632 { 1633 struct irdma_device *iwdev = vsi->back_vsi; 1634 struct irdma_pci_f *rf = iwdev->rf; 1635 1636 irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false); 1637 if (irdma_initialize_ieq(iwdev)) { 1638 iwdev->rf->reset = true; 1639 rf->gen_ops.request_reset(rf); 1640 } 1641 } 1642 1643 /** 1644 * irdma_hmc_setup - create hmc objects for the device 1645 * @rf: RDMA PCI function 1646 * 1647 * Set up the device private memory space for the number and size of 1648 * the hmc objects and create the objects 1649 * Return 0 if successful, otherwise return error 1650 */ 1651 static int 1652 irdma_hmc_setup(struct irdma_pci_f *rf) 1653 { 1654 int status; 1655 struct irdma_sc_dev *dev = &rf->sc_dev; 1656 u32 qpcnt; 1657 1658 if (rf->rdma_ver == IRDMA_GEN_1) 1659 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2; 1660 else 1661 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; 1662 1663 rf->sd_type = IRDMA_SD_TYPE_DIRECT; 1664 status = irdma_cfg_fpm_val(dev, qpcnt); 1665 if (status) 1666 return status; 1667 1668 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); 1669 1670 return status; 1671 } 1672 1673 /** 1674 * irdma_del_init_mem - deallocate memory resources 1675 * @rf: RDMA PCI function 1676 */ 1677 static void 1678 irdma_del_init_mem(struct irdma_pci_f *rf) 1679 { 1680 struct irdma_sc_dev *dev = &rf->sc_dev; 1681 1682 kfree(dev->hmc_info->sd_table.sd_entry); 1683 dev->hmc_info->sd_table.sd_entry = NULL; 1684 vfree(rf->mem_rsrc); 1685 rf->mem_rsrc = NULL; 1686 irdma_free_dma_mem(&rf->hw, &rf->obj_mem); 1687 if (rf->rdma_ver != IRDMA_GEN_1) { 1688 kfree(rf->allocated_ws_nodes); 1689 rf->allocated_ws_nodes = NULL; 1690 mutex_destroy(&dev->ws_mutex); 1691 } 1692 kfree(rf->ceqlist); 1693 rf->ceqlist = NULL; 1694 kfree(rf->iw_msixtbl); 1695 rf->iw_msixtbl = NULL; 1696 kfree(rf->hmc_info_mem); 1697 rf->hmc_info_mem = NULL; 1698 } 1699 1700 /** 1701 * irdma_initialize_dev - initialize device 1702 * @rf: RDMA PCI function 1703 * 1704 * Allocate memory for the hmc objects and initialize iwdev 1705 * Return 0 if successful, otherwise clean up the resources 1706 * and return error 1707 */ 1708 static int 1709 irdma_initialize_dev(struct irdma_pci_f *rf) 1710 { 1711 int status; 1712 struct irdma_sc_dev *dev = &rf->sc_dev; 1713 struct irdma_device_init_info info = {0}; 1714 struct irdma_dma_mem mem; 1715 u32 size; 1716 1717 size = sizeof(struct irdma_hmc_pble_rsrc) + 1718 sizeof(struct irdma_hmc_info) + 1719 (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX); 1720 1721 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); 1722 if (!rf->hmc_info_mem) 1723 return -ENOMEM; 1724 1725 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; 1726 dev->hmc_info = &rf->hw.hmc; 1727 dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *) 1728 (rf->pble_rsrc + 1); 1729 1730 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, 1731 IRDMA_FPM_QUERY_BUF_ALIGNMENT_M); 1732 if (status) 1733 goto error; 1734 1735 info.fpm_query_buf_pa = mem.pa; 1736 info.fpm_query_buf = mem.va; 1737 1738 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, 1739 IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M); 1740 if (status) 1741 goto error; 1742 1743 
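	/*
	 * Both the query and commit FPM buffers are carved out of the single
	 * obj_mem DMA region set up in irdma_setup_init_state().
	 */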
info.fpm_commit_buf_pa = mem.pa; 1744 info.fpm_commit_buf = mem.va; 1745 1746 info.bar0 = rf->hw.hw_addr; 1747 info.hmc_fn_id = rf->peer_info->pf_id; 1748 info.hw = &rf->hw; 1749 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); 1750 if (status) 1751 goto error; 1752 1753 return status; 1754 error: 1755 kfree(rf->hmc_info_mem); 1756 rf->hmc_info_mem = NULL; 1757 1758 return status; 1759 } 1760 1761 /** 1762 * irdma_rt_deinit_hw - clean up the irdma device resources 1763 * @iwdev: irdma device 1764 * 1765 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the 1766 * device queues and free the pble and the hmc objects 1767 */ 1768 void 1769 irdma_rt_deinit_hw(struct irdma_device *iwdev) 1770 { 1771 struct irdma_sc_qp qp = {{0}}; 1772 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", 1773 iwdev->init_state); 1774 1775 switch (iwdev->init_state) { 1776 case IP_ADDR_REGISTERED: 1777 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 1778 irdma_del_local_mac_entry(iwdev->rf, 1779 (u8)iwdev->mac_ip_table_idx); 1780 /* fallthrough */ 1781 case AEQ_CREATED: 1782 case PBLE_CHUNK_MEM: 1783 case CEQS_CREATED: 1784 case REM_ENDPOINT_TRK_CREATED: 1785 if (iwdev->rf->en_rem_endpoint_trk) { 1786 qp.dev = &iwdev->rf->sc_dev; 1787 qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID; 1788 qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP; 1789 irdma_cqp_qp_destroy_cmd(qp.dev, &qp); 1790 } 1791 /* fallthrough */ 1792 case IEQ_CREATED: 1793 if (!iwdev->roce_mode) 1794 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, 1795 iwdev->rf->reset); 1796 /* fallthrough */ 1797 case ILQ_CREATED: 1798 if (!iwdev->roce_mode) 1799 irdma_puda_dele_rsrc(&iwdev->vsi, 1800 IRDMA_PUDA_RSRC_TYPE_ILQ, 1801 iwdev->rf->reset); 1802 break; 1803 default: 1804 irdma_dev_warn(&iwdev->rf->sc_dev, "bad init_state = %d\n", 1805 iwdev->init_state); 1806 break; 1807 } 1808 1809 irdma_cleanup_cm_core(&iwdev->cm_core); 1810 if (iwdev->vsi.pestat) { 1811 irdma_vsi_stats_free(&iwdev->vsi); 1812 kfree(iwdev->vsi.pestat); 1813 } 1814 if (iwdev->cleanup_wq) 1815 destroy_workqueue(iwdev->cleanup_wq); 1816 } 1817 1818 static int 1819 irdma_setup_init_state(struct irdma_pci_f *rf) 1820 { 1821 int status; 1822 1823 status = irdma_save_msix_info(rf); 1824 if (status) 1825 return status; 1826 1827 rf->obj_mem.size = 8192; 1828 rf->obj_mem.va = irdma_allocate_dma_mem(&rf->hw, &rf->obj_mem, 1829 rf->obj_mem.size, 1830 IRDMA_HW_PAGE_SIZE); 1831 if (!rf->obj_mem.va) { 1832 status = -ENOMEM; 1833 goto clean_msixtbl; 1834 } 1835 1836 rf->obj_next = rf->obj_mem; 1837 status = irdma_initialize_dev(rf); 1838 if (status) 1839 goto clean_obj_mem; 1840 1841 return 0; 1842 1843 clean_obj_mem: 1844 irdma_free_dma_mem(&rf->hw, &rf->obj_mem); 1845 clean_msixtbl: 1846 kfree(rf->iw_msixtbl); 1847 rf->iw_msixtbl = NULL; 1848 return status; 1849 } 1850 1851 /** 1852 * irdma_get_used_rsrc - determine resources used internally 1853 * @iwdev: irdma device 1854 * 1855 * Called at the end of open to get all internal allocations 1856 */ 1857 static void 1858 irdma_get_used_rsrc(struct irdma_device *iwdev) 1859 { 1860 iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, 1861 iwdev->rf->max_pd, 0); 1862 iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, 1863 iwdev->rf->max_qp, 0); 1864 iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, 1865 iwdev->rf->max_cq, 0); 1866 iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, 1867 iwdev->rf->max_mr, 0); 1868 } 1869 1870 void 1871 
irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) 1872 { 1873 enum init_completion_state state = rf->init_state; 1874 1875 rf->init_state = INVALID_STATE; 1876 if (rf->rsrc_created) { 1877 irdma_destroy_aeq(rf); 1878 irdma_destroy_pble_prm(rf->pble_rsrc); 1879 irdma_del_ceqs(rf); 1880 rf->rsrc_created = false; 1881 } 1882 1883 switch (state) { 1884 case CEQ0_CREATED: 1885 irdma_del_ceq_0(rf); 1886 /* fallthrough */ 1887 case CCQ_CREATED: 1888 irdma_destroy_ccq(rf); 1889 /* fallthrough */ 1890 case HW_RSRC_INITIALIZED: 1891 case HMC_OBJS_CREATED: 1892 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, 1893 rf->reset, rf->rdma_ver); 1894 /* fallthrough */ 1895 case CQP_CREATED: 1896 irdma_destroy_cqp(rf, !rf->reset); 1897 /* fallthrough */ 1898 case INITIAL_STATE: 1899 irdma_del_init_mem(rf); 1900 break; 1901 case INVALID_STATE: 1902 default: 1903 irdma_pr_warn("bad init_state = %d\n", rf->init_state); 1904 break; 1905 } 1906 } 1907 1908 /** 1909 * irdma_rt_init_hw - Initializes runtime portion of HW 1910 * @iwdev: irdma device 1911 * @l2params: qos, tc, mtu info from netdev driver 1912 * 1913 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma 1914 * device resource objects. 1915 */ 1916 int 1917 irdma_rt_init_hw(struct irdma_device *iwdev, 1918 struct irdma_l2params *l2params) 1919 { 1920 struct irdma_pci_f *rf = iwdev->rf; 1921 struct irdma_sc_dev *dev = &rf->sc_dev; 1922 struct irdma_sc_qp qp = {{0}}; 1923 struct irdma_vsi_init_info vsi_info = {0}; 1924 struct irdma_vsi_stats_info stats_info = {0}; 1925 int status; 1926 1927 vsi_info.dev = dev; 1928 vsi_info.back_vsi = iwdev; 1929 vsi_info.params = l2params; 1930 vsi_info.pf_data_vsi_num = iwdev->vsi_num; 1931 vsi_info.register_qset = rf->gen_ops.register_qset; 1932 vsi_info.unregister_qset = rf->gen_ops.unregister_qset; 1933 vsi_info.exception_lan_q = 2; 1934 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info); 1935 1936 status = irdma_setup_cm_core(iwdev, rf->rdma_ver); 1937 if (status) 1938 return status; 1939 1940 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); 1941 if (!stats_info.pestat) { 1942 irdma_cleanup_cm_core(&iwdev->cm_core); 1943 return -ENOMEM; 1944 } 1945 stats_info.fcn_id = dev->hmc_fn_id; 1946 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info); 1947 if (status) { 1948 irdma_cleanup_cm_core(&iwdev->cm_core); 1949 kfree(stats_info.pestat); 1950 return status; 1951 } 1952 1953 do { 1954 if (!iwdev->roce_mode) { 1955 status = irdma_initialize_ilq(iwdev); 1956 if (status) 1957 break; 1958 iwdev->init_state = ILQ_CREATED; 1959 status = irdma_initialize_ieq(iwdev); 1960 if (status) 1961 break; 1962 iwdev->init_state = IEQ_CREATED; 1963 } 1964 if (iwdev->rf->en_rem_endpoint_trk) { 1965 qp.dev = dev; 1966 qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID; 1967 qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP; 1968 status = irdma_cqp_qp_create_cmd(dev, &qp); 1969 if (status) 1970 break; 1971 iwdev->init_state = REM_ENDPOINT_TRK_CREATED; 1972 } 1973 if (!rf->rsrc_created) { 1974 status = irdma_setup_ceqs(rf, &iwdev->vsi); 1975 if (status) 1976 break; 1977 1978 iwdev->init_state = CEQS_CREATED; 1979 1980 status = irdma_hmc_init_pble(&rf->sc_dev, 1981 rf->pble_rsrc); 1982 if (status) { 1983 irdma_del_ceqs(rf); 1984 break; 1985 } 1986 1987 iwdev->init_state = PBLE_CHUNK_MEM; 1988 1989 status = irdma_setup_aeq(rf); 1990 if (status) { 1991 irdma_destroy_pble_prm(rf->pble_rsrc); 1992 irdma_del_ceqs(rf); 1993 break; 1994 } 1995 iwdev->init_state = AEQ_CREATED; 1996 rf->rsrc_created = true; 1997 } 1998 1999 
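		/*
		 * CEQs (beyond CEQ 0), the PBLE pool and the AEQ are
		 * per-PCI-function resources; rsrc_created makes sure they
		 * are set up only once even if irdma_rt_init_hw() runs again
		 * for the same function.
		 */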
        iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
            IB_DEVICE_MEM_WINDOW |
            IB_DEVICE_MEM_MGT_EXTENSIONS;

        if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
            irdma_alloc_set_mac(iwdev);
        irdma_add_ip(iwdev);
        iwdev->init_state = IP_ADDR_REGISTERED;

        /*
         * handles async cleanup tasks - disconnect CM, free qp, free cq bufs
         */
        iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
                                            WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
        if (!iwdev->cleanup_wq)
            return -ENOMEM;
        irdma_get_used_rsrc(iwdev);
        init_waitqueue_head(&iwdev->suspend_wq);

        return 0;
    } while (0);

    irdma_dev_err(idev_to_dev(dev), "HW runtime init FAIL status = %d last cmpl = %d\n",
                  status, iwdev->init_state);
    irdma_rt_deinit_hw(iwdev);

    return status;
}

/**
 * irdma_ctrl_init_hw - Initializes control portion of HW
 * @rf: RDMA PCI function
 *
 * Create admin queues, HMC objects and RF resource objects
 */
int
irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{
    struct irdma_sc_dev *dev = &rf->sc_dev;
    int status;

    do {
        status = irdma_setup_init_state(rf);
        if (status)
            break;
        rf->init_state = INITIAL_STATE;

        status = irdma_create_cqp(rf);
        if (status)
            break;
        rf->init_state = CQP_CREATED;

        dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
        if (rf->rdma_ver != IRDMA_GEN_1) {
            status = irdma_get_rdma_features(dev);
            if (status)
                break;
        }

        status = irdma_hmc_setup(rf);
        if (status)
            break;
        rf->init_state = HMC_OBJS_CREATED;

        status = irdma_initialize_hw_rsrc(rf);
        if (status)
            break;
        rf->init_state = HW_RSRC_INITIALIZED;

        status = irdma_create_ccq(rf);
        if (status)
            break;
        rf->init_state = CCQ_CREATED;

        status = irdma_setup_ceq_0(rf);
        if (status)
            break;
        rf->init_state = CEQ0_CREATED;
        /* Handles processing of CQP completions */
        rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
                                                  WQ_HIGHPRI | WQ_UNBOUND);
        if (!rf->cqp_cmpl_wq) {
            status = -ENOMEM;
            break;
        }
        INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
        irdma_sc_ccq_arm(dev->ccq);
        return 0;
    } while (0);

    pr_err("IRDMA hardware initialization FAILED init_state=%d status=%d\n",
           rf->init_state, status);
    irdma_ctrl_deinit_hw(rf);
    return status;
}
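
/*
 * Illustrative ordering (sketch only, not a call site in this file): the
 * attach path is expected to bring up the control portion once per PCI
 * function and the runtime portion per RDMA device, and to unwind in the
 * reverse order, roughly:
 *
 *	if (irdma_ctrl_init_hw(rf))
 *		return;
 *	if (irdma_rt_init_hw(iwdev, &l2params)) {
 *		irdma_ctrl_deinit_hw(rf);
 *		return;
 *	}
 *	...
 *	irdma_rt_deinit_hw(iwdev);
 *	irdma_ctrl_deinit_hw(rf);
 *
 * Here rf, iwdev and l2params stand in for objects owned by the attach
 * glue; the real call sites live outside this file.
 */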

/**
 * irdma_set_hw_rsrc - set hw memory resources.
 * @rf: RDMA PCI function
 */
static void
irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
    rf->allocated_qps = (void *)(rf->mem_rsrc +
                                 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
    rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
    rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
    rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
    rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
    rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
    rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];

    rf->qp_table = (struct irdma_qp **)
        (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
    rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);

    spin_lock_init(&rf->rsrc_lock);
    spin_lock_init(&rf->arp_lock);
    spin_lock_init(&rf->qptable_lock);
    spin_lock_init(&rf->cqtable_lock);
    spin_lock_init(&rf->qh_list_lock);
}

/**
 * irdma_calc_mem_rsrc_size - calculate memory resources size.
 * @rf: RDMA PCI function
 */
static u32
irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
{
    u32 rsrc_size;

    rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
    rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
    rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
    rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;

    return rsrc_size;
}

/**
 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
 * @rf: RDMA PCI function
 */
u32
irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
{
    u32 rsrc_size;
    u32 mrdrvbits;
    u32 ret;

    if (rf->rdma_ver != IRDMA_GEN_1) {
        rf->allocated_ws_nodes =
            kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
                    sizeof(unsigned long), GFP_KERNEL);
        if (!rf->allocated_ws_nodes)
            return -ENOMEM;

        set_bit(0, rf->allocated_ws_nodes);
        rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
    }
    rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
    rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
    rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
    rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
    rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
    rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
    rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
    rf->max_mcg = rf->max_qp;

    rsrc_size = irdma_calc_mem_rsrc_size(rf);
    rf->mem_rsrc = vzalloc(rsrc_size);
    if (!rf->mem_rsrc) {
        ret = -ENOMEM;
        goto mem_rsrc_vmalloc_fail;
    }

    rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;

    irdma_set_hw_rsrc(rf);

    set_bit(0, rf->allocated_mrs);
    set_bit(0, rf->allocated_qps);
    set_bit(0, rf->allocated_cqs);
    set_bit(0, rf->allocated_pds);
    set_bit(0, rf->allocated_arps);
    set_bit(0, rf->allocated_ahs);
    set_bit(0, rf->allocated_mcgs);
    set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
    set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
    set_bit(IRDMA_REM_ENDPOINT_TRK_QPID, rf->allocated_qps); /* qp 3 Remote Endpt trk */
    set_bit(1, rf->allocated_cqs);
    set_bit(1, rf->allocated_pds);
    set_bit(2, rf->allocated_cqs);
    set_bit(2, rf->allocated_pds);

    INIT_LIST_HEAD(&rf->mc_qht_list.list);
    /* stag index mask has a minimum of 14 bits */
    mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
    rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));

    return 0;

mem_rsrc_vmalloc_fail:
    kfree(rf->allocated_ws_nodes);
    rf->allocated_ws_nodes = NULL;

    return ret;
}

/**
 * irdma_cqp_ce_handler - handle cqp completions
 * @rf: RDMA PCI function
 * @cq: cq for cqp completions
 */
void
irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
{
    struct irdma_cqp_request *cqp_request;
    struct irdma_sc_dev *dev = &rf->sc_dev;
    u32 cqe_count = 0;
    struct irdma_ccq_cqe_info info;
    unsigned long flags;
    int ret;

    do {
        memset(&info, 0, sizeof(info));
        spin_lock_irqsave(&rf->cqp.compl_lock, flags);
        ret = irdma_sc_ccq_get_cqe_info(cq, &info);
        spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
        if (ret)
            break;

        cqp_request = (struct irdma_cqp_request *)
            (unsigned long)info.scratch;
        if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
                                             info.maj_err_code,
                                             info.min_err_code))
            irdma_dev_err(dev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
                          info.op_code, info.maj_err_code,
                          info.min_err_code);
        if (cqp_request) {
            cqp_request->compl_info.maj_err_code = info.maj_err_code;
            cqp_request->compl_info.min_err_code = info.min_err_code;
            cqp_request->compl_info.op_ret_val = info.op_ret_val;
            cqp_request->compl_info.error = info.error;

            if (cqp_request->waiting) {
                cqp_request->request_done = true;
                wake_up(&cqp_request->waitq);
                irdma_put_cqp_request(&rf->cqp, cqp_request);
            } else {
                if (cqp_request->callback_fcn)
                    cqp_request->callback_fcn(cqp_request);
                irdma_put_cqp_request(&rf->cqp, cqp_request);
            }
        }

        cqe_count++;
    } while (1);

    if (cqe_count) {
        irdma_process_bh(dev);
        irdma_sc_ccq_arm(dev->ccq);
    }
}

/**
 * cqp_compl_worker - Handle cqp completions
 * @work: Pointer to work structure
 */
void
cqp_compl_worker(struct work_struct *work)
{
    struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
                                          cqp_cmpl_work);
    struct irdma_sc_cq *cq = &rf->ccq.sc_cq;

    irdma_cqp_ce_handler(rf, cq);
}

/**
 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
 * @cm_core: cm's core
 * @port: port to identify apbvt entry
 */
static struct irdma_apbvt_entry *
irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
                         u16 port)
{
    struct irdma_apbvt_entry *entry;

    HASH_FOR_EACH_POSSIBLE(cm_core->apbvt_hash_tbl, entry, hlist, port) {
        if (entry->port == port) {
            entry->use_cnt++;
            return entry;
        }
    }

    return NULL;
}

/**
 * irdma_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: if set, remove the quad hash table entry
 * @term: flags controlling whether a TERM message and/or FIN is sent
 * @termlen: length of term message
 */
void
irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
                    u8 termlen)
{
    struct irdma_modify_qp_info info = {0};

    info.next_iwarp_state = state;
    info.remove_hash_idx = del_hash;
    info.cq_num_valid = true;
    info.arp_cache_idx_valid = true;
    info.dont_send_term = true;
    info.dont_send_fin = true;
    info.termlen = termlen;

    if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
        info.dont_send_term = false;
    if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
        info.dont_send_fin = false;
    if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
        info.reset_tcp_conn = true;
    iwqp->hw_iwarp_state = state;
    irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
    iwqp->iwarp_state = info.next_iwarp_state;
}

/**
 * irdma_del_local_mac_entry - remove a mac entry from the hw
 * table
 * @rf: RDMA PCI function
 * @idx: the index of the mac ip address to delete
 */
void
irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
{
    struct irdma_cqp *iwcqp = &rf->cqp;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;

    cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
    if (!cqp_request)
        return;

    cqp_info = &cqp_request->info;
    cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
    cqp_info->post_sq = 1;
    cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
    cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
    cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
    cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;

    irdma_handle_cqp_op(rf, cqp_request);
    irdma_put_cqp_request(iwcqp, cqp_request);
}

/**
 * irdma_add_local_mac_entry - add a mac ip address entry to the
 * hw table
 * @rf: RDMA PCI function
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
int
irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
{
    struct irdma_local_mac_entry_info *info;
    struct irdma_cqp *iwcqp = &rf->cqp;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    int status;

    cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
    if (!cqp_request)
        return -ENOMEM;

    cqp_info = &cqp_request->info;
    cqp_info->post_sq = 1;
    info = &cqp_info->in.u.add_local_mac_entry.info;
    ether_addr_copy(info->mac_addr, mac_addr);
    info->entry_idx = idx;
    cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
    cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
    cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;

    status = irdma_handle_cqp_op(rf, cqp_request);
    irdma_put_cqp_request(iwcqp, cqp_request);

    return status;
}
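
/*
 * Illustrative pairing (sketch only, not a call site in this file): on
 * GEN_1 hardware the local MAC table is typically programmed by reserving
 * an index and then writing the netdev MAC into it, and the index is
 * released again from irdma_rt_deinit_hw() once the device has reached
 * IP_ADDR_REGISTERED:
 *
 *	u16 idx;
 *
 *	if (!irdma_alloc_local_mac_entry(rf, &idx) &&
 *	    !irdma_add_local_mac_entry(rf, IF_LLADDR(iwdev->netdev), idx))
 *		iwdev->mac_ip_table_idx = idx;
 *	...
 *	irdma_del_local_mac_entry(rf, (u8)iwdev->mac_ip_table_idx);
 *
 * The helper that actually does this, irdma_alloc_set_mac(), lives outside
 * this file; the snippet above only mirrors its expected shape.
 */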

/**
 * irdma_alloc_local_mac_entry - allocate a mac entry
 * @rf: RDMA PCI function
 * @mac_tbl_idx: the index of the new mac address
 *
 * Allocate a mac address entry and update the mac_tbl_idx
 * to hold the index of the newly created mac address
 * Return 0 if successful, otherwise return error
 */
int
irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
{
    struct irdma_cqp *iwcqp = &rf->cqp;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    int status = 0;

    cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
    if (!cqp_request)
        return -ENOMEM;

    cqp_info = &cqp_request->info;
    cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
    cqp_info->post_sq = 1;
    cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
    cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
    status = irdma_handle_cqp_op(rf, cqp_request);
    if (!status)
        *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;

    irdma_put_cqp_request(iwcqp, cqp_request);

    return status;
}

/**
 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
 * @iwdev: irdma device
 * @accel_local_port: port for apbvt
 * @add_port: true to add the port, false to delete it
 */
static int
irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
                           u16 accel_local_port, bool add_port)
{
    struct irdma_apbvt_info *info;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    int status;

    cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
    if (!cqp_request)
        return -ENOMEM;

    cqp_info = &cqp_request->info;
    info = &cqp_info->in.u.manage_apbvt_entry.info;
    memset(info, 0, sizeof(*info));
    info->add = add_port;
    info->port = accel_local_port;
    cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
    cqp_info->post_sq = 1;
    cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
    cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
    irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DEV, "%s: port=0x%04x\n",
                (!add_port) ? "DELETE" : "ADD", accel_local_port);

    status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
    irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

    return status;
}

/**
 * irdma_add_apbvt - add tcp port to HW apbvt table
 * @iwdev: irdma device
 * @port: port for apbvt
 */
struct irdma_apbvt_entry *
irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
{
    struct irdma_cm_core *cm_core = &iwdev->cm_core;
    struct irdma_apbvt_entry *entry;
    unsigned long flags;

    spin_lock_irqsave(&cm_core->apbvt_lock, flags);
    entry = irdma_lookup_apbvt_entry(cm_core, port);
    if (entry) {
        spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
        return entry;
    }

    entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
    if (!entry) {
        spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
        return NULL;
    }

    entry->port = port;
    entry->use_cnt = 1;
    HASH_ADD(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
    spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);

    if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
        kfree(entry);
        return NULL;
    }

    return entry;
}

/**
 * irdma_del_apbvt - delete tcp port from HW apbvt table
 * @iwdev: irdma device
 * @entry: apbvt entry object
 */
void
irdma_del_apbvt(struct irdma_device *iwdev,
                struct irdma_apbvt_entry *entry)
{
    struct irdma_cm_core *cm_core = &iwdev->cm_core;
    unsigned long flags;

    spin_lock_irqsave(&cm_core->apbvt_lock, flags);
    if (--entry->use_cnt) {
        spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
        return;
    }

    HASH_DEL(cm_core->apbvt_hash_tbl, &entry->hlist);
    /*
     * apbvt_lock is held across the (non-waiting) delete APBVT CQP op to
     * prevent an add APBVT CQP for the same port from racing ahead of the
     * delete.
     */
    irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
    kfree(entry);
    spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}

/**
 * irdma_manage_arp_cache - manage hw arp cache
 * @rf: RDMA PCI function
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @action: add, delete or modify
 */
void
irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
                       u32 *ip_addr, u32 action)
{
    struct irdma_add_arp_cache_entry_info *info;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    int arp_index;

    arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action);
    if (arp_index == -1)
        return;

    cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
    if (!cqp_request)
        return;

    cqp_info = &cqp_request->info;
    if (action == IRDMA_ARP_ADD) {
        cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
        info = &cqp_info->in.u.add_arp_cache_entry.info;
        memset(info, 0, sizeof(*info));
        info->arp_index = (u16)arp_index;
        info->permanent = true;
        ether_addr_copy(info->mac_addr, mac_addr);
        cqp_info->in.u.add_arp_cache_entry.scratch =
            (uintptr_t)cqp_request;
        cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
    } else {
        cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
        cqp_info->in.u.del_arp_cache_entry.scratch =
            (uintptr_t)cqp_request;
        cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
        cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
    }

    cqp_info->post_sq = 1;
    irdma_handle_cqp_op(rf, cqp_request);
    irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 */
static void
irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
{
    struct irdma_cm_node *cm_node = cqp_request->param;

    irdma_send_syn(cm_node, 1);
    irdma_rem_ref_cm_node(cm_node);
}

/**
 * irdma_manage_qhash - add, delete or modify qhash
 * @iwdev: irdma device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
int
irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
                   enum irdma_quad_entry_type etype,
                   enum irdma_quad_hash_manage_type mtype, void *cmnode,
                   bool wait)
{
    struct irdma_qhash_table_info *info;
    struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
    struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    struct irdma_cm_node *cm_node = cmnode;
    int status;

    cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
    if (!cqp_request)
        return -ENOMEM;

    cqp_info = &cqp_request->info;
    info = &cqp_info->in.u.manage_qhash_table_entry.info;
    memset(info, 0, sizeof(*info));
    info->vsi = &iwdev->vsi;
    info->manage = mtype;
    info->entry_type = etype;
    if (cminfo->vlan_id < VLAN_N_VID) {
        info->vlan_valid = true;
        info->vlan_id = cminfo->vlan_id;
    } else {
        info->vlan_valid = false;
    }
    info->ipv4_valid = cminfo->ipv4;
    info->user_pri = cminfo->user_pri;
    ether_addr_copy(info->mac_addr, IF_LLADDR(iwdev->netdev));
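
    /*
     * The local tuple always goes into the dest_* fields; the remote
     * (src_*) tuple is only programmed below for the established and
     * UDP/multicast entry types checked underneath.
     */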
    info->qp_num = cminfo->qh_qpid;
    info->dest_port = cminfo->loc_port;
    info->dest_ip[0] = cminfo->loc_addr[0];
    info->dest_ip[1] = cminfo->loc_addr[1];
    info->dest_ip[2] = cminfo->loc_addr[2];
    info->dest_ip[3] = cminfo->loc_addr[3];
    if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
        etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
        etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
        etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
        etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
        info->src_port = cminfo->rem_port;
        info->src_ip[0] = cminfo->rem_addr[0];
        info->src_ip[1] = cminfo->rem_addr[1];
        info->src_ip[2] = cminfo->rem_addr[2];
        info->src_ip[3] = cminfo->rem_addr[3];
    }
    if (cmnode) {
        cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
        cqp_request->param = cmnode;
        if (!wait)
            atomic_inc(&cm_node->refcnt);
    }
    if (info->ipv4_valid)
        irdma_debug(dev, IRDMA_DEBUG_CM,
                    "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
                    (!mtype) ? "DELETE" : "ADD", __builtin_return_address(0),
                    info->dest_port, info->src_port, info->dest_ip, info->src_ip,
                    info->mac_addr, cminfo->vlan_id, cmnode);
    else
        irdma_debug(dev, IRDMA_DEBUG_CM,
                    "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
                    (!mtype) ? "DELETE" : "ADD", __builtin_return_address(0),
                    info->dest_port, info->src_port, info->dest_ip, info->src_ip,
                    info->mac_addr, cminfo->vlan_id, cmnode);

    cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
    cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
    cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
    cqp_info->post_sq = 1;
    status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
    if (status && cm_node && !wait)
        irdma_rem_ref_cm_node(cm_node);

    irdma_put_cqp_request(iwcqp, cqp_request);

    return status;
}

/**
 * irdma_hw_flush_wqes - flush qp's wqe
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
int
irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
                    struct irdma_qp_flush_info *info, bool wait)
{
    int status;
    struct irdma_qp_flush_info *hw_info;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;
    struct irdma_qp *iwqp = qp->qp_uk.back_qp;

    cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
    if (!cqp_request)
        return -ENOMEM;

    cqp_info = &cqp_request->info;
    hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
    memcpy(hw_info, info, sizeof(*hw_info));
    cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
    cqp_info->post_sq = 1;
    cqp_info->in.u.qp_flush_wqes.qp = qp;
    cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
    status = irdma_handle_cqp_op(rf, cqp_request);
    if (status) {
        qp->qp_uk.sq_flush_complete = true;
        qp->qp_uk.rq_flush_complete = true;
        irdma_put_cqp_request(&rf->cqp, cqp_request);
        return status;
    }

    if (!wait || cqp_request->compl_info.maj_err_code)
        goto put_cqp;

    if (info->rq) {
        if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
            cqp_request->compl_info.min_err_code == 0) {
            /* RQ WQE flush was requested but did not happen */
            qp->qp_uk.rq_flush_complete = true;
        }
    }
    if (info->sq) {
        if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
            cqp_request->compl_info.min_err_code == 0) {
            /* SQ WQE flush was requested but did not happen */
            qp->qp_uk.sq_flush_complete = true;
        }
    }
    irdma_debug(&rf->sc_dev, IRDMA_DEBUG_VERBS,
                "qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
                iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
                iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
                cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code);
put_cqp:
    irdma_put_cqp_request(&rf->cqp, cqp_request);

    return status;
}

/**
 * irdma_gen_ae - generate AE
 * @rf: RDMA PCI function
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void
irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
             struct irdma_gen_ae_info *info, bool wait)
{
    struct irdma_gen_ae_info *ae_info;
    struct irdma_cqp_request *cqp_request;
    struct cqp_cmds_info *cqp_info;

    cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
    if (!cqp_request)
        return;

    cqp_info = &cqp_request->info;
    ae_info = &cqp_request->info.in.u.gen_ae.info;
    memcpy(ae_info, info, sizeof(*ae_info));
    cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
    cqp_info->post_sq = 1;
    cqp_info->in.u.gen_ae.qp = qp;
    cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;

    irdma_handle_cqp_op(rf, cqp_request);
    irdma_put_cqp_request(&rf->cqp, cqp_request);
}

void
irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
{
    struct irdma_qp_flush_info info = {0};
    struct irdma_pci_f *rf = iwqp->iwdev->rf;
    u8 flush_code = iwqp->sc_qp.flush_code;

    if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
        return;

    /* Set flush info fields */
    info.sq = flush_mask & IRDMA_FLUSH_SQ;
    info.rq = flush_mask & IRDMA_FLUSH_RQ;

    if (flush_mask & IRDMA_REFLUSH) {
        if (info.sq)
            iwqp->sc_qp.flush_sq = false;
        if (info.rq)
            iwqp->sc_qp.flush_rq = false;
    }

    /* Generate userflush errors in CQE */
    info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
    info.sq_minor_code = FLUSH_GENERAL_ERR;
    info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
    info.rq_minor_code = FLUSH_GENERAL_ERR;
    info.userflushcode = true;
    if (flush_code) {
        if (info.sq && iwqp->sc_qp.sq_flush_code)
            info.sq_minor_code = flush_code;
        if (info.rq && iwqp->sc_qp.rq_flush_code)
            info.rq_minor_code = flush_code;
    }

    if (irdma_upload_context && !(flush_mask & IRDMA_REFLUSH) &&
        irdma_upload_qp_context(iwqp, 0, 1))
        irdma_print("failed to upload QP context\n");

    /* Issue flush */
    (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
                              flush_mask & IRDMA_FLUSH_WAIT);
    iwqp->flush_issued = true;
}
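
/*
 * Illustrative use (sketch only, not a call site in this file): once a QP
 * has been moved to the error state, callers typically flush both work
 * queues and wait for the CQP completion, e.g.
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			 IRDMA_FLUSH_WAIT);
 *
 * Passing IRDMA_REFLUSH clears the per-queue flush state first so that a
 * flush can be issued again for the same QP.
 */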