/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "irdma_main.h"

LIST_HEAD(irdma_handlers);
DEFINE_SPINLOCK(irdma_handler_lock);

/**
 * irdma_arp_table - manage arp table
 * @rf: RDMA PCI function
 * @ip_addr: ip address for device
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int
irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
                u32 action)
{
        unsigned long flags;
        int arp_index;
        u32 ip[4] = {};

        memcpy(ip, ip_addr, sizeof(ip));

        spin_lock_irqsave(&rf->arp_lock, flags);
        for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
                if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
                        break;
        }

        switch (action) {
        case IRDMA_ARP_ADD:
                if (arp_index != rf->arp_table_size) {
                        arp_index = -1;
                        break;
                }

                arp_index = 0;
                if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
                                     (u32 *)&arp_index, &rf->next_arp_index)) {
                        arp_index = -1;
                        break;
                }

                memcpy(rf->arp_table[arp_index].ip_addr, ip,
                       sizeof(rf->arp_table[arp_index].ip_addr));
                ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
                break;
        case IRDMA_ARP_RESOLVE:
                if (arp_index == rf->arp_table_size)
                        arp_index = -1;
                break;
        case IRDMA_ARP_DELETE:
                if (arp_index == rf->arp_table_size) {
                        arp_index = -1;
                        break;
                }

                memset(rf->arp_table[arp_index].ip_addr, 0,
                       sizeof(rf->arp_table[arp_index].ip_addr));
                eth_zero_addr(rf->arp_table[arp_index].mac_addr);
                irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
                break;
        default:
                arp_index = -1;
                break;
        }

        spin_unlock_irqrestore(&rf->arp_lock, flags);
        return arp_index;
}

/**
 * irdma_add_arp - add a new arp entry if needed
 * @rf: RDMA function
 * @ip: IP address
 * @mac: MAC address
 */
int
irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
{
        int arpidx;

        arpidx = irdma_arp_table(rf, &ip[0], NULL, IRDMA_ARP_RESOLVE);
        if (arpidx >= 0) {
                if
(ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac)) 120 return arpidx; 121 122 irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip, 123 IRDMA_ARP_DELETE); 124 } 125 126 irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD); 127 128 return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE); 129 } 130 131 /** 132 * irdma_netdevice_event - system notifier for netdev events 133 * @notifier: not used 134 * @event: event for notifier 135 * @ptr: netdev 136 */ 137 int 138 irdma_netdevice_event(struct notifier_block *notifier, unsigned long event, 139 void *ptr) 140 { 141 struct irdma_device *iwdev; 142 struct ifnet *netdev = netdev_notifier_info_to_ifp(ptr); 143 144 iwdev = container_of(notifier, struct irdma_device, nb_netdevice_event); 145 if (iwdev->netdev != netdev) 146 return NOTIFY_DONE; 147 148 iwdev->iw_status = 1; 149 switch (event) { 150 case NETDEV_DOWN: 151 iwdev->iw_status = 0; 152 /* fallthrough */ 153 case NETDEV_UP: 154 irdma_port_ibevent(iwdev); 155 break; 156 default: 157 break; 158 } 159 160 return NOTIFY_DONE; 161 } 162 163 void 164 irdma_unregister_notifiers(struct irdma_device *iwdev) 165 { 166 unregister_netdevice_notifier(&iwdev->nb_netdevice_event); 167 } 168 169 int 170 irdma_register_notifiers(struct irdma_device *iwdev) 171 { 172 int ret; 173 174 iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event; 175 ret = register_netdevice_notifier(&iwdev->nb_netdevice_event); 176 if (ret) { 177 irdma_dev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n"); 178 return ret; 179 } 180 return ret; 181 } 182 /** 183 * irdma_alloc_and_get_cqp_request - get cqp struct 184 * @cqp: device cqp ptr 185 * @wait: cqp to be used in wait mode 186 */ 187 struct irdma_cqp_request * 188 irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp, 189 bool wait) 190 { 191 struct irdma_cqp_request *cqp_request = NULL; 192 unsigned long flags; 193 194 spin_lock_irqsave(&cqp->req_lock, flags); 195 if (!list_empty(&cqp->cqp_avail_reqs)) { 196 cqp_request = list_entry(cqp->cqp_avail_reqs.next, 197 struct irdma_cqp_request, list); 198 list_del_init(&cqp_request->list); 199 } 200 spin_unlock_irqrestore(&cqp->req_lock, flags); 201 if (!cqp_request) { 202 cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); 203 if (cqp_request) { 204 cqp_request->dynamic = true; 205 if (wait) 206 init_waitqueue_head(&cqp_request->waitq); 207 } 208 } 209 if (!cqp_request) { 210 irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR, "CQP Request Fail: No Memory"); 211 return NULL; 212 } 213 214 cqp_request->waiting = wait; 215 atomic_set(&cqp_request->refcnt, 1); 216 memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info)); 217 218 return cqp_request; 219 } 220 221 /** 222 * irdma_get_cqp_request - increase refcount for cqp_request 223 * @cqp_request: pointer to cqp_request instance 224 */ 225 static inline void 226 irdma_get_cqp_request(struct irdma_cqp_request *cqp_request) 227 { 228 atomic_inc(&cqp_request->refcnt); 229 } 230 231 /** 232 * irdma_free_cqp_request - free cqp request 233 * @cqp: cqp ptr 234 * @cqp_request: to be put back in cqp list 235 */ 236 void 237 irdma_free_cqp_request(struct irdma_cqp *cqp, 238 struct irdma_cqp_request *cqp_request) 239 { 240 unsigned long flags; 241 242 if (cqp_request->dynamic) { 243 kfree(cqp_request); 244 } else { 245 cqp_request->request_done = false; 246 cqp_request->callback_fcn = NULL; 247 cqp_request->waiting = false; 248 249 spin_lock_irqsave(&cqp->req_lock, flags); 250 list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); 251 
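                /*
                 * The preallocated request is now back on cqp_avail_reqs for
                 * reuse.  Callers normally do not invoke this function
                 * directly; they drop their reference via
                 * irdma_put_cqp_request(), which calls here once the
                 * refcount reaches zero.
                 */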
spin_unlock_irqrestore(&cqp->req_lock, flags); 252 } 253 wake_up(&cqp->remove_wq); 254 } 255 256 /** 257 * irdma_put_cqp_request - dec ref count and free if 0 258 * @cqp: cqp ptr 259 * @cqp_request: to be put back in cqp list 260 */ 261 void 262 irdma_put_cqp_request(struct irdma_cqp *cqp, 263 struct irdma_cqp_request *cqp_request) 264 { 265 if (atomic_dec_and_test(&cqp_request->refcnt)) 266 irdma_free_cqp_request(cqp, cqp_request); 267 } 268 269 /** 270 * irdma_free_pending_cqp_request -free pending cqp request objs 271 * @cqp: cqp ptr 272 * @cqp_request: to be put back in cqp list 273 */ 274 static void 275 irdma_free_pending_cqp_request(struct irdma_cqp *cqp, 276 struct irdma_cqp_request *cqp_request) 277 { 278 if (cqp_request->waiting) { 279 cqp_request->compl_info.error = true; 280 cqp_request->request_done = true; 281 wake_up(&cqp_request->waitq); 282 } 283 wait_event_timeout(cqp->remove_wq, 284 atomic_read(&cqp_request->refcnt) == 1, 1000); 285 irdma_put_cqp_request(cqp, cqp_request); 286 } 287 288 /** 289 * irdma_cleanup_pending_cqp_op - clean-up cqp with no 290 * completions 291 * @rf: RDMA PCI function 292 */ 293 void 294 irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf) 295 { 296 struct irdma_sc_dev *dev = &rf->sc_dev; 297 struct irdma_cqp *cqp = &rf->cqp; 298 struct irdma_cqp_request *cqp_request = NULL; 299 struct cqp_cmds_info *pcmdinfo = NULL; 300 u32 i, pending_work, wqe_idx; 301 302 pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring); 303 wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring); 304 for (i = 0; i < pending_work; i++) { 305 cqp_request = (struct irdma_cqp_request *)(uintptr_t) 306 cqp->scratch_array[wqe_idx]; 307 if (cqp_request) 308 irdma_free_pending_cqp_request(cqp, cqp_request); 309 wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring); 310 } 311 312 while (!list_empty(&dev->cqp_cmd_head)) { 313 pcmdinfo = irdma_remove_cqp_head(dev); 314 cqp_request = 315 container_of(pcmdinfo, struct irdma_cqp_request, info); 316 if (cqp_request) 317 irdma_free_pending_cqp_request(cqp, cqp_request); 318 } 319 } 320 321 /** 322 * irdma_wait_event - wait for completion 323 * @rf: RDMA PCI function 324 * @cqp_request: cqp request to wait 325 */ 326 static int 327 irdma_wait_event(struct irdma_pci_f *rf, 328 struct irdma_cqp_request *cqp_request) 329 { 330 struct irdma_cqp_timeout cqp_timeout = {0}; 331 int timeout_threshold = CQP_TIMEOUT_THRESHOLD; 332 bool cqp_error = false; 333 int err_code = 0; 334 335 cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; 336 do { 337 int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms; 338 339 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); 340 if (wait_event_timeout(cqp_request->waitq, 341 cqp_request->request_done, 342 msecs_to_jiffies(wait_time_ms))) 343 break; 344 345 irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev); 346 347 if (cqp_timeout.count < timeout_threshold) 348 continue; 349 350 if (!rf->reset) { 351 rf->reset = true; 352 rf->gen_ops.request_reset(rf); 353 } 354 return -ETIMEDOUT; 355 } while (1); 356 357 cqp_error = cqp_request->compl_info.error; 358 if (cqp_error) { 359 err_code = -EIO; 360 if (cqp_request->compl_info.maj_err_code == 0xFFFF) { 361 if (cqp_request->compl_info.min_err_code == 0x8002) { 362 err_code = -EBUSY; 363 } else if (cqp_request->compl_info.min_err_code == 0x8029) { 364 if (!rf->reset) { 365 rf->reset = true; 366 rf->gen_ops.request_reset(rf); 367 } 368 } 369 } 370 } 371 372 return err_code; 373 } 374 375 static const char *const 
irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
        [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
        [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
        [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
        [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
        [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
        [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
        [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
        [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
        [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
        [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
        [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
        [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
        [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
        [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
        [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
        [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
        [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
        [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
        [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
        [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
        [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
        [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
        [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
        [IRDMA_OP_RESUME] = "Resume QP Cmd",
        [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
        [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
        [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
        [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
        [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
        [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
        [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
        [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
        [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
        [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
        [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
        [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
        [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
        [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
        [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
        [IRDMA_OP_WS_FAILOVER_START] = "Failover Start Cmd",
        [IRDMA_OP_WS_FAILOVER_COMPLETE] = "Failover Complete Cmd",
        [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
        [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
        [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
        [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
        [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
        [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
        [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
};

static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
        {0xffff, 0x8002, "Invalid State"},
        {0xffff, 0x8006, "Flush No Wqe Pending"},
        {0xffff, 0x8007, "Modify QP Bad Close"},
        {0xffff, 0x8009, "LLP Closed"},
        {0xffff, 0x800a, "Reset Not Sent"},
        {0xffff, 0x200, "Failover Pending"}
};

/**
 * irdma_cqp_crit_err - check if CQP error is critical
 * @dev: pointer to dev structure
 * @cqp_cmd: code for last CQP operation
 * @maj_err_code: major error code
 * @min_err_code: minor error code
 */
bool
irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
                   u16 maj_err_code, u16 min_err_code)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
                if (maj_err_code ==
irdma_noncrit_err_list[i].maj && 450 min_err_code == irdma_noncrit_err_list[i].min) { 451 irdma_debug(dev, IRDMA_DEBUG_CQP, 452 "[%s Error][%s] maj=0x%x min=0x%x\n", 453 irdma_noncrit_err_list[i].desc, 454 irdma_cqp_cmd_names[cqp_cmd], maj_err_code, 455 min_err_code); 456 return false; 457 } 458 } 459 return true; 460 } 461 462 /** 463 * irdma_handle_cqp_op - process cqp command 464 * @rf: RDMA PCI function 465 * @cqp_request: cqp request to process 466 */ 467 int 468 irdma_handle_cqp_op(struct irdma_pci_f *rf, 469 struct irdma_cqp_request *cqp_request) 470 { 471 struct irdma_sc_dev *dev = &rf->sc_dev; 472 struct cqp_cmds_info *info = &cqp_request->info; 473 int status; 474 bool put_cqp_request = true; 475 476 if (rf->reset) 477 return 0; 478 479 irdma_get_cqp_request(cqp_request); 480 status = irdma_process_cqp_cmd(dev, info); 481 if (status) 482 goto err; 483 484 if (cqp_request->waiting) { 485 put_cqp_request = false; 486 status = irdma_wait_event(rf, cqp_request); 487 if (status) 488 goto err; 489 } 490 491 return 0; 492 493 err: 494 if (irdma_cqp_crit_err(dev, info->cqp_cmd, 495 cqp_request->compl_info.maj_err_code, 496 cqp_request->compl_info.min_err_code)) 497 irdma_dev_err(&rf->iwdev->ibdev, 498 "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n", 499 irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, 500 cqp_request->waiting, cqp_request->compl_info.error, 501 cqp_request->compl_info.maj_err_code, 502 cqp_request->compl_info.min_err_code); 503 504 if (put_cqp_request) 505 irdma_put_cqp_request(&rf->cqp, cqp_request); 506 507 return status; 508 } 509 510 void 511 irdma_qp_add_ref(struct ib_qp *ibqp) 512 { 513 struct irdma_qp *iwqp = to_iwqp(ibqp); 514 515 atomic_inc(&iwqp->refcnt); 516 } 517 518 void 519 irdma_qp_rem_ref(struct ib_qp *ibqp) 520 { 521 struct irdma_qp *iwqp = to_iwqp(ibqp); 522 struct irdma_device *iwdev = iwqp->iwdev; 523 unsigned long flags; 524 525 spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); 526 if (!atomic_dec_and_test(&iwqp->refcnt)) { 527 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); 528 return; 529 } 530 531 iwdev->rf->qp_table[iwqp->ibqp.qp_num] = NULL; 532 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); 533 complete(&iwqp->free_qp); 534 } 535 536 void 537 irdma_cq_add_ref(struct ib_cq *ibcq) 538 { 539 struct irdma_cq *iwcq = to_iwcq(ibcq); 540 541 atomic_inc(&iwcq->refcnt); 542 } 543 544 void 545 irdma_cq_rem_ref(struct ib_cq *ibcq) 546 { 547 struct irdma_cq *iwcq = to_iwcq(ibcq); 548 struct irdma_pci_f *rf = container_of(iwcq->sc_cq.dev, struct irdma_pci_f, sc_dev); 549 unsigned long flags; 550 551 spin_lock_irqsave(&rf->cqtable_lock, flags); 552 if (!atomic_dec_and_test(&iwcq->refcnt)) { 553 spin_unlock_irqrestore(&rf->cqtable_lock, flags); 554 return; 555 } 556 557 rf->cq_table[iwcq->cq_num] = NULL; 558 spin_unlock_irqrestore(&rf->cqtable_lock, flags); 559 complete(&iwcq->free_cq); 560 } 561 562 struct ib_device * 563 to_ibdev(struct irdma_sc_dev *dev) 564 { 565 return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev; 566 } 567 568 /** 569 * irdma_get_qp - get qp address 570 * @device: iwarp device 571 * @qpn: qp number 572 */ 573 struct ib_qp * 574 irdma_get_qp(struct ib_device *device, int qpn) 575 { 576 struct irdma_device *iwdev = to_iwdev(device); 577 578 if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp) 579 return NULL; 580 581 return &iwdev->rf->qp_table[qpn]->ibqp; 582 } 583 584 /** 585 * irdma_remove_cqp_head - return head entry and remove 586 * @dev: device 587 
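 *
 * Note: the pointer returned is the queued list entry itself;
 * irdma_cleanup_pending_cqp_op() above treats it as a struct cqp_cmds_info
 * and uses container_of() to recover the owning irdma_cqp_request.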
*/ 588 void * 589 irdma_remove_cqp_head(struct irdma_sc_dev *dev) 590 { 591 struct list_head *entry; 592 struct list_head *list = &dev->cqp_cmd_head; 593 594 if (list_empty(list)) 595 return NULL; 596 597 entry = list->next; 598 list_del(entry); 599 600 return entry; 601 } 602 603 /** 604 * irdma_cqp_sds_cmd - create cqp command for sd 605 * @dev: hardware control device structure 606 * @sdinfo: information for sd cqp 607 * 608 */ 609 int 610 irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, 611 struct irdma_update_sds_info *sdinfo) 612 { 613 struct irdma_cqp_request *cqp_request; 614 struct cqp_cmds_info *cqp_info; 615 struct irdma_pci_f *rf = dev_to_rf(dev); 616 int status; 617 618 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 619 if (!cqp_request) 620 return -ENOMEM; 621 622 cqp_info = &cqp_request->info; 623 memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, 624 sizeof(cqp_info->in.u.update_pe_sds.info)); 625 cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS; 626 cqp_info->post_sq = 1; 627 cqp_info->in.u.update_pe_sds.dev = dev; 628 cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request; 629 630 status = irdma_handle_cqp_op(rf, cqp_request); 631 irdma_put_cqp_request(&rf->cqp, cqp_request); 632 633 return status; 634 } 635 636 /** 637 * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume 638 * @qp: hardware control qp 639 * @op: suspend or resume 640 */ 641 int 642 irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op) 643 { 644 struct irdma_sc_dev *dev = qp->dev; 645 struct irdma_cqp_request *cqp_request; 646 struct irdma_sc_cqp *cqp = dev->cqp; 647 struct cqp_cmds_info *cqp_info; 648 struct irdma_pci_f *rf = dev_to_rf(dev); 649 int status; 650 651 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); 652 if (!cqp_request) 653 return -ENOMEM; 654 655 cqp_info = &cqp_request->info; 656 cqp_info->cqp_cmd = op; 657 cqp_info->in.u.suspend_resume.cqp = cqp; 658 cqp_info->in.u.suspend_resume.qp = qp; 659 cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request; 660 661 status = irdma_handle_cqp_op(rf, cqp_request); 662 irdma_put_cqp_request(&rf->cqp, cqp_request); 663 664 return status; 665 } 666 667 /** 668 * irdma_term_modify_qp - modify qp for term message 669 * @qp: hardware control qp 670 * @next_state: qp's next state 671 * @term: terminate code 672 * @term_len: length 673 */ 674 void 675 irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, 676 u8 term_len) 677 { 678 struct irdma_qp *iwqp; 679 680 iwqp = qp->qp_uk.back_qp; 681 irdma_next_iw_state(iwqp, next_state, 0, term, term_len); 682 }; 683 684 /** 685 * irdma_terminate_done - after terminate is completed 686 * @qp: hardware control qp 687 * @timeout_occurred: indicates if terminate timer expired 688 */ 689 void 690 irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred) 691 { 692 struct irdma_qp *iwqp; 693 u8 hte = 0; 694 bool first_time; 695 unsigned long flags; 696 697 iwqp = qp->qp_uk.back_qp; 698 spin_lock_irqsave(&iwqp->lock, flags); 699 if (iwqp->hte_added) { 700 iwqp->hte_added = 0; 701 hte = 1; 702 } 703 first_time = !(qp->term_flags & IRDMA_TERM_DONE); 704 qp->term_flags |= IRDMA_TERM_DONE; 705 spin_unlock_irqrestore(&iwqp->lock, flags); 706 if (first_time) { 707 if (!timeout_occurred) 708 irdma_terminate_del_timer(qp); 709 710 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0); 711 irdma_cm_disconn(iwqp); 712 } 713 } 714 715 static void 716 irdma_terminate_timeout(struct timer_list *t) 717 { 718 struct irdma_qp *iwqp = from_timer(iwqp, t, 
terminate_timer); 719 struct irdma_sc_qp *qp = &iwqp->sc_qp; 720 721 irdma_terminate_done(qp, 1); 722 irdma_qp_rem_ref(&iwqp->ibqp); 723 } 724 725 /** 726 * irdma_terminate_start_timer - start terminate timeout 727 * @qp: hardware control qp 728 */ 729 void 730 irdma_terminate_start_timer(struct irdma_sc_qp *qp) 731 { 732 struct irdma_qp *iwqp; 733 734 iwqp = qp->qp_uk.back_qp; 735 irdma_qp_add_ref(&iwqp->ibqp); 736 timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0); 737 iwqp->terminate_timer.expires = jiffies + HZ; 738 739 add_timer(&iwqp->terminate_timer); 740 } 741 742 /** 743 * irdma_terminate_del_timer - delete terminate timeout 744 * @qp: hardware control qp 745 */ 746 void 747 irdma_terminate_del_timer(struct irdma_sc_qp *qp) 748 { 749 struct irdma_qp *iwqp; 750 int ret; 751 752 iwqp = qp->qp_uk.back_qp; 753 ret = irdma_del_timer_compat(&iwqp->terminate_timer); 754 if (ret) 755 irdma_qp_rem_ref(&iwqp->ibqp); 756 } 757 758 /** 759 * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm 760 * @dev: function device struct 761 * @val_mem: buffer for fpm 762 * @hmc_fn_id: function id for fpm 763 */ 764 int 765 irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, 766 struct irdma_dma_mem *val_mem, u16 hmc_fn_id) 767 { 768 struct irdma_cqp_request *cqp_request; 769 struct cqp_cmds_info *cqp_info; 770 struct irdma_pci_f *rf = dev_to_rf(dev); 771 int status; 772 773 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 774 if (!cqp_request) 775 return -ENOMEM; 776 777 cqp_info = &cqp_request->info; 778 cqp_request->param = NULL; 779 cqp_info->in.u.query_fpm_val.cqp = dev->cqp; 780 cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa; 781 cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va; 782 cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id; 783 cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL; 784 cqp_info->post_sq = 1; 785 cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request; 786 787 status = irdma_handle_cqp_op(rf, cqp_request); 788 irdma_put_cqp_request(&rf->cqp, cqp_request); 789 790 return status; 791 } 792 793 /** 794 * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw 795 * @dev: hardware control device structure 796 * @val_mem: buffer with fpm values 797 * @hmc_fn_id: function id for fpm 798 */ 799 int 800 irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, 801 struct irdma_dma_mem *val_mem, u16 hmc_fn_id) 802 { 803 struct irdma_cqp_request *cqp_request; 804 struct cqp_cmds_info *cqp_info; 805 struct irdma_pci_f *rf = dev_to_rf(dev); 806 int status; 807 808 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 809 if (!cqp_request) 810 return -ENOMEM; 811 812 cqp_info = &cqp_request->info; 813 cqp_request->param = NULL; 814 cqp_info->in.u.commit_fpm_val.cqp = dev->cqp; 815 cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa; 816 cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va; 817 cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id; 818 cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL; 819 cqp_info->post_sq = 1; 820 cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request; 821 822 status = irdma_handle_cqp_op(rf, cqp_request); 823 irdma_put_cqp_request(&rf->cqp, cqp_request); 824 825 return status; 826 } 827 828 /** 829 * irdma_cqp_cq_create_cmd - create a cq for the cqp 830 * @dev: device pointer 831 * @cq: pointer to created cq 832 */ 833 int 834 irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) 835 { 836 struct irdma_pci_f *rf = dev_to_rf(dev); 837 struct irdma_cqp *iwcqp = &rf->cqp; 838 struct 
irdma_cqp_request *cqp_request; 839 struct cqp_cmds_info *cqp_info; 840 int status; 841 842 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); 843 if (!cqp_request) 844 return -ENOMEM; 845 846 cqp_info = &cqp_request->info; 847 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; 848 cqp_info->post_sq = 1; 849 cqp_info->in.u.cq_create.cq = cq; 850 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; 851 852 status = irdma_handle_cqp_op(rf, cqp_request); 853 irdma_put_cqp_request(iwcqp, cqp_request); 854 855 return status; 856 } 857 858 /** 859 * irdma_cqp_qp_create_cmd - create a qp for the cqp 860 * @dev: device pointer 861 * @qp: pointer to created qp 862 */ 863 int 864 irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) 865 { 866 struct irdma_pci_f *rf = dev_to_rf(dev); 867 struct irdma_cqp *iwcqp = &rf->cqp; 868 struct irdma_cqp_request *cqp_request; 869 struct cqp_cmds_info *cqp_info; 870 struct irdma_create_qp_info *qp_info; 871 int status; 872 873 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); 874 if (!cqp_request) 875 return -ENOMEM; 876 877 cqp_info = &cqp_request->info; 878 qp_info = &cqp_request->info.in.u.qp_create.info; 879 memset(qp_info, 0, sizeof(*qp_info)); 880 qp_info->cq_num_valid = true; 881 qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS; 882 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; 883 cqp_info->post_sq = 1; 884 cqp_info->in.u.qp_create.qp = qp; 885 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; 886 887 status = irdma_handle_cqp_op(rf, cqp_request); 888 irdma_put_cqp_request(iwcqp, cqp_request); 889 890 return status; 891 } 892 893 /** 894 * irdma_dealloc_push_page - free a push page for qp 895 * @rf: RDMA PCI function 896 * @qp: hardware control qp 897 */ 898 void 899 irdma_dealloc_push_page(struct irdma_pci_f *rf, 900 struct irdma_sc_qp *qp) 901 { 902 struct irdma_cqp_request *cqp_request; 903 struct cqp_cmds_info *cqp_info; 904 int status; 905 906 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) 907 return; 908 909 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); 910 if (!cqp_request) 911 return; 912 913 cqp_info = &cqp_request->info; 914 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; 915 cqp_info->post_sq = 1; 916 cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx; 917 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle; 918 cqp_info->in.u.manage_push_page.info.free_page = 1; 919 cqp_info->in.u.manage_push_page.info.push_page_type = 0; 920 cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp; 921 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; 922 status = irdma_handle_cqp_op(rf, cqp_request); 923 if (!status) 924 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; 925 irdma_put_cqp_request(&rf->cqp, cqp_request); 926 } 927 928 /** 929 * irdma_cq_wq_destroy - send cq destroy cqp 930 * @rf: RDMA PCI function 931 * @cq: hardware control cq 932 */ 933 void 934 irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) 935 { 936 struct irdma_cqp_request *cqp_request; 937 struct cqp_cmds_info *cqp_info; 938 939 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 940 if (!cqp_request) 941 return; 942 943 cqp_info = &cqp_request->info; 944 cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY; 945 cqp_info->post_sq = 1; 946 cqp_info->in.u.cq_destroy.cq = cq; 947 cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; 948 949 irdma_handle_cqp_op(rf, cqp_request); 950 irdma_put_cqp_request(&rf->cqp, cqp_request); 951 } 952 953 /** 954 * irdma_hw_modify_qp_callback - 
handle state for modifyQPs that don't wait 955 * @cqp_request: modify QP completion 956 */ 957 static void 958 irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request) 959 { 960 struct cqp_cmds_info *cqp_info; 961 struct irdma_qp *iwqp; 962 963 cqp_info = &cqp_request->info; 964 iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp; 965 atomic_dec(&iwqp->hw_mod_qp_pend); 966 wake_up(&iwqp->mod_qp_waitq); 967 } 968 969 /** 970 * irdma_hw_modify_qp - setup cqp for modify qp 971 * @iwdev: RDMA device 972 * @iwqp: qp ptr (user or kernel) 973 * @info: info for modify qp 974 * @wait: flag to wait or not for modify qp completion 975 */ 976 int 977 irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, 978 struct irdma_modify_qp_info *info, bool wait) 979 { 980 int status; 981 struct irdma_pci_f *rf = iwdev->rf; 982 struct irdma_cqp_request *cqp_request; 983 struct cqp_cmds_info *cqp_info; 984 struct irdma_modify_qp_info *m_info; 985 986 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); 987 if (!cqp_request) 988 return -ENOMEM; 989 990 if (!wait) { 991 cqp_request->callback_fcn = irdma_hw_modify_qp_callback; 992 atomic_inc(&iwqp->hw_mod_qp_pend); 993 } 994 cqp_info = &cqp_request->info; 995 m_info = &cqp_info->in.u.qp_modify.info; 996 memcpy(m_info, info, sizeof(*m_info)); 997 cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; 998 cqp_info->post_sq = 1; 999 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; 1000 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; 1001 status = irdma_handle_cqp_op(rf, cqp_request); 1002 irdma_put_cqp_request(&rf->cqp, cqp_request); 1003 if (status) { 1004 if (rdma_protocol_roce(&iwdev->ibdev, 1)) 1005 return status; 1006 1007 switch (m_info->next_iwarp_state) { 1008 struct irdma_gen_ae_info ae_info; 1009 1010 case IRDMA_QP_STATE_RTS: 1011 case IRDMA_QP_STATE_IDLE: 1012 case IRDMA_QP_STATE_TERMINATE: 1013 case IRDMA_QP_STATE_CLOSING: 1014 if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE) 1015 irdma_send_reset(iwqp->cm_node); 1016 else 1017 iwqp->sc_qp.term_flags = IRDMA_TERM_DONE; 1018 if (!wait) { 1019 ae_info.ae_code = IRDMA_AE_BAD_CLOSE; 1020 ae_info.ae_src = 0; 1021 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false); 1022 } else { 1023 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, 1024 wait); 1025 if (!cqp_request) 1026 return -ENOMEM; 1027 1028 cqp_info = &cqp_request->info; 1029 m_info = &cqp_info->in.u.qp_modify.info; 1030 memcpy(m_info, info, sizeof(*m_info)); 1031 cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; 1032 cqp_info->post_sq = 1; 1033 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; 1034 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; 1035 m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR; 1036 m_info->reset_tcp_conn = true; 1037 irdma_handle_cqp_op(rf, cqp_request); 1038 irdma_put_cqp_request(&rf->cqp, cqp_request); 1039 } 1040 break; 1041 case IRDMA_QP_STATE_ERROR: 1042 default: 1043 break; 1044 } 1045 } 1046 1047 return status; 1048 } 1049 1050 /** 1051 * irdma_cqp_cq_destroy_cmd - destroy the cqp cq 1052 * @dev: device pointer 1053 * @cq: pointer to cq 1054 */ 1055 void 1056 irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) 1057 { 1058 struct irdma_pci_f *rf = dev_to_rf(dev); 1059 1060 irdma_cq_wq_destroy(rf, cq); 1061 } 1062 1063 /** 1064 * irdma_cqp_qp_destroy_cmd - destroy the cqp 1065 * @dev: device pointer 1066 * @qp: pointer to qp 1067 */ 1068 int 1069 irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) 1070 { 1071 struct irdma_pci_f *rf = dev_to_rf(dev); 1072 
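        /*
         * Standard CQP command pattern used throughout this file (a sketch;
         * this function is itself one instance of it):
         *
         *   cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
         *   if (!cqp_request)
         *           return -ENOMEM;
         *   cqp_info = &cqp_request->info;
         *   ... set cqp_info->cqp_cmd, post_sq and in.u.<op> ...
         *   status = irdma_handle_cqp_op(rf, cqp_request);
         *   irdma_put_cqp_request(&rf->cqp, cqp_request);
         */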
struct irdma_cqp *iwcqp = &rf->cqp; 1073 struct irdma_cqp_request *cqp_request; 1074 struct cqp_cmds_info *cqp_info; 1075 int status; 1076 1077 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); 1078 if (!cqp_request) 1079 return -ENOMEM; 1080 1081 cqp_info = &cqp_request->info; 1082 memset(cqp_info, 0, sizeof(*cqp_info)); 1083 cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY; 1084 cqp_info->post_sq = 1; 1085 cqp_info->in.u.qp_destroy.qp = qp; 1086 cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; 1087 cqp_info->in.u.qp_destroy.remove_hash_idx = true; 1088 1089 status = irdma_handle_cqp_op(rf, cqp_request); 1090 irdma_put_cqp_request(&rf->cqp, cqp_request); 1091 1092 return status; 1093 } 1094 1095 /** 1096 * irdma_ieq_mpa_crc_ae - generate AE for crc error 1097 * @dev: hardware control device structure 1098 * @qp: hardware control qp 1099 */ 1100 void 1101 irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) 1102 { 1103 struct irdma_gen_ae_info info = {0}; 1104 struct irdma_pci_f *rf = dev_to_rf(dev); 1105 1106 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n"); 1107 info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR; 1108 info.ae_src = IRDMA_AE_SOURCE_RQ; 1109 irdma_gen_ae(rf, qp, &info, false); 1110 } 1111 1112 /** 1113 * irdma_ieq_get_qp - get qp based on quad in puda buffer 1114 * @dev: hardware control device structure 1115 * @buf: receive puda buffer on exception q 1116 */ 1117 struct irdma_sc_qp * 1118 irdma_ieq_get_qp(struct irdma_sc_dev *dev, 1119 struct irdma_puda_buf *buf) 1120 { 1121 struct irdma_qp *iwqp; 1122 struct irdma_cm_node *cm_node; 1123 struct irdma_device *iwdev = buf->vsi->back_vsi; 1124 u32 loc_addr[4] = {0}; 1125 u32 rem_addr[4] = {0}; 1126 u16 loc_port, rem_port; 1127 struct ip6_hdr *ip6h; 1128 struct ip *iph = (struct ip *)buf->iph; 1129 struct tcphdr *tcph = (struct tcphdr *)buf->tcph; 1130 1131 if (iph->ip_v == 4) { 1132 loc_addr[0] = ntohl(iph->ip_dst.s_addr); 1133 rem_addr[0] = ntohl(iph->ip_src.s_addr); 1134 } else { 1135 ip6h = (struct ip6_hdr *)buf->iph; 1136 irdma_copy_ip_ntohl(loc_addr, ip6h->ip6_dst.__u6_addr.__u6_addr32); 1137 irdma_copy_ip_ntohl(rem_addr, ip6h->ip6_src.__u6_addr.__u6_addr32); 1138 } 1139 loc_port = ntohs(tcph->th_dport); 1140 rem_port = ntohs(tcph->th_sport); 1141 cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port, 1142 loc_addr, buf->vlan_valid ? 
buf->vlan_id : 0xFFFF); 1143 if (!cm_node) 1144 return NULL; 1145 1146 iwqp = cm_node->iwqp; 1147 irdma_rem_ref_cm_node(cm_node); 1148 1149 return &iwqp->sc_qp; 1150 } 1151 1152 /** 1153 * irdma_send_ieq_ack - ACKs for duplicate or OOO partials FPDUs 1154 * @qp: qp ptr 1155 */ 1156 void 1157 irdma_send_ieq_ack(struct irdma_sc_qp *qp) 1158 { 1159 struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node; 1160 struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf; 1161 struct tcphdr *tcph = (struct tcphdr *)buf->tcph; 1162 1163 cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum; 1164 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack); 1165 1166 irdma_send_ack(cm_node); 1167 } 1168 1169 /** 1170 * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer 1171 * @qp: qp pointer 1172 * @ah_info: AH info pointer 1173 */ 1174 void 1175 irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp, 1176 struct irdma_ah_info *ah_info) 1177 { 1178 struct irdma_puda_buf *buf = qp->pfpdu.ah_buf; 1179 struct ip *iph; 1180 struct ip6_hdr *ip6h; 1181 1182 memset(ah_info, 0, sizeof(*ah_info)); 1183 ah_info->do_lpbk = true; 1184 ah_info->vlan_tag = buf->vlan_id; 1185 ah_info->insert_vlan_tag = buf->vlan_valid; 1186 ah_info->ipv4_valid = buf->ipv4; 1187 ah_info->vsi = qp->vsi; 1188 1189 if (buf->smac_valid) 1190 ether_addr_copy(ah_info->mac_addr, buf->smac); 1191 1192 if (buf->ipv4) { 1193 ah_info->ipv4_valid = true; 1194 iph = (struct ip *)buf->iph; 1195 ah_info->hop_ttl = iph->ip_ttl; 1196 ah_info->tc_tos = iph->ip_tos; 1197 ah_info->dest_ip_addr[0] = ntohl(iph->ip_dst.s_addr); 1198 ah_info->src_ip_addr[0] = ntohl(iph->ip_src.s_addr); 1199 } else { 1200 ip6h = (struct ip6_hdr *)buf->iph; 1201 ah_info->hop_ttl = ip6h->ip6_hops; 1202 ah_info->tc_tos = ip6h->ip6_vfc; 1203 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, 1204 ip6h->ip6_dst.__u6_addr.__u6_addr32); 1205 irdma_copy_ip_ntohl(ah_info->src_ip_addr, 1206 ip6h->ip6_src.__u6_addr.__u6_addr32); 1207 } 1208 1209 ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev), 1210 ah_info->dest_ip_addr, 1211 NULL, IRDMA_ARP_RESOLVE); 1212 } 1213 1214 /** 1215 * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer 1216 * @buf: puda to update 1217 * @len: length of buffer 1218 * @seqnum: seq number for tcp 1219 */ 1220 static void 1221 irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf, 1222 u16 len, u32 seqnum) 1223 { 1224 struct tcphdr *tcph; 1225 struct ip *iph; 1226 u16 iphlen; 1227 u16 pktsize; 1228 u8 *addr = buf->mem.va; 1229 1230 iphlen = (buf->ipv4) ? 
20 : 40; 1231 iph = (struct ip *)(addr + buf->maclen); 1232 tcph = (struct tcphdr *)(addr + buf->maclen + iphlen); 1233 pktsize = len + buf->tcphlen + iphlen; 1234 iph->ip_len = htons(pktsize); 1235 tcph->th_seq = htonl(seqnum); 1236 } 1237 1238 /** 1239 * irdma_ieq_update_tcpip_info - update tcpip in the buffer 1240 * @buf: puda to update 1241 * @len: length of buffer 1242 * @seqnum: seq number for tcp 1243 */ 1244 void 1245 irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, 1246 u32 seqnum) 1247 { 1248 struct tcphdr *tcph; 1249 u8 *addr; 1250 1251 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 1252 return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum); 1253 1254 addr = buf->mem.va; 1255 tcph = (struct tcphdr *)addr; 1256 tcph->th_seq = htonl(seqnum); 1257 } 1258 1259 /** 1260 * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda 1261 * buffer 1262 * @info: to get information 1263 * @buf: puda buffer 1264 */ 1265 static int 1266 irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, 1267 struct irdma_puda_buf *buf) 1268 { 1269 struct ip *iph; 1270 struct ip6_hdr *ip6h; 1271 struct tcphdr *tcph; 1272 u16 iphlen; 1273 u16 pkt_len; 1274 u8 *mem = buf->mem.va; 1275 struct ether_header *ethh = buf->mem.va; 1276 1277 if (ethh->ether_type == htons(0x8100)) { 1278 info->vlan_valid = true; 1279 buf->vlan_id = ntohs(((struct ether_vlan_header *)ethh)->evl_tag) & 1280 EVL_VLID_MASK; 1281 } 1282 1283 buf->maclen = (info->vlan_valid) ? 18 : 14; 1284 iphlen = (info->l3proto) ? 40 : 20; 1285 buf->ipv4 = (info->l3proto) ? false : true; 1286 buf->iph = mem + buf->maclen; 1287 iph = (struct ip *)buf->iph; 1288 buf->tcph = buf->iph + iphlen; 1289 tcph = (struct tcphdr *)buf->tcph; 1290 1291 if (buf->ipv4) { 1292 pkt_len = ntohs(iph->ip_len); 1293 } else { 1294 ip6h = (struct ip6_hdr *)buf->iph; 1295 pkt_len = ntohs(ip6h->ip6_plen) + iphlen; 1296 } 1297 1298 buf->totallen = pkt_len + buf->maclen; 1299 1300 if (info->payload_len < buf->totallen) { 1301 irdma_debug(buf->vsi->dev, IRDMA_DEBUG_ERR, 1302 "payload_len = 0x%x totallen expected0x%x\n", 1303 info->payload_len, buf->totallen); 1304 return -EINVAL; 1305 } 1306 1307 buf->tcphlen = tcph->th_off << 2; 1308 buf->datalen = pkt_len - iphlen - buf->tcphlen; 1309 buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL; 1310 buf->hdrlen = buf->maclen + iphlen + buf->tcphlen; 1311 buf->seqnum = ntohl(tcph->th_seq); 1312 1313 return 0; 1314 } 1315 1316 /** 1317 * irdma_puda_get_tcpip_info - get tcpip info from puda buffer 1318 * @info: to get information 1319 * @buf: puda buffer 1320 */ 1321 int 1322 irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, 1323 struct irdma_puda_buf *buf) 1324 { 1325 struct tcphdr *tcph; 1326 u32 pkt_len; 1327 u8 *mem; 1328 1329 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 1330 return irdma_gen1_puda_get_tcpip_info(info, buf); 1331 1332 mem = buf->mem.va; 1333 buf->vlan_valid = info->vlan_valid; 1334 if (info->vlan_valid) 1335 buf->vlan_id = info->vlan; 1336 1337 buf->ipv4 = info->ipv4; 1338 if (buf->ipv4) 1339 buf->iph = mem + IRDMA_IPV4_PAD; 1340 else 1341 buf->iph = mem; 1342 1343 buf->tcph = mem + IRDMA_TCP_OFFSET; 1344 tcph = (struct tcphdr *)buf->tcph; 1345 pkt_len = info->payload_len; 1346 buf->totallen = pkt_len; 1347 buf->tcphlen = tcph->th_off << 2; 1348 buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen; 1349 buf->data = buf->datalen ? 
buf->tcph + buf->tcphlen : NULL; 1350 buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen; 1351 buf->seqnum = ntohl(tcph->th_seq); 1352 1353 if (info->smac_valid) { 1354 ether_addr_copy(buf->smac, info->smac); 1355 buf->smac_valid = true; 1356 } 1357 1358 return 0; 1359 } 1360 1361 /** 1362 * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats 1363 * @t: timer_list pointer 1364 */ 1365 static void 1366 irdma_hw_stats_timeout(struct timer_list *t) 1367 { 1368 struct irdma_vsi_pestat *pf_devstat = 1369 from_timer(pf_devstat, t, stats_timer); 1370 struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi; 1371 1372 if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 1373 irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false); 1374 1375 mod_timer(&pf_devstat->stats_timer, 1376 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); 1377 } 1378 1379 /** 1380 * irdma_hw_stats_start_timer - Start periodic stats timer 1381 * @vsi: vsi structure pointer 1382 */ 1383 void 1384 irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi) 1385 { 1386 struct irdma_vsi_pestat *devstat = vsi->pestat; 1387 1388 timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0); 1389 mod_timer(&devstat->stats_timer, 1390 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); 1391 } 1392 1393 /** 1394 * irdma_hw_stats_stop_timer - Delete periodic stats timer 1395 * @vsi: pointer to vsi structure 1396 */ 1397 void 1398 irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi) 1399 { 1400 struct irdma_vsi_pestat *devstat = vsi->pestat; 1401 1402 del_timer_sync(&devstat->stats_timer); 1403 } 1404 1405 /** 1406 * irdma_process_stats - Checking for wrap and update stats 1407 * @pestat: stats structure pointer 1408 */ 1409 static inline void 1410 irdma_process_stats(struct irdma_vsi_pestat *pestat) 1411 { 1412 sc_vsi_update_stats(pestat->vsi); 1413 } 1414 1415 /** 1416 * irdma_process_cqp_stats - Checking for wrap and update stats 1417 * @cqp_request: cqp_request structure pointer 1418 */ 1419 static void 1420 irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request) 1421 { 1422 struct irdma_vsi_pestat *pestat = cqp_request->param; 1423 1424 irdma_process_stats(pestat); 1425 } 1426 1427 /** 1428 * irdma_cqp_gather_stats_cmd - Gather stats 1429 * @dev: pointer to device structure 1430 * @pestat: pointer to stats info 1431 * @wait: flag to wait or not wait for stats 1432 */ 1433 int 1434 irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, 1435 struct irdma_vsi_pestat *pestat, bool wait) 1436 { 1437 1438 struct irdma_pci_f *rf = dev_to_rf(dev); 1439 struct irdma_cqp *iwcqp = &rf->cqp; 1440 struct irdma_cqp_request *cqp_request; 1441 struct cqp_cmds_info *cqp_info; 1442 int status; 1443 1444 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); 1445 if (!cqp_request) 1446 return -ENOMEM; 1447 1448 cqp_info = &cqp_request->info; 1449 memset(cqp_info, 0, sizeof(*cqp_info)); 1450 cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER; 1451 cqp_info->post_sq = 1; 1452 cqp_info->in.u.stats_gather.info = pestat->gather_info; 1453 cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request; 1454 cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp; 1455 cqp_request->param = pestat; 1456 if (!wait) 1457 cqp_request->callback_fcn = irdma_process_cqp_stats; 1458 status = irdma_handle_cqp_op(rf, cqp_request); 1459 if (wait) 1460 irdma_process_stats(pestat); 1461 irdma_put_cqp_request(&rf->cqp, cqp_request); 1462 1463 return status; 1464 } 1465 1466 /** 1467 * irdma_cqp_stats_inst_cmd - Allocate/free stats instance 1468 * @vsi: pointer to vsi 
structure 1469 * @cmd: command to allocate or free 1470 * @stats_info: pointer to allocate stats info 1471 */ 1472 int 1473 irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, 1474 struct irdma_stats_inst_info *stats_info) 1475 { 1476 struct irdma_pci_f *rf = dev_to_rf(vsi->dev); 1477 struct irdma_cqp *iwcqp = &rf->cqp; 1478 struct irdma_cqp_request *cqp_request; 1479 struct cqp_cmds_info *cqp_info; 1480 int status; 1481 bool wait = false; 1482 1483 if (cmd == IRDMA_OP_STATS_ALLOCATE) 1484 wait = true; 1485 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); 1486 if (!cqp_request) 1487 return -ENOMEM; 1488 1489 cqp_info = &cqp_request->info; 1490 memset(cqp_info, 0, sizeof(*cqp_info)); 1491 cqp_info->cqp_cmd = cmd; 1492 cqp_info->post_sq = 1; 1493 cqp_info->in.u.stats_manage.info = *stats_info; 1494 cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request; 1495 cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp; 1496 status = irdma_handle_cqp_op(rf, cqp_request); 1497 if (wait) 1498 stats_info->stats_idx = cqp_request->compl_info.op_ret_val; 1499 irdma_put_cqp_request(iwcqp, cqp_request); 1500 1501 return status; 1502 } 1503 1504 /** 1505 * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0 1506 * @dev: pointer to device info 1507 * @sc_ceq: pointer to ceq structure 1508 * @op: Create or Destroy 1509 */ 1510 int 1511 irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, 1512 u8 op) 1513 { 1514 struct irdma_cqp_request *cqp_request; 1515 struct cqp_cmds_info *cqp_info; 1516 struct irdma_pci_f *rf = dev_to_rf(dev); 1517 int status; 1518 1519 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 1520 if (!cqp_request) 1521 return -ENOMEM; 1522 1523 cqp_info = &cqp_request->info; 1524 cqp_info->post_sq = 1; 1525 cqp_info->cqp_cmd = op; 1526 cqp_info->in.u.ceq_create.ceq = sc_ceq; 1527 cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request; 1528 1529 status = irdma_handle_cqp_op(rf, cqp_request); 1530 irdma_put_cqp_request(&rf->cqp, cqp_request); 1531 1532 return status; 1533 } 1534 1535 /** 1536 * irdma_cqp_aeq_cmd - Create/Destroy AEQ 1537 * @dev: pointer to device info 1538 * @sc_aeq: pointer to aeq structure 1539 * @op: Create or Destroy 1540 */ 1541 int 1542 irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq, 1543 u8 op) 1544 { 1545 struct irdma_cqp_request *cqp_request; 1546 struct cqp_cmds_info *cqp_info; 1547 struct irdma_pci_f *rf = dev_to_rf(dev); 1548 int status; 1549 1550 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 1551 if (!cqp_request) 1552 return -ENOMEM; 1553 1554 cqp_info = &cqp_request->info; 1555 cqp_info->post_sq = 1; 1556 cqp_info->cqp_cmd = op; 1557 cqp_info->in.u.aeq_create.aeq = sc_aeq; 1558 cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request; 1559 1560 status = irdma_handle_cqp_op(rf, cqp_request); 1561 irdma_put_cqp_request(&rf->cqp, cqp_request); 1562 1563 return status; 1564 } 1565 1566 /** 1567 * irdma_cqp_ws_node_cmd - Add/modify/delete ws node 1568 * @dev: pointer to device structure 1569 * @cmd: Add, modify or delete 1570 * @node_info: pointer to ws node info 1571 */ 1572 int 1573 irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, 1574 struct irdma_ws_node_info *node_info) 1575 { 1576 struct irdma_pci_f *rf = dev_to_rf(dev); 1577 struct irdma_cqp *iwcqp = &rf->cqp; 1578 struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; 1579 struct irdma_cqp_request *cqp_request; 1580 struct cqp_cmds_info *cqp_info; 1581 int status; 1582 bool poll; 1583 1584 if (!rf->sc_dev.ceq_valid) 
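                /*
                 * CEQs are not valid yet, so this command cannot wait for a
                 * completion event; it is instead polled from the CCQ below.
                 */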
1585 poll = true; 1586 else 1587 poll = false; 1588 1589 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll); 1590 if (!cqp_request) 1591 return -ENOMEM; 1592 1593 cqp_info = &cqp_request->info; 1594 memset(cqp_info, 0, sizeof(*cqp_info)); 1595 cqp_info->cqp_cmd = cmd; 1596 cqp_info->post_sq = 1; 1597 cqp_info->in.u.ws_node.info = *node_info; 1598 cqp_info->in.u.ws_node.cqp = cqp; 1599 cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request; 1600 status = irdma_handle_cqp_op(rf, cqp_request); 1601 if (status) 1602 goto exit; 1603 1604 if (poll) { 1605 struct irdma_ccq_cqe_info compl_info; 1606 1607 status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE, 1608 &compl_info); 1609 node_info->qs_handle = compl_info.op_ret_val; 1610 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, 1611 "opcode=%d, compl_info.retval=%d\n", 1612 compl_info.op_code, compl_info.op_ret_val); 1613 } else { 1614 node_info->qs_handle = cqp_request->compl_info.op_ret_val; 1615 } 1616 1617 exit: 1618 irdma_put_cqp_request(&rf->cqp, cqp_request); 1619 1620 return status; 1621 } 1622 1623 /** 1624 * irdma_cqp_up_map_cmd - Set the up-up mapping 1625 * @dev: pointer to device structure 1626 * @cmd: map command 1627 * @map_info: pointer to up map info 1628 */ 1629 int 1630 irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd, 1631 struct irdma_up_info *map_info) 1632 { 1633 struct irdma_pci_f *rf = dev_to_rf(dev); 1634 struct irdma_cqp *iwcqp = &rf->cqp; 1635 struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; 1636 struct irdma_cqp_request *cqp_request; 1637 struct cqp_cmds_info *cqp_info; 1638 int status; 1639 1640 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false); 1641 if (!cqp_request) 1642 return -ENOMEM; 1643 1644 cqp_info = &cqp_request->info; 1645 memset(cqp_info, 0, sizeof(*cqp_info)); 1646 cqp_info->cqp_cmd = cmd; 1647 cqp_info->post_sq = 1; 1648 cqp_info->in.u.up_map.info = *map_info; 1649 cqp_info->in.u.up_map.cqp = cqp; 1650 cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request; 1651 1652 status = irdma_handle_cqp_op(rf, cqp_request); 1653 irdma_put_cqp_request(&rf->cqp, cqp_request); 1654 1655 return status; 1656 } 1657 1658 /** 1659 * irdma_ah_cqp_op - perform an AH cqp operation 1660 * @rf: RDMA PCI function 1661 * @sc_ah: address handle 1662 * @cmd: AH operation 1663 * @wait: wait if true 1664 * @callback_fcn: Callback function on CQP op completion 1665 * @cb_param: parameter for callback function 1666 * 1667 * returns errno 1668 */ 1669 int 1670 irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, 1671 bool wait, 1672 void (*callback_fcn) (struct irdma_cqp_request *), 1673 void *cb_param) 1674 { 1675 struct irdma_cqp_request *cqp_request; 1676 struct cqp_cmds_info *cqp_info; 1677 int status; 1678 1679 if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY) 1680 return -EINVAL; 1681 1682 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); 1683 if (!cqp_request) 1684 return -ENOMEM; 1685 1686 cqp_info = &cqp_request->info; 1687 cqp_info->cqp_cmd = cmd; 1688 cqp_info->post_sq = 1; 1689 if (cmd == IRDMA_OP_AH_CREATE) { 1690 cqp_info->in.u.ah_create.info = sc_ah->ah_info; 1691 cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request; 1692 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; 1693 } else if (cmd == IRDMA_OP_AH_DESTROY) { 1694 cqp_info->in.u.ah_destroy.info = sc_ah->ah_info; 1695 cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request; 1696 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; 1697 } 1698 1699 if (!wait) { 1700 
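                /*
                 * Fire-and-forget request: stash the caller's callback and
                 * parameter so the completion path can notify it later (see
                 * irdma_ilq_ah_cb()/irdma_ieq_ah_cb() below for examples).
                 */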
cqp_request->callback_fcn = callback_fcn; 1701 cqp_request->param = cb_param; 1702 } 1703 status = irdma_handle_cqp_op(rf, cqp_request); 1704 irdma_put_cqp_request(&rf->cqp, cqp_request); 1705 1706 if (status) 1707 return -ENOMEM; 1708 1709 if (wait) 1710 sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE); 1711 1712 return 0; 1713 } 1714 1715 /** 1716 * irdma_ieq_ah_cb - callback after creation of AH for IEQ 1717 * @cqp_request: pointer to cqp_request of create AH 1718 */ 1719 static void 1720 irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request) 1721 { 1722 struct irdma_sc_qp *qp = cqp_request->param; 1723 struct irdma_sc_ah *sc_ah = qp->pfpdu.ah; 1724 unsigned long flags; 1725 1726 spin_lock_irqsave(&qp->pfpdu.lock, flags); 1727 if (!cqp_request->compl_info.op_ret_val) { 1728 sc_ah->ah_info.ah_valid = true; 1729 irdma_ieq_process_fpdus(qp, qp->vsi->ieq); 1730 } else { 1731 sc_ah->ah_info.ah_valid = false; 1732 irdma_ieq_cleanup_qp(qp->vsi->ieq, qp); 1733 } 1734 spin_unlock_irqrestore(&qp->pfpdu.lock, flags); 1735 } 1736 1737 /** 1738 * irdma_ilq_ah_cb - callback after creation of AH for ILQ 1739 * @cqp_request: pointer to cqp_request of create AH 1740 */ 1741 static void 1742 irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request) 1743 { 1744 struct irdma_cm_node *cm_node = cqp_request->param; 1745 struct irdma_sc_ah *sc_ah = cm_node->ah; 1746 1747 sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val; 1748 irdma_add_conn_est_qh(cm_node); 1749 } 1750 1751 /** 1752 * irdma_puda_create_ah - create AH for ILQ/IEQ qp's 1753 * @dev: device pointer 1754 * @ah_info: Address handle info 1755 * @wait: When true will wait for operation to complete 1756 * @type: ILQ/IEQ 1757 * @cb_param: Callback param when not waiting 1758 * @ah_ret: Returned pointer to address handle if created 1759 * 1760 */ 1761 int 1762 irdma_puda_create_ah(struct irdma_sc_dev *dev, 1763 struct irdma_ah_info *ah_info, bool wait, 1764 enum puda_rsrc_type type, void *cb_param, 1765 struct irdma_sc_ah **ah_ret) 1766 { 1767 struct irdma_sc_ah *ah; 1768 struct irdma_pci_f *rf = dev_to_rf(dev); 1769 int err; 1770 1771 ah = kzalloc(sizeof(*ah), GFP_ATOMIC); 1772 *ah_ret = ah; 1773 if (!ah) 1774 return -ENOMEM; 1775 1776 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, 1777 &ah_info->ah_idx, &rf->next_ah); 1778 if (err) 1779 goto err_free; 1780 1781 ah->dev = dev; 1782 ah->ah_info = *ah_info; 1783 1784 if (type == IRDMA_PUDA_RSRC_TYPE_ILQ) 1785 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, 1786 irdma_ilq_ah_cb, cb_param); 1787 else 1788 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, 1789 irdma_ieq_ah_cb, cb_param); 1790 1791 if (err) 1792 goto error; 1793 return 0; 1794 1795 error: 1796 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); 1797 err_free: 1798 kfree(ah); 1799 *ah_ret = NULL; 1800 return -ENOMEM; 1801 } 1802 1803 /** 1804 * irdma_puda_free_ah - free a puda address handle 1805 * @dev: device pointer 1806 * @ah: The address handle to free 1807 */ 1808 void 1809 irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah) 1810 { 1811 struct irdma_pci_f *rf = dev_to_rf(dev); 1812 1813 if (!ah) 1814 return; 1815 1816 if (ah->ah_info.ah_valid) { 1817 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL); 1818 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); 1819 } 1820 1821 kfree(ah); 1822 } 1823 1824 /** 1825 * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/ID QP 1826 * @cqp_request: pointer to cqp_request of create AH 1827 */ 1828 void 
irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
{
        struct irdma_sc_ah *sc_ah = cqp_request->param;

        if (!cqp_request->compl_info.op_ret_val)
                sc_ah->ah_info.ah_valid = true;
        else
                sc_ah->ah_info.ah_valid = false;
}

/**
 * irdma_prm_add_pble_mem - add memory to pble resources
 * @pprm: pble resource manager
 * @pchunk: chunk of memory to add
 */
int
irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
                       struct irdma_chunk *pchunk)
{
        u64 sizeofbitmap;

        if (pchunk->size & 0xfff)
                return -EINVAL;

        sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;

        pchunk->bitmapmem.size = sizeofbitmap >> 3;
        pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);

        if (!pchunk->bitmapmem.va)
                return -ENOMEM;

        pchunk->bitmapbuf = pchunk->bitmapmem.va;
        bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);

        pchunk->sizeofbitmap = sizeofbitmap;
        /* each pble is 8 bytes hence shift by 3 */
        pprm->total_pble_alloc += pchunk->size >> 3;
        pprm->free_pble_cnt += pchunk->size >> 3;

        return 0;
}

/**
 * irdma_prm_get_pbles - get pble's from prm
 * @pprm: pble resource manager
 * @chunkinfo: information about chunk where pble's were acquired
 * @mem_size: size of pble memory needed
 * @vaddr: returns virtual address of pble memory
 * @fpm_addr: returns fpm address of pble memory
 */
int
irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
                    struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
                    u64 **vaddr, u64 *fpm_addr)
{
        u64 bits_needed;
        u64 bit_idx = PBLE_INVALID_IDX;
        struct irdma_chunk *pchunk = NULL;
        struct list_head *chunk_entry = (&pprm->clist)->next;
        u32 offset;
        unsigned long flags;
        *vaddr = NULL;
        *fpm_addr = 0;

        bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));

        spin_lock_irqsave(&pprm->prm_lock, flags);
        while (chunk_entry != &pprm->clist) {
                pchunk = (struct irdma_chunk *)chunk_entry;
                bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
                                                     pchunk->sizeofbitmap, 0,
                                                     bits_needed, 0);
                if (bit_idx < pchunk->sizeofbitmap)
                        break;

                /* list.next used macro */
                chunk_entry = (&pchunk->list)->next;
        }

        if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
                spin_unlock_irqrestore(&pprm->prm_lock, flags);
                return -ENOMEM;
        }

        bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
        offset = bit_idx << pprm->pble_shift;
        *vaddr = (u64 *)((u8 *)pchunk->vaddr + offset);
        *fpm_addr = pchunk->fpm_addr + offset;

        chunkinfo->pchunk = pchunk;
        chunkinfo->bit_idx = bit_idx;
        chunkinfo->bits_used = bits_needed;
        /* 3 is sizeof pble divide */
        pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
        spin_unlock_irqrestore(&pprm->prm_lock, flags);

        return 0;
}

/**
 * irdma_prm_return_pbles - return pbles back to prm
 * @pprm: pble resource manager
 * @chunkinfo: chunk where pble's were acquired and to be freed
 */
void
irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
                       struct irdma_pble_chunkinfo *chunkinfo)
{
        unsigned long flags;

        spin_lock_irqsave(&pprm->prm_lock, flags);
        pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
        bitmap_clear(chunkinfo->pchunk->bitmapbuf,

/**
 * irdma_prm_return_pbles - return pbles back to prm
 * @pprm: pble resource manager
 * @chunkinfo: chunk where pble's were acquired and to be freed
 */
void
irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
		       struct irdma_pble_chunkinfo *chunkinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&pprm->prm_lock, flags);
	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
		     chunkinfo->bits_used);
	spin_unlock_irqrestore(&pprm->prm_lock, flags);
}

int
irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
		       u32 pg_cnt)
{
	struct page *vm_page;
	int i;
	u8 *addr;

	addr = (u8 *)(uintptr_t)va;
	for (i = 0; i < pg_cnt; i++) {
		vm_page = vmalloc_to_page(addr);
		if (!vm_page)
			goto err;

		pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(hw_to_dev(hw), pg_dma[i]))
			goto err;

		addr += PAGE_SIZE;
	}

	return 0;

err:
	irdma_unmap_vm_page_list(hw, pg_dma, i);
	return -ENOMEM;
}

void
irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
{
	int i;

	for (i = 0; i < pg_cnt; i++)
		dma_unmap_page(hw_to_dev(hw), pg_dma[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
}

/**
 * irdma_pble_free_paged_mem - free virtual paged memory
 * @chunk: chunk to free with paged memory
 */
void
irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
{
	if (!chunk->pg_cnt)
		goto done;

	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
				 chunk->pg_cnt);

done:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}

/**
 * irdma_pble_get_paged_mem - allocate paged memory for pbles
 * @chunk: chunk to add for paged memory
 * @pg_cnt: number of pages needed
 */
int
irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
{
	u32 size;
	void *va;

	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmainfo.dmaaddrs)
		return -ENOMEM;

	size = PAGE_SIZE * pg_cnt;
	va = vmalloc(size);
	if (!va)
		goto err;

	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
				   pg_cnt)) {
		vfree(va);
		goto err;
	}
	chunk->vaddr = va;
	chunk->size = size;
	chunk->pg_cnt = pg_cnt;
	chunk->type = PBLE_SD_PAGED;

	return 0;
err:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;

	return -ENOMEM;
}
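
/*
 * Sizing note (illustrative): for a hypothetical pg_cnt of 16,
 * irdma_pble_get_paged_mem() allocates 16 << 3 = 128 bytes for the DMA
 * address array (8 bytes per entry), vmalloc()s 16 * PAGE_SIZE bytes of
 * virtually contiguous backing, and maps each page for DMA individually
 * via irdma_map_vm_page_list(). The matching teardown path is
 * irdma_pble_free_paged_mem(), which unmaps the pages and frees both
 * allocations.
 */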

/**
 * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
 * @dev: device pointer
 */
u16
irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	u32 next = 1;
	u32 node_id;

	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
			     &node_id, &next))
		return IRDMA_WS_NODE_INVALID;

	return (u16)node_id;
}

/**
 * irdma_free_ws_node_id - Free a tx scheduler node ID
 * @dev: device pointer
 * @node_id: Work scheduler node ID
 */
void
irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
}

/**
 * irdma_modify_qp_to_err - Modify a QP to error
 * @sc_qp: qp structure
 */
void
irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
{
	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
	struct ib_qp_attr attr;

	if (qp->iwdev->rf->reset)
		return;
	attr.qp_state = IB_QPS_ERR;

	if (rdma_protocol_roce(qp->ibqp.device, 1))
		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
	else
		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
}

void
irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
{
	struct ib_event ibevent;

	if (!iwqp->ibqp.event_handler)
		return;

	switch (event) {
	case IRDMA_QP_EVENT_CATASTROPHIC:
		ibevent.event = IB_EVENT_QP_FATAL;
		break;
	case IRDMA_QP_EVENT_ACCESS_ERR:
		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case IRDMA_QP_EVENT_REQ_ERR:
		ibevent.event = IB_EVENT_QP_REQ_ERR;
		break;
	}
	ibevent.device = iwqp->ibqp.device;
	ibevent.element.qp = &iwqp->ibqp;
	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}

static void
clear_qp_ctx_addr(__le64 *ctx)
{
	u64 tmp;

	get_64bit_val(ctx, 272, &tmp);
	tmp &= GENMASK_ULL(63, 58);
	set_64bit_val(ctx, 272, tmp);

	get_64bit_val(ctx, 296, &tmp);
	tmp &= GENMASK_ULL(7, 0);
	set_64bit_val(ctx, 296, tmp);

	get_64bit_val(ctx, 312, &tmp);
	tmp &= GENMASK_ULL(7, 0);
	set_64bit_val(ctx, 312, tmp);

	set_64bit_val(ctx, 368, 0);
}

/**
 * irdma_upload_qp_context - upload raw QP context
 * @iwqp: QP pointer
 * @freeze: freeze QP
 * @raw: raw context flag
 */
int
irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
{
	struct irdma_dma_mem dma_mem;
	struct irdma_sc_dev *dev;
	struct irdma_sc_qp *qp;
	struct irdma_cqp *iwcqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_upload_context_info *info;
	struct irdma_pci_f *rf;
	int ret;
	u32 *ctx;

	rf = iwqp->iwdev->rf;
	if (!rf)
		return -EINVAL;

	qp = &iwqp->sc_qp;
	dev = &rf->sc_dev;
	iwcqp = &rf->cqp;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -EINVAL;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.qp_upload_context.info;
	memset(info, 0, sizeof(struct irdma_upload_context_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_UPLOAD_CONTEXT;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_upload_context.dev = dev;
	cqp_info->in.u.qp_upload_context.scratch = (uintptr_t)cqp_request;

	dma_mem.size = PAGE_SIZE;
	dma_mem.va = irdma_allocate_dma_mem(dev->hw, &dma_mem, dma_mem.size,
					    PAGE_SIZE);
	if (!dma_mem.va) {
		irdma_put_cqp_request(&rf->cqp, cqp_request);
		return -ENOMEM;
	}

	ctx = dma_mem.va;
	info->buf_pa = dma_mem.pa;
	info->raw_format = raw;
	info->freeze_qp = freeze;
	info->qp_type = qp->qp_uk.qp_type;	/* 1 is iWARP and 2 UDA */
	info->qp_id = qp->qp_uk.qp_id;
	ret = irdma_handle_cqp_op(rf, cqp_request);
	if (ret)
		goto error;
	irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTEXT QP [%d]\n", info->qp_id);
	{
		u32 i, j;

		clear_qp_ctx_addr(dma_mem.va);
		for (i = 0, j = 0; i < 32; i++, j += 4)
			irdma_debug(dev, IRDMA_DEBUG_QP,
				    "%d:\t [%08X %08x %08X %08X]\n",
				    (j * 4), ctx[j], ctx[j + 1], ctx[j + 2],
				    ctx[j + 3]);
	}
error:
	irdma_put_cqp_request(iwcqp, cqp_request);
	irdma_free_dma_mem(dev->hw, &dma_mem);

	return ret;
}
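
/*
 * Debug usage sketch (illustrative, not called from this file): a
 * developer chasing a QP issue might dump the raw context without
 * freezing the QP, e.g.
 *
 *	if (irdma_upload_qp_context(iwqp, false, true))
 *		irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_QP,
 *			    "context upload failed\n");
 *
 * Host address fields in the dumped context are masked out by
 * clear_qp_ctx_addr() before printing.
 */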

bool
irdma_cq_empty(struct irdma_cq *iwcq)
{
	struct irdma_cq_uk *ukcq;
	u64 qword3;
	__le64 *cqe;
	u8 polarity;

	ukcq = &iwcq->sc_cq.cq_uk;
	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

	return polarity != ukcq->polarity;
}

void
irdma_remove_cmpls_list(struct irdma_cq *iwcq)
{
	struct irdma_cmpl_gen *cmpl_node;
	struct list_head *tmp_node, *list_node;

	list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
		list_del(&cmpl_node->list);
		kfree(cmpl_node);
	}
}

int
irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
{
	struct irdma_cmpl_gen *cmpl;

	if (list_empty(&iwcq->cmpl_generated))
		return -ENOENT;
	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
	list_del(&cmpl->list);
	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
	kfree(cmpl);

	irdma_debug(iwcq->sc_cq.dev, IRDMA_DEBUG_VERBS,
		    "%s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%lx\n",
		    __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
		    cq_poll_info->wr_id);

	return 0;
}

/**
 * irdma_set_cpi_common_values - fill in values for polling info struct
 * @cpi: resulting structure of cq_poll_info type
 * @qp: QPair
 * @qp_num: id of the QP
 */
static void
irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
			    struct irdma_qp_uk *qp, u32 qp_num)
{
	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
	cpi->error = 1;
	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
	cpi->minor_err = FLUSH_GENERAL_ERR;
	cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
	cpi->qp_id = qp_num;
}

static inline void
irdma_comp_handler(struct irdma_cq *cq)
{
	if (!cq->ibcq.comp_handler)
		return;

	if (atomic_cmpxchg(&cq->armed, 1, 0))
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
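
/*
 * Consumption sketch (illustrative): a CQ poll path is expected to drain
 * the software-generated entries before looking at hardware CQEs, roughly:
 *
 *	struct irdma_cq_poll_info info;
 *
 *	while (!irdma_generated_cmpls(iwcq, &info)) {
 *		// translate info into an ib_wc for the caller
 *	}
 *	// then poll the hardware CQ as usual
 *
 * The entries are created by irdma_generate_flush_completions() below and
 * torn down by irdma_remove_cmpls_list() when the CQ is destroyed.
 */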

/**
 * irdma_generate_flush_completions - generate completion from WRs
 * @iwqp: pointer to QP
 */
void
irdma_generate_flush_completions(struct irdma_qp *iwqp)
{
	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
	struct irdma_ring *sq_ring = &qp->sq_ring;
	struct irdma_ring *rq_ring = &qp->rq_ring;
	struct irdma_cmpl_gen *cmpl;
	__le64 *sw_wqe;
	u64 wqe_qword;
	u32 wqe_idx;
	bool compl_generated = false;
	unsigned long flags1;

	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwscq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
				return;
			}

			wqe_idx = sq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
			/* remove the SQ WR by moving SQ tail */
			IRDMA_RING_SET_TAIL(*sq_ring,
					    sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);

			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
				kfree(cmpl);
				continue;
			}
			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
				    "%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n",
				    __func__, cmpl->cpi.wr_id, qp->qp_id);
			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		if (compl_generated) {
			irdma_comp_handler(iwqp->iwscq);
			compl_generated = false;
		}
	} else {
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		irdma_sched_qp_flush_work(iwqp);
	}

	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwrcq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
				return;
			}

			wqe_idx = rq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
			cmpl->cpi.signaled = 1;
			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
			/* remove the RQ WR by moving RQ tail */
			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
				    "%s: adding wr_id = 0x%lx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
				    __func__, cmpl->cpi.wr_id, qp->qp_id,
				    wqe_idx);

			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);

			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwqp->iwrcq);
	} else {
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		irdma_sched_qp_flush_work(iwqp);
	}
}

/**
 * irdma_udqp_qs_change - change qs for UD QP in a worker thread
 * @iwqp: QP pointer
 * @user_prio: new user priority value
 * @qs_change: when false, only the user priority changes; the QS handle does not need to change
 */
static void
irdma_udqp_qs_change(struct irdma_qp *iwqp, u8 user_prio, bool qs_change)
{
	irdma_qp_rem_qos(&iwqp->sc_qp);
	if (qs_change)
		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, iwqp->ctx_info.user_pri);

	iwqp->ctx_info.user_pri = user_prio;
	iwqp->sc_qp.user_pri = user_prio;

	if (qs_change)
		if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, user_prio))
			irdma_dev_warn(&iwqp->iwdev->ibdev,
				       "WS add failed during %s, qp_id: %x user_pri: %x",
				       __func__, iwqp->ibqp.qp_num, user_prio);
	irdma_qp_add_qos(&iwqp->sc_qp);
}

void
irdma_udqp_qs_worker(struct work_struct *work)
{
	struct irdma_udqs_work *udqs_work = container_of(work, struct irdma_udqs_work, work);

	irdma_udqp_qs_change(udqs_work->iwqp, udqs_work->user_prio, udqs_work->qs_change);
	if (udqs_work->qs_change)
		irdma_cqp_qp_suspend_resume(&udqs_work->iwqp->sc_qp, IRDMA_OP_RESUME);
	irdma_qp_rem_ref(&udqs_work->iwqp->ibqp);
	kfree(udqs_work);
}
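
/*
 * Scheduling sketch (illustrative): the caller requesting a priority/QS
 * change is expected to hold a QP reference and hand the rest to this
 * worker, roughly as below. The workqueue name is an assumption made for
 * the example; the reference taken by the caller is dropped by
 * irdma_qp_rem_ref() at the end of irdma_udqp_qs_worker().
 *
 *	struct irdma_udqs_work *udqs_work = kzalloc(sizeof(*udqs_work), GFP_ATOMIC);
 *
 *	if (udqs_work) {
 *		irdma_qp_add_ref(&iwqp->ibqp);
 *		udqs_work->iwqp = iwqp;
 *		udqs_work->user_prio = prio;
 *		udqs_work->qs_change = true;
 *		INIT_WORK(&udqs_work->work, irdma_udqp_qs_worker);
 *		queue_work(iwqp->iwdev->cleanup_wq, &udqs_work->work);
 *	}
 */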