1 /*- 2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 3 * 4 * Copyright (c) 2021 - 2023 Intel Corporation 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenFabrics.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "osdep.h" 36 #include "ice_rdma.h" 37 #include "irdma_di_if.h" 38 #include "irdma_main.h" 39 #include <sys/gsb_crc32.h> 40 #include <netinet/in_fib.h> 41 #include <netinet6/in6_fib.h> 42 #include <net/route/nhop.h> 43 #include <net/if_llatbl.h> 44 45 /* additional QP debuging option. Keep false unless needed */ 46 bool irdma_upload_context = false; 47 48 inline u32 49 irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg){ 50 51 KASSERT(reg < dev_ctx->mem_bus_space_size, 52 ("irdma: register offset %#jx too large (max is %#jx)", 53 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size)); 54 55 return (bus_space_read_4(dev_ctx->mem_bus_space_tag, 56 dev_ctx->mem_bus_space_handle, reg)); 57 } 58 59 inline void 60 irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value) 61 { 62 63 KASSERT(reg < dev_ctx->mem_bus_space_size, 64 ("irdma: register offset %#jx too large (max is %#jx)", 65 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size)); 66 67 bus_space_write_4(dev_ctx->mem_bus_space_tag, 68 dev_ctx->mem_bus_space_handle, reg, value); 69 } 70 71 inline u64 72 irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg){ 73 74 KASSERT(reg < dev_ctx->mem_bus_space_size, 75 ("irdma: register offset %#jx too large (max is %#jx)", 76 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size)); 77 78 return (bus_space_read_8(dev_ctx->mem_bus_space_tag, 79 dev_ctx->mem_bus_space_handle, reg)); 80 } 81 82 inline void 83 irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value) 84 { 85 86 KASSERT(reg < dev_ctx->mem_bus_space_size, 87 ("irdma: register offset %#jx too large (max is %#jx)", 88 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size)); 89 90 bus_space_write_8(dev_ctx->mem_bus_space_tag, 91 dev_ctx->mem_bus_space_handle, reg, value); 92 93 } 94 95 void 96 irdma_request_reset(struct irdma_pci_f *rf) 97 { 98 struct ice_rdma_peer *peer = rf->peer_info; 99 struct ice_rdma_request req = {0}; 100 101 req.type = ICE_RDMA_EVENT_RESET; 102 103 printf("%s:%d requesting pf-reset\n", __func__, __LINE__); 104 IRDMA_DI_REQ_HANDLER(peer, &req); 105 } 106 107 int 108 irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node) 109 { 110 struct irdma_device *iwdev = vsi->back_vsi; 111 struct ice_rdma_peer *peer = iwdev->rf->peer_info; 112 struct ice_rdma_request req = {0}; 113 struct ice_rdma_qset_update *res = &req.res; 114 115 req.type = ICE_RDMA_EVENT_QSET_REGISTER; 116 res->cnt_req = 1; 117 res->res_type = ICE_RDMA_QSET_ALLOC; 118 res->qsets.qs_handle = tc_node->qs_handle; 119 res->qsets.tc = tc_node->traffic_class; 120 res->qsets.vsi_id = vsi->vsi_idx; 121 122 IRDMA_DI_REQ_HANDLER(peer, &req); 123 124 tc_node->l2_sched_node_id = res->qsets.teid; 125 vsi->qos[tc_node->user_pri].l2_sched_node_id = 126 res->qsets.teid; 127 128 return 0; 129 } 130 131 void 132 irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node) 133 { 134 struct irdma_device *iwdev = vsi->back_vsi; 135 struct ice_rdma_peer *peer = iwdev->rf->peer_info; 136 struct ice_rdma_request req = {0}; 137 struct ice_rdma_qset_update *res = &req.res; 138 139 req.type = ICE_RDMA_EVENT_QSET_REGISTER; 140 res->res_allocated = 1; 141 res->res_type = ICE_RDMA_QSET_FREE; 142 res->qsets.vsi_id = vsi->vsi_idx; 143 res->qsets.teid = tc_node->l2_sched_node_id; 144 res->qsets.qs_handle = tc_node->qs_handle; 145 146 IRDMA_DI_REQ_HANDLER(peer, &req); 147 } 148 149 void * 150 hw_to_dev(struct irdma_hw *hw) 151 { 152 struct irdma_pci_f *rf; 153 154 rf = container_of(hw, struct irdma_pci_f, hw); 155 return rf->pcidev; 156 } 157 158 void 159 irdma_free_hash_desc(void *desc) 160 { 161 return; 162 } 163 164 int 165 irdma_init_hash_desc(void **desc) 166 { 167 return 0; 168 } 169 170 int 171 irdma_ieq_check_mpacrc(void *desc, 172 void *addr, u32 len, u32 val) 173 { 174 u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff; 175 int ret_code = 0; 176 177 if (crc != val) { 178 irdma_pr_err("mpa crc check fail %x %x\n", crc, val); 179 ret_code = -EINVAL; 180 } 181 printf("%s: result crc=%x value=%x\n", __func__, crc, val); 182 return ret_code; 183 } 184 185 static u_int 186 irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused) 187 { 188 struct irdma_device *iwdev = arg; 189 struct sockaddr_in6 *sin6; 190 u32 local_ipaddr6[4] = {}; 191 char ip6buf[INET6_ADDRSTRLEN]; 192 u8 *mac_addr; 193 194 sin6 = (struct sockaddr_in6 *)addr->ifa_addr; 195 196 irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr); 197 198 mac_addr = if_getlladdr(addr->ifa_ifp); 199 200 printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n", 201 __func__, __LINE__, 202 ip6_sprintf(ip6buf, &sin6->sin6_addr), 203 mac_addr[0], mac_addr[1], mac_addr[2], 204 mac_addr[3], mac_addr[4], mac_addr[5]); 205 206 irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6, 207 IRDMA_ARP_ADD); 208 return (0); 209 } 210 211 /** 212 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table 213 * @iwdev: irdma device 214 * @ifp: interface network device pointer 215 */ 216 static void 217 irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp) 218 { 219 if_addr_rlock(ifp); 220 if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev); 221 if_addr_runlock(ifp); 222 } 223 224 static u_int 225 irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused) 226 { 227 struct irdma_device *iwdev = arg; 228 struct sockaddr_in *sin; 229 u32 ip_addr[4] = {}; 230 uint8_t *mac_addr; 231 232 sin = (struct sockaddr_in *)addr->ifa_addr; 233 234 ip_addr[0] = ntohl(sin->sin_addr.s_addr); 235 236 mac_addr = if_getlladdr(addr->ifa_ifp); 237 238 printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n", 239 __func__, __LINE__, 240 ip_addr[0] >> 24, 241 (ip_addr[0] >> 16) & 0xFF, 242 (ip_addr[0] >> 8) & 0xFF, 243 ip_addr[0] & 0xFF, 244 mac_addr[0], mac_addr[1], mac_addr[2], 245 mac_addr[3], mac_addr[4], mac_addr[5]); 246 247 irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr, 248 IRDMA_ARP_ADD); 249 return (0); 250 } 251 252 /** 253 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table 254 * @iwdev: irdma device 255 * @ifp: interface network device pointer 256 */ 257 static void 258 irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp) 259 { 260 if_addr_rlock(ifp); 261 if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev); 262 if_addr_runlock(ifp); 263 } 264 265 /** 266 * irdma_add_ip - add ip addresses 267 * @iwdev: irdma device 268 * 269 * Add ipv4/ipv6 addresses to the arp cache 270 */ 271 void 272 irdma_add_ip(struct irdma_device *iwdev) 273 { 274 struct ifnet *ifp = iwdev->netdev; 275 struct ifnet *ifv; 276 struct epoch_tracker et; 277 int i; 278 279 irdma_add_ipv4_addr(iwdev, ifp); 280 irdma_add_ipv6_addr(iwdev, ifp); 281 for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) { 282 NET_EPOCH_ENTER(et); 283 ifv = VLAN_DEVAT(ifp, i); 284 NET_EPOCH_EXIT(et); 285 if (!ifv) 286 continue; 287 irdma_add_ipv4_addr(iwdev, ifv); 288 irdma_add_ipv6_addr(iwdev, ifv); 289 } 290 } 291 292 static void 293 irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event) 294 { 295 struct irdma_pci_f *rf = arg; 296 struct ifnet *ifv = NULL; 297 struct sockaddr_in *sin; 298 struct epoch_tracker et; 299 int arp_index = 0, i = 0; 300 u32 ip[4] = {}; 301 302 if (!ifa || !ifa->ifa_addr || !ifp) 303 return; 304 if (rf->iwdev->netdev != ifp) { 305 for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) { 306 NET_EPOCH_ENTER(et); 307 ifv = VLAN_DEVAT(rf->iwdev->netdev, i); 308 NET_EPOCH_EXIT(et); 309 if (ifv == ifp) 310 break; 311 } 312 if (ifv != ifp) 313 return; 314 } 315 sin = (struct sockaddr_in *)ifa->ifa_addr; 316 317 switch (event) { 318 case IFADDR_EVENT_ADD: 319 if (sin->sin_family == AF_INET) 320 irdma_add_ipv4_addr(rf->iwdev, ifp); 321 else if (sin->sin_family == AF_INET6) 322 irdma_add_ipv6_addr(rf->iwdev, ifp); 323 break; 324 case IFADDR_EVENT_DEL: 325 if (sin->sin_family == AF_INET) { 326 ip[0] = ntohl(sin->sin_addr.s_addr); 327 } else if (sin->sin_family == AF_INET6) { 328 irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr); 329 } else { 330 break; 331 } 332 for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) { 333 if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) { 334 irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr, 335 rf->arp_table[arp_index].ip_addr, 336 IRDMA_ARP_DELETE); 337 } 338 } 339 break; 340 default: 341 break; 342 } 343 } 344 345 void 346 irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf) 347 { 348 rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext, 349 irdma_ifaddrevent_handler, 350 rf, 351 EVENTHANDLER_PRI_ANY); 352 } 353 354 void 355 irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf) 356 { 357 EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event); 358 } 359 360 static int 361 irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev, 362 struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway) 363 { 364 struct nhop_object *nh; 365 366 if (dst_sin->sa_family == AF_INET6) 367 nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 368 ((struct sockaddr_in6 *)dst_sin)->sin6_scope_id, NHR_NONE, 0); 369 else 370 nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0); 371 if (!nh || (nh->nh_ifp != netdev && 372 rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev)) 373 goto rt_not_found; 374 *gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false; 375 *nexthop = (*gateway) ? &nh->gw_sa : dst_sin; 376 *ifp = nh->nh_ifp; 377 378 return 0; 379 380 rt_not_found: 381 pr_err("irdma: route not found\n"); 382 return -ENETUNREACH; 383 } 384 385 /** 386 * irdma_get_dst_mac - get destination mac address 387 * @cm_node: connection's node 388 * @dst_sin: destination address information 389 * @dst_mac: mac address array to return 390 */ 391 int 392 irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac) 393 { 394 struct ifnet *netdev = cm_node->iwdev->netdev; 395 #ifdef VIMAGE 396 struct vnet *vnet = irdma_cmid_to_vnet(cm_node->cm_id); 397 #endif 398 struct ifnet *ifp; 399 struct llentry *lle; 400 struct sockaddr *nexthop; 401 struct epoch_tracker et; 402 int err; 403 bool gateway; 404 405 NET_EPOCH_ENTER(et); 406 CURVNET_SET_QUIET(vnet); 407 err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway); 408 if (err) 409 goto get_route_fail; 410 411 if (dst_sin->sa_family == AF_INET) { 412 err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle); 413 } else if (dst_sin->sa_family == AF_INET6) { 414 err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop, 415 dst_mac, NULL, &lle); 416 } else { 417 err = -EPROTONOSUPPORT; 418 } 419 420 get_route_fail: 421 CURVNET_RESTORE(); 422 NET_EPOCH_EXIT(et); 423 if (err) { 424 pr_err("failed to resolve neighbor address (err=%d)\n", 425 err); 426 return -ENETUNREACH; 427 } 428 429 return 0; 430 } 431 432 /** 433 * irdma_addr_resolve_neigh - resolve neighbor address 434 * @cm_node: connection's node 435 * @dst_ip: remote ip address 436 * @arpindex: if there is an arp entry 437 */ 438 int 439 irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, 440 u32 dst_ip, int arpindex) 441 { 442 struct irdma_device *iwdev = cm_node->iwdev; 443 struct sockaddr_in dst_sin = {}; 444 int err; 445 u32 ip[4] = {}; 446 u8 dst_mac[MAX_ADDR_LEN]; 447 448 dst_sin.sin_len = sizeof(dst_sin); 449 dst_sin.sin_family = AF_INET; 450 dst_sin.sin_port = 0; 451 dst_sin.sin_addr.s_addr = htonl(dst_ip); 452 453 err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac); 454 if (err) 455 return arpindex; 456 457 ip[0] = dst_ip; 458 459 return irdma_add_arp(iwdev->rf, ip, dst_mac); 460 } 461 462 /** 463 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address 464 * @cm_node: connection's node 465 * @dest: remote ip address 466 * @arpindex: if there is an arp entry 467 */ 468 int 469 irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, 470 u32 *dest, int arpindex) 471 { 472 struct irdma_device *iwdev = cm_node->iwdev; 473 struct sockaddr_in6 dst_addr = {}; 474 int err; 475 u8 dst_mac[MAX_ADDR_LEN]; 476 477 dst_addr.sin6_family = AF_INET6; 478 dst_addr.sin6_len = sizeof(dst_addr); 479 dst_addr.sin6_scope_id = if_getindex(iwdev->netdev); 480 481 irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest); 482 err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac); 483 if (err) 484 return arpindex; 485 486 return irdma_add_arp(iwdev->rf, dest, dst_mac); 487 } 488 489 int 490 irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node, 491 struct irdma_cm_info *cm_info) 492 { 493 #ifdef VIMAGE 494 struct vnet *vnet = irdma_cmid_to_vnet(cm_node->cm_id); 495 #endif 496 int arpindex; 497 int oldarpindex; 498 bool is_lpb = false; 499 500 CURVNET_SET_QUIET(vnet); 501 is_lpb = cm_node->ipv4 ? 502 irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) : 503 irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr); 504 CURVNET_RESTORE(); 505 if (is_lpb) { 506 cm_node->do_lpb = true; 507 arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr, 508 NULL, 509 IRDMA_ARP_RESOLVE); 510 } else { 511 oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr, 512 NULL, 513 IRDMA_ARP_RESOLVE); 514 if (cm_node->ipv4) 515 arpindex = irdma_addr_resolve_neigh(cm_node, 516 cm_info->rem_addr[0], 517 oldarpindex); 518 else 519 arpindex = irdma_addr_resolve_neigh_ipv6(cm_node, 520 cm_info->rem_addr, 521 oldarpindex); 522 } 523 return arpindex; 524 } 525 526 /** 527 * irdma_add_handler - add a handler to the list 528 * @hdl: handler to be added to the handler list 529 */ 530 void 531 irdma_add_handler(struct irdma_handler *hdl) 532 { 533 unsigned long flags; 534 535 spin_lock_irqsave(&irdma_handler_lock, flags); 536 list_add(&hdl->list, &irdma_handlers); 537 spin_unlock_irqrestore(&irdma_handler_lock, flags); 538 } 539 540 /** 541 * irdma_del_handler - delete a handler from the list 542 * @hdl: handler to be deleted from the handler list 543 */ 544 void 545 irdma_del_handler(struct irdma_handler *hdl) 546 { 547 unsigned long flags; 548 549 spin_lock_irqsave(&irdma_handler_lock, flags); 550 list_del(&hdl->list); 551 spin_unlock_irqrestore(&irdma_handler_lock, flags); 552 } 553 554 /** 555 * irdma_set_rf_user_cfg_params - apply user configurable settings 556 * @rf: RDMA PCI function 557 */ 558 void 559 irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf) 560 { 561 int en_rem_endpoint_trk = 0; 562 int limits_sel = 4; 563 564 rf->en_rem_endpoint_trk = en_rem_endpoint_trk; 565 rf->limits_sel = limits_sel; 566 rf->rst_to = IRDMA_RST_TIMEOUT_HZ; 567 /* Enable DCQCN algorithm by default */ 568 rf->dcqcn_ena = true; 569 } 570 571 /** 572 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update 573 * @arg1: pointer to rf 574 * @arg2: unused 575 * @oidp: sysctl oid structure 576 * @req: sysctl request pointer 577 */ 578 static int 579 irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS) 580 { 581 struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1; 582 int ret; 583 u8 dcqcn_ena = rf->dcqcn_ena; 584 585 ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req); 586 if ((ret) || (req->newptr == NULL)) 587 return ret; 588 if (dcqcn_ena == 0) 589 rf->dcqcn_ena = false; 590 else 591 rf->dcqcn_ena = true; 592 593 return 0; 594 } 595 596 enum irdma_cqp_stats_info { 597 IRDMA_CQP_REQ_CMDS = 28, 598 IRDMA_CQP_CMPL_CMDS = 29 599 }; 600 601 static int 602 irdma_sysctl_cqp_stats(SYSCTL_HANDLER_ARGS) 603 { 604 struct irdma_sc_cqp *cqp = (struct irdma_sc_cqp *)arg1; 605 char rslt[192] = "no cqp available yet"; 606 int rslt_size = sizeof(rslt) - 1; 607 int option = (int)arg2; 608 609 if (!cqp) { 610 return sysctl_handle_string(oidp, rslt, sizeof(rslt), req); 611 } 612 613 snprintf(rslt, sizeof(rslt), ""); 614 switch (option) { 615 case IRDMA_CQP_REQ_CMDS: 616 snprintf(rslt, rslt_size, "%lu", cqp->requested_ops); 617 break; 618 case IRDMA_CQP_CMPL_CMDS: 619 snprintf(rslt, rslt_size, "%lu", atomic64_read(&cqp->completed_ops)); 620 break; 621 } 622 623 return sysctl_handle_string(oidp, rslt, sizeof(rslt), req); 624 } 625 626 struct irdma_sw_stats_tunable_info { 627 u8 op_type; 628 const char name[32]; 629 const char desc[32]; 630 uintptr_t value; 631 }; 632 633 static const struct irdma_sw_stats_tunable_info irdma_sws_list[] = { 634 {IRDMA_OP_CEQ_DESTROY, "ceq_destroy", "ceq_destroy", 0}, 635 {IRDMA_OP_AEQ_DESTROY, "aeq_destroy", "aeq_destroy", 0}, 636 {IRDMA_OP_DELETE_ARP_CACHE_ENTRY, "delete_arp_cache_entry", 637 "delete_arp_cache_entry", 0}, 638 {IRDMA_OP_MANAGE_APBVT_ENTRY, "manage_apbvt_entry", 639 "manage_apbvt_entry", 0}, 640 {IRDMA_OP_CEQ_CREATE, "ceq_create", "ceq_create", 0}, 641 {IRDMA_OP_AEQ_CREATE, "aeq_create", "aeq_create", 0}, 642 {IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY, "manage_qhash_table_entry", 643 "manage_qhash_table_entry", 0}, 644 {IRDMA_OP_QP_MODIFY, "qp_modify", "qp_modify", 0}, 645 {IRDMA_OP_QP_UPLOAD_CONTEXT, "qp_upload_context", "qp_upload_context", 646 0}, 647 {IRDMA_OP_CQ_CREATE, "cq_create", "cq_create", 0}, 648 {IRDMA_OP_CQ_DESTROY, "cq_destroy", "cq_destroy", 0}, 649 {IRDMA_OP_QP_CREATE, "qp_create", "qp_create", 0}, 650 {IRDMA_OP_QP_DESTROY, "qp_destroy", "qp_destroy", 0}, 651 {IRDMA_OP_ALLOC_STAG, "alloc_stag", "alloc_stag", 0}, 652 {IRDMA_OP_MR_REG_NON_SHARED, "mr_reg_non_shared", "mr_reg_non_shared", 653 0}, 654 {IRDMA_OP_DEALLOC_STAG, "dealloc_stag", "dealloc_stag", 0}, 655 {IRDMA_OP_MW_ALLOC, "mw_alloc", "mw_alloc", 0}, 656 {IRDMA_OP_QP_FLUSH_WQES, "qp_flush_wqes", "qp_flush_wqes", 0}, 657 {IRDMA_OP_ADD_ARP_CACHE_ENTRY, "add_arp_cache_entry", 658 "add_arp_cache_entry", 0}, 659 {IRDMA_OP_MANAGE_PUSH_PAGE, "manage_push_page", "manage_push_page", 0}, 660 {IRDMA_OP_UPDATE_PE_SDS, "update_pe_sds", "update_pe_sds", 0}, 661 {IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE, "manage_hmc_pm_func_table", 662 "manage_hmc_pm_func_table", 0}, 663 {IRDMA_OP_SUSPEND, "suspend", "suspend", 0}, 664 {IRDMA_OP_RESUME, "resume", "resume", 0}, 665 {IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP, "manage_vchnl_req_pble_bp", 666 "manage_vchnl_req_pble_bp", 0}, 667 {IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0}, 668 {IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0}, 669 {IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0}, 670 {IRDMA_OP_AH_MODIFY, "ah_modify", "ah_modify", 0}, 671 {IRDMA_OP_AH_DESTROY, "ah_destroy", "ah_destroy", 0}, 672 {IRDMA_OP_MC_CREATE, "mc_create", "mc_create", 0}, 673 {IRDMA_OP_MC_DESTROY, "mc_destroy", "mc_destroy", 0}, 674 {IRDMA_OP_MC_MODIFY, "mc_modify", "mc_modify", 0}, 675 {IRDMA_OP_STATS_ALLOCATE, "stats_allocate", "stats_allocate", 0}, 676 {IRDMA_OP_STATS_FREE, "stats_free", "stats_free", 0}, 677 {IRDMA_OP_STATS_GATHER, "stats_gather", "stats_gather", 0}, 678 {IRDMA_OP_WS_ADD_NODE, "ws_add_node", "ws_add_node", 0}, 679 {IRDMA_OP_WS_MODIFY_NODE, "ws_modify_node", "ws_modify_node", 0}, 680 {IRDMA_OP_WS_DELETE_NODE, "ws_delete_node", "ws_delete_node", 0}, 681 {IRDMA_OP_WS_FAILOVER_START, "ws_failover_start", "ws_failover_start", 682 0}, 683 {IRDMA_OP_WS_FAILOVER_COMPLETE, "ws_failover_complete", 684 "ws_failover_complete", 0}, 685 {IRDMA_OP_SET_UP_MAP, "set_up_map", "set_up_map", 0}, 686 {IRDMA_OP_GEN_AE, "gen_ae", "gen_ae", 0}, 687 {IRDMA_OP_QUERY_RDMA_FEATURES, "query_rdma_features", 688 "query_rdma_features", 0}, 689 {IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY, "alloc_local_mac_entry", 690 "alloc_local_mac_entry", 0}, 691 {IRDMA_OP_ADD_LOCAL_MAC_ENTRY, "add_local_mac_entry", 692 "add_local_mac_entry", 0}, 693 {IRDMA_OP_DELETE_LOCAL_MAC_ENTRY, "delete_local_mac_entry", 694 "delete_local_mac_entry", 0}, 695 {IRDMA_OP_CQ_MODIFY, "cq_modify", "cq_modify", 0} 696 }; 697 698 static const struct irdma_sw_stats_tunable_info irdma_cmcs_list[] = { 699 {0, "cm_nodes_created", "cm_nodes_created", 700 offsetof(struct irdma_cm_core, stats_nodes_created)}, 701 {0, "cm_nodes_destroyed", "cm_nodes_destroyed", 702 offsetof(struct irdma_cm_core, stats_nodes_destroyed)}, 703 {0, "cm_listen_created", "cm_listen_created", 704 offsetof(struct irdma_cm_core, stats_listen_created)}, 705 {0, "cm_listen_destroyed", "cm_listen_destroyed", 706 offsetof(struct irdma_cm_core, stats_listen_destroyed)}, 707 {0, "cm_listen_nodes_created", "cm_listen_nodes_created", 708 offsetof(struct irdma_cm_core, stats_listen_nodes_created)}, 709 {0, "cm_listen_nodes_destroyed", "cm_listen_nodes_destroyed", 710 offsetof(struct irdma_cm_core, stats_listen_nodes_destroyed)}, 711 {0, "cm_lpbs", "cm_lpbs", offsetof(struct irdma_cm_core, stats_lpbs)}, 712 {0, "cm_accepts", "cm_accepts", offsetof(struct irdma_cm_core, 713 stats_accepts)}, 714 {0, "cm_rejects", "cm_rejects", offsetof(struct irdma_cm_core, 715 stats_rejects)}, 716 {0, "cm_connect_errs", "cm_connect_errs", 717 offsetof(struct irdma_cm_core, stats_connect_errs)}, 718 {0, "cm_passive_errs", "cm_passive_errs", 719 offsetof(struct irdma_cm_core, stats_passive_errs)}, 720 {0, "cm_pkt_retrans", "cm_pkt_retrans", offsetof(struct irdma_cm_core, 721 stats_pkt_retrans)}, 722 {0, "cm_backlog_drops", "cm_backlog_drops", 723 offsetof(struct irdma_cm_core, stats_backlog_drops)}, 724 }; 725 726 static const struct irdma_sw_stats_tunable_info irdma_ilqs32_list[] = { 727 {0, "ilq_avail_buf_count", "ilq_avail_buf_count", 728 offsetof(struct irdma_puda_rsrc, avail_buf_count)}, 729 {0, "ilq_alloc_buf_count", "ilq_alloc_buf_count", 730 offsetof(struct irdma_puda_rsrc, alloc_buf_count)} 731 }; 732 733 static const struct irdma_sw_stats_tunable_info irdma_ilqs_list[] = { 734 {0, "ilq_stats_buf_alloc_fail", "ilq_stats_buf_alloc_fail", 735 offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)}, 736 {0, "ilq_stats_pkt_rcvd", "ilq_stats_pkt_rcvd", 737 offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)}, 738 {0, "ilq_stats_pkt_sent", "ilq_stats_pkt_sent", 739 offsetof(struct irdma_puda_rsrc, stats_pkt_sent)}, 740 {0, "ilq_stats_rcvd_pkt_err", "ilq_stats_rcvd_pkt_err", 741 offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)}, 742 {0, "ilq_stats_sent_pkt_q", "ilq_stats_sent_pkt_q", 743 offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)} 744 }; 745 746 static const struct irdma_sw_stats_tunable_info irdma_ieqs32_list[] = { 747 {0, "ieq_avail_buf_count", "ieq_avail_buf_count", 748 offsetof(struct irdma_puda_rsrc, avail_buf_count)}, 749 {0, "ieq_alloc_buf_count", "ieq_alloc_buf_count", 750 offsetof(struct irdma_puda_rsrc, alloc_buf_count)} 751 }; 752 753 static const struct irdma_sw_stats_tunable_info irdma_ieqs_list[] = { 754 {0, "ieq_stats_buf_alloc_fail", "ieq_stats_buf_alloc_fail", 755 offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)}, 756 {0, "ieq_stats_pkt_rcvd", "ieq_stats_pkt_rcvd", 757 offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)}, 758 {0, "ieq_stats_pkt_sent", "ieq_stats_pkt_sent", 759 offsetof(struct irdma_puda_rsrc, stats_pkt_sent)}, 760 {0, "ieq_stats_rcvd_pkt_err", "ieq_stats_rcvd_pkt_err", 761 offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)}, 762 {0, "ieq_stats_sent_pkt_q", "ieq_stats_sent_pkt_q", 763 offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)}, 764 {0, "ieq_stats_bad_qp_id", "ieq_stats_bad_qp_id", 765 offsetof(struct irdma_puda_rsrc, stats_bad_qp_id)}, 766 {0, "ieq_fpdu_processed", "ieq_fpdu_processed", 767 offsetof(struct irdma_puda_rsrc, fpdu_processed)}, 768 {0, "ieq_bad_seq_num", "ieq_bad_seq_num", 769 offsetof(struct irdma_puda_rsrc, bad_seq_num)}, 770 {0, "ieq_crc_err", "ieq_crc_err", offsetof(struct irdma_puda_rsrc, 771 crc_err)}, 772 {0, "ieq_pmode_count", "ieq_pmode_count", 773 offsetof(struct irdma_puda_rsrc, pmode_count)}, 774 {0, "ieq_partials_handled", "ieq_partials_handled", 775 offsetof(struct irdma_puda_rsrc, partials_handled)}, 776 }; 777 778 /** 779 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings 780 * @rf: RDMA PCI function 781 * 782 * Create DCQCN related sysctls for the driver. 783 * dcqcn_ena is writeable settings and applicable to next QP creation or 784 * context setting. 785 * all other settings are of RDTUN type (read on driver load) and are 786 * applicable only to CQP creation. 787 */ 788 void 789 irdma_dcqcn_tunables_init(struct irdma_pci_f *rf) 790 { 791 struct sysctl_oid_list *irdma_sysctl_oid_list; 792 793 irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree); 794 795 SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 796 OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0, 797 irdma_sysctl_dcqcn_update, "A", 798 "enables DCQCN algorithm for RoCEv2 on all ports, default=true"); 799 800 SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 801 OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN, 802 &rf->dcqcn_params.cc_cfg_valid, 0, 803 "set DCQCN parameters to be valid, default=false"); 804 805 rf->dcqcn_params.min_dec_factor = 1; 806 SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 807 OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN, 808 &rf->dcqcn_params.min_dec_factor, 0, 809 "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1"); 810 811 SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 812 OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN, 813 &rf->dcqcn_params.min_rate, 0, 814 "set minimum rate limit value, in MBits per second, default=0"); 815 816 rf->dcqcn_params.dcqcn_f = 5; 817 SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 818 OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0, 819 "set number of times to stay in each stage of bandwidth recovery, default=5"); 820 821 rf->dcqcn_params.dcqcn_t = 0x37; 822 SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 823 OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0, 824 "number of us to elapse before increasing the CWND in DCQCN mode, default=0x37"); 825 826 rf->dcqcn_params.dcqcn_b = 0x249f0; 827 SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 828 OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0, 829 "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0"); 830 831 rf->dcqcn_params.rai_factor = 1; 832 SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 833 OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN, 834 &rf->dcqcn_params.rai_factor, 0, 835 "set number of MSS to add to the congestion window in additive increase mode, default=1"); 836 837 rf->dcqcn_params.hai_factor = 5; 838 SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 839 OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN, 840 &rf->dcqcn_params.hai_factor, 0, 841 "set number of MSS to add to the congestion window in hyperactive increase mode, default=5"); 842 843 rf->dcqcn_params.rreduce_mperiod = 50; 844 SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 845 OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN, 846 &rf->dcqcn_params.rreduce_mperiod, 0, 847 "set minimum time between 2 consecutive rate reductions for a single flow, default=50"); 848 } 849 850 /** 851 * irdma_sysctl_settings - sysctl runtime settings init 852 * @rf: RDMA PCI function 853 */ 854 void 855 irdma_sysctl_settings(struct irdma_pci_f *rf) 856 { 857 struct sysctl_oid_list *irdma_sysctl_oid_list; 858 859 irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree); 860 861 SYSCTL_ADD_BOOL(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, 862 OID_AUTO, "upload_context", CTLFLAG_RWTUN, 863 &irdma_upload_context, 0, 864 "allow for generating QP's upload context, default=0"); 865 } 866 867 void 868 irdma_sw_stats_tunables_init(struct irdma_pci_f *rf) 869 { 870 struct sysctl_oid_list *sws_oid_list; 871 struct sysctl_ctx_list *irdma_ctx = &rf->tun_info.irdma_sysctl_ctx; 872 struct irdma_sc_dev *dev = &rf->sc_dev; 873 struct irdma_cm_core *cm_core = &rf->iwdev->cm_core; 874 struct irdma_puda_rsrc *ilq = rf->iwdev->vsi.ilq; 875 struct irdma_puda_rsrc *ieq = rf->iwdev->vsi.ieq; 876 u64 *ll_ptr; 877 u32 *l_ptr; 878 int cqp_stat_cnt = sizeof(irdma_sws_list) / sizeof(struct irdma_sw_stats_tunable_info); 879 int cmcore_stat_cnt = sizeof(irdma_cmcs_list) / sizeof(struct irdma_sw_stats_tunable_info); 880 int ilqs_stat_cnt = sizeof(irdma_ilqs_list) / sizeof(struct irdma_sw_stats_tunable_info); 881 int ilqs32_stat_cnt = sizeof(irdma_ilqs32_list) / sizeof(struct irdma_sw_stats_tunable_info); 882 int ieqs_stat_cnt = sizeof(irdma_ieqs_list) / sizeof(struct irdma_sw_stats_tunable_info); 883 int ieqs32_stat_cnt = sizeof(irdma_ieqs32_list) / sizeof(struct irdma_sw_stats_tunable_info); 884 int i; 885 886 sws_oid_list = SYSCTL_CHILDREN(rf->tun_info.sws_sysctl_tree); 887 888 for (i = 0; i < cqp_stat_cnt; ++i) { 889 SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO, 890 irdma_sws_list[i].name, CTLFLAG_RD, 891 &dev->cqp_cmd_stats[irdma_sws_list[i].op_type], 892 0, irdma_sws_list[i].desc); 893 } 894 SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO, 895 "req_cmds", CTLFLAG_RD | CTLTYPE_STRING, 896 dev->cqp, IRDMA_CQP_REQ_CMDS, irdma_sysctl_cqp_stats, "A", 897 "req_cmds"); 898 SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO, 899 "cmpl_cmds", CTLFLAG_RD | CTLTYPE_STRING, 900 dev->cqp, IRDMA_CQP_CMPL_CMDS, irdma_sysctl_cqp_stats, "A", 901 "cmpl_cmds"); 902 for (i = 0; i < cmcore_stat_cnt; ++i) { 903 ll_ptr = (u64 *)((uintptr_t)cm_core + irdma_cmcs_list[i].value); 904 SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO, 905 irdma_cmcs_list[i].name, CTLFLAG_RD, ll_ptr, 906 0, irdma_cmcs_list[i].desc); 907 } 908 for (i = 0; ilq && i < ilqs_stat_cnt; ++i) { 909 ll_ptr = (u64 *)((uintptr_t)ilq + irdma_ilqs_list[i].value); 910 SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO, 911 irdma_ilqs_list[i].name, CTLFLAG_RD, ll_ptr, 912 0, irdma_ilqs_list[i].desc); 913 } 914 for (i = 0; ilq && i < ilqs32_stat_cnt; ++i) { 915 l_ptr = (u32 *)((uintptr_t)ilq + irdma_ilqs32_list[i].value); 916 SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO, 917 irdma_ilqs32_list[i].name, CTLFLAG_RD, l_ptr, 918 0, irdma_ilqs32_list[i].desc); 919 } 920 for (i = 0; ieq && i < ieqs_stat_cnt; ++i) { 921 ll_ptr = (u64 *)((uintptr_t)ieq + irdma_ieqs_list[i].value); 922 SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO, 923 irdma_ieqs_list[i].name, CTLFLAG_RD, ll_ptr, 924 0, irdma_ieqs_list[i].desc); 925 } 926 for (i = 0; ieq && i < ieqs32_stat_cnt; ++i) { 927 l_ptr = (u32 *)((uintptr_t)ieq + irdma_ieqs32_list[i].value); 928 SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO, 929 irdma_ieqs32_list[i].name, CTLFLAG_RD, l_ptr, 930 0, irdma_ieqs32_list[i].desc); 931 } 932 } 933 934 /** 935 * irdma_dmamap_cb - callback for bus_dmamap_load 936 */ 937 static void 938 irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) 939 { 940 if (error) 941 return; 942 *(bus_addr_t *) arg = segs->ds_addr; 943 return; 944 } 945 946 /** 947 * irdma_allocate_dma_mem - allocate dma memory 948 * @hw: pointer to hw structure 949 * @mem: structure holding memory information 950 * @size: requested size 951 * @alignment: requested alignment 952 */ 953 void * 954 irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem, 955 u64 size, u32 alignment) 956 { 957 struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context; 958 device_t dev = dev_ctx->dev; 959 void *va; 960 int ret; 961 962 ret = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 963 alignment, 0, /* alignment, bounds */ 964 BUS_SPACE_MAXADDR, /* lowaddr */ 965 BUS_SPACE_MAXADDR, /* highaddr */ 966 NULL, NULL, /* filter, filterarg */ 967 size, /* maxsize */ 968 1, /* nsegments */ 969 size, /* maxsegsize */ 970 BUS_DMA_ALLOCNOW, /* flags */ 971 NULL, /* lockfunc */ 972 NULL, /* lockfuncarg */ 973 &mem->tag); 974 if (ret != 0) { 975 device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n", 976 __func__, ret); 977 goto fail_0; 978 } 979 ret = bus_dmamem_alloc(mem->tag, (void **)&va, 980 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map); 981 if (ret != 0) { 982 device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n", 983 __func__, ret); 984 goto fail_1; 985 } 986 ret = bus_dmamap_load(mem->tag, mem->map, va, size, 987 irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT); 988 if (ret != 0) { 989 device_printf(dev, "%s: bus_dmamap_load failed, error %u\n", 990 __func__, ret); 991 goto fail_2; 992 } 993 mem->nseg = 1; 994 mem->size = size; 995 bus_dmamap_sync(mem->tag, mem->map, 996 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 997 998 return va; 999 fail_2: 1000 bus_dmamem_free(mem->tag, va, mem->map); 1001 fail_1: 1002 bus_dma_tag_destroy(mem->tag); 1003 fail_0: 1004 mem->map = NULL; 1005 mem->tag = NULL; 1006 1007 return NULL; 1008 } 1009 1010 /** 1011 * irdma_free_dma_mem - Memory free helper fn 1012 * @hw: pointer to hw structure 1013 * @mem: ptr to mem struct to free 1014 */ 1015 int 1016 irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem) 1017 { 1018 if (!mem) 1019 return -EINVAL; 1020 bus_dmamap_sync(mem->tag, mem->map, 1021 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1022 bus_dmamap_unload(mem->tag, mem->map); 1023 if (!mem->va) 1024 return -ENOMEM; 1025 bus_dmamem_free(mem->tag, mem->va, mem->map); 1026 bus_dma_tag_destroy(mem->tag); 1027 1028 mem->va = NULL; 1029 1030 return 0; 1031 } 1032 1033 void 1034 irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi) 1035 { 1036 struct irdma_sc_qp *qp = NULL; 1037 struct irdma_qp *iwqp; 1038 struct irdma_pci_f *rf; 1039 u8 i; 1040 1041 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 1042 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); 1043 while (qp) { 1044 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) { 1045 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); 1046 continue; 1047 } 1048 iwqp = qp->qp_uk.back_qp; 1049 rf = iwqp->iwdev->rf; 1050 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem); 1051 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem); 1052 1053 kfree(iwqp->kqp.sq_wrid_mem); 1054 kfree(iwqp->kqp.rq_wrid_mem); 1055 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); 1056 kfree(iwqp); 1057 } 1058 } 1059 } 1060