/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_rdma.c
 * @brief RDMA client driver interface
 *
 * Functions to interface with the RDMA client driver, for enabling RDMA
 * functionality for the ice driver.
 *
 * The RDMA client interface is based on a simple kobject interface which is
 * defined by the irdma_if.m and irdma_di_if.m interfaces.
 *
 * The ice device driver provides the irdma_di_if.m interface methods, while
 * the client RDMA driver provides the irdma_if.m interface methods as an
 * extension on top of the irdma_di_if kobject.
 *
 * The initial connection between drivers is done via the RDMA client driver
 * calling ice_rdma_register.
 */

#include "ice_iflib.h"
#include "ice_rdma_internal.h"

#include "irdma_if.h"
#include "irdma_di_if.h"

/**
 * @var ice_rdma
 * @brief global RDMA driver state
 *
 * Contains global state the driver uses to connect to a client RDMA interface
 * driver.
 */
static struct ice_rdma_state ice_rdma;

/*
 * Helper function prototypes
 */
static int ice_rdma_pf_attach_locked(struct ice_softc *sc);
static void ice_rdma_pf_detach_locked(struct ice_softc *sc);
static int ice_rdma_check_version(struct ice_rdma_info *info);
static void ice_rdma_cp_qos_info(struct ice_hw *hw,
				 struct ice_dcbx_cfg *dcbx_cfg,
				 struct ice_qos_params *qos_info);

/*
 * RDMA Device Interface prototypes
 */
static int ice_rdma_pf_reset(struct ice_rdma_peer *peer);
static int ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
				 struct ice_rdma_msix_mapping *msix_info);
static int ice_rdma_qset_register_request(struct ice_rdma_peer *peer,
					  struct ice_rdma_qset_update *res);
static int ice_rdma_update_vsi_filter(struct ice_rdma_peer *peer_dev,
				      bool enable);
static void ice_rdma_request_handler(struct ice_rdma_peer *peer,
				     struct ice_rdma_request *req);


/**
 * @var ice_rdma_di_methods
 * @brief RDMA driver interface methods
 *
 * Kobject methods implementing the driver-side interface for the RDMA peer
 * clients. This method table contains the operations which the client can
 * request from the driver.
 *
 * The client driver will then extend this kobject class with methods that the
 * driver can request from the client.
 */
static kobj_method_t ice_rdma_di_methods[] = {
	KOBJMETHOD(irdma_di_reset, ice_rdma_pf_reset),
	KOBJMETHOD(irdma_di_msix_init, ice_rdma_pf_msix_init),
	KOBJMETHOD(irdma_di_qset_register_request, ice_rdma_qset_register_request),
	KOBJMETHOD(irdma_di_vsi_filter_update, ice_rdma_update_vsi_filter),
	KOBJMETHOD(irdma_di_req_handler, ice_rdma_request_handler),
	KOBJMETHOD_END
};

/* Define ice_rdma_di class which will be extended by the iRDMA driver */
DEFINE_CLASS_0(ice_rdma_di, ice_rdma_di_class, ice_rdma_di_methods, sizeof(struct ice_rdma_peer));

/**
 * ice_rdma_pf_reset - RDMA client interface requested a reset
 * @peer: the RDMA peer client structure
 *
 * Implements IRDMA_DI_RESET, called by the RDMA client driver to request
 * a reset of an ice driver device.
 *
 * Always returns 0; the reset itself is carried out asynchronously by the
 * stack-reinit machinery.
 */
static int
ice_rdma_pf_reset(struct ice_rdma_peer *peer)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);

	/*
	 * Request that the driver re-initialize by bringing the interface
	 * down and up.
	 */
	ice_request_stack_reinit(sc);

	return (0);
}

/**
 * ice_rdma_pf_msix_init - RDMA client interface request MSI-X initialization
 * @peer: the RDMA peer client structure
 * @msix_info: requested MSI-X mapping
 *
 * Implements IRDMA_DI_MSIX_INIT, called by the RDMA client driver to
 * initialize the MSI-X resources required for RDMA functionality.
 */
static int
ice_rdma_pf_msix_init(struct ice_rdma_peer *peer,
		      struct ice_rdma_msix_mapping __unused *msix_info)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);

	MPASS(msix_info != NULL);

	device_printf(sc->dev, "%s: iRDMA MSI-X initialization request is not yet implemented\n", __func__);

	/* TODO: implement MSI-X initialization for RDMA */
	return (ENOSYS);
}

/**
 * ice_rdma_qset_register_request - RDMA client interface request qset
 * registration or unregistration
 * @peer: the RDMA peer client structure
 * @res: resources to be registered or unregistered
 *
 * Implements IRDMA_DI_QSET_REGISTER_REQUEST. For ICE_RDMA_QSET_ALLOC,
 * configures the VSI for RDMA qsets and enables each requested qset,
 * writing the resulting TEID back into @res. For ICE_RDMA_QSET_FREE,
 * disables the previously allocated qsets.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int
ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_update *res)
{
	struct ice_softc *sc = ice_rdma_peer_to_sc(peer);
	struct ice_vsi *vsi = NULL;
	struct ice_dcbx_cfg *dcbx_cfg;
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;
	int count, i, ret = 0;
	uint32_t *qset_teid;
	uint16_t *qs_handle;
	uint16_t max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	uint16_t vsi_id;
	uint8_t ena_tc = 0;

	if (!res)
		return -EINVAL;

	if (res->cnt_req > ICE_MAX_TXQ_PER_TXQG)
		return -EINVAL;

	/* Pick the qset count and target VSI based on the operation type */
	switch(res->res_type) {
	case ICE_RDMA_QSET_ALLOC:
		count = res->cnt_req;
		vsi_id = peer->pf_vsi_num;
		break;
	case ICE_RDMA_QSET_FREE:
		count = res->res_allocated;
		vsi_id = res->qsets.vsi_id;
		break;
	default:
		return -EINVAL;
	}
	qset_teid = (uint32_t *)ice_calloc(hw, count, sizeof(*qset_teid));
	if (!qset_teid)
		return -ENOMEM;

	qs_handle = (uint16_t *)ice_calloc(hw, count, sizeof(*qs_handle));
	if (!qs_handle) {
		ice_free(hw, qset_teid);
		return -ENOMEM;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;
	/* Locate the VSI structure matching the requested HW VSI number */
	for (i = 0; i < sc->num_available_vsi; i++) {
		if (sc->all_vsi[i] &&
		    ice_get_hw_vsi_num(hw, sc->all_vsi[i]->idx) == vsi_id) {
			vsi = sc->all_vsi[i];
			break;
		}
	}

	if (!vsi) {
		ice_debug(hw, ICE_DBG_RDMA, "RDMA QSet invalid VSI\n");
		ret = -EINVAL;
		goto out;
	}
	if (sc != vsi->sc) {
		ice_debug(hw, ICE_DBG_RDMA, "VSI is tied to unexpected device\n");
		ret = -EXDEV;
		goto out;
	}

	for (i = 0; i < count; i++) {
		struct ice_rdma_qset_params *qset;

		/*
		 * NOTE(review): each iteration reads the same res->qsets
		 * member, so every entry of qs_handle/qset_teid is filled
		 * from one descriptor. This assumes the update carries a
		 * single qset descriptor — confirm against the definition
		 * of struct ice_rdma_qset_update.
		 */
		qset = &res->qsets;
		if (qset->vsi_id != peer->pf_vsi_num) {
			ice_debug(hw, ICE_DBG_RDMA, "RDMA QSet invalid VSI requested %d %d\n",
				  qset->vsi_id, peer->pf_vsi_num);
			ret = -EINVAL;
			goto out;
		}
		max_rdmaqs[qset->tc]++;
		qs_handle[i] = qset->qs_handle;
		qset_teid[i] = qset->teid;
	}

	switch(res->res_type) {
	case ICE_RDMA_QSET_ALLOC:
		/* Build the enabled-TC bitmap from the priority table */
		dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
		for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
			ena_tc |= BIT(dcbx_cfg->etscfg.prio_table[i]);
		}

		ice_debug(hw, ICE_DBG_RDMA, "%s:%d ena_tc=%x\n", __func__, __LINE__, ena_tc);
		status = ice_cfg_vsi_rdma(hw->port_info, vsi->idx, ena_tc,
					  max_rdmaqs);
		if (status) {
			ice_debug(hw, ICE_DBG_RDMA, "Failed VSI RDMA qset config\n");
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < count; i++) {
			struct ice_rdma_qset_params *qset;

			qset = &res->qsets;
			status = ice_ena_vsi_rdma_qset(hw->port_info, vsi->idx,
						       qset->tc, &qs_handle[i], 1,
						       &qset_teid[i]);
			if (status) {
				ice_debug(hw, ICE_DBG_RDMA, "Failed VSI RDMA qset enable\n");
				ret = -EINVAL;
				goto out;
			}
			/* Report the scheduler node TEID back to the client */
			qset->teid = qset_teid[i];
		}
		break;
	case ICE_RDMA_QSET_FREE:
		status = ice_dis_vsi_rdma_qset(hw->port_info, count, qset_teid, qs_handle);
		if (status)
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	ice_free(hw, qs_handle);
	ice_free(hw, qset_teid);

	return ret;
}

/**
 * ice_rdma_update_vsi_filter - configure vsi information
 * when opening or closing rdma driver
 * @peer: the RDMA
peer client structure 293 * @enable: enable or disable the rdma filter 294 */ 295 static int 296 ice_rdma_update_vsi_filter(struct ice_rdma_peer *peer, 297 bool enable) 298 { 299 struct ice_softc *sc = ice_rdma_peer_to_sc(peer); 300 struct ice_vsi *vsi; 301 int ret; 302 303 vsi = &sc->pf_vsi; 304 if (!vsi) 305 return -EINVAL; 306 307 ret = ice_cfg_iwarp_fltr(&sc->hw, vsi->idx, enable); 308 if (ret) { 309 device_printf(sc->dev, "Failed to %sable iWARP filtering\n", 310 enable ? "en" : "dis"); 311 } else { 312 if (enable) 313 vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; 314 else 315 vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; 316 } 317 318 return ret; 319 } 320 321 /** 322 * ice_rdma_request_handler - handle requests incoming from RDMA driver 323 * @peer: the RDMA peer client structure 324 * @req: structure containing request 325 */ 326 static void 327 ice_rdma_request_handler(struct ice_rdma_peer *peer, 328 struct ice_rdma_request *req) 329 { 330 if (!req || !peer) { 331 log(LOG_WARNING, "%s: peer or req are not valid\n", __func__); 332 return; 333 } 334 335 switch(req->type) { 336 case ICE_RDMA_EVENT_RESET: 337 break; 338 case ICE_RDMA_EVENT_QSET_REGISTER: 339 ice_rdma_qset_register_request(peer, &req->res); 340 break; 341 case ICE_RDMA_EVENT_VSI_FILTER_UPDATE: 342 ice_rdma_update_vsi_filter(peer, req->enable_filter); 343 break; 344 default: 345 log(LOG_WARNING, "%s: Event %d not supported\n", __func__, req->type); 346 break; 347 } 348 } 349 350 /** 351 * ice_rdma_cp_qos_info - gather current QOS/DCB settings in LAN to pass 352 * to RDMA driver 353 * @hw: ice hw structure 354 * @dcbx_cfg: current DCB settings in ice driver 355 * @qos_info: destination of the DCB settings 356 */ 357 static void 358 ice_rdma_cp_qos_info(struct ice_hw *hw, struct ice_dcbx_cfg *dcbx_cfg, 359 struct ice_qos_params *qos_info) 360 { 361 u32 up2tc; 362 u8 j; 363 u8 num_tc = 0; 364 u8 val_tc = 0; /* number of TC for validation */ 365 u8 cnt_tc = 0; 366 367 /* setup 
qos_info fields with defaults */
	qos_info->num_apps = 0;
	qos_info->num_tc = 1;

	for (j = 0; j < ICE_TC_MAX_USER_PRIORITY; j++)
		qos_info->up2tc[j] = 0;

	/* Default: a single TC carrying 100% of the relative bandwidth */
	qos_info->tc_info[0].rel_bw = 100;
	for (j = 1; j < IEEE_8021QAZ_MAX_TCS; j++)
		qos_info->tc_info[j].rel_bw = 0;

	/* gather current values */
	up2tc = rd32(hw, PRTDCB_TUP2TC);
	qos_info->num_apps = dcbx_cfg->numapps;

	/* Bitmap of TCs referenced by the UP-to-TC priority table */
	for (j = 0; j < ICE_MAX_TRAFFIC_CLASS; j++) {
		num_tc |= BIT(dcbx_cfg->etscfg.prio_table[j]);
	}
	/*
	 * Count TCs only while the bitmap is contiguous from TC 0; val_tc
	 * accumulates the bits actually counted so a sparse bitmap can be
	 * detected below.
	 */
	for (j = 0; j < ICE_MAX_TRAFFIC_CLASS; j++) {
		if (num_tc & BIT(j)) {
			cnt_tc++;
			val_tc |= BIT(j);
		} else {
			break;
		}
	}
	/* Fall back to a single TC unless the TC bitmap was contiguous */
	qos_info->num_tc = (val_tc == num_tc && num_tc != 0) ? cnt_tc : 1;
	/* PRTDCB_TUP2TC packs one 3-bit TC index per user priority */
	for (j = 0; j < ICE_TC_MAX_USER_PRIORITY; j++)
		qos_info->up2tc[j] = (up2tc >> (j * 3)) & 0x7;

	for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++)
		qos_info->tc_info[j].rel_bw = dcbx_cfg->etscfg.tcbwtable[j];
	for (j = 0; j < qos_info->num_apps; j++) {
		qos_info->apps[j].priority = dcbx_cfg->app[j].priority;
		qos_info->apps[j].prot_id = dcbx_cfg->app[j].prot_id;
		qos_info->apps[j].selector = dcbx_cfg->app[j].selector;
	}
}

/**
 * ice_rdma_check_version - Check that the provided RDMA version is compatible
 * @info: the RDMA client information structure
 *
 * Verify that the client RDMA driver provided a version that is compatible
 * with the driver interface.
 *
 * Returns 0 if the versions are compatible, ENOTSUP on a version mismatch,
 * or EINVAL if no kobject class was supplied.
 */
static int
ice_rdma_check_version(struct ice_rdma_info *info)
{
	/* Make sure the MAJOR version matches */
	if (info->major_version != ICE_RDMA_MAJOR_VERSION) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports major version %d.x.x\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION);
		return (ENOTSUP);
	}

	/*
	 * Make sure that the MINOR version is compatible.
 *
	 * This means that the RDMA client driver version MUST not be greater
	 * than the version provided by the driver, as it would indicate that
	 * the RDMA client expects features which are not supported by the
	 * main driver.
	 */
	if (info->minor_version > ICE_RDMA_MINOR_VERSION) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports up to minor version %d.%d.x\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION, ICE_RDMA_MINOR_VERSION);
		return (ENOTSUP);
	}

	/*
	 * Make sure that the PATCH version is compatible.
	 *
	 * This means that the RDMA client version MUST not be greater than
	 * the version provided by the driver, as it may indicate that the
	 * RDMA client expects certain backwards compatible bug fixes which
	 * are not implemented by this version of the main driver.
	 */
	if ((info->minor_version == ICE_RDMA_MINOR_VERSION) &&
	    (info->patch_version > ICE_RDMA_PATCH_VERSION)) {
		log(LOG_WARNING, "%s: the iRDMA driver requested version %d.%d.%d, but this driver only supports up to patch version %d.%d.%d\n",
		    __func__,
		    info->major_version, info->minor_version, info->patch_version,
		    ICE_RDMA_MAJOR_VERSION, ICE_RDMA_MINOR_VERSION, ICE_RDMA_PATCH_VERSION);
		return (ENOTSUP);
	}

	/* Make sure that the kobject class is initialized */
	if (info->rdma_class == NULL) {
		log(LOG_WARNING, "%s: the iRDMA driver did not specify a kobject interface\n",
		    __func__);
		return (EINVAL);
	}

	return (0);
}

/**
 * ice_rdma_register - Register an RDMA client driver
 * @info: the RDMA client information structure
 *
 * Called by the RDMA client driver on load. Used to initialize the RDMA
 * client driver interface and enable interop between the ice driver and the
 * RDMA client driver.
 *
 * The RDMA client driver must provide the version number it expects, along
 * with a pointer to a kobject class that extends the irdma_di_if class, and
 * implements the irdma_if class interface.
 *
 * Returns 0 on success, ECONNREFUSED if the interface is administratively
 * disabled, EBUSY if a client is already registered, or the error from
 * ice_rdma_check_version().
 */
int
ice_rdma_register(struct ice_rdma_info *info)
{
	struct ice_rdma_entry *entry;
	int err = 0;

	sx_xlock(&ice_rdma.mtx);

	if (!ice_enable_irdma) {
		log(LOG_INFO, "%s: The iRDMA driver interface has been disabled\n", __func__);
		err = (ECONNREFUSED);
		goto return_unlock;
	}

	if (ice_rdma.registered) {
		log(LOG_WARNING, "%s: iRDMA driver already registered\n", __func__);
		err = (EBUSY);
		goto return_unlock;
	}

	/* Make sure the iRDMA version is compatible */
	err = ice_rdma_check_version(info);
	if (err)
		goto return_unlock;

	log(LOG_INFO, "%s: iRDMA driver registered using version %d.%d.%d\n",
	    __func__, info->major_version, info->minor_version, info->patch_version);

	ice_rdma.peer_class = info->rdma_class;

	/*
	 * Initialize the kobject interface and notify the RDMA client of each
	 * existing PF interface.
	 */
	LIST_FOREACH(entry, &ice_rdma.peers, node) {
		kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
		IRDMA_PROBE(&entry->peer);
		/* Re-open peers whose PF was already initiated */
		if (entry->initiated)
			IRDMA_OPEN(&entry->peer);
	}
	ice_rdma.registered = true;

return_unlock:
	sx_xunlock(&ice_rdma.mtx);

	return (err);
}

/**
 * ice_rdma_unregister - Unregister an RDMA client driver
 *
 * Called by the RDMA client driver on unload. Used to de-initialize the RDMA
 * client driver interface and shut down communication between the ice driver
 * and the RDMA client driver.
534 */ 535 int 536 ice_rdma_unregister(void) 537 { 538 struct ice_rdma_entry *entry; 539 540 sx_xlock(&ice_rdma.mtx); 541 542 if (!ice_rdma.registered) { 543 log(LOG_WARNING, "%s: iRDMA driver was not previously registered\n", 544 __func__); 545 sx_xunlock(&ice_rdma.mtx); 546 return (ENOENT); 547 } 548 549 log(LOG_INFO, "%s: iRDMA driver unregistered\n", __func__); 550 ice_rdma.registered = false; 551 ice_rdma.peer_class = NULL; 552 553 /* 554 * Release the kobject interface for each of the existing PF 555 * interfaces. Note that we do not notify the client about removing 556 * each PF, as it is assumed that the client will have already cleaned 557 * up any associated resources when it is unregistered. 558 */ 559 LIST_FOREACH(entry, &ice_rdma.peers, node) 560 kobj_delete((kobj_t)&entry->peer, NULL); 561 562 sx_xunlock(&ice_rdma.mtx); 563 564 return (0); 565 } 566 567 /** 568 * ice_rdma_init - RDMA driver init routine 569 * 570 * Called during ice driver module initialization to setup the RDMA client 571 * interface mutex and RDMA peer structure list. 572 */ 573 void 574 ice_rdma_init(void) 575 { 576 LIST_INIT(&ice_rdma.peers); 577 sx_init_flags(&ice_rdma.mtx, "ice rdma interface", SX_DUPOK); 578 579 ice_rdma.registered = false; 580 ice_rdma.peer_class = NULL; 581 } 582 583 /** 584 * ice_rdma_exit - RDMA driver exit routine 585 * 586 * Called during ice driver module exit to shutdown the RDMA client interface 587 * mutex. 588 */ 589 void 590 ice_rdma_exit(void) 591 { 592 MPASS(LIST_EMPTY(&ice_rdma.peers)); 593 sx_destroy(&ice_rdma.mtx); 594 } 595 596 /** 597 * ice_rdma_pf_attach_locked - Prepare a PF for RDMA connections 598 * @sc: the ice driver softc 599 * 600 * Initialize a peer entry for this PF and add it to the RDMA interface list. 601 * Notify the client RDMA driver of a new PF device. 602 * 603 * @pre must be called while holding the ice_rdma mutex. 
 */
static int
ice_rdma_pf_attach_locked(struct ice_softc *sc)
{
	struct ice_rdma_entry *entry;

	/* Do not attach the PF unless RDMA is supported */
	if (!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA))
		return (0);

	entry = &sc->rdma_entry;
	if (entry->attached) {
		device_printf(sc->dev, "iRDMA peer entry already exists\n");
		return (EEXIST);
	}

	/* Populate the peer structure handed to the client driver */
	entry->attached = true;
	entry->peer.dev = sc->dev;
	entry->peer.ifp = sc->ifp;
	entry->peer.pf_id = sc->hw.pf_id;
	entry->peer.pci_mem = sc->bar0.res;
	entry->peer.pf_vsi_num = ice_get_hw_vsi_num(&sc->hw, sc->pf_vsi.idx);
	/* Pass the MSI-X range through only when vectors were reserved */
	if (sc->rdma_imap && sc->rdma_imap[0] != ICE_INVALID_RES_IDX &&
	    sc->irdma_vectors > 0) {
		entry->peer.msix.base = sc->rdma_imap[0];
		entry->peer.msix.count = sc->irdma_vectors;
	}

	/* Gather DCB/QOS info into peer */
	memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info));
	ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg,
			     &entry->peer.initial_qos_info);

	/*
	 * If the RDMA client driver has already registered, initialize the
	 * kobject and notify the client of a new PF
	 */
	if (ice_rdma.registered) {
		kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
		IRDMA_PROBE(&entry->peer);
	}

	LIST_INSERT_HEAD(&ice_rdma.peers, entry, node);

	ice_set_bit(ICE_FEATURE_RDMA, sc->feat_en);

	return (0);
}

/**
 * ice_rdma_pf_attach - Notify the RDMA client of a new PF
 * @sc: the ice driver softc
 *
 * Called during PF attach to notify the RDMA client of a new PF.
658 */ 659 int 660 ice_rdma_pf_attach(struct ice_softc *sc) 661 { 662 int err; 663 664 sx_xlock(&ice_rdma.mtx); 665 err = ice_rdma_pf_attach_locked(sc); 666 sx_xunlock(&ice_rdma.mtx); 667 668 return (err); 669 } 670 671 /** 672 * ice_rdma_pf_detach_locked - Notify the RDMA client on PF detach 673 * @sc: the ice driver softc 674 * 675 * Notify the RDMA peer client driver of removal of a PF, and release any 676 * RDMA-specific resources associated with that PF. Remove the PF from the 677 * list of available RDMA entries. 678 * 679 * @pre must be called while holding the ice_rdma mutex. 680 */ 681 static void 682 ice_rdma_pf_detach_locked(struct ice_softc *sc) 683 { 684 struct ice_rdma_entry *entry; 685 686 /* No need to detach the PF if RDMA is not enabled */ 687 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RDMA)) 688 return; 689 690 entry = &sc->rdma_entry; 691 if (!entry->attached) { 692 device_printf(sc->dev, "iRDMA peer entry was not attached\n"); 693 return; 694 } 695 696 /* 697 * If the RDMA client driver is registered, notify the client that 698 * a PF has been removed, and release the kobject reference. 699 */ 700 if (ice_rdma.registered) { 701 IRDMA_REMOVE(&entry->peer); 702 kobj_delete((kobj_t)&entry->peer, NULL); 703 } 704 705 LIST_REMOVE(entry, node); 706 entry->attached = false; 707 708 ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_en); 709 } 710 711 /** 712 * ice_rdma_pf_detach - Notify the RDMA client of a PF detaching 713 * @sc: the ice driver softc 714 * 715 * Take the ice_rdma mutex and then notify the RDMA client that a PF has been 716 * removed. 717 */ 718 void 719 ice_rdma_pf_detach(struct ice_softc *sc) 720 { 721 sx_xlock(&ice_rdma.mtx); 722 ice_rdma_pf_detach_locked(sc); 723 sx_xunlock(&ice_rdma.mtx); 724 } 725 726 /** 727 * ice_rdma_pf_init - Notify the RDMA client that a PF has initialized 728 * @sc: the ice driver softc 729 * 730 * Called by the ice driver when a PF has been initialized. 
Notifies the RDMA 731 * client that a PF is up and ready to operate. 732 */ 733 int 734 ice_rdma_pf_init(struct ice_softc *sc) 735 { 736 struct ice_rdma_peer *peer = &sc->rdma_entry.peer; 737 738 sx_xlock(&ice_rdma.mtx); 739 740 /* Update the MTU */ 741 peer->mtu = sc->ifp->if_mtu; 742 sc->rdma_entry.initiated = true; 743 744 if (sc->rdma_entry.attached && ice_rdma.registered) { 745 sx_xunlock(&ice_rdma.mtx); 746 return IRDMA_OPEN(peer); 747 } 748 749 sx_xunlock(&ice_rdma.mtx); 750 751 return (0); 752 } 753 754 /** 755 * ice_rdma_pf_stop - Notify the RDMA client of a stopped PF device 756 * @sc: the ice driver softc 757 * 758 * Called by the ice driver when a PF is stopped. Notifies the RDMA client 759 * driver that the PF has stopped and is not ready to operate. 760 */ 761 int 762 ice_rdma_pf_stop(struct ice_softc *sc) 763 { 764 sx_xlock(&ice_rdma.mtx); 765 766 sc->rdma_entry.initiated = false; 767 if (sc->rdma_entry.attached && ice_rdma.registered) { 768 sx_xunlock(&ice_rdma.mtx); 769 return IRDMA_CLOSE(&sc->rdma_entry.peer); 770 } 771 772 sx_xunlock(&ice_rdma.mtx); 773 774 return (0); 775 } 776 777 /** 778 * ice_rdma_link_change - Notify RDMA client of a change in link status 779 * @sc: the ice driver softc 780 * @linkstate: the link status 781 * @baudrate: the link rate in bits per second 782 * 783 * Notify the RDMA client of a link status change, by sending it the new link 784 * state and baudrate. 785 * 786 * The link state is represented the same was as in the ifnet structure. It 787 * should be LINK_STATE_UNKNOWN, LINK_STATE_DOWN, or LINK_STATE_UP. 
788 */ 789 void 790 ice_rdma_link_change(struct ice_softc *sc, int linkstate, uint64_t baudrate) 791 { 792 struct ice_rdma_peer *peer = &sc->rdma_entry.peer; 793 struct ice_rdma_event event; 794 795 memset(&event, 0, sizeof(struct ice_rdma_event)); 796 event.type = ICE_RDMA_EVENT_LINK_CHANGE; 797 event.linkstate = linkstate; 798 event.baudrate = baudrate; 799 800 sx_xlock(&ice_rdma.mtx); 801 802 if (sc->rdma_entry.attached && ice_rdma.registered) 803 IRDMA_EVENT_HANDLER(peer, &event); 804 805 sx_xunlock(&ice_rdma.mtx); 806 } 807 808 /** 809 * ice_rdma_notify_dcb_qos_change - notify RDMA driver to pause traffic 810 * @sc: the ice driver softc 811 * 812 * Notify the RDMA driver that QOS/DCB settings are about to change. 813 * Once the function return, all the QPs should be suspended. 814 */ 815 void 816 ice_rdma_notify_dcb_qos_change(struct ice_softc *sc) 817 { 818 struct ice_rdma_peer *peer = &sc->rdma_entry.peer; 819 struct ice_rdma_event event; 820 821 memset(&event, 0, sizeof(struct ice_rdma_event)); 822 event.type = ICE_RDMA_EVENT_TC_CHANGE; 823 /* pre-event */ 824 event.prep = true; 825 826 sx_xlock(&ice_rdma.mtx); 827 if (sc->rdma_entry.attached && ice_rdma.registered) 828 IRDMA_EVENT_HANDLER(peer, &event); 829 sx_xunlock(&ice_rdma.mtx); 830 } 831 832 /** 833 * ice_rdma_dcb_qos_update - pass the changed dcb settings to RDMA driver 834 * @sc: the ice driver softc 835 * @pi: the port info structure 836 * 837 * Pass the changed DCB settings to RDMA traffic. This function should be 838 * called only after ice_rdma_notify_dcb_qos_change has been called and 839 * returned before. After the function returns, all the RDMA traffic 840 * should be resumed. 
841 */ 842 void 843 ice_rdma_dcb_qos_update(struct ice_softc *sc, struct ice_port_info *pi) 844 { 845 struct ice_rdma_peer *peer = &sc->rdma_entry.peer; 846 struct ice_rdma_event event; 847 848 memset(&event, 0, sizeof(struct ice_rdma_event)); 849 event.type = ICE_RDMA_EVENT_TC_CHANGE; 850 /* post-event */ 851 event.prep = false; 852 853 /* gather current configuration */ 854 ice_rdma_cp_qos_info(&sc->hw, &pi->qos_cfg.local_dcbx_cfg, &event.port_qos); 855 sx_xlock(&ice_rdma.mtx); 856 if (sc->rdma_entry.attached && ice_rdma.registered) 857 IRDMA_EVENT_HANDLER(peer, &event); 858 sx_xunlock(&ice_rdma.mtx); 859 } 860