// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0);		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x)	((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

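/* Walk all CGX/RPM blocks and map each enabled LMAC to an RVU PF starting
 * at PF_CGXMAP_BASE. For every mapped LMAC a free pkind is reserved and
 * the connected NIX block is recorded for the PF.
 */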
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			if (iter >= MAX_LMAC_COUNT)
				continue;
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

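/* Deliver a link change event to the PF mapped to this CGX LMAC via the
 * AF->PF mailbox. If the PF has not enabled link notifications, the event
 * is only logged.
 */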
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
	if (!pfmap) {
		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
			event->cgx_id, event->lmac_id);
		return;
	}

	do {
		pfid = find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		mutex_lock(&rvu->mbox_lock);

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg) {
			mutex_unlock(&rvu->mbox_lock);
			continue;
		}

		msg->link_info = *linfo;

		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);

		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);

		otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);

		mutex_unlock(&rvu->mbox_lock);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

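/* Probe time CGX/RPM setup: discover MAC blocks, map their LMACs to PFs,
 * take the MACs out of X2P reset and register for link change events.
 */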
int rvu_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Clear X2P reset on all MAC blocks */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		mac_ops = get_mac_ops(cgxd);
		mac_ops->mac_x2p_reset(cgxd, false);
	}

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	return 0;
}

void cgx_start_linkup(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct mac_ops *mac_ops;
	int cgx, lmac, err;
	void *cgxd;

	/* Enable receive on all LMACs */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		mac_ops = get_mac_ops(cgxd);
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			mac_ops->mac_enadis_rx(cgxd, lmac, true);
	}

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}
}

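/* Driver teardown: unregister LMAC event handlers and destroy the event
 * handling workqueue.
 */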
int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* As cgx_lmac_addr_del() does not clear the entry at index 0,
	 * it needs to be done explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

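/* Common stats handler for CGX and RPM MACs; the response layout is
 * picked based on the MAC's Rx/Tx stats count.
 */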
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

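/* Reset LMAC statistics; skipped when the LMAC is shared with VFs so a
 * PF cannot clear counters its VFs are still using.
 */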
int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct rvu_pfvf *parent_pf;
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	parent_pf = &rvu->pf[pf];
	/* To ensure that resetting CGX stats won't affect VF stats,
	 * check if the LMAC is used only by the PF interface.
	 * If not, return.
	 */
	if (parent_pf->cgx_users > 1) {
		dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_stats_reset(cgxd, lmac);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = &rvu->pf[pf];
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from PFs which are not mapped to CGX LMACs,
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

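/* Enable/disable PTP hardware timestamping on the LMAC mapped to this
 * PF/VF and keep NPC (and MCS) parsing in sync with the 8 byte
 * timestamp header.
 */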
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	/* Inform MCS about 8B RX header */
	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	void *cgxd = rvu_first_cgx_pdata(rvu);

	if (!cgxd)
		return 0;

	return cgx_get_fifo_len(cgxd);
}

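/* Per LMAC FIFO length; returns 0 when the MAC does not implement a
 * per LMAC query.
 */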
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

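/* Configure 802.3X pause frames on the LMAC mapped to this PF. Rejected
 * while PFC is enabled on the LMAC.
 */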
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
 * from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LFs of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

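/* Reference count NIXLF users of a CGX LMAC; the LMAC Rx/Tx is enabled
 * when the first NIXLF is started and disabled when the last one is
 * stopped.
 */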
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));
	else
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));

	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct cgx_lmac_fwdata_s *linkmodes;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac];
	else
		linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac];

	rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes,
					cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

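/* Configure priority flow control (PFC) on the LMAC mapped to this PF.
 * Rejected while 802.3X pause frames are enabled.
 */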
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}

void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	struct mac_ops *mac_ops;
	struct cgx *cgxd;
	u8 cgx, lmac;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
	cgxd = rvu_cgx_pdata(cgx, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
		dev_err(rvu->dev, "Failed to reset MAC\n");
}