// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "rvu_reg.h"

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_REP_MESSAGES
#undef M

static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
	struct rep_event *msg;
	int pf;

	pf = rvu_get_pf(event->pcifunc);

	if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
		ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);

	mutex_lock(&rvu->mbox_lock);
	msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!msg) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	msg->hdr.pcifunc = event->pcifunc;
	msg->event = event->event;

	memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);

	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

/* Deferred work: drain the queued representee events and forward each one
 * to the representor PF via an up-notification mbox message.
 */
static void rvu_rep_wq_handler(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
	struct rep_evtq_ent *qentry;
	struct rep_event *event;
	unsigned long flags;

	do {
		spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
						  struct rep_evtq_ent,
						  node);
		if (qentry)
			list_del(&qentry->node);

		spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->event;

		rvu_rep_up_notify(rvu, event);
		kfree(qentry);
	} while (1);
}

/* Queue the incoming representee event and defer the notification to the
 * REP event workqueue.
 */
int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
				      struct msg_rsp *rsp)
{
	struct rep_evtq_ent *qentry;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->event = *req;
	spin_lock(&rvu->rep_evtq_lock);
	list_add_tail(&qentry->node, &rvu->rep_evtq_head);
	spin_unlock(&rvu->rep_evtq_lock);
	queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
	return 0;
}

int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rep_event *req;
	int pf;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	pf = rvu_get_pf(rvu->rep_pcifunc);

	mutex_lock(&rvu->mbox_lock);
	req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!req) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	req->hdr.pcifunc = rvu->rep_pcifunc;
	req->event |= RVU_EVENT_PFVF_STATE;
	req->pcifunc = pcifunc;
	req->evt_data.vf_state = enable;

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

#define RVU_LF_RX_STATS(reg) \
	rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))

#define RVU_LF_TX_STATS(reg) \
	rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))

int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
				  struct nix_stats_req *req,
				  struct nix_stats_rsp *rsp)
{
	u16 pcifunc = req->pcifunc;
	int nixlf, blkaddr, err;
	struct msg_req rst_req;
	struct msg_rsp rst_rsp;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return 0;

	if (req->reset) {
		rst_req.hdr.pcifunc = pcifunc;
		return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
	}
	rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
	rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
	rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
	rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
	rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
	rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
	rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
	rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
	rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);

	rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
	rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
	rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
	rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
	rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);

	rsp->pcifunc = req->pcifunc;
	return 0;
}

/* Representor index of a PF/VF; used as the VLAN TCI to steer traffic
 * over the LBK link.
 */
static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
{
	int id;

	for (id = 0; id < rvu->rep_cnt; id++)
		if (rvu->rep2pfvf_map[id] == pcifunc)
			return id;
	return 0;
}

static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
			       u16 vlan_tci, int *vidx)
{
	struct nix_vtag_config_rsp rsp = {};
	struct nix_vtag_config req = {};
	u64 etype = ETH_P_8021Q;
	int err;

	/* Insert vlan tag */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 0; /* tx vlan cfg */
	req.tx.cfg_vtag0 = true;
	req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;

	err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "Tx vlan config failed\n");
		return err;
	}
	*vidx = rsp.vtag0_idx;
	return 0;
}

static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
{
	struct nix_vtag_config req = {};
	struct nix_vtag_config_rsp rsp;

	/* config strip, capture and size */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 1; /* rx vlan cfg */
	req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.rx.strip_vtag = true;
	req.rx.capture_vtag = false;

	return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
}

static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				   u16 entry, bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	u16 vlan_tci, rep_id;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* To steer the traffic from Representee to Representor */
	rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte) {
		vlan_tci = rep_id | BIT_ULL(8);
		req.vf = rvu->rep_pcifunc;
		req.op = NIX_RX_ACTIONOP_UCAST;
		req.index = rep_id;
	} else {
		vlan_tci = rep_id;
		req.vf = pcifunc;
		req.op = NIX_RX_ACTION_DEFAULT;
	}

	rvu_rep_rx_vlan_cfg(rvu, req.vf);
	req.entry = entry;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.features = BIT_ULL(NPC_OUTER_VID) |
		       BIT_ULL(NPC_VLAN_ETYPE_CTAG);
	req.vtag0_valid = true;
	req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.packet.vlan_tci = cpu_to_be16(vlan_tci);
	req.mask.vlan_tci = cpu_to_be16(0xffff);

	req.channel = RVU_SWITCH_LBK_CHAN;
	req.chan_mask = 0xffff;
	req.intf = pfvf->nix_rx_intf;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
				   bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	int vidx, err;
	u16 vlan_tci;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte)
		vlan_tci |= BIT_ULL(8);

	err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
	if (err)
		return err;

	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
	req.hdr.pcifunc = 0; /* AF is requester */
	if (rte) {
		req.vf = pcifunc;
	} else {
		req.vf = rvu->rep_pcifunc;
		req.packet.sq_id = vlan_tci;
		req.mask.sq_id = 0xffff;
	}

	req.entry = entry;
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;
	req.vtag0_def = vidx;
	req.vtag0_op = 1;
	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

/* Install RX/TX steering rules for each CGX mapped PF and its VFs */
int rvu_rep_install_mcam_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc, entry = 0;
	int pf, vf, numvfs;
	int err, nixlf, i;
	u8 rep;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << RVU_PFVF_PF_SHIFT;
		rvu_get_nix_blkaddr(rvu, pcifunc);
		rep = true;
		for (i = 0; i < 2; i++) {
			err = rvu_rep_install_rx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;

			err = rvu_rep_install_tx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;
			rep = false;
		}

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = pf << RVU_PFVF_PF_SHIFT |
				  ((vf + 1) & RVU_PFVF_FUNC_MASK);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			/* Skip installing rules if nixlf is not attached */
			err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
			if (err)
				continue;
			rep = true;
			for (i = 0; i < 2; i++) {
				err = rvu_rep_install_rx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;

				err = rvu_rep_install_tx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;
				rep = false;
			}
		}
	}

	/* Initialize the wq for handling REP events */
	spin_lock_init(&rvu->rep_evtq_lock);
	INIT_LIST_HEAD(&rvu->rep_evtq_head);
	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
	if (!rvu->rep_evt_wq) {
		dev_err(rvu->dev, "REP workqueue allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u32 max = rswitch->used_entries;
	int blkaddr;
	u16 entry;

	if (!rswitch->used_entries)
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	if (blkaddr < 0)
		return;

	rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
	mutex_lock(&mcam->lock);
	for (entry = 0; entry < max; entry++) {
		if (rswitch->entry2pcifunc[entry] == pcifunc)
			npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
	}
	mutex_unlock(&mcam->lock);
}

int rvu_rep_pf_init(struct rvu *rvu)
{
	u16 pcifunc = rvu->rep_pcifunc;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
	rvu_switch_enable_lbk_link(rvu, pcifunc, true);
	rvu_rep_rx_vlan_cfg(rvu, pcifunc);
	return 0;
}

int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
			     struct msg_rsp *rsp)
{
	if (req->hdr.pcifunc != rvu->rep_pcifunc)
		return 0;

	rvu->rep_mode = req->ena;

	if (!rvu->rep_mode)
		rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);

	return 0;
}

int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
				 struct get_rep_cnt_rsp *rsp)
{
	int pf, vf, numvfs, hwvf, rep = 0;
	u16 pcifunc;

	rvu->rep_pcifunc = req->hdr.pcifunc;
	rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
	rvu->rep_cnt = rsp->rep_cnt;

	rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
					 sizeof(u16), GFP_KERNEL);
	if (!rvu->rep2pfvf_map)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		pcifunc = pf << RVU_PFVF_PF_SHIFT;
		rvu->rep2pfvf_map[rep] = pcifunc;
		rsp->rep_pf_map[rep] = pcifunc;
		rep++;
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++) {
			rvu->rep2pfvf_map[rep] = pcifunc |
						 ((vf + 1) & RVU_PFVF_FUNC_MASK);
			rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
			rep++;
		}
	}
	return 0;
}