// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/bitfield.h>
#include "rvu.h"

void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct nix_hw *nix_hw;

	nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr);
	/* Enable LBK links with channel 63 for TX MCAM rule */
	rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc,
			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2], enable);
}

static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				      u16 chan_mask)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* If the pcifunc is not initialized then nothing to do.
	 * This same function will be called again via rvu_switch_update_rules
	 * after pcifunc is initialized.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = pfvf->rx_chan_base;
	req.chan_mask = chan_mask;
	req.intf = pfvf->nix_rx_intf;
	req.op = NIX_RX_ACTION_DEFAULT;
	req.default_rule = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* If the pcifunc is not initialized then nothing to do.
	 * This same function will be called again via rvu_switch_update_rules
	 * after pcifunc is initialized.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	rvu_switch_enable_lbk_link(rvu, pcifunc, true);

	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.entry = entry;
	req.features = BIT_ULL(NPC_DMAC);
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

static int rvu_switch_install_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc, entry = 0;
	int pf, vf, numvfs;
	int err;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << 10;
		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
		 * address and NIX RX and TX interfaces for a pcifunc.
		 * Generally it is called during attach call of a pcifunc but it
		 * is called here since we are pre-installing rules before
		 * nixlfs are attached
		 */
		rvu_get_nix_blkaddr(rvu, pcifunc);

		/* MCAM RX rule for a PF/VF already exists as default unicast
		 * rules installed by AF. Hence change the channel in those
		 * rules to ignore channel so that packets with the required
		 * DMAC received from LBK(by other PF/VFs in system) or from
		 * external world (from wire) are accepted.
		 */
		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
		if (err) {
			dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
		if (err) {
			dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		rswitch->entry2pcifunc[entry++] = pcifunc;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
			if (err) {
				dev_err(rvu->dev,
					"RX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			err = rvu_switch_install_tx_rule(rvu, pcifunc,
							 start + entry);
			if (err) {
				dev_err(rvu->dev,
					"TX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			rswitch->entry2pcifunc[entry++] = pcifunc;
		}
	}

	return 0;
}

void rvu_switch_enable(struct rvu *rvu)
{
	struct npc_mcam_alloc_entry_req alloc_req = { 0 };
	struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
	struct npc_mcam_free_entry_req free_req = { 0 };
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct msg_rsp rsp;
	int ret;

	alloc_req.contig = true;
	alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
	if (rvu->rep_mode)
		alloc_req.count = alloc_req.count * 4;
	ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
						    &alloc_rsp);
	if (ret) {
		dev_err(rvu->dev,
			"Unable to allocate MCAM entries\n");
		goto exit;
	}

	if (alloc_rsp.count != alloc_req.count) {
		dev_err(rvu->dev,
			"Unable to allocate %d MCAM entries, got %d\n",
			alloc_req.count, alloc_rsp.count);
		goto free_entries;
	}

	rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
					 GFP_KERNEL);
	if (!rswitch->entry2pcifunc)
		goto free_entries;

	rswitch->used_entries = alloc_rsp.count;
	rswitch->start_entry = alloc_rsp.entry;

	if (rvu->rep_mode) {
		rvu_rep_pf_init(rvu);
		ret = rvu_rep_install_mcam_rules(rvu);
	} else {
		ret = rvu_switch_install_rules(rvu);
	}
	if (ret)
		goto uninstall_rules;

	return;

uninstall_rules:
	uninstall_req.start = rswitch->start_entry;
	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
	kfree(rswitch->entry2pcifunc);
free_entries:
	free_req.all = 1;
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
exit:
	return;
}

void rvu_switch_disable(struct rvu *rvu)
{
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
	struct npc_mcam_free_entry_req free_req = { 0 };
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs;
	struct msg_rsp rsp;
	u16 pcifunc;
	int err;

	if (!rswitch->used_entries)
		return;

	if (rvu->rep_mode)
		goto free_ents;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << 10;
		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
		if (err)
			dev_err(rvu->dev,
				"Reverting RX rule for PF%d failed(%d)\n",
				pf, err);

		/* Disable LBK link */
		rvu_switch_enable_lbk_link(rvu, pcifunc, false);

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
			if (err)
				dev_err(rvu->dev,
					"Reverting RX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);

			rvu_switch_enable_lbk_link(rvu, pcifunc, false);
		}
	}

free_ents:
	uninstall_req.start = rswitch->start_entry;
	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
	free_req.all = 1;
	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
	rswitch->used_entries = 0;
	kfree(rswitch->entry2pcifunc);
}

void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u32 max = rswitch->used_entries;
	u16 entry;

	if (rvu->rep_mode)
		return rvu_rep_update_rules(rvu, pcifunc, ena);

	if (!rswitch->used_entries)
		return;

	for (entry = 0; entry < max; entry++) {
		if (rswitch->entry2pcifunc[entry] == pcifunc)
			break;
	}

	if (entry >= max)
		return;

	rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
	rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
}