// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/bitfield.h>
#include "rvu.h"
10
/* Enable or disable the LBK link used by the switch TX MCAM rules for
 * @pcifunc, by reconfiguring its TL2 scheduler queues.
 *
 * Called with enable=true before installing a TX rule and enable=false
 * when the switch is torn down.
 */
static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct nix_hw *nix_hw;

	nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr);
	/* get_nix_hw() returns NULL for an invalid/unmapped NIX block
	 * address; bail out rather than dereference it below.
	 */
	if (!nix_hw)
		return;

	/* Enable LBK links with channel 63 for TX MCAM rule */
	rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc,
			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2], enable);
}
21
/* Install (or update) the default-unicast RX MCAM rule for @pcifunc,
 * matching on its DMAC with the given channel mask.  chan_mask == 0
 * makes the rule channel-agnostic; 0xFFF restores an exact-channel
 * match.  Returns 0 on success or a mailbox error code.
 */
static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				      u16 chan_mask)
{
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_install_flow_req req;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Nothing to do until this pcifunc has a NIXLF attached; the rule
	 * gets installed later via rvu_switch_update_rules() in that case.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	/* hdr.pcifunc stays zero: AF itself is the requester, installing
	 * the rule on behalf of the PF/VF named in .vf.
	 */
	req = (struct npc_install_flow_req) {
		.vf = pcifunc,
		.features = BIT_ULL(NPC_DMAC),
		.channel = pfvf->rx_chan_base,
		.chan_mask = chan_mask,
		.intf = pfvf->nix_rx_intf,
		.op = NIX_RX_ACTION_DEFAULT,
		.default_rule = 1,
	};

	/* Exact match on this function's own MAC address */
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
50
/* Install the TX MCAM rule for @pcifunc at MCAM index @entry: traffic
 * whose DMAC matches this function is steered to the LBK channel so
 * other PF/VFs on the system can receive it.  Returns 0 on success or a
 * mailbox error code.
 */
static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
{
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_install_flow_req req;
	struct rvu_pfvf *pfvf;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Nothing to do until this pcifunc has a NIXLF attached; the rule
	 * gets installed later via rvu_switch_update_rules() in that case.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	/* The TX rule forwards over LBK, so make sure the LBK link is up
	 * for this function first.
	 */
	rvu_switch_enable_lbk_link(rvu, pcifunc, true);

	/* Pick the LBK instance paired with this function's NIX block */
	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;

	/* hdr.pcifunc stays zero: AF itself is the requester, installing
	 * the rule on behalf of the PF/VF named in .vf.
	 */
	req = (struct npc_install_flow_req) {
		.vf = pcifunc,
		.entry = entry,
		.features = BIT_ULL(NPC_DMAC),
		.intf = pfvf->nix_tx_intf,
		.op = NIX_TX_ACTIONOP_UCAST_CHAN,
		.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN,
		.set_cntr = 1,
	};

	/* Exact match on this function's own MAC address */
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
82
/* Pre-install switch RX and TX MCAM rules for every CGX-mapped PF and
 * each of its VFs, consuming the contiguous MCAM range reserved by
 * rvu_switch_enable() starting at rswitch->start_entry.  Records which
 * pcifunc owns each TX entry in rswitch->entry2pcifunc.
 *
 * Returns 0 on success, or the first mailbox error encountered (after
 * logging it); partial installations are cleaned up by the caller.
 */
static int rvu_switch_install_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc, entry = 0;
	int pf, vf, numvfs;
	int err;

	/* NOTE(review): pf == 0 is skipped — presumably the AF itself;
	 * confirm against the RVU PF numbering convention.
	 */
	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		/* pcifunc encodes the PF number above bit 10; the low
		 * 10 bits (VF field) are zero for the PF itself.
		 */
		pcifunc = pf << 10;
		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
		 * address and NIX RX and TX interfaces for a pcifunc.
		 * Generally it is called during attach call of a pcifunc but it
		 * is called here since we are pre-installing rules before
		 * nixlfs are attached
		 */
		rvu_get_nix_blkaddr(rvu, pcifunc);

		/* MCAM RX rule for a PF/VF already exists as default unicast
		 * rules installed by AF. Hence change the channel in those
		 * rules to ignore channel so that packets with the required
		 * DMAC received from LBK(by other PF/VFs in system) or from
		 * external world (from wire) are accepted.
		 */
		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
		if (err) {
			dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
		if (err) {
			dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		/* Remember which pcifunc owns this MCAM entry so
		 * rvu_switch_update_rules() can find it later.
		 */
		rswitch->entry2pcifunc[entry++] = pcifunc;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			/* VF field of pcifunc is 1-based, hence vf + 1 */
			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
			if (err) {
				dev_err(rvu->dev,
					"RX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			err = rvu_switch_install_tx_rule(rvu, pcifunc,
							 start + entry);
			if (err) {
				dev_err(rvu->dev,
					"TX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			rswitch->entry2pcifunc[entry++] = pcifunc;
		}
	}

	return 0;
}
155
rvu_switch_enable(struct rvu * rvu)156 void rvu_switch_enable(struct rvu *rvu)
157 {
158 struct npc_mcam_alloc_entry_req alloc_req = { 0 };
159 struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
160 struct npc_delete_flow_req uninstall_req = { 0 };
161 struct npc_delete_flow_rsp uninstall_rsp = { 0 };
162 struct npc_mcam_free_entry_req free_req = { 0 };
163 struct rvu_switch *rswitch = &rvu->rswitch;
164 struct msg_rsp rsp;
165 int ret;
166
167 alloc_req.contig = true;
168 alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
169 ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
170 &alloc_rsp);
171 if (ret) {
172 dev_err(rvu->dev,
173 "Unable to allocate MCAM entries\n");
174 goto exit;
175 }
176
177 if (alloc_rsp.count != alloc_req.count) {
178 dev_err(rvu->dev,
179 "Unable to allocate %d MCAM entries, got %d\n",
180 alloc_req.count, alloc_rsp.count);
181 goto free_entries;
182 }
183
184 rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
185 GFP_KERNEL);
186 if (!rswitch->entry2pcifunc)
187 goto free_entries;
188
189 rswitch->used_entries = alloc_rsp.count;
190 rswitch->start_entry = alloc_rsp.entry;
191
192 ret = rvu_switch_install_rules(rvu);
193 if (ret)
194 goto uninstall_rules;
195
196 return;
197
198 uninstall_rules:
199 uninstall_req.start = rswitch->start_entry;
200 uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
201 rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
202 kfree(rswitch->entry2pcifunc);
203 free_entries:
204 free_req.all = 1;
205 rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
206 exit:
207 return;
208 }
209
rvu_switch_disable(struct rvu * rvu)210 void rvu_switch_disable(struct rvu *rvu)
211 {
212 struct npc_delete_flow_req uninstall_req = { 0 };
213 struct npc_delete_flow_rsp uninstall_rsp = { 0 };
214 struct npc_mcam_free_entry_req free_req = { 0 };
215 struct rvu_switch *rswitch = &rvu->rswitch;
216 struct rvu_hwinfo *hw = rvu->hw;
217 int pf, vf, numvfs;
218 struct msg_rsp rsp;
219 u16 pcifunc;
220 int err;
221
222 if (!rswitch->used_entries)
223 return;
224
225 for (pf = 1; pf < hw->total_pfs; pf++) {
226 if (!is_pf_cgxmapped(rvu, pf))
227 continue;
228
229 pcifunc = pf << 10;
230 err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
231 if (err)
232 dev_err(rvu->dev,
233 "Reverting RX rule for PF%d failed(%d)\n",
234 pf, err);
235
236 /* Disable LBK link */
237 rvu_switch_enable_lbk_link(rvu, pcifunc, false);
238
239 rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
240 for (vf = 0; vf < numvfs; vf++) {
241 pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
242 err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
243 if (err)
244 dev_err(rvu->dev,
245 "Reverting RX rule for PF%dVF%d failed(%d)\n",
246 pf, vf, err);
247
248 rvu_switch_enable_lbk_link(rvu, pcifunc, false);
249 }
250 }
251
252 uninstall_req.start = rswitch->start_entry;
253 uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
254 free_req.all = 1;
255 rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
256 rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
257 rswitch->used_entries = 0;
258 kfree(rswitch->entry2pcifunc);
259 }
260
/* Re-install the switch RX/TX rules for @pcifunc once its NIXLF has
 * been initialized (the install helpers are no-ops before that).  Does
 * nothing if switching is disabled or the pcifunc owns no reserved
 * MCAM entry.
 */
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u32 used = rswitch->used_entries;
	u16 idx = 0;

	if (!used)
		return;

	/* Locate the TX MCAM entry reserved for this pcifunc during
	 * rvu_switch_install_rules().
	 */
	while (idx < used && rswitch->entry2pcifunc[idx] != pcifunc)
		idx++;

	if (idx >= used)
		return;

	rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + idx);
	rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
}
281