1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
3
4 #include <linux/etherdevice.h>
5 #include <linux/pci.h>
6
7 #include "wx_type.h"
8 #include "wx_hw.h"
9 #include "wx_mbx.h"
10 #include "wx_sriov.h"
11
wx_vf_configuration(struct pci_dev * pdev,int event_mask)12 static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
13 {
14 bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
15 struct wx *wx = pci_get_drvdata(pdev);
16 u32 vfn = WX_VF_NUM_GET(event_mask);
17
18 if (enable)
19 eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
20 }
21
/**
 * wx_alloc_vf_macvlans - build the pool of MACVLAN filter slots for VFs
 * @wx: pointer to hardware structure
 * @num_vfs: number of VFs being enabled
 *
 * Carves the RAR entries left over after the PF MACVLANs and the per-VF
 * station addresses into a free list of MACVLAN filter slots that VFs
 * may later claim via mailbox requests.
 *
 * Return: 0 on success, -EINVAL when no RAR entries remain, -ENOMEM on
 * allocation failure.
 */
static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
{
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	/* Initialize list of VF macvlans */
	INIT_LIST_HEAD(&wx->vf_mvs.mvlist);

	num_vf_macvlans = wx->mac.num_rar_entries -
			  (WX_MAX_PF_MACVLANS + 1 + num_vfs);
	/* also reject a negative count (too few RAR entries), which would
	 * otherwise be handed to the allocator as a huge unsigned size
	 */
	if (num_vf_macvlans <= 0)
		return -EINVAL;

	mv_list = kzalloc_objs(struct vf_macvlans, num_vf_macvlans);
	if (!mv_list)
		return -ENOMEM;

	for (i = 0; i < num_vf_macvlans; i++) {
		mv_list[i].vf = -1;
		mv_list[i].free = true;
		list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
	}
	wx->mv_list = mv_list;

	return 0;
}
48
/* Tear down all PF-side SR-IOV bookkeeping: release the per-VF and
 * MACVLAN state, restore the default pool and clear the SR-IOV flags.
 * num_vfs is zeroed first so no other path dereferences vfinfo while
 * it is being freed.
 */
static void wx_sriov_clear_data(struct wx *wx)
{
	/* set num VFs to 0 to prevent access to vfinfo */
	wx->num_vfs = 0;

	/* free VF control structures */
	kfree(wx->vfinfo);
	wx->vfinfo = NULL;

	/* free macvlan list */
	kfree(wx->mv_list);
	wx->mv_list = NULL;

	/* set default pool back to 0 */
	wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
	wx->ring_feature[RING_F_VMDQ].offset = 0;

	clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
	clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	/* Disable VMDq flag so device will be set in NM mode */
	if (wx->ring_feature[RING_F_VMDQ].limit == 1)
		clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
}
72
/**
 * __wx_enable_sriov - set up PF-side state for @num_vfs VFs
 * @wx: pointer to hardware structure
 * @num_vfs: number of VFs to enable
 *
 * Allocates the per-VF bookkeeping and the MACVLAN pool, switches the
 * device into VMDq/VEB mode and programs the pool-count field.
 *
 * Return: 0 on success or a negative errno.  On MACVLAN allocation
 * failure the per-VF array is released again so the caller does not
 * leak it (the original code returned with wx->vfinfo still allocated).
 */
static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
{
	int i, ret = 0;
	u32 value = 0;

	set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);

	/* em-class parts with all 7 VFs enabled share IRQ vectors,
	 * presumably because the MSI-X budget is exhausted — TODO confirm
	 */
	if (num_vfs == 7 && wx->mac.type == wx_mac_em)
		set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);

	/* Enable VMDq flag so device will be set in VM mode */
	set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
	if (!wx->ring_feature[RING_F_VMDQ].limit)
		wx->ring_feature[RING_F_VMDQ].limit = 1;
	/* PF pools start right after the VF pools */
	wx->ring_feature[RING_F_VMDQ].offset = num_vfs;

	wx->vfinfo = kzalloc_objs(struct vf_data_storage, num_vfs);
	if (!wx->vfinfo)
		return -ENOMEM;

	ret = wx_alloc_vf_macvlans(wx, num_vfs);
	if (ret) {
		/* don't leak the per-VF array on a partial setup */
		kfree(wx->vfinfo);
		wx->vfinfo = NULL;
		return ret;
	}

	/* Initialize default switching mode VEB */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		wx->vfinfo[i].spoofchk_enabled = true;
		wx->vfinfo[i].link_enable = true;
		/* untrust all VFs */
		wx->vfinfo[i].trusted = false;
		/* set the default xcast mode */
		wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
	}

	/* program how many pools the hardware splits its queues into */
	if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		value = WX_CFG_PORT_CTL_NUM_VT_8;
	} else {
		if (num_vfs < 32)
			value = WX_CFG_PORT_CTL_NUM_VT_32;
		else
			value = WX_CFG_PORT_CTL_NUM_VT_64;
	}
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_NUM_VT_MASK,
	      value);

	/* Disable RSC when in SR-IOV mode */
	clear_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
	clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);

	return ret;
}
129
wx_sriov_reinit(struct wx * wx)130 static void wx_sriov_reinit(struct wx *wx)
131 {
132 rtnl_lock();
133 wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
134 rtnl_unlock();
135 }
136
/* Disable SR-IOV on the PCI device and release the PF-side VF state.
 * If VFs are still assigned to guests, the PCI-level disable is skipped
 * (only a warning is logged), but the local bookkeeping is cleared
 * regardless.
 */
void wx_disable_sriov(struct wx *wx)
{
	if (!pci_vfs_assigned(wx->pdev))
		pci_disable_sriov(wx->pdev);
	else
		wx_err(wx, "Unloading driver while VFs are assigned.\n");

	/* clear flags and free allocated data */
	wx_sriov_clear_data(wx);
}
EXPORT_SYMBOL(wx_disable_sriov);
148
/* Enable @num_vfs SR-IOV virtual functions on @dev.
 * Returns @num_vfs on success or a negative errno.
 */
static int wx_pci_sriov_enable(struct pci_dev *dev,
			       int num_vfs)
{
	struct wx *wx = pci_get_drvdata(dev);
	int err = 0, i;

	/* enabling SR-IOV reshapes the RX queue layout, which would
	 * silently invalidate a user-configured RSS indirection table
	 */
	if (netif_is_rxfh_configured(wx->netdev)) {
		wx_err(wx, "Cannot enable SR-IOV while RXFH is configured\n");
		wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
		return -EBUSY;
	}

	err = __wx_enable_sriov(wx, num_vfs);
	if (err)
		return err;

	wx->num_vfs = num_vfs;
	/* clear any stale per-VF MAC addresses */
	for (i = 0; i < wx->num_vfs; i++)
		wx_vf_configuration(dev, (i | WX_VF_ENABLE));

	/* reset before enabling SRIOV to avoid mailbox issues */
	wx_sriov_reinit(wx);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
		goto err_out;
	}

	return num_vfs;
err_out:
	/* unwind everything __wx_enable_sriov() set up */
	wx_sriov_clear_data(wx);
	return err;
}
183
/* Disable SR-IOV on @dev and rebuild the PF queue layout.
 * Returns 0 on success or -EBUSY while an RSS indirection table is
 * user-configured (teardown would invalidate it).
 */
static int wx_pci_sriov_disable(struct pci_dev *dev)
{
	struct wx *wx = pci_get_drvdata(dev);

	if (netif_is_rxfh_configured(wx->netdev)) {
		wx_err(wx, "Cannot disable SR-IOV while RXFH is configured\n");
		wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
		return -EBUSY;
	}

	wx_disable_sriov(wx);
	/* restore the non-SR-IOV queue layout */
	wx_sriov_reinit(wx);

	return 0;
}
199
wx_pci_sriov_configure(struct pci_dev * pdev,int num_vfs)200 int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
201 {
202 struct wx *wx = pci_get_drvdata(pdev);
203 int err;
204
205 if (!num_vfs) {
206 if (!pci_vfs_assigned(pdev))
207 return wx_pci_sriov_disable(pdev);
208
209 wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
210 return -EBUSY;
211 }
212
213 err = wx_pci_sriov_enable(pdev, num_vfs);
214 if (err)
215 return err;
216
217 return num_vfs;
218 }
219 EXPORT_SYMBOL(wx_pci_sriov_configure);
220
/* Install @mac_addr as the station address for @vf, replacing whatever
 * filter was there before.  On failure the cached address is zeroed
 * since the old filter has already been removed.
 */
static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
{
	u8 candidate[ETH_ALEN];
	int err;

	/* the filter API wants a mutable buffer, so work on a copy */
	ether_addr_copy(candidate, mac_addr);

	/* drop the old per-VF filter before installing the new one */
	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);

	err = wx_add_mac_filter(wx, candidate, vf);
	if (err < 0) {
		eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
		return err;
	}

	ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
	return err;
}
236
/* Program @vf's L2 control: always accept broadcast, and accept
 * untagged packets only when @aupe is set.
 */
static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
{
	u32 val = rd32(wx, WX_PSR_VM_L2CTL(vf));

	val |= WX_PSR_VM_L2CTL_BAM;
	val &= ~WX_PSR_VM_L2CTL_AUPE;
	if (aupe)
		val |= WX_PSR_VM_L2CTL_AUPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), val);
}
248
/* Enable default VLAN tag insertion for @vf: every egress frame is
 * tagged with @vid at priority @qos.
 */
static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
{
	u32 ins;

	ins = WX_TDM_VLAN_INS_VLANA_DEFAULT;
	ins |= (u32)qos << VLAN_PRIO_SHIFT;
	ins |= vid;

	wr32(wx, WX_TDM_VLAN_INS(vf), ins);
}
256
/* Add (or remove, when @add is 0) VLAN @vid to @vf's filter table. */
static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
{
	/* removing VLAN 0 is a no-op */
	if (!add && !vid)
		return 0;

	return wx_set_vfta(wx, vid, vf, (bool)add);
}
264
/* Toggle VLAN anti-spoofing for @vf by flipping its bit in the
 * per-32-VF anti-spoof register bank.
 */
static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 val;

	val = rd32(wx, WX_TDM_VLAN_AS(index));
	val &= ~BIT(vf_bit);
	if (enable)
		val |= BIT(vf_bit);
	wr32(wx, WX_TDM_VLAN_AS(index), val);
}
277
/* Set (@qde == 1) or clear the drop-enable bit for every RX queue
 * belonging to @vf.  The queue range is assumed to fit inside a single
 * 32-bit WX_RDM_PF_QDE register, as in the original code.
 */
static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u32 n = vf * q_per_pool / 32;
	u32 reg, i;

	reg = rd32(wx, WX_RDM_PF_QDE(n));
	for (i = (vf * q_per_pool - n * 32);
	     i < ((vf + 1) * q_per_pool - n * 32);
	     i++) {
		if (qde == 1)
			reg |= BIT(i);
		else
			/* was "reg &= qde << i", which cleared the whole
			 * register instead of only this queue's bit
			 */
			reg &= ~BIT(i);
	}

	wr32(wx, WX_RDM_PF_QDE(n), reg);
}
297
/* Disable default VLAN tag insertion for @vf's transmit queues. */
static void wx_clear_vmvir(struct wx *wx, u32 vf)
{
	wr32(wx, WX_TDM_VLAN_INS(vf), 0);
}
302
wx_ping_vf(struct wx * wx,int vf)303 static void wx_ping_vf(struct wx *wx, int vf)
304 {
305 u32 ping = WX_PF_CONTROL_MSG;
306
307 if (wx->vfinfo[vf].clear_to_send)
308 ping |= WX_VT_MSGTYPE_CTS;
309 wx_write_mbx_pf(wx, &ping, 1, vf);
310 }
311
/* Apply @vf's link_enable state to the hardware TX/RX enable bits.
 * Note the asymmetry: enabling sets the bit in WX_TDM_VF_TE/WX_RDM_VF_RE
 * directly, while disabling writes the bit to the separate *_CLR
 * registers.  Writes are skipped when the hardware already matches.
 */
static void wx_set_vf_rx_tx(struct wx *wx, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;

	reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
	reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));

	if (wx->vfinfo[vf].link_enable) {
		reg_req_tx = reg_cur_tx | BIT(vf_bit);
		reg_req_rx = reg_cur_rx | BIT(vf_bit);
		/* Enable particular VF */
		if (reg_cur_tx != reg_req_tx)
			wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
		if (reg_cur_rx != reg_req_rx)
			wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
	} else {
		reg_req_tx = BIT(vf_bit);
		reg_req_rx = BIT(vf_bit);
		/* Disable particular VF */
		if (reg_cur_tx & reg_req_tx)
			wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
		if (reg_cur_rx & reg_req_rx)
			wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
	}
}
338
/* Mailbox handler for WX_VF_GET_QUEUES: report the VF's queue counts,
 * whether the PF inserts a transparent VLAN tag, and the default queue.
 */
static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

	msgbuf[WX_VF_TX_QUEUES] = q_per_pool;
	msgbuf[WX_VF_RX_QUEUES] = q_per_pool;
	msgbuf[WX_VF_TRANS_VLAN] = (wx->vfinfo[vf].pf_vlan ||
				    wx->vfinfo[vf].pf_qos) ? 1 : 0;
	/* notify VF of default queue (traffic class 0) */
	msgbuf[WX_VF_DEF_QUEUE] = 0;

	return 0;
}
357
/* Restore @vf's filters and offloads to their PF-assigned defaults.
 * Called when the VF requests a reset, before the reply is sent.
 */
static void wx_vf_reset_event(struct wx *wx, u16 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(wx->netdev);

	/* add PF assigned VLAN */
	wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults; accept untagged frames only when
	 * no PF VLAN is forced on this VF
	 */
	wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		wx_clear_vmvir(wx, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     vfinfo->pf_qos, vf);
		else
			/* with TCs active, fall back to the port default
			 * user priority
			 */
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     wx->default_up, vf);
	}

	/* reset multicast table array for vf */
	wx->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	wx_set_rx_mode(wx->netdev);

	/* drop the VF's station address filter */
	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	/* reset VF api back to unknown */
	wx->vfinfo[vf].vf_api = wx_mbox_api_null;
}
391
/* Handle a WX_VF_RESET mailbox message: reset the VF's filters,
 * reinstall its MAC, re-enable its queues and reply with the permanent
 * MAC address plus the multicast filter type.
 */
static void wx_vf_reset_msg(struct wx *wx, u16 vf)
{
	const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
	struct net_device *dev = wx->netdev;
	u32 msgbuf[5] = {0, 0, 0, 0, 0};
	/* the reply MAC address lives in words 1..2 of the message */
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 reg = 0, index, vf_bit;
	int pf_max_frame;

	/* reset the filters for the device */
	wx_vf_reset_event(wx, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		wx_set_vf_mac(wx, vf, vf_mac);

	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	/* force drop enable for all VF Rx queues */
	wx_write_qde(wx, vf, 1);

	/* set transmit and receive for vf */
	wx_set_vf_rx_tx(wx, vf);

	pf_max_frame = dev->mtu + ETH_HLEN;

	/* NOTE(review): when the PF MTU exceeds the standard frame size,
	 * the VF's receive enable is cleared here — presumably held off
	 * until the VF negotiates a compatible frame size; confirm intent.
	 */
	if (pf_max_frame > ETH_FRAME_LEN)
		reg = BIT(vf_bit);
	wr32(wx, WX_RDM_VFRE_CLR(index), reg);

	/* enable VF mailbox for further messages */
	wx->vfinfo[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = WX_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_err(wx, "VF %d has no MAC address assigned", vf);
	}

	msgbuf[3] = wx->mac.mc_filter_type;
	wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
}
439
/* Mailbox handler for WX_VF_SET_MAC_ADDR: let the VF set its own MAC
 * unless the PF has already pinned one.
 */
static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
{
	const u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int ret;

	if (!is_valid_ether_addr(new_mac)) {
		wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
		return -EINVAL;
	}

	/* once the PF assigned a MAC, the VF may not replace it */
	if (wx->vfinfo[vf].pf_set_mac &&
	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
		wx_err(wx,
		       "VF %d attempt to set a MAC but it already had a MAC.",
		       vf);
		return -EBUSY;
	}

	ret = wx_set_vf_mac(wx, vf, new_mac);

	/* wx_set_vf_mac() may return a positive value on success;
	 * callers only care about failure vs success
	 */
	return ret < 0 ? ret : 0;
}
464
/* Mailbox handler for WX_VF_SET_MULTICAST: cache the VF's multicast
 * hash list (count in the MSGINFO field of msgbuf[0], 16-bit hashes
 * packed from msgbuf[1] on), merge it into the shadow MTA and program
 * the hardware table, then allow the VF to receive MTA matches (ROMPE).
 */
static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
		      >> WX_VT_MSGINFO_SHIFT;
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	u32 vector_bit, vector_reg, mta_reg, i;
	u16 *hash_list = (u16 *)&msgbuf[1];

	/* only so many hash values supported */
	entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
	vfinfo->num_vf_mc_hashes = entries;

	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
		vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
		/* update the shadow copy so later MTA rebuilds keep it */
		mta_reg = wx->mac.mta_shadow[vector_reg];
		mta_reg |= BIT(vector_bit);
		wx->mac.mta_shadow[vector_reg] = mta_reg;
		wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
	}
	vmolr |= WX_PSR_VM_L2CTL_ROMPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}
492
/* Mailbox handler for WX_VF_SET_LPE: re-enable the VF's receive path
 * and grow the MAC watchdog timeout when the requested max frame size
 * exceeds what the current timeout covers.
 * NOTE(review): the VF-supplied max_frame is not range-checked here —
 * confirm it is bounded elsewhere.
 */
static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
{
	u32 index, vf_bit, vfre;
	u32 max_frs, reg_val;

	/* determine VF receive enable location */
	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	vfre = rd32(wx, WX_RDM_VF_RE(index));
	vfre |= BIT(vf_bit);
	wr32(wx, WX_RDM_VF_RE(index), vfre);

	/* requested size in KB units, compared against the current
	 * watchdog timeout field
	 */
	max_frs = DIV_ROUND_UP(max_frame, 1024);
	reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
	if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
		wr32(wx, WX_MAC_WDG_TIMEOUT,
		     max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
}
513
/* Locate the VLVF table index holding @vlan.
 * Returns the index, or -EINVAL when the VLAN is not present.
 */
static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* entry 0 is reserved for VLAN 0 */
	if (!vlan)
		return 0;

	for (idx = 1; idx < WX_PSR_VLAN_SWC_ENTRIES; idx++) {
		/* select the entry, then read it back */
		wr32(wx, WX_PSR_VLAN_SWC_IDX, idx);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		if ((vlvf & VLAN_VID_MASK) == vlan)
			return idx;
	}

	return -EINVAL;
}
537
/* Manage @vf's MACVLAN filters from the shared pool.
 * @index <= 1 first releases every entry the VF currently holds;
 * @index == 0 stops there, otherwise a free pool entry is claimed for
 * @mac_addr.  Returns the wx_add_mac_filter() result, 0, or -ENOSPC
 * when the pool is exhausted.
 */
static int wx_set_vf_macvlan(struct wx *wx,
			     u16 vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry = NULL, *iter;
	int retval;

	if (index <= 1) {
		/* release everything this VF holds before adding more */
		list_for_each_entry(iter, &wx->vf_mvs.mvlist, mvlist) {
			if (iter->vf == vf) {
				iter->vf = -1;
				iter->free = true;
				iter->is_macvlan = false;
				wx_del_mac_filter(wx, iter->vf_macvlan, vf);
			}
		}
	}

	/* index 0 only clears; nothing new to add */
	if (!index)
		return 0;

	/* grab the first free entry from the shared pool */
	list_for_each_entry(iter, &wx->vf_mvs.mvlist, mvlist) {
		if (iter->free) {
			entry = iter;
			break;
		}
	}

	if (!entry)
		return -ENOSPC;

	retval = wx_add_mac_filter(wx, mac_addr, vf);
	if (retval >= 0) {
		entry->free = false;
		entry->is_macvlan = true;
		entry->vf = vf;
		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
	}

	return retval;
}
580
/* Mailbox handler for WX_VF_SET_VLAN: add or remove a VLAN filter for
 * @vf.  msgbuf[0] carries the add/remove flag in the MSGINFO field and
 * msgbuf[1] the VLAN id.
 */
static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
	int ret;

	/* NOTE(review): the count is adjusted before the outcome of
	 * wx_set_vf_vlan() is known, so a failed add still bumps
	 * vlan_count — confirm this is intentional.
	 */
	if (add)
		wx->vfinfo[vf].vlan_count++;
	else if (wx->vfinfo[vf].vlan_count)
		wx->vfinfo[vf].vlan_count--;

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && wx->netdev->flags & IFF_PROMISC)
		wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));

	ret = wx_set_vf_vlan(wx, add, vid, vf);
	if (!ret && wx->vfinfo[vf].spoofchk_enabled)
		wx_set_vlan_anti_spoofing(wx, true, vf);

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && wx->netdev->flags & IFF_PROMISC) {
		u32 bits = 0, vlvf;
		int reg_ndx;

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return -ENOSPC;
		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		if (VMDQ_P(0) < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~BIT(VMDQ_P(0));
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~BIT(VMDQ_P(0) % 32);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
	}

	return 0;
}
638
/* Mailbox handler for WX_VF_SET_MACVLAN: manage a VF-requested extra
 * unicast filter.  The MSGINFO field of msgbuf[0] carries the filter
 * index; the MAC follows in msgbuf[1..].
 */
static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int index, err;

	index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;

	/* a PF-assigned MAC locks out VF-managed MACVLAN filters */
	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
		wx_err(wx, "VF %d request MACVLAN filter but is denied\n", vf);
		return -EINVAL;
	}

	/* An non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
			return -EINVAL;
		}
		/* extra MAC filters would trip anti-spoofing and cause
		 * false positives, so turn it off
		 */
		if (wx->vfinfo[vf].spoofchk_enabled)
			wx_set_vf_spoofchk(wx->netdev, vf, false);
	}

	err = wx_set_vf_macvlan(wx, vf, index, new_mac);
	if (err == -ENOSPC)
		wx_err(wx,
		       "VF %d request MACVLAN filter but there is no space\n",
		       vf);

	return err < 0 ? err : 0;
}
674
/* Mailbox handler for WX_VF_API_NEGOTIATE: accept only the single
 * mailbox API version the PF implements.
 */
static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	if (api != wx_mbox_api_13) {
		wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
		return -EINVAL;
	}

	wx->vfinfo[vf].vf_api = api;
	return 0;
}
688
/* Mailbox handler for WX_VF_GET_LINK_STATE: report whether the PF
 * currently allows this VF's link to be up.
 */
static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
{
	msgbuf[1] = wx->vfinfo[vf].link_enable;

	return 0;
}
695
/* Mailbox handler for WX_VF_GET_FW_VERSION: parse the firmware version
 * out of the hex eeprom_id string and return it in msgbuf[1].
 */
static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
{
	unsigned long fw_version = 0;

	if (kstrtoul(wx->eeprom_id, 16, &fw_version))
		return -EOPNOTSUPP;

	msgbuf[1] = fw_version;
	return 0;
}
708
/* Mailbox handler for WX_VF_UPDATE_XCAST_MODE: map the requested
 * multicast/promiscuous mode onto the per-VF L2 control bits.
 * NOTE(review): PROMISC is granted without consulting
 * wx->vfinfo[vf].trusted — confirm untrusted VFs are meant to be
 * allowed promiscuous mode.
 */
static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int xcast_mode = msgbuf[1];
	u32 vmolr, disable, enable;

	/* nothing to do if the mode is unchanged */
	if (wx->vfinfo[vf].xcast_mode == xcast_mode)
		return 0;

	switch (xcast_mode) {
	case WXVF_XCAST_MODE_NONE:
		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = 0;
		break;
	case WXVF_XCAST_MODE_MULTI:
		disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
		break;
	case WXVF_XCAST_MODE_ALLMULTI:
		disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE;
		break;
	case WXVF_XCAST_MODE_PROMISC:
		disable = 0;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			 WX_PSR_VM_L2CTL_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);

	wx->vfinfo[vf].xcast_mode = xcast_mode;
	/* echo the accepted mode back to the VF */
	msgbuf[1] = xcast_mode;

	return 0;
}
754
/* Fetch one mailbox message from @vf and dispatch it to the matching
 * handler, then report the result back with an ACK or NACK.  Messages
 * arriving before the VF completed a reset are rejected with a NACK.
 */
static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
{
	u16 mbx_size = WX_VXMAILBOX_SIZE;
	u32 msgbuf[WX_VXMAILBOX_SIZE];
	int retval;

	retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
	if (retval) {
		wx_err(wx, "Error receiving message from VF\n");
		return;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
		return;

	/* reset handling sends its own reply, so return early */
	if (msgbuf[0] == WX_VF_RESET) {
		wx_vf_reset_msg(wx, vf);
		return;
	}

	/* until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!wx->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
		return;
	}

	/* the low 16 bits of msgbuf[0] identify the message type */
	switch ((msgbuf[0] & U16_MAX)) {
	case WX_VF_SET_MAC_ADDR:
		retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
		break;
	case WX_VF_SET_MULTICAST:
		wx_set_vf_multicasts(wx, msgbuf, vf);
		retval = 0;
		break;
	case WX_VF_SET_VLAN:
		retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_SET_LPE:
		wx_set_vf_lpe(wx, msgbuf[1], vf);
		retval = 0;
		break;
	case WX_VF_SET_MACVLAN:
		retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_API_NEGOTIATE:
		retval = wx_negotiate_vf_api(wx, msgbuf, vf);
		break;
	case WX_VF_GET_QUEUES:
		retval = wx_get_vf_queues(wx, msgbuf, vf);
		break;
	case WX_VF_GET_LINK_STATE:
		retval = wx_get_vf_link_state(wx, msgbuf, vf);
		break;
	case WX_VF_GET_FW_VERSION:
		retval = wx_get_fw_version(wx, msgbuf, vf);
		break;
	case WX_VF_UPDATE_XCAST_MODE:
		retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
		break;
	case WX_VF_BACKUP:
		/* intentionally ignored; retval stays 0 from the read */
		break;
	default:
		wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;

	msgbuf[0] |= WX_VT_MSGTYPE_CTS;

	wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
}
835
/* An ack from a VF that never completed its reset is answered with a
 * NACK; acks from fully reset VFs need no reply.
 */
static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
{
	u32 nack = WX_VT_MSGTYPE_NACK;

	if (wx->vfinfo[vf].clear_to_send)
		return;

	wx_write_mbx_pf(wx, &nack, 1, vf);
}
844
wx_msg_task(struct wx * wx)845 void wx_msg_task(struct wx *wx)
846 {
847 u16 vf;
848
849 for (vf = 0; vf < wx->num_vfs; vf++) {
850 /* process any reset requests */
851 if (!wx_check_for_rst_pf(wx, vf))
852 wx_vf_reset_event(wx, vf);
853
854 /* process any messages pending */
855 if (!wx_check_for_msg_pf(wx, vf))
856 wx_rcv_msg_from_vf(wx, vf);
857
858 /* process any acks */
859 if (!wx_check_for_ack_pf(wx, vf))
860 wx_rcv_ack_from_vf(wx, vf);
861 }
862 }
863 EXPORT_SYMBOL(wx_msg_task);
864
/* Clear the transmit/receive enable bits for all VFs.  Two 32-bit
 * clear registers are written on parts flagged WX_FLAG_MULTI_64_FUNC,
 * one otherwise.
 */
void wx_disable_vf_rx_tx(struct wx *wx)
{
	wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
	wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
		wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
	}
}
EXPORT_SYMBOL(wx_disable_vf_rx_tx);
875
/* Push the PF's link state to every VF over the mailbox.  When the
 * link is up, msgbuf[1] carries the link-up flag in bit 0 and the
 * speed in the remaining bits; a "netdev not running" flag is added
 * while notify_down is set.
 */
void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
{
	u32 msgbuf[2] = {0, 0};
	u16 i;

	if (!wx->num_vfs)
		return;
	msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
	if (link_up)
		msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
	if (wx->notify_down)
		msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
	for (i = 0; i < wx->num_vfs; i++) {
		/* only advertise clear-to-send for fully reset VFs */
		if (wx->vfinfo[i].clear_to_send)
			msgbuf[0] |= WX_VT_MSGTYPE_CTS;
		wx_write_mbx_pf(wx, msgbuf, 2, i);
	}
}
EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
895
wx_set_vf_link_state(struct wx * wx,int vf,int state)896 static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
897 {
898 wx->vfinfo[vf].link_state = state;
899 switch (state) {
900 case IFLA_VF_LINK_STATE_AUTO:
901 if (netif_running(wx->netdev))
902 wx->vfinfo[vf].link_enable = true;
903 else
904 wx->vfinfo[vf].link_enable = false;
905 break;
906 case IFLA_VF_LINK_STATE_ENABLE:
907 wx->vfinfo[vf].link_enable = true;
908 break;
909 case IFLA_VF_LINK_STATE_DISABLE:
910 wx->vfinfo[vf].link_enable = false;
911 break;
912 }
913 /* restart the VF */
914 wx->vfinfo[vf].clear_to_send = false;
915 wx_ping_vf(wx, vf);
916
917 wx_set_vf_rx_tx(wx, vf);
918 }
919
wx_set_all_vfs(struct wx * wx)920 void wx_set_all_vfs(struct wx *wx)
921 {
922 int i;
923
924 for (i = 0; i < wx->num_vfs; i++)
925 wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
926 }
927 EXPORT_SYMBOL(wx_set_all_vfs);
928