// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_lib.h"
#include "wx_vf.h"
#include "wx_vf_lib.h"

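/**
 * wx_write_eitr_vf - write the interrupt throttle rate for a q_vector
 * @q_vector: structure containing interrupt and ring information
 *
 * Program the VXITR register for this vector with its current ITR
 * value, keeping WDIS set so the timer bits are not cleared and no
 * immediate interrupt is asserted.
 **/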
static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
{
	struct wx *wx = q_vector->wx;
	int v_idx = q_vector->v_idx;
	u32 itr_reg;

	itr_reg = q_vector->itr & WX_VXITR_MASK;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= WX_VXITR_CNT_WDIS;

	wr32(wx, WX_VXITR(v_idx), itr_reg);
}

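/**
 * wx_set_ivar_vf - set IVAR registers, mapping interrupt causes to vectors
 * @wx: board private structure
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/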
static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
			   u8 msix_vector)
{
	u32 ivar, index;

	if (direction == -1) {
		/* other causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		ivar = rd32(wx, WX_VXIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		wr32(wx, WX_VXIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = rd32(wx, WX_VXIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		wr32(wx, WX_VXIVAR(queue >> 1), ivar);
	}
}

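/**
 * wx_configure_msix_vf - Configure MSI-X hardware
 * @wx: board private structure
 *
 * Map Rx and Tx queue interrupts and the other-causes interrupt to
 * MSI-X vectors, set the per-vector interrupt throttle rate, and build
 * the global eims_enable_mask.
 **/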
void wx_configure_msix_vf(struct wx *wx)
{
	int v_idx;

	wx->eims_enable_mask = 0;
	for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
		struct wx_q_vector *q_vector = wx->q_vector[v_idx];
		struct wx_ring *ring;

		wx_for_each_ring(ring, q_vector->rx)
			wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);

		wx_for_each_ring(ring, q_vector->tx)
			wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);

		/* add q_vector eims value to global eims_enable_mask */
		wx->eims_enable_mask |= BIT(v_idx);
		wx_write_eitr_vf(q_vector);
	}

	wx_set_ivar_vf(wx, -1, 1, v_idx);

	/* setup eims_other and add value to global eims_enable_mask */
	wx->eims_other = BIT(v_idx);
	wx->eims_enable_mask |= wx->eims_other;
}

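/**
 * wx_write_uc_addr_list_vf - write unicast addresses to the PF
 * @netdev: network interface device structure
 *
 * Hand the netdev's unicast address list to the PF one entry at a
 * time, or ask the PF to clear all macvlans if the list is empty.
 * Returns the number of addresses written.
 **/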
int wx_write_uc_addr_list_vf(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev)
			wx_set_uc_addr_vf(wx, ++count, ha->addr);
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		wx_set_uc_addr_vf(wx, 0, NULL);
	}

	return count;
}

/**
 * wx_configure_tx_ring_vf - Configure Tx ring after Reset
 * @wx: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
	wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXTDH(reg_idx), 0);
	wr32(wx, WX_VXTDT(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
	txdctl |= WX_VXTXDCTL_ENABLE;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
				1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * wx_configure_tx_vf - Configure Transmit Unit after Reset
 * @wx: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
void wx_configure_tx_vf(struct wx *wx)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
}

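/**
 * wx_configure_srrctl_vf - configure Rx buffer sizes for a queue
 * @wx: board private structure
 * @ring: structure containing ring specific data
 * @index: register index for this queue
 *
 * Program the header and buffer sizes and the drop enable bit in the
 * VXRXDCTL register for this queue.
 **/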
static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
				   int index)
{
	u32 srrctl;

	srrctl = rd32m(wx, WX_VXRXDCTL(index),
		       (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
	srrctl |= WX_VXRXDCTL_DROP;
	srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
	srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));

	wr32(wx, WX_VXRXDCTL(index), srrctl);
}

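/**
 * wx_setup_psrtype_vf - configure packet split receive types
 * @wx: board private structure
 *
 * Initialize the PSR fields of the VXMRQC register for L2, L3, L4,
 * tunnel header and tunnel MAC packet types.
 **/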
void wx_setup_psrtype_vf(struct wx *wx)
{
	/* PSRTYPE must be initialized */
	u32 psrtype = WX_VXMRQC_PSR_L2HDR |
		      WX_VXMRQC_PSR_L3HDR |
		      WX_VXMRQC_PSR_L4HDR |
		      WX_VXMRQC_PSR_TUNHDR |
		      WX_VXMRQC_PSR_TUNMAC;

	wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
}

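/**
 * wx_setup_vfmrqc_vf - configure RSS for the VF
 * @wx: board private structure
 *
 * Fill out the RSS key and redirection table, then enable RSS hashing
 * on IPv4/IPv6 and their TCP packet types in the VXMRQC register.
 **/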
void wx_setup_vfmrqc_vf(struct wx *wx)
{
	u16 rss_i = wx->num_rx_queues;
	u32 vfmrqc = 0, vfreta = 0;
	u8 i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);
	for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
		wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);

	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;

		wx->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			wr32(wx, WX_VXRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
		  WX_VXMRQC_RSS_ALG_IPV4_TCP |
		  WX_VXMRQC_RSS_ALG_IPV6 |
		  WX_VXMRQC_RSS_ALG_IPV6_TCP;

	vfmrqc |= WX_VXMRQC_RSS_EN;

	if (wx->num_rx_queues > 3)
		vfmrqc |= WX_VXMRQC_RSS_HASH(2);
	else if (wx->num_rx_queues > 1)
		vfmrqc |= WX_VXMRQC_RSS_HASH(1);
	wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
}

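/**
 * wx_configure_rx_ring_vf - Configure Rx ring after Reset
 * @wx: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/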
void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXRDH(reg_idx), 0);
	wr32(wx, WX_VXRDT(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	wx_configure_srrctl_vf(wx, ring, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
	rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
	rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;

	/* enable RSC */
	rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
	rxdctl |= WX_VXRXDCTL_RSCMAX(0);
	rxdctl |= WX_VXRXDCTL_RSCEN;

	wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);

	/* pf/vf reuse */
	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}