xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_lib.h"
#include "wx_vf.h"
#include "wx_vf_lib.h"

void wx_write_eitr_vf(struct wx_q_vector *q_vector)
{
	struct wx *wx = q_vector->wx;
	int v_idx = q_vector->v_idx;
	u32 itr_reg;

	itr_reg = q_vector->itr & WX_VXITR_MASK;

	/* set the WDIS bit so this write does not clear the timer bits and
	 * cause an immediate assertion of the interrupt
	 */
	itr_reg |= WX_VXITR_CNT_WDIS;

	wr32(wx, WX_VXITR(v_idx), itr_reg);
}

static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
			   u8 msix_vector)
{
	u32 ivar, index;

	if (direction == -1) {
		/* other causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		ivar = rd32(wx, WX_VXIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		wr32(wx, WX_VXIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
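		/* Each 32-bit VXIVAR register packs four 8-bit entries: the
		 * Rx (direction 0) and Tx (direction 1) causes of an even/odd
		 * queue pair. Example with hypothetical values: Rx on queue 5
		 * gives index = 16 * (5 & 1) + 8 * 0 = 16, i.e. bits 23:16 of
		 * VXIVAR(2).
		 */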
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = rd32(wx, WX_VXIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		wr32(wx, WX_VXIVAR(queue >> 1), ivar);
	}
}

void wx_configure_msix_vf(struct wx *wx)
{
	int v_idx;

	wx->eims_enable_mask = 0;
	for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
		struct wx_q_vector *q_vector = wx->q_vector[v_idx];
		struct wx_ring *ring;

		wx_for_each_ring(ring, q_vector->rx)
			wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);

		wx_for_each_ring(ring, q_vector->tx)
			wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);

		/* add q_vector eims value to global eims_enable_mask */
		wx->eims_enable_mask |= BIT(v_idx);
		wx_write_eitr_vf(q_vector);
	}

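	/* After the loop v_idx == wx->num_q_vectors, so the "other causes"
	 * interrupt (likely the PF-VF mailbox; an assumption, this file does
	 * not say) lands on the vector following the queue vectors.
	 */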
	wx_set_ivar_vf(wx, -1, 1, v_idx);

	/* setup eims_other and add value to global eims_enable_mask */
	wx->eims_other = BIT(v_idx);
	wx->eims_enable_mask |= wx->eims_other;
}

int wx_write_uc_addr_list_vf(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

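		/* entry 0 is reserved to signal "clear all" (see the else
		 * branch below), so filter slots are filled starting at 1
		 */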
		netdev_for_each_uc_addr(ha, netdev)
			wx_set_uc_addr_vf(wx, ++count, ha->addr);
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		wx_set_uc_addr_vf(wx, 0, NULL);
	}

	return count;
}

/**
 * wx_configure_tx_ring_vf - Configure Tx ring after Reset
 * @wx: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
	wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXTDH(reg_idx), 0);
	wr32(wx, WX_VXTDT(reg_idx), 0);
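	/* cache the MMIO address of the tail register so the transmit hot
	 * path can bump the tail without recomputing the register offset
	 */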
	ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
	txdctl |= WX_VXTXDCTL_ENABLE;

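	/* Head write-back: if a buffer was allocated, have hardware DMA the
	 * consumed head index into host memory so the clean-up path can read
	 * it without an MMIO access (a common NIC technique; the consumer is
	 * not shown in this file).
	 */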
	if (ring->headwb_mem) {
		wr32(wx, WX_VXTXD_HEAD_ADDRL(reg_idx),
		     ring->headwb_dma & DMA_BIT_MASK(32));
		wr32(wx, WX_VXTXD_HEAD_ADDRH(reg_idx),
		     upper_32_bits(ring->headwb_dma));

		txdctl |= WX_VXTXDCTL_HEAD_WB;
	}

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
	/* poll to verify queue is enabled: re-read VXTXDCTL every 1 ms for
	 * up to 10 ms
	 */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
				1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * wx_configure_tx_vf - Configure Transmit Unit after Reset
 * @wx: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
void wx_configure_tx_vf(struct wx *wx)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
}

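/* Program the header/buffer split sizes and the drop-enable bit. The
 * "srrctl" name appears to be inherited from PF-side register naming; on
 * the VF these fields live in the VXRXDCTL register.
 */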
static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
				   int index)
{
	u32 srrctl;

	srrctl = rd32m(wx, WX_VXRXDCTL(index),
		       (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
	srrctl |= WX_VXRXDCTL_DROP;
	srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
	srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));

	wr32(wx, WX_VXRXDCTL(index), srrctl);
}

void wx_setup_psrtype_vf(struct wx *wx)
{
	/* PSRTYPE must be initialized */
	u32 psrtype = WX_VXMRQC_PSR_L2HDR |
		      WX_VXMRQC_PSR_L3HDR |
		      WX_VXMRQC_PSR_L4HDR |
		      WX_VXMRQC_PSR_TUNHDR |
		      WX_VXMRQC_PSR_TUNMAC;

	wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
}

void wx_setup_vfmrqc_vf(struct wx *wx)
{
	u16 rss_i = wx->num_rx_queues;
	u32 vfmrqc = 0, vfreta = 0;
	u8 i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);
	for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
		wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);

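	/* Pack the redirection table four 8-bit entries per 32-bit VXRETA
	 * register, cycling queue indices 0..rss_i-1. Example with a
	 * hypothetical rss_i of 2: entries 0,1,0,1 accumulate in vfreta and
	 * are written out as one register word every fourth iteration.
	 */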
	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;

		wx->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			wr32(wx, WX_VXRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
		  WX_VXMRQC_RSS_ALG_IPV4_TCP |
		  WX_VXMRQC_RSS_ALG_IPV6 |
		  WX_VXMRQC_RSS_ALG_IPV6_TCP;

	vfmrqc |= WX_VXMRQC_RSS_EN;

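	/* The RSS_HASH field appears to select how many low-order hash bits
	 * index the redirection table: 2 bits for four or more queues, 1 bit
	 * for two or three (inferred from the thresholds below, not
	 * documented in this file).
	 */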
	if (wx->num_rx_queues > 3)
		vfmrqc |= WX_VXMRQC_RSS_HASH(2);
	else if (wx->num_rx_queues > 1)
		vfmrqc |= WX_VXMRQC_RSS_HASH(1);
	wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
}

void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);

	/* enable relaxed ordering */
	pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
					   0, PCI_EXP_DEVCTL_RELAX_EN);

	/* reset head and tail pointers */
	wr32(wx, WX_VXRDH(reg_idx), 0);
	wr32(wx, WX_VXRDT(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;
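	/* a zero write-back length marks the descriptor as not yet completed
	 * by hardware, so stale data is never mistaken for a received frame
	 * (rationale inferred; not stated in this file)
	 */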

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	wx_configure_srrctl_vf(wx, ring, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
	rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
	rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;

	/* enable RSC */
	rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
	rxdctl |= WX_VXRXDCTL_RSCMAX(0);
	rxdctl |= WX_VXRXDCTL_RSCEN;
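	/* WX_VXRXDCTL_RSCMAX(0) is numerically a no-op after the mask clear;
	 * it is kept to document that the smallest RSC aggregation limit is
	 * selected (an assumption based on the field name)
	 */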

	if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags))
		rxdctl |= WX_VXRXDCTL_DESC_MERGE;

	wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);

	/* pf/vf reuse */
	wx_enable_rx_queue(wx, ring);
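	/* fill the ring with receive buffers; wx_desc_unused() yields the
	 * number of free descriptors that can be handed to hardware
	 */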
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}
293