// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"

static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret) {
		wx_err(wx, "Mdio read command did not complete.\n");
		return ret;
	}

	return (u16)rd32(wx, WX_MSCC);
}

static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
				int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret)
		wx_err(wx, "Mdio write command did not complete.\n");

	return ret;
}

int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);

int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);

int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);

int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
			     int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);
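
/* Illustrative sketch (not code from libwx): these exported accessors are
 * meant to be wired up as mii_bus callbacks from a driver's MDIO
 * registration path, roughly as below. The devm_* registration calls and
 * the surrounding names are assumptions for the example.
 *
 *	struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);
 *
 *	bus->read = wx_phy_read_reg_mdi_c22;
 *	bus->write = wx_phy_write_reg_mdi_c22;
 *	bus->read_c45 = wx_phy_read_reg_mdi_c45;
 *	bus->write_c45 = wx_phy_write_reg_mdi_c45;
 *	bus->priv = wx;		// the helpers expect bus->priv == struct wx *
 *	err = devm_mdiobus_register(&pdev->dev, bus);
 */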

static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMS(0), mask);

	if (wx->mac.type == wx_mac_sp) {
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMS(1), mask);
	}
}

void wx_intr_enable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMC(0), mask);
	if (wx->mac.type == wx_mac_sp) {
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMC(1), mask);
	}
}
EXPORT_SYMBOL(wx_intr_enable);
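
/* Note on the mask registers (a reading aid, not driver code): IMS is the
 * interrupt mask set register, so wx_intr_disable() masks the vectors named
 * in qmask, while IMC is the interrupt mask clear register, so
 * wx_intr_enable() unmasks them. A typical call unmasks everything at once:
 *
 *	wx_intr_enable(wx, WX_INTR_ALL);
 */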

/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_q_entries[vector].vector);

		synchronize_irq(wx->msix_entry->vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);

/* cmd_addr is used for some special commands:
 * 1. as the sector address for the erase-sector command
 * 2. as the flash address for the read/write flash commands
 */
static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
	u32 cmd_val = 0, val = 0;

	cmd_val = WX_SPI_CMD_CMD(cmd) |
		  WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
		  cmd_addr;
	wr32(wx, WX_SPI_CMD, cmd_val);

	return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
				 false, wx, WX_SPI_STATUS);
}
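
/* How the read_poll_timeout() calls in this file expand (a reading aid; see
 * <linux/iopoll.h>): the macro is read_poll_timeout(op, val, cond, sleep_us,
 * timeout_us, sleep_before_read, args...). It repeatedly does
 * "val = op(args...)", sleeping about sleep_us microseconds between reads,
 * until cond is true, and returns 0 on success or -ETIMEDOUT once timeout_us
 * elapses. The call above is therefore roughly equivalent to:
 *
 *	u32 val;
 *
 *	do {
 *		val = rd32(wx, WX_SPI_STATUS);
 *		if (val & 0x1)
 *			return 0;
 *		usleep_range(...);		// ~10 us between reads
 *	} while (elapsed < 100000 us);		// pseudocode bound
 *	return -ETIMEDOUT;
 */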

static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
	int ret = 0;

	ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
	if (ret < 0)
		return ret;

	*data = rd32(wx, WX_SPI_DATA);

	return ret;
}

int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
	u32 reg = 0;
	int err = 0;

	/* if flash is present */
	if (!(rd32(hw, WX_SPI_STATUS) &
	      WX_SPI_STATUS_FLASH_BYPASS)) {
		/* wait for hw to finish loading flash */
		err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
					false, hw, WX_SPI_ILDR_STATUS);
		if (err < 0)
			wx_err(hw, "Check flash load timeout.\n");
	}

	return err;
}
EXPORT_SYMBOL(wx_check_flash_load);

void wx_control_hw(struct wx *wx, bool drv)
{
	/* True : Let firmware know the driver has taken over
	 * False : Let firmware take over control of hw
	 */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
	      drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);

/**
 * wx_mng_present - returns 0 when management capability is present
 * @wx: pointer to hardware structure
 */
int wx_mng_present(struct wx *wx)
{
	u32 fwsm;

	fwsm = rd32(wx, WX_MIS_ST);
	if (fwsm & WX_MIS_ST_MNG_INIT_DN)
		return 0;
	else
		return -EACCES;
}
EXPORT_SYMBOL(wx_mng_present);

/* Software lock to be held while software semaphore is being accessed. */
static DEFINE_MUTEX(wx_sw_sync_lock);

/**
 *  wx_release_sw_sync - Release SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
	mutex_lock(&wx_sw_sync_lock);
	wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
	mutex_unlock(&wx_sw_sync_lock);
}

/**
 *  wx_acquire_sw_sync - Acquire SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
	u32 sem = 0;
	int ret = 0;

	mutex_lock(&wx_sw_sync_lock);
	ret = read_poll_timeout(rd32, sem, !(sem & mask),
				5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
	if (!ret) {
		sem |= mask;
		wr32(wx, WX_MNG_SWFW_SYNC, sem);
	} else {
		wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
	}
	mutex_unlock(&wx_sw_sync_lock);

	return ret;
}
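
/* Usage pattern (a sketch mirroring wx_read_ee_hostif() below): every
 * acquire must be paired with a release of the same mask, with the hardware
 * access in between.
 *
 *	err = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 *	if (err)
 *		return err;
 *	... touch the flash ...
 *	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 */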

/**
 *  wx_host_interface_command - Issue command to manageability block
 *  @wx: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (true) or not (false)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 **/
int wx_host_interface_command(struct wx *wx, u32 *buffer,
			      u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	u32 hicr, i, bi, buf[64] = {};
	int status = 0;
	u32 dword_len;
	u16 buf_len;

	if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
		wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
		return -EINVAL;
	}

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	if (status != 0)
		return status;

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		wx_err(wx, "Buffer length failure, not aligned to dword");
		status = -EINVAL;
		goto rel_out;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		buf[i] = rd32a(wx, WX_MNG_MBOX, i);
	}
	/* Setting this bit tells the ARC that a new command is pending. */
	wr32m(wx, WX_MNG_MBOX_CTL,
	      WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);

	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

	/* Check command completion */
	if (status) {
		wx_dbg(wx, "Command has failed with no status valid.\n");

		buf[0] = rd32(wx, WX_MNG_MBOX);
		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
			status = -EINVAL;
			goto rel_out;
		}
		if ((buf[0] & 0xff0000) >> 16 == 0x80) {
			wx_dbg(wx, "It's unknown cmd.\n");
			status = -EINVAL;
			goto rel_out;
		}

		wx_dbg(wx, "write value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buffer[i]);
		wx_dbg(wx, "read value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buf[i]);
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is anything in the data position, pull it in */
	buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		status = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

rel_out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	return status;
}
EXPORT_SYMBOL(wx_host_interface_command);
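
/* Shape of a host interface exchange (illustrative; the real encoding lives
 * in wx_type.h): a caller fills a command struct whose first bytes are a
 * struct wx_hic_hdr, hands it to wx_host_interface_command() as a dword
 * array, and on success reads any reply out of the mailbox. This mirrors
 * wx_read_ee_hostif_data() below; byte_offset and num_bytes are hypothetical
 * names for the example.
 *
 *	struct wx_hic_read_shadow_ram buffer = {
 *		.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD,
 *		.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN,
 *		.hdr.req.checksum = FW_DEFAULT_CHECKSUM,
 *	};
 *
 *	buffer.address = (__force u32)cpu_to_be32(byte_offset);
 *	buffer.length = (__force u16)cpu_to_be16(num_bytes);
 *	err = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
 *					WX_HI_COMMAND_TIMEOUT, false);
 */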

/**
 *  wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
 *  assuming that the semaphore is already obtained.
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	int status;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = (__force u32)cpu_to_be32(offset * 2);
	/* one word */
	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));

	status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
					   WX_HI_COMMAND_TIMEOUT, false);

	if (status != 0)
		return status;

	*data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);

	return status;
}

/**
 *  wx_read_ee_hostif - Read EEPROM word using a host interface cmd
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
	int status = 0;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status == 0) {
		status = wx_read_ee_hostif_data(wx, offset, data);
		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	}

	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif);
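
/* Caller's view (a sketch; the offset value is invented for the example):
 *
 *	u16 word;
 *
 *	err = wx_read_ee_hostif(wx, 0x15, &word);
 *	if (err)
 *		return err;	// semaphore or mailbox failure
 *	// "word" now holds EEPROM word 0x15 (byte offset 0x2a)
 */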

/**
 *  wx_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif_buffer(struct wx *wx,
			     u16 offset, u16 words, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	u32 value = 0;
	int status;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status != 0)
		return status;

	while (words) {
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);

		status = wx_host_interface_command(wx, (u32 *)&buffer,
						   sizeof(buffer),
						   WX_HI_COMMAND_TIMEOUT,
						   false);

		if (status != 0) {
			wx_err(wx, "Host interface command failed\n");
			goto out;
		}

		for (i = 0; i < words_to_read; i++) {
			u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;

			value = rd32(wx, reg);
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);

/**
 *  wx_init_eeprom_params - Initialize EEPROM params
 *  @wx: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters wx_eeprom_info within the
 *  wx_hw struct in order to set up EEPROM access.
 **/
void wx_init_eeprom_params(struct wx *wx)
{
	struct wx_eeprom_info *eeprom = &wx->eeprom;
	u16 eeprom_size;
	u16 data = 0x80;

	if (eeprom->type == wx_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = wx_eeprom_none;

		if (!(rd32(wx, WX_SPI_STATUS) &
		      WX_SPI_STATUS_FLASH_BYPASS)) {
			eeprom->type = wx_flash;

			eeprom_size = 4096;
			eeprom->word_size = eeprom_size >> 1;

			wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
			       eeprom->type, eeprom->word_size);
		}
	}

	if (wx->mac.type == wx_mac_sp) {
		if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
			wx_err(wx, "NVM Read Error\n");
			return;
		}
		data = data >> 1;
	}

	eeprom->sw_region_offset = data;
}
EXPORT_SYMBOL(wx_init_eeprom_params);

/**
 *  wx_get_mac_addr - Generic get MAC address
 *  @wx: pointer to hardware structure
 *  @mac_addr: Adapter MAC address
 *
 *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
 *  A reset of the adapter must be performed prior to calling this function
 *  in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
	rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
	rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);

	for (i = 0; i < 2; i++)
		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);

	for (i = 0; i < 4; i++)
		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
}
EXPORT_SYMBOL(wx_get_mac_addr);
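
/* Worked example of the unpacking above (values invented for illustration):
 * for MAC address 00:11:22:33:44:55 the registers hold
 *
 *	rar_high = 0x00000011;	// AD_H[15:0] = bytes 0-1
 *	rar_low  = 0x22334455;	// AD_L[31:0] = bytes 2-5
 *
 * so the first loop yields mac_addr[0] = 0x00, mac_addr[1] = 0x11 and the
 * second yields mac_addr[2..5] = 0x22, 0x33, 0x44, 0x55; this is the inverse
 * of the packing done in wx_set_rar() below.
 */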

/**
 *  wx_set_rar - Set Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @pools: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 **/
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
		      u32 enable_addr)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 rar_low, rar_high;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* select the MAC address */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
	if (wx->mac.type == wx_mac_sp)
		wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);

	/* HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		  ((u32)addr[4] << 8) |
		  ((u32)addr[3] << 16) |
		  ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		   ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= WX_PSR_MAC_SWC_AD_H_AV;

	wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 *  wx_clear_rar - Remove Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 **/
static int wx_clear_rar(struct wx *wx, u32 index)
{
	u32 rar_entries = wx->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
	wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);

	wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 *  wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
 *  @wx: pointer to hardware struct
 *  @rar: receive address register index to disassociate
 *  @vmdq: VMDq pool index to remove from the rar
 **/
static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 mpsar_lo, mpsar_hi;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
	mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		wx_clear_rar(wx, rar);

	return 0;
}

/**
 *  wx_init_uta_tables - Initialize the Unicast Table Array
 *  @wx: pointer to hardware structure
 **/
static void wx_init_uta_tables(struct wx *wx)
{
	int i;

	wx_dbg(wx, " Clearing UTA\n");

	for (i = 0; i < 128; i++)
		wr32(wx, WX_PSR_UC_TBL(i), 0);
}

/**
 *  wx_init_rx_addrs - Initializes receive address filters.
 *  @wx: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
void wx_init_rx_addrs(struct wx *wx)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 psrctl;
	int i;

	/* If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(wx->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		wx_get_mac_addr(wx, wx->mac.addr);
		wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
	} else {
		/* Setup the receive address. */
		wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
		wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);

		wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);

		if (wx->mac.type == wx_mac_sp) {
			/* clear VMDq pool/queue selection for RAR 0 */
			wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
		}
	}

	/* Zero out the other receive addresses. */
	wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		wr32(wx, WX_PSR_MAC_SWC_IDX, i);
		wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
		wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
	}

	/* Clear the MTA */
	wx->addr_ctrl.mta_in_use = 0;
	psrctl = rd32(wx, WX_PSR_CTL);
	psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
	psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
	wr32(wx, WX_PSR_CTL, psrctl);
	wx_dbg(wx, " Clearing MTA\n");
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32(wx, WX_PSR_MC_TBL(i), 0);

	wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);

static void wx_sync_mac_table(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
			if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
				wx_set_rar(wx, i,
					   wx->mac_table[i].addr,
					   wx->mac_table[i].pools,
					   WX_PSR_MAC_SWC_AD_H_AV);
			} else {
				wx_clear_rar(wx, i);
			}
			wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
		}
	}
}

/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
	memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
	wx->mac_table[0].pools = 1ULL;
	wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
	wx_set_rar(wx, 0, wx->mac_table[0].addr,
		   wx->mac_table[0].pools,
		   WX_PSR_MAC_SWC_AD_H_AV);
}
EXPORT_SYMBOL(wx_mac_set_default_filter);

void wx_flush_sw_mac_table(struct wx *wx)
{
	u32 i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
		memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		wx->mac_table[i].pools = 0;
	}
	wx_sync_mac_table(wx);
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);

static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
				if (wx->mac_table[i].pools != (1ULL << pool)) {
					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
					wx->mac_table[i].pools |= (1ULL << pool);
					wx_sync_mac_table(wx);
					return i;
				}
			}
		}

		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
			continue;
		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
					   WX_MAC_STATE_IN_USE);
		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
		wx->mac_table[i].pools |= (1ULL << pool);
		wx_sync_mac_table(wx);
		return i;
	}
	return -ENOMEM;
}

static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* search table for addr, if found, set to 0 and sync */
	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!ether_addr_equal(addr, wx->mac_table[i].addr))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].pools &= ~(1ULL << pool);
		if (!wx->mac_table[i].pools) {
			wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
			memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		}
		wx_sync_mac_table(wx);
		return 0;
	}
	return -ENOMEM;
}

static int wx_available_rars(struct wx *wx)
{
	u32 i, count = 0;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state == 0)
			count++;
	}

	return count;
}

/**
 * wx_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @pool: index for mac table
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > wx_available_rars(wx))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			wx_del_mac_filter(wx, ha->addr, pool);
			wx_add_mac_filter(wx, ha->addr, pool);
			count++;
		}
	}
	return count;
}

/**
 *  wx_mta_vector - Determines bit-vector in multicast table to set
 *  @wx: pointer to private structure
 *  @mc_addr: the multicast address
 *
 *  Extracts 12 bits from a multicast address to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits from
 *  incoming rx multicast addresses to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
	u32 vector = 0;

	switch (wx->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		wx_err(wx, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 *  wx_set_mta - Set bit-vector in multicast table
 *  @wx: pointer to private structure
 *  @mc_addr: Multicast address
 *
 *  Sets the bit-vector in the multicast table.
 **/
static void wx_set_mta(struct wx *wx, u8 *mc_addr)
{
	u32 vector, vector_bit, vector_reg;

	wx->addr_ctrl.mta_in_use++;

	vector = wx_mta_vector(wx, mc_addr);
	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);

	/* The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
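
/* Worked example (address chosen for illustration): hashing the all-hosts
 * group 01:00:5e:00:00:01 with mc_filter_type 0 uses mc_addr[4] = 0x00 and
 * mc_addr[5] = 0x01, so
 *
 *	vector     = (0x00 >> 4) | (0x01 << 4) = 0x010
 *	vector_reg = (0x010 >> 5) & 0x7F       = 0	// MTA register 0
 *	vector_bit = 0x010 & 0x1F              = 16	// bit 16 of that register
 *
 * and wx_set_mta() sets bit 16 of mta_shadow[0].
 */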

/**
 *  wx_update_mc_addr_list - Updates MAC list of multicast addresses
 *  @wx: pointer to private structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 **/
static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i, psrctl;

	/* Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	wx->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	wx_dbg(wx, " Clearing MTA\n");
	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));

	/* Update mta_shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		wx_dbg(wx, " Adding the multicast addresses:\n");
		wx_set_mta(wx, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32a(wx, WX_PSR_MC_TBL(0), i,
		      wx->mac.mta_shadow[i]);

	if (wx->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
		psrctl |= WX_PSR_CTL_MFE |
			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_dbg(wx, "Update mc addr list Complete\n");
}

/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	return netdev_mc_count(netdev);
}

/**
 * wx_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
int wx_set_mac(struct net_device *netdev, void *p)
{
	struct wx *wx = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, addr);
	if (retval)
		return retval;

	wx_del_mac_filter(wx, wx->mac.addr, 0);
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);

	wx_mac_set_default_filter(wx, wx->mac.addr);

	return 0;
}
EXPORT_SYMBOL(wx_set_mac);
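
/* Hookup sketch (how a libwx-based driver might use these exports; the ops
 * struct name is illustrative, not taken from this file):
 *
 *	static const struct net_device_ops ngbe_netdev_ops = {
 *		...
 *		.ndo_set_mac_address	= wx_set_mac,
 *		.ndo_set_rx_mode	= wx_set_rx_mode,
 *		...
 *	};
 */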

void wx_disable_rx(struct wx *wx)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = rd32(wx, WX_RDB_PB_CTL);
	if (rxctrl & WX_RDB_PB_CTL_RXEN) {
		pfdtxgswc = rd32(wx, WX_PSR_CTL);
		if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
			pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
			wr32(wx, WX_PSR_CTL, pfdtxgswc);
			wx->mac.set_lben = true;
		} else {
			wx->mac.set_lben = false;
		}
		rxctrl &= ~WX_RDB_PB_CTL_RXEN;
		wr32(wx, WX_RDB_PB_CTL, rxctrl);

		if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
		      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
			/* disable mac receiver */
			wr32m(wx, WX_MAC_RX_CFG,
			      WX_MAC_RX_CFG_RE, 0);
		}
	}
}
EXPORT_SYMBOL(wx_disable_rx);

static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}

/**
 * wx_set_rxpba - Initialize Rx packet buffer
 * @wx: pointer to private structure
 **/
static void wx_set_rxpba(struct wx *wx)
{
	u32 rxpktsize, txpktsize, txpbthresh;
	u32 pbsize = wx->mac.rx_pb_size;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
		    test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
			pbsize -= 64; /* Default 64KB */
	}

	rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = wx->mac.tx_pb_size;
	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

#define WX_ETH_FRAMING 20

/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Delay value is calculated in bit times; convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to the user and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}

/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	u32 dv_id;
	int tc;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	dv_id = WX_LOW_DV(tc);

	/* Delay value is calculated in bit times; convert to KB */
	return WX_BT2KB(dv_id);
}

/**
 * wx_pbthresh_setup - calculate and setup high low water marks
 *
 * @wx: board private structure to calculate for
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	wx->fc.high_water = wx_hpbthresh(wx);
	wx->fc.low_water = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (wx->fc.low_water > wx->fc.high_water)
		wx->fc.low_water = 0;
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}

/**
 *  wx_disable_sec_rx_path - Stops the receive data path
 *  @wx: pointer to private structure
 *
 *  Stops the receive data path and waits for the HW to internally empty
 *  the Rx security block
 **/
int wx_disable_sec_rx_path(struct wx *wx)
{
	u32 secrx;

	wr32m(wx, WX_RSC_CTL,
	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);

	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
				 1000, 40000, false, wx, WX_RSC_ST);
}
EXPORT_SYMBOL(wx_disable_sec_rx_path);

/**
 *  wx_enable_sec_rx_path - Enables the receive data path
 *  @wx: pointer to private structure
 *
 *  Enables the receive data path.
 **/
void wx_enable_sec_rx_path(struct wx *wx)
{
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
	WX_WRITE_FLUSH(wx);
}
EXPORT_SYMBOL(wx_enable_sec_rx_path);

static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
	int i, j;

	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *ring = wx->rx_ring[i];

		j = ring->reg_idx;
		wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN,
		      enable ? WX_PX_RR_CFG_VLAN : 0);
	}
}

void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	netdev_features_t features;
	u32 fctrl, vmolr, vlnctrl;
	int count;

	features = netdev->features;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* the PF doesn't want packets routed to the VF, so clear UPE */
		vmolr |= WX_PSR_VM_L2CTL_MPE;
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= WX_PSR_CTL_MPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	if (netdev->features & NETIF_F_RXALL) {
		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		/* receive bad packets */
		wr32m(wx, WX_RSC_CTL,
		      WX_RSC_CTL_SAVE_MAC_ERR,
		      WX_RSC_CTL_SAVE_MAC_ERR);
	} else {
		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = wx_write_uc_addr_list(netdev, 0);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
		vmolr |= WX_PSR_VM_L2CTL_UPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = wx_write_mc_addr_list(netdev);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	wr32(wx, WX_PSR_CTL, fctrl);
	wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_STAG_RX))
		wx_vlan_strip_control(wx, true);
	else
		wx_vlan_strip_control(wx, false);
}
EXPORT_SYMBOL(wx_set_rx_mode);

static void wx_set_rx_buffer_len(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 mhadd, max_frame;

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = rd32(wx, WX_PSR_MAX_SZ);
	if (max_frame != mhadd)
		wr32(wx, WX_PSR_MAX_SZ, max_frame);
}

/**
 * wx_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
int wx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct wx *wx = netdev_priv(netdev);

	WRITE_ONCE(netdev->mtu, new_mtu);
	wx_set_rx_buffer_len(wx);

	return 0;
}
EXPORT_SYMBOL(wx_change_mtu);

/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	/* write value back with RRCFG.EN bit cleared */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, 0);

	/* the hardware may take up to 100us to really disable the rx queue */
	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
		       reg_idx);
	}
}
EXPORT_SYMBOL(wx_disable_rx_queue);

static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
		       reg_idx);
	}
}

static void wx_configure_srrctl(struct wx *wx,
				struct wx_ring *rx_ring)
{
	u16 reg_idx = rx_ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
		    WX_PX_RR_CFG_RR_BUF_SZ |
		    WX_PX_RR_CFG_SPLIT_MODE);
	/* configure header buffer length, needed for RSC */
	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

static void wx_configure_tx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u32 txdctl = WX_PX_TR_CFG_ENABLE;
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	WX_WRITE_FLUSH(wx);

	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));

	/* reset head and tail pointers */
	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);

	if (ring->count < WX_MAX_TXD)
		txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;

	ring->atr_count = 0;
	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
	    test_bit(WX_FLAG_FDIR_HASH, wx->flags))
		ring->atr_sample_rate = wx->atr_sample_rate;
	else
		ring->atr_sample_rate = 0;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	/* enable queue */
	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

static void wx_configure_rx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));

	if (ring->count == WX_MAX_RXD)
		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
	else
		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;

	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);

	/* reset head and tail pointers */
	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);

	wx_configure_srrctl(wx, ring);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor ring */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);

	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}

/**
 * wx_configure_tx - Configure Transmit Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void wx_configure_tx(struct wx *wx)
{
	u32 i;

	/* TDM_CTL.TE must be set before Tx queues are enabled */
	wr32m(wx, WX_TDM_CTL,
	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring(wx, wx->tx_ring[i]);

	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);

	if (wx->mac.type == wx_mac_em)
		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);

	/* enable mac transmitter */
	wr32m(wx, WX_MAC_TX_CFG,
	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
}

static void wx_restore_vlan(struct wx *wx)
{
	u16 vid = 1;

	wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
		wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}

static void wx_store_reta(struct wx *wx)
{
	u8 *indir_tbl = wx->rss_indir_tbl;
	u32 reta = 0;
	u32 i;

	/* Fill out the redirection table as follows:
	 *  - 8 bit wide entries containing 4 bit RSS index
	 */
	for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
		reta |= indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
			reta = 0;
		}
	}
}

static void wx_setup_reta(struct wx *wx)
{
	u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
	u32 random_key_size = WX_RSS_KEY_SIZE / 4;
	u32 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < random_key_size; i++)
		wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);

	/* Fill out redirection table */
	memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));

	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;

		wx->rss_indir_tbl[i] = j;
	}

	wx_store_reta(wx);
}
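
/* Worked example of the RETA packing (queue count invented): with rss_i = 4,
 * wx_setup_reta() fills rss_indir_tbl with 0,1,2,3,0,1,2,3,... and
 * wx_store_reta() packs four 8-bit entries per 32-bit register, entry 0 in
 * bits 7:0, so every WX_RDB_RSSTBL register ends up as
 *
 *	reta = 0x03020100;	// entries 3,2,1,0; lowest entry in low byte
 */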

static void wx_setup_mrqc(struct wx *wx)
{
	u32 rss_field = 0;

	/* Disable indicating checksum in descriptor, enables RSS hash */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);

	/* Perform hash on these packet types */
	rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
		    WX_RDB_RA_CTL_RSS_IPV4_TCP |
		    WX_RDB_RA_CTL_RSS_IPV4_UDP |
		    WX_RDB_RA_CTL_RSS_IPV6 |
		    WX_RDB_RA_CTL_RSS_IPV6_TCP |
		    WX_RDB_RA_CTL_RSS_IPV6_UDP;

	netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));

	wx_setup_reta(wx);

	if (wx->rss_enabled)
		rss_field |= WX_RDB_RA_CTL_RSS_EN;

	wr32(wx, WX_RDB_RA_CTL, rss_field);
}

/**
 * wx_configure_rx - Configure Receive Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void wx_configure_rx(struct wx *wx)
{
	u32 psrtype, i;
	int ret;

	wx_disable_rx(wx);

	psrtype = WX_RDB_PL_CFG_L4HDR |
		  WX_RDB_PL_CFG_L3HDR |
		  WX_RDB_PL_CFG_L2HDR |
		  WX_RDB_PL_CFG_TUN_TUNHDR;
	wr32(wx, WX_RDB_PL_CFG(0), psrtype);

	/* enable hw crc stripping */
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);

	if (wx->mac.type == wx_mac_sp) {
		u32 psrctl;

		/* RSC Setup */
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
		psrctl |= WX_PSR_CTL_RSC_DIS;
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_setup_mrqc(wx);

	/* set_rx_buffer_len must be called before ring initialization */
	wx_set_rx_buffer_len(wx);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < wx->num_rx_queues; i++)
		wx_configure_rx_ring(wx, wx->rx_ring[i]);

	/* Disable the security engine prior to enabling all receives, so it
	 * does not block traffic while the rings come up.
	 */
	ret = wx_disable_sec_rx_path(wx);
	if (ret < 0)
		wx_err(wx, "The register status is abnormal, please check device.");

	wx_enable_rx(wx);
	wx_enable_sec_rx_path(wx);
}
EXPORT_SYMBOL(wx_configure_rx);

static void wx_configure_isb(struct wx *wx)
{
	/* set ISB Address */
	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
}

void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_pbthresh_setup(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);
	wx_restore_vlan(wx);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		wx->configure_fdir(wx);

	wx_configure_tx(wx);
	wx_configure_rx(wx);
	wx_configure_isb(wx);
}
EXPORT_SYMBOL(wx_configure);
1760 
1761 /**
1762  *  wx_disable_pcie_master - Disable PCI-express master access
1763  *  @wx: pointer to hardware structure
1764  *
1765  *  Disables PCI-Express master access and verifies there are no pending
1766  *  requests.
1767  **/
1768 int wx_disable_pcie_master(struct wx *wx)
1769 {
1770 	int status = 0;
1771 	u32 val;
1772 
1773 	/* Always clear the bus master enable bit so future transactions are blocked */
1774 	pci_clear_master(wx->pdev);
1775 
1776 	/* Exit if master requests are blocked */
1777 	if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
1778 		return 0;
1779 
1780 	/* Poll for master request bit to clear */
1781 	status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
1782 				   false, wx, WX_PX_TRANSACTION_PENDING);
1783 	if (status < 0)
1784 		wx_err(wx, "PCIe transaction pending bit did not clear.\n");
1785 
1786 	return status;
1787 }
1788 EXPORT_SYMBOL(wx_disable_pcie_master);
1789 
1790 /**
1791  *  wx_stop_adapter - Generic stop Tx/Rx units
1792  *  @wx: pointer to hardware structure
1793  *
1794  *  Sets the adapter_stopped flag within the wx struct. Clears interrupts,
1795  *  disables transmit and receive units. The adapter_stopped flag is used by
1796  *  the shared code and drivers to determine if the adapter is in a stopped
1797  *  state and should not touch the hardware.
1798  **/
1799 int wx_stop_adapter(struct wx *wx)
1800 {
1801 	u16 i;
1802 
1803 	/* Set the adapter_stopped flag so other driver functions stop touching
1804 	 * the hardware
1805 	 */
1806 	wx->adapter_stopped = true;
1807 
1808 	/* Disable the receive unit */
1809 	wx_disable_rx(wx);
1810 
1811 	/* Set interrupt mask to stop interrupts from being generated */
1812 	wx_intr_disable(wx, WX_INTR_ALL);
1813 
1814 	/* Clear any pending interrupts, flush previous writes */
1815 	wr32(wx, WX_PX_MISC_IC, 0xffffffff);
1816 	wr32(wx, WX_BME_CTL, 0x3);
1817 
1818 	/* Disable the transmit unit.  Each queue must be disabled. */
1819 	for (i = 0; i < wx->mac.max_tx_queues; i++) {
1820 		wr32m(wx, WX_PX_TR_CFG(i),
1821 		      WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
1822 		      WX_PX_TR_CFG_SWFLSH);
1823 	}
1824 
1825 	/* Disable the receive unit by stopping each queue */
1826 	for (i = 0; i < wx->mac.max_rx_queues; i++) {
1827 		wr32m(wx, WX_PX_RR_CFG(i),
1828 		      WX_PX_RR_CFG_RR_EN, 0);
1829 	}
1830 
1831 	/* flush all queue disables */
1832 	WX_WRITE_FLUSH(wx);
1833 
1834 	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
1835 	 * access and verify no pending requests
1836 	 */
1837 	return wx_disable_pcie_master(wx);
1838 }
1839 EXPORT_SYMBOL(wx_stop_adapter);
1840 
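/* Hypothetical usage sketch, not taken from the upstream driver: a
 * down/reset path would stop the adapter before reprogramming reset-time
 * registers, and check whether PCIe master access was cleanly disabled.
 */
static __maybe_unused void wx_example_down(struct wx *wx)
{
	if (wx_stop_adapter(wx))
		wx_err(wx, "adapter did not stop cleanly\n");

	wx_reset_misc(wx);
}
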
1841 void wx_reset_misc(struct wx *wx)
1842 {
1843 	int i;
1844 
1845 	/* allow receiving packets larger than 2048 bytes (jumbo enable) */
1846 	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
1847 
1848 	/* clear counters on read */
1849 	wr32m(wx, WX_MMC_CONTROL,
1850 	      WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);
1851 
1852 	wr32m(wx, WX_MAC_RX_FLOW_CTRL,
1853 	      WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
1854 
1855 	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
1856 
1857 	wr32m(wx, WX_MIS_RST_ST,
1858 	      WX_MIS_RST_ST_RST_INIT, 0x1E00);
1859 
1860 	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
1861 	wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
1862 	for (i = 0; i < 16; i++) {
1863 		wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
1864 		wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
1865 		wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
1866 	}
1867 	wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
1868 	for (i = 0; i < 16; i++) {
1869 		wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
1870 		wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
1871 		wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
1872 	}
1873 
1874 	/* set pause frame dst mac addr to 01:80:C2:00:00:01 */
1875 	wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
1876 	wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
1877 }
1878 EXPORT_SYMBOL(wx_reset_misc);
1879 
1880 /**
1881  *  wx_get_pcie_msix_counts - Gets MSI-X vector count
1882  *  @wx: pointer to hardware structure
1883  *  @msix_count: number of MSI-X interrupts that can be obtained
1884  *  @max_msix_count: maximum number of MSI-X interrupts the MAC supports
1885  *
1886  *  Read PCIe configuration space, and get the MSI-X vector count from
1887  *  the capabilities table.
1888  **/
1889 int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
1890 {
1891 	struct pci_dev *pdev = wx->pdev;
1892 	struct device *dev = &pdev->dev;
1893 	int pos;
1894 
1895 	*msix_count = 1;
1896 	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
1897 	if (!pos) {
1898 		dev_err(dev, "Unable to find MSI-X Capabilities\n");
1899 		return -EINVAL;
1900 	}
1901 	pci_read_config_word(pdev,
1902 			     pos + PCI_MSIX_FLAGS,
1903 			     msix_count);
1904 	*msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
1905 	/* MSI-X count is zero-based in HW */
1906 	*msix_count += 1;
1907 
1908 	if (*msix_count > max_msix_count)
1909 		*msix_count = max_msix_count;
1910 
1911 	return 0;
1912 }
1913 EXPORT_SYMBOL(wx_get_pcie_msix_counts);
1914 
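/* Hypothetical usage sketch: clamp the advertised vector count before
 * allocating MSI-X interrupts.  WX_EXAMPLE_MAX_VECS and the example
 * function are assumptions for illustration, not driver constants.
 */
#define WX_EXAMPLE_MAX_VECS 64

static __maybe_unused int wx_example_alloc_vectors(struct wx *wx)
{
	u16 msix_count;
	int err;

	err = wx_get_pcie_msix_counts(wx, &msix_count, WX_EXAMPLE_MAX_VECS);
	if (err)
		return err;

	/* one vector minimum; up to the clamped hardware count */
	return pci_alloc_irq_vectors(wx->pdev, 1, msix_count, PCI_IRQ_MSIX);
}
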
1915 /**
1916  * wx_init_rss_key - Initialize wx RSS key
1917  * @wx: device handle
1918  *
1919  * Allocates and initializes the RSS key if it is not allocated.
1920  **/
1921 static int wx_init_rss_key(struct wx *wx)
1922 {
1923 	u32 *rss_key;
1924 
1925 	if (!wx->rss_key) {
1926 		rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
1927 		if (unlikely(!rss_key))
1928 			return -ENOMEM;
1929 
1930 		netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
1931 		wx->rss_key = rss_key;
1932 	}
1933 
1934 	return 0;
1935 }
1936 
1937 int wx_sw_init(struct wx *wx)
1938 {
1939 	struct pci_dev *pdev = wx->pdev;
1940 	u32 ssid = 0;
1941 	int err = 0;
1942 
1943 	wx->vendor_id = pdev->vendor;
1944 	wx->device_id = pdev->device;
1945 	wx->revision_id = pdev->revision;
1946 	wx->oem_svid = pdev->subsystem_vendor;
1947 	wx->oem_ssid = pdev->subsystem_device;
1948 	wx->bus.device = PCI_SLOT(pdev->devfn);
1949 	wx->bus.func = PCI_FUNC(pdev->devfn);
1950 
1951 	if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
1952 		wx->subsystem_vendor_id = pdev->subsystem_vendor;
1953 		wx->subsystem_device_id = pdev->subsystem_device;
1954 	} else {
1955 		err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
1956 		if (err < 0) {
1957 			wx_err(wx, "read of internal subsystem device id failed\n");
1958 			return err;
1959 		}
1960 
1961 		wx->subsystem_device_id = swab16((u16)ssid);
1962 	}
1963 
1964 	err = wx_init_rss_key(wx);
1965 	if (err < 0) {
1966 		wx_err(wx, "rss key allocation failed\n");
1967 		return err;
1968 	}
1969 
1970 	wx->mac_table = kcalloc(wx->mac.num_rar_entries,
1971 				sizeof(struct wx_mac_addr),
1972 				GFP_KERNEL);
1973 	if (!wx->mac_table) {
1974 		wx_err(wx, "mac_table allocation failed\n");
1975 		kfree(wx->rss_key);
1976 		return -ENOMEM;
1977 	}
1978 
1979 	bitmap_zero(wx->state, WX_STATE_NBITS);
1980 	bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
1981 	wx->misc_irq_domain = false;
1982 
1983 	return 0;
1984 }
1985 EXPORT_SYMBOL(wx_sw_init);
1986 
1987 /**
1988  *  wx_find_vlvf_slot - find the vlanid or the first empty slot
1989  *  @wx: pointer to hardware structure
1990  *  @vlan: VLAN id to write to VLAN filter
1991  *
1992  *  return the VLVF index where this VLAN id should be placed
1993  *
1994  **/
1995 static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
1996 {
1997 	u32 bits = 0, first_empty_slot = 0;
1998 	int regindex;
1999 
2000 	/* short cut the special case */
2001 	if (vlan == 0)
2002 		return 0;
2003 
2004 	/* Search for the vlan id in the VLVF entries. Save off the first empty
2005 	 * slot found along the way
2006 	 */
2007 	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
2008 		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
2009 		bits = rd32(wx, WX_PSR_VLAN_SWC);
2010 		if (!bits && !(first_empty_slot))
2011 			first_empty_slot = regindex;
2012 		else if ((bits & 0x0FFF) == vlan)
2013 			break;
2014 	}
2015 
2016 	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) {
2017 		if (first_empty_slot)
2018 			regindex = first_empty_slot;
2019 		else
2020 			regindex = -ENOMEM;
2021 	}
2022 
2023 	return regindex;
2024 }
2025 
2026 /**
2027  *  wx_set_vlvf - Set VLAN Pool Filter
2028  *  @wx: pointer to hardware structure
2029  *  @vlan: VLAN id to write to VLAN filter
2030  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
2031  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
2032  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
2033  *                 should be changed
2034  *
2035  *  Turn on/off specified bit in VLVF table.
2036  **/
2037 static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
2038 		       bool *vfta_changed)
2039 {
2040 	int vlvf_index;
2041 	u32 vt, bits;
2042 
2043 	/* If VT Mode is set
2044 	 *   Either vlan_on
2045 	 *     make sure the vlan is in VLVF
2046 	 *     set the vind bit in the matching VLVFB
2047 	 *   Or !vlan_on
2048 	 *     clear the pool bit and possibly the vind
2049 	 */
2050 	vt = rd32(wx, WX_CFG_PORT_CTL);
2051 	if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK))
2052 		return 0;
2053 
2054 	vlvf_index = wx_find_vlvf_slot(wx, vlan);
2055 	if (vlvf_index < 0)
2056 		return vlvf_index;
2057 
2058 	wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
2059 	if (vlan_on) {
2060 		/* set the pool bit */
2061 		if (vind < 32) {
2062 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2063 			bits |= (1 << vind);
2064 			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2065 		} else {
2066 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2067 			bits |= (1 << (vind - 32));
2068 			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2069 		}
2070 	} else {
2071 		/* clear the pool bit */
2072 		if (vind < 32) {
2073 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2074 			bits &= ~(1 << vind);
2075 			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2076 			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2077 		} else {
2078 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2079 			bits &= ~(1 << (vind - 32));
2080 			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2081 			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2082 		}
2083 	}
2084 
2085 	if (bits) {
2086 		wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
2087 		if (!vlan_on && vfta_changed)
2088 			*vfta_changed = false;
2089 	} else {
2090 		wr32(wx, WX_PSR_VLAN_SWC, 0);
2091 	}
2092 
2093 	return 0;
2094 }
2095 
2096 /**
2097  *  wx_set_vfta - Set VLAN filter table
2098  *  @wx: pointer to hardware structure
2099  *  @vlan: VLAN id to write to VLAN filter
2100  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
2101  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
2102  *
2103  *  Turn on/off specified VLAN in the VLAN filter table.
2104  **/
2105 static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
2106 {
2107 	u32 bitindex, vfta, targetbit;
2108 	bool vfta_changed = false;
2109 	int regindex, ret;
2110 
2111 	/* this is a 2 part operation - first the VFTA, then the
2112 	 * VLVF and VLVFB if VT Mode is set
2113 	 * We don't write the VFTA until we know the VLVF part succeeded.
2114 	 */
2115 
2116 	/* Part 1
2117 	 * The VFTA is a bitstring made up of 128 32-bit registers
2118 	 * that enable the particular VLAN id, much like the MTA:
2119 	 *    bits[11-5]: which register
2120 	 *    bits[4-0]:  which bit in the register
2121 	 */
2122 	regindex = (vlan >> 5) & 0x7F;
2123 	bitindex = vlan & 0x1F;
2124 	targetbit = (1 << bitindex);
2125 	/* errata 5 */
2126 	vfta = wx->mac.vft_shadow[regindex];
2127 	if (vlan_on) {
2128 		if (!(vfta & targetbit)) {
2129 			vfta |= targetbit;
2130 			vfta_changed = true;
2131 		}
2132 	} else {
2133 		if ((vfta & targetbit)) {
2134 			vfta &= ~targetbit;
2135 			vfta_changed = true;
2136 		}
2137 	}
2138 	/* Part 2
2139 	 * Call wx_set_vlvf to set VLVFB and VLVF
2140 	 */
2141 	ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
2142 	if (ret != 0)
2143 		return ret;
2144 
2145 	if (vfta_changed)
2146 		wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
2147 	wx->mac.vft_shadow[regindex] = vfta;
2148 
2149 	return 0;
2150 }
2151 
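/* Worked example of the VFTA indexing above (illustrative helper, not
 * part of the driver): VLAN id 100 yields regindex 100 >> 5 = 3 and
 * bitindex 100 & 0x1F = 4, i.e. bit 4 of WX_PSR_VLAN_TBL(3).
 */
static __maybe_unused void wx_example_vfta_index(u32 vlan, u32 *reg, u32 *bit)
{
	*reg = (vlan >> 5) & 0x7F;	/* which of the 128 VFTA registers */
	*bit = vlan & 0x1F;		/* which bit within that register */
}
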
2152 /**
2153  *  wx_clear_vfta - Clear VLAN filter table
2154  *  @wx: pointer to hardware structure
2155  *
2156  *  Clears the VLAN filter table, and the VMDq index associated with the filter
2157  **/
2158 static void wx_clear_vfta(struct wx *wx)
2159 {
2160 	u32 offset;
2161 
2162 	for (offset = 0; offset < wx->mac.vft_size; offset++) {
2163 		wr32(wx, WX_PSR_VLAN_TBL(offset), 0);
2164 		wx->mac.vft_shadow[offset] = 0;
2165 	}
2166 
2167 	for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) {
2168 		wr32(wx, WX_PSR_VLAN_SWC_IDX, offset);
2169 		wr32(wx, WX_PSR_VLAN_SWC, 0);
2170 		wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
2171 		wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
2172 	}
2173 }
2174 
2175 int wx_vlan_rx_add_vid(struct net_device *netdev,
2176 		       __be16 proto, u16 vid)
2177 {
2178 	struct wx *wx = netdev_priv(netdev);
2179 
2180 	/* add VID to filter table */
2181 	wx_set_vfta(wx, vid, VMDQ_P(0), true);
2182 	set_bit(vid, wx->active_vlans);
2183 
2184 	return 0;
2185 }
2186 EXPORT_SYMBOL(wx_vlan_rx_add_vid);
2187 
2188 int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2189 {
2190 	struct wx *wx = netdev_priv(netdev);
2191 
2192 	/* remove VID from filter table */
2193 	if (vid)
2194 		wx_set_vfta(wx, vid, VMDQ_P(0), false);
2195 	clear_bit(vid, wx->active_vlans);
2196 
2197 	return 0;
2198 }
2199 EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
2200 
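/* Hypothetical netdev_ops wiring, for illustration only: the two handlers
 * above are shaped to back the standard VLAN filter callbacks.  The
 * wx_example_netdev_ops name is an assumption, not a driver symbol.
 */
static const struct net_device_ops wx_example_netdev_ops __maybe_unused = {
	.ndo_vlan_rx_add_vid	= wx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= wx_vlan_rx_kill_vid,
};
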
2201 static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
2202 {
2203 	u16 reg_idx = ring->reg_idx;
2204 	u32 srrctl;
2205 
2206 	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2207 	srrctl |= WX_PX_RR_CFG_DROP_EN;
2208 
2209 	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2210 }
2211 
2212 static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
2213 {
2214 	u16 reg_idx = ring->reg_idx;
2215 	u32 srrctl;
2216 
2217 	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2218 	srrctl &= ~WX_PX_RR_CFG_DROP_EN;
2219 
2220 	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2221 }
2222 
2223 int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
2224 {
2225 	u16 pause_time = WX_DEFAULT_FCPAUSE;
2226 	u32 mflcn_reg, fccfg_reg, reg;
2227 	u32 fcrtl, fcrth;
2228 	int i;
2229 
2230 	/* Low water mark of zero causes XOFF floods */
2231 	if (tx_pause && wx->fc.high_water) {
2232 		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
2233 			wx_err(wx, "Invalid water mark configuration\n");
2234 			return -EINVAL;
2235 		}
2236 	}
2237 
2238 	/* Disable any previous flow control settings */
2239 	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
2240 	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;
2241 
2242 	fccfg_reg = rd32(wx, WX_RDB_RFCC);
2243 	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;
2244 
2245 	if (rx_pause)
2246 		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
2247 	if (tx_pause)
2248 		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;
2249 
2250 	/* Set 802.3x based flow control settings. */
2251 	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
2252 	wr32(wx, WX_RDB_RFCC, fccfg_reg);
2253 
2254 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2255 	if (tx_pause && wx->fc.high_water) {
2256 		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
2257 		wr32(wx, WX_RDB_RFCL, fcrtl);
2258 		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
2259 	} else {
2260 		wr32(wx, WX_RDB_RFCL, 0);
2261 		/* In order to prevent Tx hangs when the internal Tx
2262 		 * switch is enabled we must set the high water mark
2263 		 * to the Rx packet buffer size - 24KB.  This allows
2264 		 * the Tx switch to function even under heavy Rx
2265 		 * workloads.
2266 		 */
2267 		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
2268 	}
2269 
2270 	wr32(wx, WX_RDB_RFCH, fcrth);
2271 
2272 	/* Configure pause time (16-bit value replicated into both register halves) */
2273 	reg = pause_time * 0x00010001;
2274 	wr32(wx, WX_RDB_RFCV, reg);
2275 
2276 	/* Configure flow control refresh threshold value */
2277 	wr32(wx, WX_RDB_RFCRT, pause_time / 2);
2278 
2279 	/*  We should set the drop enable bit if:
2280 	 *  Number of Rx queues > 1 and flow control is disabled
2281 	 *
2282 	 *  This allows us to avoid head of line blocking for security
2283 	 *  and performance reasons.
2284 	 */
2285 	if (wx->num_rx_queues > 1 && !tx_pause) {
2286 		for (i = 0; i < wx->num_rx_queues; i++)
2287 			wx_enable_rx_drop(wx, wx->rx_ring[i]);
2288 	} else {
2289 		for (i = 0; i < wx->num_rx_queues; i++)
2290 			wx_disable_rx_drop(wx, wx->rx_ring[i]);
2291 	}
2292 
2293 	return 0;
2294 }
2295 EXPORT_SYMBOL(wx_fc_enable);
2296 
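/* Hypothetical configuration sketch: fc.high_water/fc.low_water are in KB
 * units (note the << 10 above), while WX_RDB_PB_SZ(0) appears to read
 * bytes (note the - 24576 adjustment).  The 2/3 and 1/3 split below is an
 * illustrative assumption, not a documented recommendation.
 */
static __maybe_unused int wx_example_enable_pause(struct wx *wx)
{
	u32 pbsize_kb = rd32(wx, WX_RDB_PB_SZ(0)) >> 10;

	wx->fc.high_water = pbsize_kb * 2 / 3;
	wx->fc.low_water = pbsize_kb / 3;

	return wx_fc_enable(wx, true, true);
}
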
2297 /**
2298  * wx_update_stats - Update the board statistics counters.
2299  * @wx: board private structure
2300  **/
2301 void wx_update_stats(struct wx *wx)
2302 {
2303 	struct wx_hw_stats *hwstats = &wx->stats;
2304 
2305 	u64 non_eop_descs = 0, alloc_rx_buff_failed = 0;
2306 	u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
2307 	u64 restart_queue = 0, tx_busy = 0;
2308 	u32 i;
2309 
2310 	/* gather some stats to the wx struct that are per queue */
2311 	for (i = 0; i < wx->num_rx_queues; i++) {
2312 		struct wx_ring *rx_ring = wx->rx_ring[i];
2313 
2314 		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
2315 		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
2316 		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
2317 		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
2318 	}
2319 	wx->non_eop_descs = non_eop_descs;
2320 	wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
2321 	wx->hw_csum_rx_error = hw_csum_rx_error;
2322 	wx->hw_csum_rx_good = hw_csum_rx_good;
2323 
2324 	for (i = 0; i < wx->num_tx_queues; i++) {
2325 		struct wx_ring *tx_ring = wx->tx_ring[i];
2326 
2327 		restart_queue += tx_ring->tx_stats.restart_queue;
2328 		tx_busy += tx_ring->tx_stats.tx_busy;
2329 	}
2330 	wx->restart_queue = restart_queue;
2331 	wx->tx_busy = tx_busy;
2332 
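	/* Most hardware counters below are clear-on-read (see
	 * WX_MMC_CONTROL_RSTONRD in wx_reset_misc()), so each read is
	 * accumulated as a delta rather than assigned.
	 */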
2333 	hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
2334 	hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
2335 	hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
2336 	hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
2337 	hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2338 	hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2339 	hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2340 	hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2341 	hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2342 	hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2343 	hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2344 	hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2345 	hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2346 	hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2347 	hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
2348 	hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
2349 	hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
2350 	hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
2351 	hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
2352 	hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
2353 	hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
2354 	hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
2355 
2356 	if (wx->mac.type == wx_mac_sp) {
2357 		hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
2358 		hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
2359 	}
2360 
2361 	for (i = 0; i < wx->mac.max_rx_queues; i++)
2362 		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
2363 }
2364 EXPORT_SYMBOL(wx_update_stats);
2365 
2366 /**
2367  *  wx_clear_hw_cntrs - Generic clear hardware counters
2368  *  @wx: board private structure
2369  *
2370  *  Clears all hardware statistics counters by reading them from the hardware
2371  *  Statistics counters are clear on read.
2372  **/
2373 void wx_clear_hw_cntrs(struct wx *wx)
2374 {
2375 	u16 i = 0;
2376 
2377 	for (i = 0; i < wx->mac.max_rx_queues; i++)
2378 		wr32(wx, WX_PX_MPRC(i), 0);
2379 
2380 	rd32(wx, WX_RDM_PKT_CNT);
2381 	rd32(wx, WX_TDM_PKT_CNT);
2382 	rd64(wx, WX_RDM_BYTE_CNT_LSB);
2383 	rd32(wx, WX_TDM_BYTE_CNT_LSB);
2384 	rd32(wx, WX_RDM_DRP_PKT);
2385 	rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2386 	rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2387 	rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2388 	rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2389 	rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2390 	rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2391 	rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2392 	rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2393 	rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2394 	rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2395 	rd32(wx, WX_RDB_LXONTXC);
2396 	rd32(wx, WX_RDB_LXOFFTXC);
2397 	rd32(wx, WX_MAC_LXONOFFRXC);
2398 }
2399 EXPORT_SYMBOL(wx_clear_hw_cntrs);
2400 
2401 /**
2402  *  wx_start_hw - Prepare hardware for Tx/Rx
2403  *  @wx: pointer to hardware structure
2404  *
2405  *  Starts the hardware by clearing the VLAN filter table and the
2406  *  per-queue Tx rate limiters, then performs any revision-specific
2407  *  operations.
2408  **/
2409 void wx_start_hw(struct wx *wx)
2410 {
2411 	int i;
2412 
2413 	/* Clear the VLAN filter table */
2414 	wx_clear_vfta(wx);
2415 	WX_WRITE_FLUSH(wx);
2416 	/* Clear the rate limiters */
2417 	for (i = 0; i < wx->mac.max_tx_queues; i++) {
2418 		wr32(wx, WX_TDM_RP_IDX, i);
2419 		wr32(wx, WX_TDM_RP_RATE, 0);
2420 	}
2421 }
2422 EXPORT_SYMBOL(wx_start_hw);
2423 
2424 MODULE_LICENSE("GPL");
2425