// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_sriov.h"
#include "wx_hw.h"

static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret) {
		wx_err(wx, "Mdio read c22 command did not complete.\n");
		return ret;
	}

	return (u16)rd32(wx, WX_MSCC);
}

static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
				int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret)
		wx_err(wx, "Mdio write c22 command did not complete.\n");

	return ret;
}

int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);

int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);

int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);

int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
			     int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);
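
/* Usage sketch (illustrative only, not part of this file): a MAC driver
 * built on libwx would typically plug these accessors into an mii_bus
 * before registering it. The function and bus names below are
 * hypothetical; the mdiobus APIs are the standard kernel ones.
 *
 *	static int example_mdio_init(struct wx *wx)
 *	{
 *		struct mii_bus *bus;
 *
 *		bus = devm_mdiobus_alloc(&wx->pdev->dev);
 *		if (!bus)
 *			return -ENOMEM;
 *
 *		bus->name = "example_mii_bus";
 *		bus->priv = wx;
 *		bus->read = wx_phy_read_reg_mdi_c22;
 *		bus->write = wx_phy_write_reg_mdi_c22;
 *		bus->read_c45 = wx_phy_read_reg_mdi_c45;
 *		bus->write_c45 = wx_phy_write_reg_mdi_c45;
 *		snprintf(bus->id, MII_BUS_ID_SIZE, "example-mdio-%x",
 *			 pci_dev_id(wx->pdev));
 *
 *		return devm_mdiobus_register(&wx->pdev->dev, bus);
 *	}
 */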

static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMS(0), mask);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMS(1), mask);
		break;
	default:
		break;
	}
}

void wx_intr_enable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMC(0), mask);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMC(1), mask);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(wx_intr_enable);
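
/* Note on the mask sense: wx_intr_enable() writes the Interrupt Mask
 * Clear (IMC) registers to unmask vectors, while wx_intr_disable()
 * writes the Interrupt Mask Set (IMS) registers to mask them. A
 * minimal, illustrative call to unmask a single queue vector v_idx:
 *
 *	wx_intr_enable(wx, BIT_ULL(v_idx));
 */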

/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_q_entries[vector].vector);

		synchronize_irq(wx->msix_entry->vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);

/* cmd_addr is used for some special commands:
 * 1. as the sector address for the erase sector command
 * 2. as the flash address for the flash read/write commands
 */
static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
	u32 cmd_val = 0, val = 0;

	cmd_val = WX_SPI_CMD_CMD(cmd) |
		  WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
		  cmd_addr;
	wr32(wx, WX_SPI_CMD, cmd_val);

	return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
				 false, wx, WX_SPI_STATUS);
}

static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
	int ret = 0;

	ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
	if (ret < 0)
		return ret;

	*data = rd32(wx, WX_SPI_DATA);

	return ret;
}

int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
	u32 reg = 0;
	int err = 0;

	/* if flash is present */
	if (!(rd32(hw, WX_SPI_STATUS) &
	      WX_SPI_STATUS_FLASH_BYPASS)) {
		/* wait for hw to finish loading flash */
		err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
					false, hw, WX_SPI_ILDR_STATUS);
		if (err < 0)
			wx_err(hw, "Check flash load timeout.\n");
	}

	return err;
}
EXPORT_SYMBOL(wx_check_flash_load);
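
/* Usage sketch: after reset, callers poll a MAC-specific "load done"
 * bit until firmware has finished loading from flash. The flag name
 * below is hypothetical; real callers pass their own ILDR status bit.
 *
 *	err = wx_check_flash_load(wx, EXAMPLE_SPI_ILDR_STATUS_PERST);
 *	if (err)
 *		return err;
 */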

void wx_control_hw(struct wx *wx, bool drv)
{
	/* True : Let firmware know the driver has taken over
	 * False : Let firmware take over control of hw
	 */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
	      drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);

/**
 * wx_mng_present - returns 0 when management capability is present
 * @wx: pointer to hardware structure
 */
int wx_mng_present(struct wx *wx)
{
	u32 fwsm;

	fwsm = rd32(wx, WX_MIS_ST);
	if (fwsm & WX_MIS_ST_MNG_INIT_DN)
		return 0;
	else
		return -EACCES;
}
EXPORT_SYMBOL(wx_mng_present);

/* Software lock to be held while software semaphore is being accessed. */
static DEFINE_MUTEX(wx_sw_sync_lock);

/**
 *  wx_release_sw_sync - Release SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
	mutex_lock(&wx_sw_sync_lock);
	wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
	mutex_unlock(&wx_sw_sync_lock);
}

/**
 *  wx_acquire_sw_sync - Acquire SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
	u32 sem = 0;
	int ret = 0;

	mutex_lock(&wx_sw_sync_lock);
	ret = read_poll_timeout(rd32, sem, !(sem & mask),
				5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
	if (!ret) {
		sem |= mask;
		wr32(wx, WX_MNG_SWFW_SYNC, sem);
	} else {
		wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
	}
	mutex_unlock(&wx_sw_sync_lock);

	return ret;
}
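
/* Typical pairing (sketch): every successful acquire must be matched
 * by a release around the protected hardware access, as
 * wx_read_ee_hostif() below does for the flash semaphore:
 *
 *	err = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 *	if (err)
 *		return err;
 *	// ... access the shared resource ...
 *	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 */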

static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
				       u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	u32 hicr, i, bi, buf[64] = {};
	int status = 0;
	u32 dword_len;
	u16 buf_len;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	if (status != 0)
		return status;

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		buf[i] = rd32a(wx, WX_MNG_MBOX, i);
	}
	/* Setting this bit tells the ARC that a new command is pending. */
	wr32m(wx, WX_MNG_MBOX_CTL,
	      WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);

	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

	buf[0] = rd32(wx, WX_MNG_MBOX);
	if ((buf[0] & 0xff0000) >> 16 == 0x80) {
		wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
		status = -EINVAL;
		goto rel_out;
	}

	/* Check command completion */
	if (status) {
		wx_err(wx, "Command failed with no valid status.\n");
		wx_dbg(wx, "write value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buffer[i]);
		wx_dbg(wx, "read value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buf[i]);
		wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);

		goto rel_out;
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is anything in the data position, pull it in */
	buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		status = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

rel_out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	return status;
}

static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
{
	u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
	struct wx_hic_hdr *recv_hdr;
	u32 i;

	/* read hdr */
	for (i = 0; i < dword_len; i++) {
		buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
		le32_to_cpus(&buffer[i]);
	}

	/* check hdr */
	recv_hdr = (struct wx_hic_hdr *)buffer;
	if (recv_hdr->cmd == send_cmd &&
	    recv_hdr->index == wx->swfw_index)
		return true;

	return false;
}

static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
				       u32 length, u32 timeout, bool return_data)
{
	struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	bool busy, reply;
	u32 dword_len;
	u16 buf_len;
	int err = 0;
	u8 send_cmd;
	u32 i;

	/* wait to get lock */
	might_sleep();
	err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
				false, WX_STATE_SWFW_BUSY, wx->state);
	if (err)
		return err;

	/* index is a unique seq id for each mbox message */
	hdr->index = wx->swfw_index;
	send_cmd = hdr->cmd;

	dword_len = length >> 2;
	/* write data to SW-FW mbox array */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		rd32a(wx, WX_SW2FW_MBOX, i);
	}

	/* generate interrupt to notify FW */
	wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
	wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);

	/* polling reply from FW */
	err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
				timeout * 1000, true, wx, buffer, send_cmd);
	if (err) {
		wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
		       send_cmd, wx->swfw_index);
		goto rel_out;
	}

	if (hdr->cmd_or_resp.ret_status == 0x80) {
		wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
		err = -EINVAL;
		goto rel_out;
	}

	/* no reply data is expected from FW, so return */
	if (!return_data)
		goto rel_out;

	/* If there is anything in the data position, pull it in */
	buf_len = hdr->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		err = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;
	for (i = hdr_size >> 2; i <= dword_len; i++) {
		buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
		le32_to_cpus(&buffer[i]);
	}

rel_out:
	/* advance index; it replaces wx_hic_hdr.checksum as the seq id */
	if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
		wx->swfw_index = 0;
	else
		wx->swfw_index++;

	clear_bit(WX_STATE_SWFW_BUSY, wx->state);
	return err;
}

/**
 *  wx_host_interface_command - Issue command to manageability block
 *  @wx: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (true) or not (false)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 **/
int wx_host_interface_command(struct wx *wx, u32 *buffer,
			      u32 length, u32 timeout, bool return_data)
{
	if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
		wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
		return -EINVAL;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		wx_err(wx, "Buffer length failure, not aligned to dword");
		return -EINVAL;
	}

	if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
		return wx_host_interface_command_r(wx, buffer, length,
						   timeout, return_data);

	return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
}
EXPORT_SYMBOL(wx_host_interface_command);
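
/* Calling convention sketch: a command is a structure whose first
 * member is a struct wx_hic_hdr, handed over as a dword array. The
 * command id below is a placeholder; see wx_set_pps() and
 * wx_read_ee_hostif_data() for concrete in-file callers.
 *
 *	struct wx_hic_hdr hdr = {
 *		.cmd = 0x30,			// hypothetical command id
 *		.buf_len = 0,
 *		.checksum = FW_DEFAULT_CHECKSUM,
 *	};
 *	int err;
 *
 *	err = wx_host_interface_command(wx, (u32 *)&hdr, sizeof(hdr),
 *					WX_HI_COMMAND_TIMEOUT, false);
 */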

int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
{
	struct wx_hic_set_pps pps_cmd;

	pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
	pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
	pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	pps_cmd.lan_id = wx->bus.func;
	pps_cmd.enable = (u8)enable;
	pps_cmd.nsec = nsec;
	pps_cmd.cycles = cycles;
	pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;

	return wx_host_interface_command(wx, (u32 *)&pps_cmd,
					 sizeof(pps_cmd),
					 WX_HI_COMMAND_TIMEOUT,
					 false);
}

/**
 *  wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
 *  assuming that the semaphore is already obtained.
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	int status;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = (__force u32)cpu_to_be32(offset * 2);
	/* one word */
	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));

	status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
					   WX_HI_COMMAND_TIMEOUT, false);

	if (status != 0)
		return status;

	if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
		*data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
	else
		*data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);

	return status;
}

/**
 *  wx_read_ee_hostif - Read EEPROM word using a host interface cmd
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
	int status = 0;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status == 0) {
		status = wx_read_ee_hostif_data(wx, offset, data);
		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	}

	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif);
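
/* Usage sketch: read a single NVM word; the offset is illustrative.
 *
 *	u16 val;
 *	int err;
 *
 *	err = wx_read_ee_hostif(wx, 0x15, &val);
 *	if (err)
 *		wx_err(wx, "EEPROM read failed\n");
 */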

/**
 *  wx_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif_buffer(struct wx *wx,
			     u16 offset, u16 words, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	u32 value = 0;
	int status;
	u32 mbox;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status != 0)
		return status;

	while (words) {
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);

		status = wx_host_interface_command(wx, (u32 *)&buffer,
						   sizeof(buffer),
						   WX_HI_COMMAND_TIMEOUT,
						   false);

		if (status != 0) {
			wx_err(wx, "Host interface command failed\n");
			goto out;
		}

		if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
			mbox = WX_MNG_MBOX;
		else
			mbox = WX_FW2SW_MBOX;
		for (i = 0; i < words_to_read; i++) {
			u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;

			value = rd32(wx, reg);
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);

/**
 *  wx_init_eeprom_params - Initialize EEPROM params
 *  @wx: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters wx_eeprom_info within the
 *  struct wx in order to set up EEPROM access.
 **/
void wx_init_eeprom_params(struct wx *wx)
{
	struct wx_eeprom_info *eeprom = &wx->eeprom;
	u16 eeprom_size;
	u16 data = 0x80;

	if (eeprom->type == wx_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = wx_eeprom_none;

		if (!(rd32(wx, WX_SPI_STATUS) &
		      WX_SPI_STATUS_FLASH_BYPASS)) {
			eeprom->type = wx_flash;

			eeprom_size = 4096;
			eeprom->word_size = eeprom_size >> 1;

			wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
			       eeprom->type, eeprom->word_size);
		}
	}

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
			wx_err(wx, "NVM Read Error\n");
			return;
		}
		data = data >> 1;
		break;
	default:
		break;
	}

	eeprom->sw_region_offset = data;
}
EXPORT_SYMBOL(wx_init_eeprom_params);

/**
 *  wx_get_mac_addr - Generic get MAC address
 *  @wx: pointer to hardware structure
 *  @mac_addr: Adapter MAC address
 *
 *  Reads the adapter's MAC address from the first Receive Address Register
 *  (RAR0). A reset of the adapter must be performed prior to calling this
 *  function in order for the MAC address to have been loaded from the
 *  EEPROM into RAR0.
 **/
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
	rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
	rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);

	for (i = 0; i < 2; i++)
		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);

	for (i = 0; i < 4; i++)
		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
}
EXPORT_SYMBOL(wx_get_mac_addr);
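
/* Usage sketch: fetch the address that firmware left in RAR0 and, if
 * it is valid, install it as the netdev's hardware address.
 *
 *	u8 addr[ETH_ALEN];
 *
 *	wx_get_mac_addr(wx, addr);
 *	if (is_valid_ether_addr(addr))
 *		eth_hw_addr_set(wx->netdev, addr);
 */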

/**
 *  wx_set_rar - Set Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @pools: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 **/
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
		      u32 enable_addr)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 rar_low, rar_high;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* select the MAC address */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
		break;
	default:
		break;
	}

	/* HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		  ((u32)addr[4] << 8) |
		  ((u32)addr[3] << 16) |
		  ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		   ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= WX_PSR_MAC_SWC_AD_H_AV;

	wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 *  wx_clear_rar - Remove Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 **/
static int wx_clear_rar(struct wx *wx, u32 index)
{
	u32 rar_entries = wx->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
	wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);

	wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 *  wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
 *  @wx: pointer to hardware struct
 *  @rar: receive address register index to disassociate
 *  @vmdq: VMDq pool index to remove from the rar
 **/
static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 mpsar_lo, mpsar_hi;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
	mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		wx_clear_rar(wx, rar);

	return 0;
}

/**
 *  wx_init_uta_tables - Initialize the Unicast Table Array
 *  @wx: pointer to hardware structure
 **/
static void wx_init_uta_tables(struct wx *wx)
{
	int i;

	wx_dbg(wx, " Clearing UTA\n");

	for (i = 0; i < 128; i++)
		wr32(wx, WX_PSR_UC_TBL(i), 0);
}

/**
 *  wx_init_rx_addrs - Initializes receive address filters.
 *  @wx: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
void wx_init_rx_addrs(struct wx *wx)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 psrctl;
	int i;

	/* If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(wx->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		wx_get_mac_addr(wx, wx->mac.addr);
		wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
	} else {
		/* Setup the receive address. */
		wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
		wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);

		wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);

		switch (wx->mac.type) {
		case wx_mac_sp:
		case wx_mac_aml:
			/* clear VMDq pool/queue selection for RAR 0 */
			wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
			break;
		default:
			break;
		}
	}

	/* Zero out the other receive addresses. */
	wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		wr32(wx, WX_PSR_MAC_SWC_IDX, i);
		wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
		wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
	}

	/* Clear the MTA */
	wx->addr_ctrl.mta_in_use = 0;
	psrctl = rd32(wx, WX_PSR_CTL);
	psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
	psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
	wr32(wx, WX_PSR_CTL, psrctl);
	wx_dbg(wx, " Clearing MTA\n");
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32(wx, WX_PSR_MC_TBL(i), 0);

	wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);

static void wx_sync_mac_table(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
			if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
				wx_set_rar(wx, i,
					   wx->mac_table[i].addr,
					   wx->mac_table[i].pools,
					   WX_PSR_MAC_SWC_AD_H_AV);
			} else {
				wx_clear_rar(wx, i);
			}
			wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
		}
	}
}

static void wx_full_sync_mac_table(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			wx_set_rar(wx, i,
				   wx->mac_table[i].addr,
				   wx->mac_table[i].pools,
				   WX_PSR_MAC_SWC_AD_H_AV);
		} else {
			wx_clear_rar(wx, i);
		}
		wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
	}
}

/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
	memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
	wx->mac_table[0].pools = BIT(VMDQ_P(0));
	wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
	wx_set_rar(wx, 0, wx->mac_table[0].addr,
		   wx->mac_table[0].pools,
		   WX_PSR_MAC_SWC_AD_H_AV);
}
EXPORT_SYMBOL(wx_mac_set_default_filter);

void wx_flush_sw_mac_table(struct wx *wx)
{
	u32 i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
		memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		wx->mac_table[i].pools = 0;
	}
	wx_sync_mac_table(wx);
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);

int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
				if (wx->mac_table[i].pools != (1ULL << pool)) {
					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
					wx->mac_table[i].pools |= (1ULL << pool);
					wx_sync_mac_table(wx);
					return i;
				}
			}
		}

		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
			continue;
		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
					   WX_MAC_STATE_IN_USE);
		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
		wx->mac_table[i].pools |= (1ULL << pool);
		wx_sync_mac_table(wx);
		return i;
	}
	return -ENOMEM;
}
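
/* Usage sketch: wx_write_uc_addr_list() below re-points a unicast
 * address at the PF's default pool by deleting and re-adding it:
 *
 *	wx_del_mac_filter(wx, ha->addr, VMDQ_P(0));
 *	ret = wx_add_mac_filter(wx, ha->addr, VMDQ_P(0));
 *	if (ret < 0)
 *		return ret;	// no free RAR entry was available
 */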

int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* search table for addr, if found, set to 0 and sync */
	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!ether_addr_equal(addr, wx->mac_table[i].addr))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].pools &= ~(1ULL << pool);
		if (!wx->mac_table[i].pools) {
			wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
			memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		}
		wx_sync_mac_table(wx);
		return 0;
	}
	return -ENOMEM;
}

static int wx_available_rars(struct wx *wx)
{
	u32 i, count = 0;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state == 0)
			count++;
	}

	return count;
}

/**
 * wx_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @pool: index for mac table
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > wx_available_rars(wx))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			wx_del_mac_filter(wx, ha->addr, pool);
			wx_add_mac_filter(wx, ha->addr, pool);
			count++;
		}
	}
	return count;
}

/**
 *  wx_mta_vector - Determines bit-vector in multicast table to set
 *  @wx: pointer to private structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits from a multicast address that determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
	u32 vector = 0;

	switch (wx->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		wx_err(wx, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
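
/* Worked example: with mc_filter_type == 0 and multicast address
 * 01:00:5e:00:00:01, mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so
 * vector = (0x00 >> 4) | (0x01 << 4) = 0x010. wx_set_mta() below then
 * sets bit 16 (0x010 & 0x1F) of MTA shadow register 0 (0x010 >> 5).
 */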

/**
 *  wx_set_mta - Set bit-vector in multicast table
 *  @wx: pointer to private structure
 *  @mc_addr: Multicast address
 *
 *  Sets the bit-vector in the multicast table.
 **/
static void wx_set_mta(struct wx *wx, u8 *mc_addr)
{
	u32 vector, vector_bit, vector_reg;

	wx->addr_ctrl.mta_in_use++;

	vector = wx_mta_vector(wx, mc_addr);
	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);

	/* The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 *  wx_update_mc_addr_list - Updates MAC list of multicast addresses
 *  @wx: pointer to private structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 **/
static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i, psrctl;

	/* Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	wx->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	wx_dbg(wx, " Clearing MTA\n");
	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));

	/* Update mta_shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		wx_dbg(wx, " Adding the multicast addresses:\n");
		wx_set_mta(wx, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32a(wx, WX_PSR_MC_TBL(0), i,
		      wx->mac.mta_shadow[i]);

	if (wx->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
		psrctl |= WX_PSR_CTL_MFE |
			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_dbg(wx, "Update mc addr list Complete\n");
}

static void wx_restore_vf_multicasts(struct wx *wx)
{
	u32 i, j, vector_bit, vector_reg;
	struct vf_data_storage *vfinfo;

	for (i = 0; i < wx->num_vfs; i++) {
		u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));

		vfinfo = &wx->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			wx->addr_ctrl.mta_in_use++;
			vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]);
			vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]);
			wr32m(wx, WX_PSR_MC_TBL(vector_reg),
			      BIT(vector_bit), BIT(vector_bit));
			/* errata 5: maintain a copy of the reg table conf */
			wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
		}
		if (vfinfo->num_vf_mc_hashes)
			vmolr |= WX_PSR_VM_L2CTL_ROMPE;
		else
			vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
	}

	/* Restore any VF macvlans */
	wx_full_sync_mac_table(wx);
}

/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
		wx_restore_vf_multicasts(wx);

	return netdev_mc_count(netdev);
}

/**
 * wx_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
int wx_set_mac(struct net_device *netdev, void *p)
{
	struct wx *wx = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, addr);
	if (retval)
		return retval;

	wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);

	wx_mac_set_default_filter(wx, wx->mac.addr);

	return 0;
}
EXPORT_SYMBOL(wx_set_mac);

void wx_disable_rx(struct wx *wx)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = rd32(wx, WX_RDB_PB_CTL);
	if (rxctrl & WX_RDB_PB_CTL_RXEN) {
		pfdtxgswc = rd32(wx, WX_PSR_CTL);
		if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
			pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
			wr32(wx, WX_PSR_CTL, pfdtxgswc);
			wx->mac.set_lben = true;
		} else {
			wx->mac.set_lben = false;
		}
		rxctrl &= ~WX_RDB_PB_CTL_RXEN;
		wr32(wx, WX_RDB_PB_CTL, rxctrl);

		if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
		      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
			/* disable mac receiver */
			wr32m(wx, WX_MAC_RX_CFG,
			      WX_MAC_RX_CFG_RE, 0);
		}
	}
}
EXPORT_SYMBOL(wx_disable_rx);

static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}

/**
 * wx_set_rxpba - Initialize Rx packet buffer
 * @wx: pointer to private structure
 **/
static void wx_set_rxpba(struct wx *wx)
{
	u32 rxpktsize, txpktsize, txpbthresh;
	u32 pbsize = wx->mac.rx_pb_size;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
		    test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
			pbsize -= 64; /* Default 64KB */
	}

	rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = wx->mac.tx_pb_size;
	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

#define WX_ETH_FRAMING 20

/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Loopback switch introduces additional latency */
	if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
		dv_id += WX_B2BT(tc);

	/* Delay value is calculated in bit times; convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide the required headroom. In this case warn the
	 * user and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}
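
/* Worked example: for a 1500-byte MTU the max frame used above is
 * link = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + WX_ETH_FRAMING (20)
 * = 1538 bytes; WX_DV() turns that into a delay in bit times, which is
 * converted to KB and subtracted from the packet buffer size to obtain
 * the high water mark.
 */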

/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	u32 dv_id;
	int tc;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	dv_id = WX_LOW_DV(tc);

	/* Delay value is calculated in bit times; convert to KB */
	return WX_BT2KB(dv_id);
}

/**
 * wx_pbthresh_setup - calculate and setup high low water marks
 *
 * @wx: board private structure to calculate for
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	wx->fc.high_water = wx_hpbthresh(wx);
	wx->fc.low_water = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (wx->fc.low_water > wx->fc.high_water)
		wx->fc.low_water = 0;
}

static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
{
	u32 pfvfspoof, reg_offset, vf_shift;

	vf_shift = WX_VF_IND_SHIFT(vf);
	reg_offset = WX_VF_REG_OFFSET(vf);

	pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
	if (enable)
		pfvfspoof |= BIT(vf_shift);
	else
		pfvfspoof &= ~BIT(vf_shift);
	wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
}

int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	struct wx *wx = netdev_priv(netdev);
	u32 regval;

	if (vf >= wx->num_vfs)
		return -EINVAL;

	wx->vfinfo[vf].spoofchk_enabled = setting;

	regval = (setting << vf_bit);
	wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);

	if (wx->vfinfo[vf].vlan_count)
		wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);

	return 0;
}

static void wx_configure_virtualization(struct wx *wx)
{
	u16 pool = wx->num_rx_pools;
	u32 reg_offset, vf_shift;
	u32 i;

	if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
		return;

	wr32m(wx, WX_PSR_VM_CTL,
	      WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
	      FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
	      WX_PSR_VM_CTL_REPLEN);
	while (pool--)
		wr32m(wx, WX_PSR_VM_L2CTL(pool),
		      WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);

	if (wx->mac.type == wx_mac_em) {
		vf_shift = BIT(VMDQ_P(0));
		/* Enable only the PF pools for Tx/Rx */
		wr32(wx, WX_RDM_VF_RE(0), vf_shift);
		wr32(wx, WX_TDM_VF_TE(0), vf_shift);
	} else {
		vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0));
		reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0));

		/* Enable only the PF pools for Tx/Rx */
		wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
		wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
		wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
		wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
	}

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);

	for (i = 0; i < wx->num_vfs; i++) {
		if (!wx->vfinfo[i].spoofchk_enabled)
			wx_set_vf_spoofchk(wx->netdev, i, false);
		/* enable ethertype anti spoofing if hw supports it */
		wx_set_ethertype_anti_spoofing(wx, true, i);
	}
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	if (wx->mac.type == wx_mac_em) {
		value = (wx->num_vfs == 0) ?
			WX_CFG_PORT_CTL_NUM_VT_NONE :
			WX_CFG_PORT_CTL_NUM_VT_8;
	} else {
		if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
			if (wx->ring_feature[RING_F_RSS].indices == 4)
				value = WX_CFG_PORT_CTL_NUM_VT_32;
			else
				value = WX_CFG_PORT_CTL_NUM_VT_64;
		} else {
			value = 0;
		}
	}

	value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_NUM_VT_MASK |
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}
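
/* For reference: with ETH_P_8021Q = 0x8100 and ETH_P_8021AD = 0x88A8,
 * WX_CFG_TAG_TPID(0) above is written with 0x88A88100 (the 802.1ad
 * TPID in the high half, the 802.1Q TPID in the low half), and the
 * remaining TPID registers are filled with 0x81008100.
 */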

/**
 *  wx_disable_sec_rx_path - Stops the receive data path
 *  @wx: pointer to private structure
 *
 *  Stops the receive data path and waits for the HW to internally empty
 *  the Rx security block
 **/
int wx_disable_sec_rx_path(struct wx *wx)
{
	u32 secrx;

	wr32m(wx, WX_RSC_CTL,
	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);

	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
				 1000, 40000, false, wx, WX_RSC_ST);
}
EXPORT_SYMBOL(wx_disable_sec_rx_path);

/**
 *  wx_enable_sec_rx_path - Enables the receive data path
 *  @wx: pointer to private structure
 *
 *  Enables the receive data path.
 **/
void wx_enable_sec_rx_path(struct wx *wx)
{
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
	WX_WRITE_FLUSH(wx);
}
EXPORT_SYMBOL(wx_enable_sec_rx_path);

static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
	int i, j;

	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *ring = wx->rx_ring[i];

		j = ring->reg_idx;
		wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN,
		      enable ? WX_PX_RR_CFG_VLAN : 0);
	}
}

static void wx_vlan_promisc_enable(struct wx *wx)
{
	u32 vlnctrl, i, vind, bits, reg_idx;

	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
		/* we need to keep the VLAN filter on in SRIOV */
		vlnctrl |= WX_PSR_VLAN_CTL_VFE;
		wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	} else {
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
		return;
	}
	/* We are already in VLAN promisc, nothing to do */
	if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
		return;
	/* Set flag so we don't redo unnecessary work */
	set_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
	/* Add PF to all active pools */
	for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
		vind = WX_VF_IND_SHIFT(VMDQ_P(0));
		reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
		bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
		bits |= BIT(vind);
		wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
	}
	/* Set all bits in the VLAN filter table array */
	for (i = 0; i < wx->mac.vft_size; i++)
		wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
}

static void wx_scrub_vfta(struct wx *wx)
{
	u32 i, vid, bits, vfta, vind, vlvf, reg_idx;

	for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
		/* pull VLAN ID from VLVF */
		vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
		if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
			/* if PF is part of this then continue */
			if (test_bit(vid, wx->active_vlans))
				continue;
		}
		/* remove PF from the pool */
		vind = WX_VF_IND_SHIFT(VMDQ_P(0));
		reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
		bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
		bits &= ~BIT(vind);
		wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
	}
	/* extract values from vft_shadow and write back to VFTA */
	for (i = 0; i < wx->mac.vft_size; i++) {
		vfta = wx->mac.vft_shadow[i];
		wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
	}
}

static void wx_vlan_promisc_disable(struct wx *wx)
{
	u32 vlnctrl;

	/* configure vlan filtering */
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;
	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	/* We are not in VLAN promisc, nothing to do */
	if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
		return;
	/* Set flag so we don't redo unnecessary work */
	clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
	wx_scrub_vfta(wx);
}

void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	netdev_features_t features;
	u32 fctrl, vmolr, vlnctrl;
	int count;

	features = netdev->features;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* the PF doesn't want packets routed to the VFs, so leave UPE clear */
1733 		vmolr |= WX_PSR_VM_L2CTL_MPE;
1734 		if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
1735 		    test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
1736 			vlnctrl |= WX_PSR_VLAN_CTL_VFE;
1737 		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1738 	}
1739 
1740 	if (netdev->flags & IFF_ALLMULTI) {
1741 		fctrl |= WX_PSR_CTL_MPE;
1742 		vmolr |= WX_PSR_VM_L2CTL_MPE;
1743 	}
1744 
1745 	if (netdev->features & NETIF_F_RXALL) {
1746 		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
1747 		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
1748 		/* receive bad packets */
1749 		wr32m(wx, WX_RSC_CTL,
1750 		      WX_RSC_CTL_SAVE_MAC_ERR,
1751 		      WX_RSC_CTL_SAVE_MAC_ERR);
1752 	} else {
1753 		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
1754 	}
1755 
1756 	/* Write addresses to available RAR registers, if there is not
1757 	 * sufficient space to store all the addresses then enable
1758 	 * unicast promiscuous mode
1759 	 */
1760 	count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
1761 	if (count < 0) {
1762 		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
1763 		vmolr |= WX_PSR_VM_L2CTL_UPE;
1764 	}
1765 
1766 	/* Write addresses to the MTA, if the attempt fails
1767 	 * then we should just turn on promiscuous mode so
1768 	 * that we can at least receive multicast traffic
1769 	 */
1770 	count = wx_write_mc_addr_list(netdev);
1771 	if (count < 0) {
1772 		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
1773 		vmolr |= WX_PSR_VM_L2CTL_MPE;
1774 	}
1775 
1776 	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
1777 	wr32(wx, WX_PSR_CTL, fctrl);
1778 	wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
1779 
1780 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1781 	    (features & NETIF_F_HW_VLAN_STAG_RX))
1782 		wx_vlan_strip_control(wx, true);
1783 	else
1784 		wx_vlan_strip_control(wx, false);
1785 
1786 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1787 		wx_vlan_promisc_disable(wx);
1788 	else
1789 		wx_vlan_promisc_enable(wx);
1790 }
1791 EXPORT_SYMBOL(wx_set_rx_mode);
1792 
1793 static void wx_set_rx_buffer_len(struct wx *wx)
1794 {
1795 	struct net_device *netdev = wx->netdev;
1796 	u32 mhadd, max_frame;
1797 
1798 	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1799 	/* adjust max frame to be at least the size of a standard frame */
1800 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
1801 		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
1802 
1803 	mhadd = rd32(wx, WX_PSR_MAX_SZ);
1804 	if (max_frame != mhadd)
1805 		wr32(wx, WX_PSR_MAX_SZ, max_frame);
1806 }
1807 
1808 /**
1809  * wx_change_mtu - Change the Maximum Transfer Unit
1810  * @netdev: network interface device structure
1811  * @new_mtu: new value for maximum frame size
1812  *
1813  * Returns 0 on success, negative on failure
1814  **/
1815 int wx_change_mtu(struct net_device *netdev, int new_mtu)
1816 {
1817 	struct wx *wx = netdev_priv(netdev);
1818 
1819 	WRITE_ONCE(netdev->mtu, new_mtu);
1820 	wx_set_rx_buffer_len(wx);
1821 
1822 	return 0;
1823 }
1824 EXPORT_SYMBOL(wx_change_mtu);
1825 
1826 /* Disable the specified rx queue */
1827 void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
1828 {
1829 	u8 reg_idx = ring->reg_idx;
1830 	u32 rxdctl;
1831 	int ret;
1832 
1833 	/* write value back with RRCFG.EN bit cleared */
1834 	wr32m(wx, WX_PX_RR_CFG(reg_idx),
1835 	      WX_PX_RR_CFG_RR_EN, 0);
1836 
1837 	/* the hardware may take up to 100us to really disable the rx queue */
1838 	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
1839 				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));
1840 
1841 	if (ret == -ETIMEDOUT) {
1842 		/* Just for information */
1843 		wx_err(wx,
1844 		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
1845 		       reg_idx);
1846 	}
1847 }
1848 EXPORT_SYMBOL(wx_disable_rx_queue);
1849 
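/* Poll until the hardware reports the Rx queue as enabled */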
1850 static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
1851 {
1852 	u8 reg_idx = ring->reg_idx;
1853 	u32 rxdctl;
1854 	int ret;
1855 
1856 	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
1857 				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));
1858 
1859 	if (ret == -ETIMEDOUT) {
1860 		/* Just for information */
1861 		wx_err(wx,
1862 		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
1863 		       reg_idx);
1864 	}
1865 }
1866 
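/* Program the ring's header and packet buffer sizes in RR_CFG */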
1867 static void wx_configure_srrctl(struct wx *wx,
1868 				struct wx_ring *rx_ring)
1869 {
1870 	u16 reg_idx = rx_ring->reg_idx;
1871 	u32 srrctl;
1872 
1873 	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
1874 	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
1875 		    WX_PX_RR_CFG_RR_BUF_SZ |
1876 		    WX_PX_RR_CFG_SPLIT_MODE);
1877 	/* configure header buffer length, needed for RSC */
1878 	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;
1879 
1880 	/* configure the packet buffer length */
1881 	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
1882 
1883 	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
1884 }
1885 
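/* Program a Tx ring's base address, size and thresholds, then enable it
 * and verify that the enable took effect
 */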
1886 static void wx_configure_tx_ring(struct wx *wx,
1887 				 struct wx_ring *ring)
1888 {
1889 	u32 txdctl = WX_PX_TR_CFG_ENABLE;
1890 	u8 reg_idx = ring->reg_idx;
1891 	u64 tdba = ring->dma;
1892 	int ret;
1893 
1894 	/* disable queue to avoid issues while updating state */
1895 	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
1896 	WX_WRITE_FLUSH(wx);
1897 
1898 	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
1899 	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
1900 
1901 	/* reset head and tail pointers */
1902 	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
1903 	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
1904 	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);
1905 
1906 	if (ring->count < WX_MAX_TXD)
1907 		txdctl |= (ring->count / 128) << WX_PX_TR_CFG_TR_SIZE_SHIFT;
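	/* set the descriptor write-back threshold (WTHRESH) to 32 */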
1908 	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
1909 
1910 	ring->atr_count = 0;
1911 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
1912 	    test_bit(WX_FLAG_FDIR_HASH, wx->flags))
1913 		ring->atr_sample_rate = wx->atr_sample_rate;
1914 	else
1915 		ring->atr_sample_rate = 0;
1916 
1917 	/* reinitialize tx_buffer_info */
1918 	memset(ring->tx_buffer_info, 0,
1919 	       sizeof(struct wx_tx_buffer) * ring->count);
1920 
1921 	/* enable queue */
1922 	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
1923 
1924 	/* poll to verify queue is enabled */
1925 	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
1926 				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
1927 	if (ret == -ETIMEDOUT)
1928 		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
1929 }
1930 
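/* Program an Rx ring's base address and size, then enable it and post
 * the initial receive buffers
 */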
1931 static void wx_configure_rx_ring(struct wx *wx,
1932 				 struct wx_ring *ring)
1933 {
1934 	u16 reg_idx = ring->reg_idx;
1935 	union wx_rx_desc *rx_desc;
1936 	u64 rdba = ring->dma;
1937 	u32 rxdctl;
1938 
1939 	/* disable queue to avoid issues while updating state */
1940 	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
1941 	wx_disable_rx_queue(wx, ring);
1942 
1943 	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
1944 	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
1945 
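	/* RR_SIZE is ring->count / 128; a max-sized ring is encoded as 0 */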
1946 	if (ring->count == WX_MAX_RXD)
1947 		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
1948 	else
1949 		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;
1950 
1951 	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
1952 	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
1953 
1954 	/* reset head and tail pointers */
1955 	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
1956 	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
1957 	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
1958 
1959 	wx_configure_srrctl(wx, ring);
1960 
1961 	/* initialize rx_buffer_info */
1962 	memset(ring->rx_buffer_info, 0,
1963 	       sizeof(struct wx_rx_buffer) * ring->count);
1964 
1965 	/* initialize Rx descriptor 0 */
1966 	rx_desc = WX_RX_DESC(ring, 0);
1967 	rx_desc->wb.upper.length = 0;
1968 
1969 	/* enable receive descriptor ring */
1970 	wr32m(wx, WX_PX_RR_CFG(reg_idx),
1971 	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);
1972 
1973 	wx_enable_rx_queue(wx, ring);
1974 	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
1975 }
1976 
1977 /**
1978  * wx_configure_tx - Configure Transmit Unit after Reset
1979  * @wx: pointer to private structure
1980  *
1981  * Configure the Tx unit of the MAC after a reset.
1982  **/
1983 static void wx_configure_tx(struct wx *wx)
1984 {
1985 	u32 i;
1986 
1987 	/* TDM_CTL.TE must be set before Tx queues are enabled */
1988 	wr32m(wx, WX_TDM_CTL,
1989 	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);
1990 
1991 	/* Setup the HW Tx Head and Tail descriptor pointers */
1992 	for (i = 0; i < wx->num_tx_queues; i++)
1993 		wx_configure_tx_ring(wx, wx->tx_ring[i]);
1994 
1995 	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
1996 
1997 	if (wx->mac.type == wx_mac_em)
1998 		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
1999 
2000 	/* enable mac transmitter */
2001 	wr32m(wx, WX_MAC_TX_CFG,
2002 	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
2003 }
2004 
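/* Re-add VLAN 0 and every VLAN cached in active_vlans, e.g. after a reset */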
2005 static void wx_restore_vlan(struct wx *wx)
2006 {
2007 	u16 vid = 1;
2008 
2009 	wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);
2010 
2011 	for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
2012 		wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
2013 }
2014 
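/* Flush the software RSS indirection table to hardware, packing four
 * 8-bit entries into each 32-bit RSSTBL register
 */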
2015 static void wx_store_reta(struct wx *wx)
2016 {
2017 	u8 *indir_tbl = wx->rss_indir_tbl;
2018 	u32 reta = 0;
2019 	u32 i;
2020 
2021 	/* Fill out the redirection table as follows:
2022 	 *  - 8 bit wide entries containing 4 bit RSS index
2023 	 */
2024 	for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
2025 		reta |= indir_tbl[i] << ((i & 0x3) * 8);
2026 		if ((i & 3) == 3) {
2027 			wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
2028 			reta = 0;
2029 		}
2030 	}
2031 }
2032 
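/* Seed the RSS hash key registers and spread the redirection table
 * entries evenly across the active RSS queues
 */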
2033 static void wx_setup_reta(struct wx *wx)
2034 {
2035 	u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
2036 	u32 random_key_size = WX_RSS_KEY_SIZE / 4;
2037 	u32 i, j;
2038 
2039 	if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
2040 		if (wx->mac.type == wx_mac_em)
2041 			rss_i = 1;
2042 		else
2043 			rss_i = rss_i < 4 ? 4 : rss_i;
2044 	}
2045 
2046 	/* Fill out hash function seeds */
2047 	for (i = 0; i < random_key_size; i++)
2048 		wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
2049 
2050 	/* Fill out redirection table */
2051 	memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
2052 
2053 	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
2054 		if (j == rss_i)
2055 			j = 0;
2056 
2057 		wx->rss_indir_tbl[i] = j;
2058 	}
2059 
2060 	wx_store_reta(wx);
2061 }
2062 
2063 #define WX_RDB_RSS_PL_2		FIELD_PREP(GENMASK(31, 29), 1)
2064 #define WX_RDB_RSS_PL_4		FIELD_PREP(GENMASK(31, 29), 2)
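/* Tell each pool which packet header types to parse (L2/L3/L4 and
 * tunnel headers)
 */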
2065 static void wx_setup_psrtype(struct wx *wx)
2066 {
2067 	int rss_i = wx->ring_feature[RING_F_RSS].indices;
2068 	u32 psrtype;
2069 	int pool;
2070 
2071 	psrtype = WX_RDB_PL_CFG_L4HDR |
2072 		  WX_RDB_PL_CFG_L3HDR |
2073 		  WX_RDB_PL_CFG_L2HDR |
2074 		  WX_RDB_PL_CFG_TUN_OUTL2HDR |
2075 		  WX_RDB_PL_CFG_TUN_TUNHDR;
2076 
2077 	if (wx->mac.type == wx_mac_em) {
2078 		for_each_set_bit(pool, &wx->fwd_bitmask, 8)
2079 			wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
2080 	} else {
2081 		if (rss_i > 3)
2082 			psrtype |= WX_RDB_RSS_PL_4;
2083 		else if (rss_i > 1)
2084 			psrtype |= WX_RDB_RSS_PL_2;
2085 
2086 		for_each_set_bit(pool, &wx->fwd_bitmask, 32)
2087 			wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
2088 	}
2089 }
2090 
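/* Configure receive-side scaling: hash key, redirection table and the
 * packet types to hash on
 */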
2091 static void wx_setup_mrqc(struct wx *wx)
2092 {
2093 	u32 rss_field = 0;
2094 
2095 	/* VT and RSS do not coexist at the same time */
2096 	if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
2097 		return;
2098 
2099 	/* Disable checksum indication in descriptors, enable RSS hash */
2100 	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
2101 
2102 	/* Perform hash on these packet types */
2103 	rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
2104 		    WX_RDB_RA_CTL_RSS_IPV4_TCP |
2105 		    WX_RDB_RA_CTL_RSS_IPV4_UDP |
2106 		    WX_RDB_RA_CTL_RSS_IPV6 |
2107 		    WX_RDB_RA_CTL_RSS_IPV6_TCP |
2108 		    WX_RDB_RA_CTL_RSS_IPV6_UDP;
2109 
2110 	netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
2111 
2112 	wx_setup_reta(wx);
2113 
2114 	if (wx->rss_enabled)
2115 		rss_field |= WX_RDB_RA_CTL_RSS_EN;
2116 
2117 	wr32(wx, WX_RDB_RA_CTL, rss_field);
2118 }
2119 
2120 /**
2121  * wx_configure_rx - Configure Receive Unit after Reset
2122  * @wx: pointer to private structure
2123  *
2124  * Configure the Rx unit of the MAC after a reset.
2125  **/
2126 void wx_configure_rx(struct wx *wx)
2127 {
2128 	int ret;
2129 	u32 i;
2130 
2131 	wx_disable_rx(wx);
2132 	wx_setup_psrtype(wx);
2133 
2134 	/* enable hw crc stripping */
2135 	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
2136 
2137 	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
2138 		u32 psrctl;
2139 
2140 		/* RSC Setup */
2141 		psrctl = rd32(wx, WX_PSR_CTL);
2142 		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
2143 		psrctl |= WX_PSR_CTL_RSC_DIS;
2144 		wr32(wx, WX_PSR_CTL, psrctl);
2145 	}
2146 
2147 	wx_setup_mrqc(wx);
2148 
2149 	/* set_rx_buffer_len must be called before ring initialization */
2150 	wx_set_rx_buffer_len(wx);
2151 
2152 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
2153 	 * the Base and Length of the Rx Descriptor Ring
2154 	 */
2155 	for (i = 0; i < wx->num_rx_queues; i++)
2156 		wx_configure_rx_ring(wx, wx->rx_ring[i]);
2157 
2158 	/* Disable the security engine before enabling all receives, so traffic is blocked meanwhile */
2159 	ret = wx_disable_sec_rx_path(wx);
2160 	if (ret < 0)
2161 		wx_err(wx, "The register status is abnormal, please check device.");
2162 
2163 	wx_enable_rx(wx);
2164 	wx_enable_sec_rx_path(wx);
2165 }
2166 EXPORT_SYMBOL(wx_configure_rx);
2167 
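/* Tell the hardware where to DMA the interrupt status block */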
2168 static void wx_configure_isb(struct wx *wx)
2169 {
2170 	/* set ISB Address */
2171 	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
2172 	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
2173 		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
2174 }
2175 
2176 void wx_configure(struct wx *wx)
2177 {
2178 	wx_set_rxpba(wx);
2179 	wx_pbthresh_setup(wx);
2180 	wx_configure_virtualization(wx);
2181 	wx_configure_port(wx);
2182 
2183 	wx_set_rx_mode(wx->netdev);
2184 	wx_restore_vlan(wx);
2185 
2186 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
2187 		wx->configure_fdir(wx);
2188 
2189 	wx_configure_tx(wx);
2190 	wx_configure_rx(wx);
2191 	wx_configure_isb(wx);
2192 }
2193 EXPORT_SYMBOL(wx_configure);
2194 
2195 /**
2196  *  wx_disable_pcie_master - Disable PCI-express master access
2197  *  @wx: pointer to hardware structure
2198  *
2199  *  Disables PCI-Express master access and verifies there are no pending
2200  *  requests.
2201  **/
2202 int wx_disable_pcie_master(struct wx *wx)
2203 {
2204 	int status = 0;
2205 	u32 val;
2206 
2207 	/* Always clear the bus master bit so any future transactions are blocked */
2208 	pci_clear_master(wx->pdev);
2209 
2210 	/* Exit if no master requests are pending */
2211 	if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
2212 		return 0;
2213 
2214 	/* Poll for master request bit to clear */
2215 	status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
2216 				   false, wx, WX_PX_TRANSACTION_PENDING);
2217 	if (status < 0)
2218 		wx_err(wx, "PCIe transaction pending bit did not clear.\n");
2219 
2220 	return status;
2221 }
2222 EXPORT_SYMBOL(wx_disable_pcie_master);
2223 
2224 /**
2225  *  wx_stop_adapter - Generic stop Tx/Rx units
2226  *  @wx: pointer to hardware structure
2227  *
2228  *  Sets the adapter_stopped flag within the wx struct. Clears interrupts,
2229  *  disables transmit and receive units. The adapter_stopped flag is used by
2230  *  the shared code and drivers to determine if the adapter is in a stopped
2231  *  state and should not touch the hardware.
2232  **/
2233 int wx_stop_adapter(struct wx *wx)
2234 {
2235 	u16 i;
2236 
2237 	/* Set the adapter_stopped flag so other driver functions stop touching
2238 	 * the hardware
2239 	 */
2240 	wx->adapter_stopped = true;
2241 
2242 	/* Disable the receive unit */
2243 	wx_disable_rx(wx);
2244 
2245 	/* Set interrupt mask to stop interrupts from being generated */
2246 	wx_intr_disable(wx, WX_INTR_ALL);
2247 
2248 	/* Clear any pending interrupts, flush previous writes */
2249 	wr32(wx, WX_PX_MISC_IC, 0xffffffff);
2250 	wr32(wx, WX_BME_CTL, 0x3);
2251 
2252 	/* Disable the transmit unit.  Each queue must be disabled. */
2253 	for (i = 0; i < wx->mac.max_tx_queues; i++) {
2254 		wr32m(wx, WX_PX_TR_CFG(i),
2255 		      WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
2256 		      WX_PX_TR_CFG_SWFLSH);
2257 	}
2258 
2259 	/* Disable the receive unit by stopping each queue */
2260 	for (i = 0; i < wx->mac.max_rx_queues; i++) {
2261 		wr32m(wx, WX_PX_RR_CFG(i),
2262 		      WX_PX_RR_CFG_RR_EN, 0);
2263 	}
2264 
2265 	/* flush all queues disables */
2266 	WX_WRITE_FLUSH(wx);
2267 
2268 	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
2269 	 * access and verify no pending requests
2270 	 */
2271 	return wx_disable_pcie_master(wx);
2272 }
2273 EXPORT_SYMBOL(wx_stop_adapter);
2274 
2275 void wx_reset_misc(struct wx *wx)
2276 {
2277 	int i;
2278 
2279 	/* receive packets whose size > 2048 (enable jumbo frames) */
2280 	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
2281 
2282 	/* clear counters on read */
2283 	wr32m(wx, WX_MMC_CONTROL,
2284 	      WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);
2285 
2286 	wr32m(wx, WX_MAC_RX_FLOW_CTRL,
2287 	      WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
2288 
2289 	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
2290 
2291 	wr32m(wx, WX_MIS_RST_ST,
2292 	      WX_MIS_RST_ST_RST_INIT, 0x1E00);
2293 
2294 	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
2295 	wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
2296 	for (i = 0; i < 16; i++) {
2297 		wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
2298 		wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
2299 		wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
2300 	}
2301 	wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
2302 	for (i = 0; i < 16; i++) {
2303 		wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
2304 		wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
2305 		wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
2306 	}
2307 
2308 	/* set pause frame dst mac addr */
2309 	wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
2310 	wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
2311 }
2312 EXPORT_SYMBOL(wx_reset_misc);
2313 
2314 /**
2315  *  wx_get_pcie_msix_counts - Gets MSI-X vector count
2316  *  @wx: pointer to hardware structure
2317  *  @msix_count: number of MSI-X interrupts that can be obtained
2318  *  @max_msix_count: maximum number of MSI-X interrupts the MAC supports
2319  *
2320  *  Read PCIe configuration space, and get the MSI-X vector count from
2321  *  the capabilities table.
2322  **/
2323 int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
2324 {
2325 	struct pci_dev *pdev = wx->pdev;
2326 	struct device *dev = &pdev->dev;
2327 	int pos;
2328 
2329 	*msix_count = 1;
2330 	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
2331 	if (!pos) {
2332 		dev_err(dev, "Unable to find MSI-X Capabilities\n");
2333 		return -EINVAL;
2334 	}
2335 	pci_read_config_word(pdev,
2336 			     pos + PCI_MSIX_FLAGS,
2337 			     msix_count);
2338 	*msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
2339 	/* MSI-X count is zero-based in HW */
2340 	*msix_count += 1;
2341 
2342 	if (*msix_count > max_msix_count)
2343 		*msix_count = max_msix_count;
2344 
2345 	return 0;
2346 }
2347 EXPORT_SYMBOL(wx_get_pcie_msix_counts);
2348 
2349 /**
2350  * wx_init_rss_key - Initialize wx RSS key
2351  * @wx: device handle
2352  *
2353  * Allocates and initializes the RSS key if it is not allocated.
2354  **/
2355 static int wx_init_rss_key(struct wx *wx)
2356 {
2357 	u32 *rss_key;
2358 
2359 	if (!wx->rss_key) {
2360 		rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
2361 		if (unlikely(!rss_key))
2362 			return -ENOMEM;
2363 
2364 		netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
2365 		wx->rss_key = rss_key;
2366 	}
2367 
2368 	return 0;
2369 }
2370 
2371 int wx_sw_init(struct wx *wx)
2372 {
2373 	struct pci_dev *pdev = wx->pdev;
2374 	u32 ssid = 0;
2375 	int err = 0;
2376 
2377 	wx->vendor_id = pdev->vendor;
2378 	wx->device_id = pdev->device;
2379 	wx->revision_id = pdev->revision;
2380 	wx->oem_svid = pdev->subsystem_vendor;
2381 	wx->oem_ssid = pdev->subsystem_device;
2382 	wx->bus.device = PCI_SLOT(pdev->devfn);
2383 	wx->bus.func = PCI_FUNC(pdev->devfn);
2384 
2385 	if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
2386 		wx->subsystem_vendor_id = pdev->subsystem_vendor;
2387 		wx->subsystem_device_id = pdev->subsystem_device;
2388 	} else {
2389 		err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
2390 		if (err < 0) {
2391 			wx_err(wx, "read of internal subsystem device id failed\n");
2392 			return err;
2393 		}
2394 
2395 		wx->subsystem_device_id = swab16((u16)ssid);
2396 	}
2397 
2398 	err = wx_init_rss_key(wx);
2399 	if (err < 0) {
2400 		wx_err(wx, "rss key allocation failed\n");
2401 		return err;
2402 	}
2403 
2404 	wx->mac_table = kcalloc(wx->mac.num_rar_entries,
2405 				sizeof(struct wx_mac_addr),
2406 				GFP_KERNEL);
2407 	if (!wx->mac_table) {
2408 		wx_err(wx, "mac_table allocation failed\n");
2409 		kfree(wx->rss_key);
2410 		return -ENOMEM;
2411 	}
2412 
2413 	bitmap_zero(wx->state, WX_STATE_NBITS);
2414 	bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
2415 	wx->misc_irq_domain = false;
2416 
2417 	return 0;
2418 }
2419 EXPORT_SYMBOL(wx_sw_init);
2420 
2421 /**
2422  *  wx_find_vlvf_slot - find the vlanid or the first empty slot
2423  *  @wx: pointer to hardware structure
2424  *  @vlan: VLAN id to write to VLAN filter
2425  *
2426  *  Returns the VLVF index where this VLAN id should be placed,
2427  *  or -ENOMEM if no empty slot is found
2428  **/
2429 static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
2430 {
2431 	u32 bits = 0, first_empty_slot = 0;
2432 	int regindex;
2433 
2434 	/* short cut the special case */
2435 	if (vlan == 0)
2436 		return 0;
2437 
2438 	/* Search for the vlan id in the VLVF entries. Save off the first empty
2439 	 * slot found along the way
2440 	 */
2441 	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
2442 		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
2443 		bits = rd32(wx, WX_PSR_VLAN_SWC);
2444 		if (!bits && !(first_empty_slot))
2445 			first_empty_slot = regindex;
2446 		else if ((bits & 0x0FFF) == vlan)
2447 			break;
2448 	}
2449 
2450 	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) {
2451 		if (first_empty_slot)
2452 			regindex = first_empty_slot;
2453 		else
2454 			regindex = -ENOMEM;
2455 	}
2456 
2457 	return regindex;
2458 }
2459 
2460 /**
2461  *  wx_set_vlvf - Set VLAN Pool Filter
2462  *  @wx: pointer to hardware structure
2463  *  @vlan: VLAN id to write to VLAN filter
2464  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
2465  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
2466  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
2467  *                 should be changed
2468  *
2469  *  Turn on/off specified bit in VLVF table.
2470  **/
2471 static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
2472 		       bool *vfta_changed)
2473 {
2474 	int vlvf_index;
2475 	u32 vt, bits;
2476 
2477 	/* If VT Mode is set
2478 	 *   Either vlan_on
2479 	 *     make sure the vlan is in VLVF
2480 	 *     set the vind bit in the matching VLVFB
2481 	 *   Or !vlan_on
2482 	 *     clear the pool bit and possibly the vind
2483 	 */
2484 	vt = rd32(wx, WX_CFG_PORT_CTL);
2485 	if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK))
2486 		return 0;
2487 
2488 	vlvf_index = wx_find_vlvf_slot(wx, vlan);
2489 	if (vlvf_index < 0)
2490 		return vlvf_index;
2491 
2492 	wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
2493 	if (vlan_on) {
2494 		/* set the pool bit */
2495 		if (vind < 32) {
2496 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2497 			bits |= (1 << vind);
2498 			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2499 		} else {
2500 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2501 			bits |= (1 << (vind - 32));
2502 			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2503 		}
2504 	} else {
2505 		/* clear the pool bit; collect both halves for the empty check below */
2506 		if (vind < 32) {
2507 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2508 			bits &= ~(1 << vind);
2509 			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2510 			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2511 		} else {
2512 			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2513 			bits &= ~(1 << (vind - 32));
2514 			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2515 			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2516 		}
2517 	}
2518 
2519 	if (bits) {
2520 		wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
2521 		if (!vlan_on && vfta_changed)
2522 			*vfta_changed = false;
2523 	} else {
2524 		wr32(wx, WX_PSR_VLAN_SWC, 0);
2525 	}
2526 
2527 	return 0;
2528 }
2529 
2530 /**
2531  *  wx_set_vfta - Set VLAN filter table
2532  *  @wx: pointer to hardware structure
2533  *  @vlan: VLAN id to write to VLAN filter
2534  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
2535  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
2536  *
2537  *  Turn on/off specified VLAN in the VLAN filter table.
2538  **/
2539 int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
2540 {
2541 	u32 bitindex, vfta, targetbit;
2542 	bool vfta_changed = false;
2543 	int regindex, ret;
2544 
2545 	/* this is a 2 part operation - first the VFTA, then the
2546 	 * VLVF and VLVFB if VT Mode is set
2547 	 * We don't write the VFTA until we know the VLVF part succeeded.
2548 	 */
2549 
2550 	/* Part 1
2551 	 * The VFTA is a bitstring made up of 128 32-bit registers
2552 	 * that enable the particular VLAN id, much like the MTA:
2553 	 *    bits[11-5]: which register
2554 	 *    bits[4-0]:  which bit in the register
2555 	 */
2556 	regindex = (vlan >> 5) & 0x7F;
2557 	bitindex = vlan & 0x1F;
2558 	targetbit = (1 << bitindex);
2559 	/* errata 5 */
2560 	vfta = wx->mac.vft_shadow[regindex];
2561 	if (vlan_on) {
2562 		if (!(vfta & targetbit)) {
2563 			vfta |= targetbit;
2564 			vfta_changed = true;
2565 		}
2566 	} else {
2567 		if ((vfta & targetbit)) {
2568 			vfta &= ~targetbit;
2569 			vfta_changed = true;
2570 		}
2571 	}
2572 	/* Part 2
2573 	 * Call wx_set_vlvf to set VLVFB and VLVF
2574 	 */
2575 	ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
2576 	if (ret != 0)
2577 		return ret;
2578 
2579 	if (vfta_changed)
2580 		wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
2581 	wx->mac.vft_shadow[regindex] = vfta;
2582 
2583 	return 0;
2584 }
2585 
2586 /**
2587  *  wx_clear_vfta - Clear VLAN filter table
2588  *  @wx: pointer to hardware structure
2589  *
2590  *  Clears the VLAN filter table, and the VMDq index associated with the filter
2591  **/
2592 static void wx_clear_vfta(struct wx *wx)
2593 {
2594 	u32 offset;
2595 
2596 	for (offset = 0; offset < wx->mac.vft_size; offset++) {
2597 		wr32(wx, WX_PSR_VLAN_TBL(offset), 0);
2598 		wx->mac.vft_shadow[offset] = 0;
2599 	}
2600 
2601 	for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) {
2602 		wr32(wx, WX_PSR_VLAN_SWC_IDX, offset);
2603 		wr32(wx, WX_PSR_VLAN_SWC, 0);
2604 		wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
2605 		wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
2606 	}
2607 }
2608 
2609 int wx_vlan_rx_add_vid(struct net_device *netdev,
2610 		       __be16 proto, u16 vid)
2611 {
2612 	struct wx *wx = netdev_priv(netdev);
2613 
2614 	/* add VID to filter table */
2615 	wx_set_vfta(wx, vid, VMDQ_P(0), true);
2616 	set_bit(vid, wx->active_vlans);
2617 
2618 	return 0;
2619 }
2620 EXPORT_SYMBOL(wx_vlan_rx_add_vid);
2621 
2622 int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2623 {
2624 	struct wx *wx = netdev_priv(netdev);
2625 
2626 	/* remove VID from filter table */
2627 	if (vid)
2628 		wx_set_vfta(wx, vid, VMDQ_P(0), false);
2629 	clear_bit(vid, wx->active_vlans);
2630 
2631 	return 0;
2632 }
2633 EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
2634 
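/* Helpers to toggle the per-ring drop-enable bit; used by wx_fc_enable()
 * to avoid head-of-line blocking when flow control is disabled
 */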
2635 static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
2636 {
2637 	u16 reg_idx = ring->reg_idx;
2638 	u32 srrctl;
2639 
2640 	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2641 	srrctl |= WX_PX_RR_CFG_DROP_EN;
2642 
2643 	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2644 }
2645 
2646 static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
2647 {
2648 	u16 reg_idx = ring->reg_idx;
2649 	u32 srrctl;
2650 
2651 	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2652 	srrctl &= ~WX_PX_RR_CFG_DROP_EN;
2653 
2654 	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2655 }
2656 
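/**
 *  wx_fc_enable - Enable 802.3x flow control
 *  @wx: pointer to hardware structure
 *  @tx_pause: send pause frames when the Rx water marks are crossed
 *  @rx_pause: act on pause frames received from the link partner
 **/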
2657 int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
2658 {
2659 	u16 pause_time = WX_DEFAULT_FCPAUSE;
2660 	u32 mflcn_reg, fccfg_reg, reg;
2661 	u32 fcrtl, fcrth;
2662 	int i;
2663 
2664 	/* Low water mark of zero causes XOFF floods */
2665 	if (tx_pause && wx->fc.high_water) {
2666 		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
2667 			wx_err(wx, "Invalid water mark configuration\n");
2668 			return -EINVAL;
2669 		}
2670 	}
2671 
2672 	/* Disable any previous flow control settings */
2673 	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
2674 	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;
2675 
2676 	fccfg_reg = rd32(wx, WX_RDB_RFCC);
2677 	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;
2678 
2679 	if (rx_pause)
2680 		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
2681 	if (tx_pause)
2682 		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;
2683 
2684 	/* Set 802.3x based flow control settings. */
2685 	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
2686 	wr32(wx, WX_RDB_RFCC, fccfg_reg);
2687 
2688 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2689 	if (tx_pause && wx->fc.high_water) {
2690 		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
2691 		wr32(wx, WX_RDB_RFCL, fcrtl);
2692 		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
2693 	} else {
2694 		wr32(wx, WX_RDB_RFCL, 0);
2695 		/* In order to prevent Tx hangs when the internal Tx
2696 		 * switch is enabled we must set the high water mark
2697 		 * to the Rx packet buffer size minus 24KB.  This allows
2698 		 * the Tx switch to function even under heavy Rx
2699 		 * workloads.
2700 		 */
2701 		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
2702 	}
2703 
2704 	wr32(wx, WX_RDB_RFCH, fcrth);
2705 
2706 	/* Configure pause time */
2707 	reg = pause_time * 0x00010001;
2708 	wr32(wx, WX_RDB_RFCV, reg);
2709 
2710 	/* Configure flow control refresh threshold value */
2711 	wr32(wx, WX_RDB_RFCRT, pause_time / 2);
2712 
2713 	/*  We should set the drop enable bit if:
2714 	 *  Number of Rx queues > 1 and flow control is disabled
2715 	 *
2716 	 *  This allows us to avoid head of line blocking for security
2717 	 *  and performance reasons.
2718 	 */
2719 	if (wx->num_rx_queues > 1 && !tx_pause) {
2720 		for (i = 0; i < wx->num_rx_queues; i++)
2721 			wx_enable_rx_drop(wx, wx->rx_ring[i]);
2722 	} else {
2723 		for (i = 0; i < wx->num_rx_queues; i++)
2724 			wx_disable_rx_drop(wx, wx->rx_ring[i]);
2725 	}
2726 
2727 	return 0;
2728 }
2729 EXPORT_SYMBOL(wx_fc_enable);
2730 
2731 /**
2732  * wx_update_stats - Update the board statistics counters.
2733  * @wx: board private structure
2734  **/
2735 void wx_update_stats(struct wx *wx)
2736 {
2737 	struct wx_hw_stats *hwstats = &wx->stats;
2738 
2739 	u64 non_eop_descs = 0, alloc_rx_buff_failed = 0;
2740 	u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
2741 	u64 restart_queue = 0, tx_busy = 0;
2742 	u32 i;
2743 
2744 	/* gather per-queue stats into the wx struct */
2745 	for (i = 0; i < wx->num_rx_queues; i++) {
2746 		struct wx_ring *rx_ring = wx->rx_ring[i];
2747 
2748 		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
2749 		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
2750 		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
2751 		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
2752 	}
2753 	wx->non_eop_descs = non_eop_descs;
2754 	wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
2755 	wx->hw_csum_rx_error = hw_csum_rx_error;
2756 	wx->hw_csum_rx_good = hw_csum_rx_good;
2757 
2758 	for (i = 0; i < wx->num_tx_queues; i++) {
2759 		struct wx_ring *tx_ring = wx->tx_ring[i];
2760 
2761 		restart_queue += tx_ring->tx_stats.restart_queue;
2762 		tx_busy += tx_ring->tx_stats.tx_busy;
2763 	}
2764 	wx->restart_queue = restart_queue;
2765 	wx->tx_busy = tx_busy;
2766 
2767 	hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
2768 	hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
2769 	hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
2770 	hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
2771 	hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2772 	hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2773 	hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2774 	hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2775 	hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2776 	hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2777 	hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2778 	hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2779 	hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2780 	hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2781 	hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
2782 	hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
2783 	hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
2784 	hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
2785 	hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
2786 	hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
2787 	hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
2788 	hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
2789 
2790 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
2791 		hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
2792 		hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
2793 	}
2794 
2795 	for (i = 0; i < wx->mac.max_rx_queues; i++)
2796 		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
2797 }
2798 EXPORT_SYMBOL(wx_update_stats);
2799 
2800 /**
2801  *  wx_clear_hw_cntrs - Generic clear hardware counters
2802  *  @wx: board private structure
2803  *
2804  *  Clears all hardware statistics counters by reading them from the hardware
2805  *  Statistics counters are clear on read.
2806  **/
2807 void wx_clear_hw_cntrs(struct wx *wx)
2808 {
2809 	u16 i = 0;
2810 
2811 	for (i = 0; i < wx->mac.max_rx_queues; i++)
2812 		wr32(wx, WX_PX_MPRC(i), 0);
2813 
2814 	rd32(wx, WX_RDM_PKT_CNT);
2815 	rd32(wx, WX_TDM_PKT_CNT);
2816 	rd64(wx, WX_RDM_BYTE_CNT_LSB);
2817 	rd64(wx, WX_TDM_BYTE_CNT_LSB);
2818 	rd32(wx, WX_RDM_DRP_PKT);
2819 	rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2820 	rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2821 	rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2822 	rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2823 	rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2824 	rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2825 	rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2826 	rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2827 	rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2828 	rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2829 	rd32(wx, WX_RDB_LXONTXC);
2830 	rd32(wx, WX_RDB_LXOFFTXC);
2831 	rd32(wx, WX_MAC_LXONOFFRXC);
2832 }
2833 EXPORT_SYMBOL(wx_clear_hw_cntrs);
2834 
2835 /**
2836  *  wx_start_hw - Prepare hardware for Tx/Rx
2837  *  @wx: pointer to hardware structure
2838  *
2839  *  Prepares the hardware for Tx/Rx after a reset: clears the
2840  *  VLAN filter table and resets the per-queue transmit rate
2841  *  limiters.
2842  **/
2843 void wx_start_hw(struct wx *wx)
2844 {
2845 	int i;
2846 
2847 	/* Clear the VLAN filter table */
2848 	wx_clear_vfta(wx);
2849 	WX_WRITE_FLUSH(wx);
2850 	/* Clear the rate limiters */
2851 	for (i = 0; i < wx->mac.max_tx_queues; i++) {
2852 		wr32(wx, WX_TDM_RP_IDX, i);
2853 		wr32(wx, WX_TDM_RP_RATE, 0);
2854 	}
2855 }
2856 EXPORT_SYMBOL(wx_start_hw);
2857 
2858 MODULE_LICENSE("GPL");
2859