xref: /linux/drivers/net/ethernet/neterion/s2io.c (revision b8e85e6f3a09fc56b0ff574887798962ef8a8f80)
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
21  * Francois Romieu	: For pointing out all code part that were
22  *			  deprecated and also styling related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46  *                 Possible values '1' for enable , '0' for disable.
47  *                 Default is '2' - which means disable in promisc mode
48  *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *      Possible values '1' for enable and '0' for disable. Default is '0'
51  ************************************************************************/
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* RxD size in bytes and number of RxDs per Rx block, indexed by rxd_mode. */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
99 
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 	int ret;
103 
104 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106 
107 	return ret;
108 }
109 
110 /*
111  * Cards with following subsystem_id have a link state indication
112  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113  * macro below identifies these cards given the subsystem_id.
114  */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither the remote-fault nor the local-fault bit is
 * set in the adapter-status register value passed in.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122 
/* Non-zero when the adapter has been brought up (CARD_UP bit in sp->state). */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
127 
128 /* Ethtool related variables and Macros. */
/* Self-test names reported through ethtool; the suffix notes whether the
 * test can run while the interface is online or only offline.
 */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
136 
/* Key strings for the hardware MAC/PCI statistics exposed via ethtool -S
 * (tmac_* = transmit MAC, rmac_* = receive MAC, remainder = PCI/DMA counters).
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
233 
/* Additional ethtool -S key strings; see XFRAME_II_STAT_LEN below, which
 * appends these to the base set only for the Xframe II device.
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
252 
/* Key strings for the software statistics maintained by the driver itself
 * (error counters, LRO bookkeeping, memory accounting, link events).  The
 * first entry is a section header shown in the ethtool -S output.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
327 
/* Element counts of the ethtool key tables above. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe I reports the base + driver stats; Xframe II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the corresponding gstring buffers. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 /* copy mac addr to def_mac_addr array */
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
351 
352 /*
353  * Constants to be programmed into the Xena's registers, to configure
354  * the XAUI.
355  */
356 
#define	END_SIGN	0x0
/* XAUI DTX configuration sequence for the Herc (Xframe II) adapter:
 * pairs of set-address / write-data values, terminated by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
378 
/* XAUI DTX configuration sequence for the Xena (Xframe I) adapter:
 * pairs of set-address / write-data values, terminated by END_SIGN.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
394 
395 /*
396  * Constants for Fixing the MacAddress problem seen mostly on
397  * Alpha machines.
398  */
/* Register write sequence used for the MAC-address workaround described in
 * the comment above; terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
416 
MODULE_DESCRIPTION("Neterion 10GbE driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* S2IO_PARM_INT(name, default) declares an integer module parameter with
 * the given default; the macro itself is defined outside this file.
 */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
451 
/* Per-FIFO Tx descriptor counts: FIFO 0 defaults to DEFAULT_FIFO_0_LEN,
 * all remaining FIFOs to DEFAULT_FIFO_1_7_LEN.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts, all defaulting to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length overrides, 0 (unset) by default.
 * NOTE(review): presumably programmed into the per-ring RTS frame-length
 * registers - confirm against the use site.
 */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* All three arrays are overridable as comma-separated module parameters. */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
462 
463 /*
464  * S2IO device table.
465  * This table lists all the devices that this driver supports.
466  */
static const struct pci_device_id s2io_tbl[] = {
	/* Xframe I */
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	/* Xframe II (Herc) */
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminator */
};
478 
479 MODULE_DEVICE_TABLE(pci, s2io_tbl);
480 
/* PCI error recovery callbacks. */
static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
486 
/* PCI driver glue: device table, probe/remove and error-recovery hooks. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};
494 
495 /* A simplifier macro used both by init and free shared_mem Fns(). */
496 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
497 
498 /* netqueue manipulation helper functions */
499 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
500 {
501 	if (!sp->config.multiq) {
502 		int i;
503 
504 		for (i = 0; i < sp->config.tx_fifo_num; i++)
505 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
506 	}
507 	netif_tx_stop_all_queues(sp->dev);
508 }
509 
510 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
511 {
512 	if (!sp->config.multiq)
513 		sp->mac_control.fifos[fifo_no].queue_state =
514 			FIFO_QUEUE_STOP;
515 
516 	netif_tx_stop_all_queues(sp->dev);
517 }
518 
519 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
520 {
521 	if (!sp->config.multiq) {
522 		int i;
523 
524 		for (i = 0; i < sp->config.tx_fifo_num; i++)
525 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
526 	}
527 	netif_tx_start_all_queues(sp->dev);
528 }
529 
530 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
531 {
532 	if (!sp->config.multiq) {
533 		int i;
534 
535 		for (i = 0; i < sp->config.tx_fifo_num; i++)
536 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
537 	}
538 	netif_tx_wake_all_queues(sp->dev);
539 }
540 
541 static inline void s2io_wake_tx_queue(
542 	struct fifo_info *fifo, int cnt, u8 multiq)
543 {
544 
545 	if (multiq) {
546 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
547 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
548 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
549 		if (netif_queue_stopped(fifo->dev)) {
550 			fifo->queue_state = FIFO_QUEUE_START;
551 			netif_wake_queue(fifo->dev);
552 		}
553 	}
554 }
555 
556 /**
557  * init_shared_mem - Allocation and Initialization of Memory
558  * @nic: Device private variable.
559  * Description: The function allocates all the memory areas shared
560  * between the NIC and the driver. This includes Tx descriptors,
561  * Rx descriptors and the statistics block.
562  */
563 
/* Returns SUCCESS, or -EINVAL / -ENOMEM / FAILURE on error.  Per the inline
 * comments, the caller is expected to invoke free_shared_mem() on failure
 * to release anything allocated before the error.
 */
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running byte total, folded into sw_stat at the end */

	/* Allocation and initialization of TXDLs in FIFOs */
	/* First pass: sum TxDs over all FIFOs and reject over-large configs. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Second pass: validate each FIFO's length individually. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* TxD lists are packed into whole pages, lst_per_page lists each. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		/* Allocate DMA-coherent pages and carve each into TxD lists. */
		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
						   &tmp_p, GFP_KERNEL);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "dma_alloc_coherent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
							   PAGE_SIZE, &tmp_p,
							   GFP_KERNEL);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "dma_alloc_coherent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-FIFO scratch array of one u64 per TxD. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each block holds rxd_count RxDs plus one link descriptor. */
		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
							&tmp_p_addr, GFP_KERNEL);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			/* Per-RxD bookkeeping (virt/dma address of each RxD). */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailer points to the next block (circular). */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE and keep
					 * the raw pointer (ba_0_org) so the
					 * aligned ba_0 can be derived below.
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		dma_alloc_coherent(&nic->pdev->dev, size,
				   &mac_control->stats_mem_phy, GFP_KERNEL);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr still holds the last Rx block's DMA
	 * address here (or is uninitialized if no Rx blocks were built),
	 * not the stats block's - the log label is misleading.
	 */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
862 
863 /**
864  * free_shared_mem - Free the allocated Memory
865  * @nic:  Device private variable.
866  * Description: This function is to free all memory locations allocated by
867  * the init_shared_mem() function and return it to the kernel.
868  */
869 
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	/* NOTE(review): if init_shared_mem() failed before the stats block
	 * was allocated, stats_info may be NULL and every swstats->mem_freed
	 * update below would dereference a near-NULL pointer - confirm the
	 * callers' guarantees.
	 */
	swstats = &stats->sw_stat;

	/* Same page-packing geometry as used by init_shared_mem(). */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			/* Every lst_per_page-th entry holds the base address
			 * of one coherent page; a NULL entry means allocation
			 * stopped there.
			 */
			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		/* NOTE(review): zerodma_virt_addr is not cleared after being
		 * freed, so with more than one FIFO the same page would be
		 * handed to dma_free_coherent() again on the next iteration -
		 * verify.
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx blocks and their per-RxD bookkeeping arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* ba_0_org/ba_1_org are the raw
					 * (unaligned) allocations.
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}
1003 
1004 /*
1005  * s2io_verify_pci_mode -
1006  */
1007 
1008 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1009 {
1010 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1011 	register u64 val64 = 0;
1012 	int     mode;
1013 
1014 	val64 = readq(&bar0->pci_mode);
1015 	mode = (u8)GET_PCI_MODE(val64);
1016 
1017 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1018 		return -1;      /* Unknown PCI mode */
1019 	return mode;
1020 }
1021 
1022 #define NEC_VENID   0x1033
1023 #define NEC_DEVID   0x0125
1024 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1025 {
1026 	struct pci_dev *tdev = NULL;
1027 	for_each_pci_dev(tdev) {
1028 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1029 			if (tdev->bus == s2io_pdev->bus->parent) {
1030 				pci_dev_put(tdev);
1031 				return 1;
1032 			}
1033 		}
1034 	}
1035 	return 0;
1036 }
1037 
/* Bus speed in MHz, indexed by the value decoded via GET_PCI_MODE() */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1039 /*
1040  * s2io_print_pci_mode -
1041  */
1042 static int s2io_print_pci_mode(struct s2io_nic *nic)
1043 {
1044 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1045 	register u64 val64 = 0;
1046 	int	mode;
1047 	struct config_param *config = &nic->config;
1048 	const char *pcimode;
1049 
1050 	val64 = readq(&bar0->pci_mode);
1051 	mode = (u8)GET_PCI_MODE(val64);
1052 
1053 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1054 		return -1;	/* Unknown PCI mode */
1055 
1056 	config->bus_speed = bus_speed[mode];
1057 
1058 	if (s2io_on_nec_bridge(nic->pdev)) {
1059 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1060 			  nic->dev->name);
1061 		return mode;
1062 	}
1063 
1064 	switch (mode) {
1065 	case PCI_MODE_PCI_33:
1066 		pcimode = "33MHz PCI bus";
1067 		break;
1068 	case PCI_MODE_PCI_66:
1069 		pcimode = "66MHz PCI bus";
1070 		break;
1071 	case PCI_MODE_PCIX_M1_66:
1072 		pcimode = "66MHz PCIX(M1) bus";
1073 		break;
1074 	case PCI_MODE_PCIX_M1_100:
1075 		pcimode = "100MHz PCIX(M1) bus";
1076 		break;
1077 	case PCI_MODE_PCIX_M1_133:
1078 		pcimode = "133MHz PCIX(M1) bus";
1079 		break;
1080 	case PCI_MODE_PCIX_M2_66:
1081 		pcimode = "133MHz PCIX(M2) bus";
1082 		break;
1083 	case PCI_MODE_PCIX_M2_100:
1084 		pcimode = "200MHz PCIX(M2) bus";
1085 		break;
1086 	case PCI_MODE_PCIX_M2_133:
1087 		pcimode = "266MHz PCIX(M2) bus";
1088 		break;
1089 	default:
1090 		pcimode = "unsupported bus!";
1091 		mode = -1;
1092 	}
1093 
1094 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1095 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1096 
1097 	return mode;
1098 }
1099 
1100 /**
1101  *  init_tti - Initialization transmit traffic interrupt scheme
1102  *  @nic: device private variable
1103  *  @link: link status (UP/DOWN) used to enable/disable continuous
1104  *  transmit interrupts
1105  *  @may_sleep: parameter indicates if sleeping when waiting for
1106  *  command complete
1107  *  Description: The function configures transmit traffic interrupts
1108  *  Return Value:  SUCCESS on success and
1109  *  '-1' on failure
1110  */
1111 
static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	/* Program one TTI entry per configured Tx FIFO. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Timer value is scaled by the detected bus speed. */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous Tx interrupts only on FIFO 0, and only while
		 * the link is up and the module parameter allows it.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		/* Utilized-frame-count thresholds; UDP steering FIFOs get
		 * larger counts under INTA so small-packet bursts coalesce
		 * (presumably a latency/interrupt-rate tradeoff — the exact
		 * values come from vendor tuning).
		 */
		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit entry i: write the command with the strobe bit and
		 * wait for the hardware to clear it (500ms handshake inside
		 * wait_for_cmd_complete).
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET, may_sleep) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1178 
1179 /**
1180  *  init_nic - Initialization of hardware
1181  *  @nic: device private variable
1182  *  Description: The function sequentially configures every block
1183  *  of the H/W from their reset values.
1184  *  Return Value:  SUCCESS on success and
1185  *  '-1' on failure (endian settings incorrect).
1186  */
1187 
static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Poll up to 50 * 10ms = 500ms for RIC to stop. */
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/*  Enable Receiving broadcasts */
	/* mac_cfg is key-protected: each 32-bit half must be preceded by
	 * a write of the unlock key to rmac_cfg_key.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the DTX control values (END_SIGN-terminated tables);
	 * Herc needs a 1ms pause after every other write.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Each 64-bit partition register describes two FIFOs (j selects
	 * the 32-bit half); flush to the matching register after every
	 * pair.
	 */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* With an odd FIFO count, bump i so the final, half-filled
		 * partition register is still written by the switch below.
		 */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	/* Ring 0 additionally absorbs the remainder of the division. */
	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority.
	 * Each byte names the FIFO serviced in that time slot; the
	 * patterns repeat the sequence 0..tx_fifo_num-1.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 it is assumed that the user did
		 * not specify frame-length steering for ring i.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state, true))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	/* Commit one RTI entry per ring using the strobe handshake. */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	/* Only Xframe I needs the keyed 32-bit-half write sequence here. */
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		/* CX4 boards need a larger average inter-packet gap. */
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1818 #define LINK_UP_DOWN_INTERRUPT		1
1819 #define MAC_RMAC_ERR_TIMER		2
1820 
1821 static int s2io_link_fault_indication(struct s2io_nic *nic)
1822 {
1823 	if (nic->device_type == XFRAME_II_DEVICE)
1824 		return LINK_UP_DOWN_INTERRUPT;
1825 	else
1826 		return MAC_RMAC_ERR_TIMER;
1827 }
1828 
1829 /**
1830  *  do_s2io_write_bits -  update alarm bits in alarm register
1831  *  @value: alarm bits
1832  *  @flag: interrupt status
1833  *  @addr: address value
1834  *  Description: update alarm bits in alarm register
1835  *  Return Value:
1836  *  NONE.
1837  */
1838 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1839 {
1840 	u64 temp64;
1841 
1842 	temp64 = readq(addr);
1843 
1844 	if (flag == ENABLE_INTRS)
1845 		temp64 &= ~((u64)value);
1846 	else
1847 		temp64 |= ((u64)value);
1848 	writeq(temp64, addr);
1849 }
1850 
/*
 * en_dis_err_alarms - enable or disable error alarm interrupts
 * @nic: device private structure
 * @mask: bitmask of alarm groups (TX_DMA_INTR, TX_MAC_INTR, ...) to act on
 * @flag: ENABLE_INTRS or DISABLE_INTRS (passed through to
 *        do_s2io_write_bits, which clears bits on enable and sets them
 *        on disable)
 *
 * Masks all general interrupts first, then updates the per-block error
 * mask registers for every selected group and accumulates the matching
 * general-interrupt-mask bits into nic->general_int_mask.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Quiesce everything before touching the individual masks. */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state-change alarm only applies when link faults are
		 * reported via the RMAC error timer (non-Xframe II).
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1977 
1978 /**
1979  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1980  *  @nic: device private variable,
1981  *  @mask: A mask indicating which Intr block must be modified and,
1982  *  @flag: A flag indicating whether to enable or disable the Intrs.
1983  *  Description: This function will either disable or enable the interrupts
1984  *  depending on the flag argument. The mask argument can be used to
1985  *  enable/disable any Intr block.
1986  *  Return Value: NONE.
1987  */
1988 
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm-interrupt bits prepared earlier in
	 * nic->general_int_mask; the blocks selected below are OR-ed in.
	 */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				/* Link state is reported via GPIO on this
				 * adapter; unmask only the GPIO path.
				 */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* In general_int_mask a cleared bit enables the block: on enable,
	 * clear just the selected bits; on disable, mask everything.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask value the hardware actually latched. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2067 
2068 /**
2069  *  verify_pcc_quiescent- Checks for PCC quiescent state
2070  *  @sp : private member of the device structure, which is a pointer to the
2071  *  s2io_nic structure.
2072  *  @flag: boolean controlling function path
 *  Return: 1 if PCC is quiescent
 *          0 if PCC is not quiescent
2075  */
2076 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2077 {
2078 	int ret = 0, herc;
2079 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2080 	u64 val64 = readq(&bar0->adapter_status);
2081 
2082 	herc = (sp->device_type == XFRAME_II_DEVICE);
2083 
2084 	if (flag == false) {
2085 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2086 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2087 				ret = 1;
2088 		} else {
2089 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2090 				ret = 1;
2091 		}
2092 	} else {
2093 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2094 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2095 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2096 				ret = 1;
2097 		} else {
2098 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2099 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2100 				ret = 1;
2101 		}
2102 	}
2103 
2104 	return ret;
2105 }
2106 /**
2107  *  verify_xena_quiescence - Checks whether the H/W is ready
2108  *  @sp : private member of the device structure, which is a pointer to the
2109  *  s2io_nic structure.
2110  *  Description: Returns whether the H/W is ready to go or not. Depending
2111  *  on whether adapter enable bit was written or not the comparison
2112  *  differs and the calling function passes the input argument flag to
2113  *  indicate this.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
2116  */
2117 
2118 static int verify_xena_quiescence(struct s2io_nic *sp)
2119 {
2120 	int  mode;
2121 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2122 	u64 val64 = readq(&bar0->adapter_status);
2123 	mode = s2io_verify_pci_mode(sp);
2124 
2125 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2126 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2127 		return 0;
2128 	}
2129 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2130 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2131 		return 0;
2132 	}
2133 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2134 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2135 		return 0;
2136 	}
2137 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2138 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2139 		return 0;
2140 	}
2141 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2142 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2143 		return 0;
2144 	}
2145 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2146 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2147 		return 0;
2148 	}
2149 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2150 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2151 		return 0;
2152 	}
2153 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2154 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2155 		return 0;
2156 	}
2157 
2158 	/*
2159 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2160 	 * the P_PLL_LOCK bit in the adapter_status register will
2161 	 * not be asserted.
2162 	 */
2163 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2164 	    sp->device_type == XFRAME_II_DEVICE &&
2165 	    mode != PCI_MODE_PCI_33) {
2166 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2167 		return 0;
2168 	}
2169 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2170 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2171 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2172 		return 0;
2173 	}
2174 	return 1;
2175 }
2176 
2177 /**
2178  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
2180  * Description :
 * New procedure to clear mac address reading problems on Alpha platforms
2182  *
2183  */
2184 
2185 static void fix_mac_address(struct s2io_nic *sp)
2186 {
2187 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2188 	int i = 0;
2189 
2190 	while (fix_mac[i] != END_SIGN) {
2191 		writeq(fix_mac[i++], &bar0->gpio_control);
2192 		udelay(10);
2193 		(void) readq(&bar0->gpio_control);
2194 	}
2195 }
2196 
2197 /**
2198  *  start_nic - Turns the device on
2199  *  @nic : device private variable.
2200  *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
2203  *  and shared memory is allocated but the NIC is still quiescent. On
2204  *  calling this function, the device interrupts are cleared and the NIC is
2205  *  literally switched on by writing into the adapter control register.
2206  *  Return Value:
2207  *  SUCCESS on success and -1 on failure.
2208  */
2209 
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the RxD backoff interval with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter: leave VLAN tags in place when stripping is off. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the comment above says "Enabling" but the code
	 * clears ADAPTER_ECC_EN — confirm the bit polarity against the
	 * Xena register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming sequence at register offset 0x2700. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2314 /**
2315  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2316  * @fifo_data: fifo data pointer
2317  * @txdlp: descriptor
2318  * @get_off: unused
2319  */
2320 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2321 					struct TxD *txdlp, int get_off)
2322 {
2323 	struct s2io_nic *nic = fifo_data->nic;
2324 	struct sk_buff *skb;
2325 	struct TxD *txds;
2326 	u16 j, frg_cnt;
2327 
2328 	txds = txdlp;
2329 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2330 		dma_unmap_single(&nic->pdev->dev,
2331 				 (dma_addr_t)txds->Buffer_Pointer,
2332 				 sizeof(u64), DMA_TO_DEVICE);
2333 		txds++;
2334 	}
2335 
2336 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2337 	if (!skb) {
2338 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2339 		return NULL;
2340 	}
2341 	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2342 			 skb_headlen(skb), DMA_TO_DEVICE);
2343 	frg_cnt = skb_shinfo(skb)->nr_frags;
2344 	if (frg_cnt) {
2345 		txds++;
2346 		for (j = 0; j < frg_cnt; j++, txds++) {
2347 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2348 			if (!txds->Buffer_Pointer)
2349 				break;
2350 			dma_unmap_page(&nic->pdev->dev,
2351 				       (dma_addr_t)txds->Buffer_Pointer,
2352 				       skb_frag_size(frag), DMA_TO_DEVICE);
2353 		}
2354 	}
2355 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2356 	return skb;
2357 }
2358 
2359 /**
2360  *  free_tx_buffers - Free all queued Tx buffers
2361  *  @nic : device private variable.
2362  *  Description:
2363  *  Free all queued Tx buffers.
2364  *  Return Value: void
2365  */
2366 
2367 static void free_tx_buffers(struct s2io_nic *nic)
2368 {
2369 	struct net_device *dev = nic->dev;
2370 	struct sk_buff *skb;
2371 	struct TxD *txdp;
2372 	int i, j;
2373 	int cnt = 0;
2374 	struct config_param *config = &nic->config;
2375 	struct mac_info *mac_control = &nic->mac_control;
2376 	struct stat_block *stats = mac_control->stats_info;
2377 	struct swStat *swstats = &stats->sw_stat;
2378 
2379 	for (i = 0; i < config->tx_fifo_num; i++) {
2380 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2381 		struct fifo_info *fifo = &mac_control->fifos[i];
2382 		unsigned long flags;
2383 
2384 		spin_lock_irqsave(&fifo->tx_lock, flags);
2385 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2386 			txdp = fifo->list_info[j].list_virt_addr;
2387 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2388 			if (skb) {
2389 				swstats->mem_freed += skb->truesize;
2390 				dev_kfree_skb_irq(skb);
2391 				cnt++;
2392 			}
2393 		}
2394 		DBG_PRINT(INTR_DBG,
2395 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2396 			  dev->name, cnt, i);
2397 		fifo->tx_curr_get_info.offset = 0;
2398 		fifo->tx_curr_put_info.offset = 0;
2399 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2400 	}
2401 }
2402 
2403 /**
2404  *   stop_nic -  To stop the nic
2405  *   @nic : device private variable.
2406  *   Description:
2407  *   This function does exactly the opposite of what the start_nic()
2408  *   function does. This function is called to stop the device.
2409  *   Return Value:
2410  *   void.
2411  */
2412 
2413 static void stop_nic(struct s2io_nic *nic)
2414 {
2415 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2416 	register u64 val64 = 0;
2417 	u16 interruptible;
2418 
2419 	/*  Disable all interrupts */
2420 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2421 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2422 	interruptible |= TX_PIC_INTR;
2423 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2424 
2425 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2426 	val64 = readq(&bar0->adapter_control);
2427 	val64 &= ~(ADAPTER_CNTL_EN);
2428 	writeq(val64, &bar0->adapter_control);
2429 }
2430 
2431 /**
2432  *  fill_rx_buffers - Allocates the Rx side skbs
2433  *  @nic : device private variable.
2434  *  @ring: per ring structure
2435  *  @from_card_up: If this is true, we will map the buffer to get
2436  *     the dma address for buf0 and buf1 to give it to the card.
2437  *     Else we will sync the already mapped buffer to give it to the card.
2438  *  Description:
2439  *  The function allocates Rx side skbs and puts the physical
2440  *  address of these buffers into the RxD buffer pointers, so that the NIC
2441  *  can DMA the received frame into these locations.
2442  *  The NIC supports 3 receive modes, viz
2443  *  1. single buffer,
2444  *  2. three buffer and
2445  *  3. Five buffer modes.
2446  *  Each mode defines how many fragments the received frame will be split
2447  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2448  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2449  *  is split into 3 fragments. As of now only single buffer mode is
2450  *  supported.
2451  *   Return Value:
2452  *  SUCCESS on success or an appropriate -ve value on failure.
2453  */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* First descriptor we populated; its OWN bit is flipped last so the
	 * NIC never sees a partially-initialized batch.
	 */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of empty RxD slots we should try to refill. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer has caught up with the get pointer and the
		 * slot is still occupied: the ring is full.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block reached: advance (with wrap) to the next
		 * block's first descriptor.
		 * NOTE(review): offsets here run to rxd_count and wrap at
		 * rxd_count + 1 further below — confirm the intended
		 * relationship between ring->rxd_count and the block size.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the NIC (3B mode marker set):
		 * nothing more to refill right now.
		 */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand any already-filled descriptors to the NIC
			 * before bailing out, so they are not stranded.
			 */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				dma_map_single(&ring->pdev->dev, skb->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (header buffer) is mapped once at card-up
			 * and only synced on subsequent refills.
			 */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					dma_map_single(&ring->pdev->dev,
						       ba->ba_0, BUF0_LEN,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				dma_sync_single_for_device(&ring->pdev->dev,
							   (dma_addr_t)rxdp3->Buffer0_ptr,
							   BUF0_LEN,
							   DMA_FROM_DEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
								    skb->data,
								    ring->mtu + 4,
								    DMA_FROM_DEVICE);

				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						dma_map_single(&ring->pdev->dev,
							       ba->ba_1,
							       BUF1_LEN,
							       DMA_FROM_DEVICE);

					if (dma_mapping_error(&nic->pdev->dev,
							      rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * before failing.
						 */
						dma_unmap_single(&ring->pdev->dev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 DMA_FROM_DEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Within a sync batch, give ownership immediately; the batch
		 * leader's OWN bit is deferred until the barrier below.
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2660 
2661 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2662 {
2663 	struct net_device *dev = sp->dev;
2664 	int j;
2665 	struct sk_buff *skb;
2666 	struct RxD_t *rxdp;
2667 	struct RxD1 *rxdp1;
2668 	struct RxD3 *rxdp3;
2669 	struct mac_info *mac_control = &sp->mac_control;
2670 	struct stat_block *stats = mac_control->stats_info;
2671 	struct swStat *swstats = &stats->sw_stat;
2672 
2673 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2674 		rxdp = mac_control->rings[ring_no].
2675 			rx_blocks[blk].rxds[j].virt_addr;
2676 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2677 		if (!skb)
2678 			continue;
2679 		if (sp->rxd_mode == RXD_MODE_1) {
2680 			rxdp1 = (struct RxD1 *)rxdp;
2681 			dma_unmap_single(&sp->pdev->dev,
2682 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2683 					 dev->mtu +
2684 					 HEADER_ETHERNET_II_802_3_SIZE +
2685 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2686 					 DMA_FROM_DEVICE);
2687 			memset(rxdp, 0, sizeof(struct RxD1));
2688 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2689 			rxdp3 = (struct RxD3 *)rxdp;
2690 			dma_unmap_single(&sp->pdev->dev,
2691 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2692 					 BUF0_LEN, DMA_FROM_DEVICE);
2693 			dma_unmap_single(&sp->pdev->dev,
2694 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2695 					 BUF1_LEN, DMA_FROM_DEVICE);
2696 			dma_unmap_single(&sp->pdev->dev,
2697 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2698 					 dev->mtu + 4, DMA_FROM_DEVICE);
2699 			memset(rxdp, 0, sizeof(struct RxD3));
2700 		}
2701 		swstats->mem_freed += skb->truesize;
2702 		dev_kfree_skb(skb);
2703 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2704 	}
2705 }
2706 
2707 /**
2708  *  free_rx_buffers - Frees all Rx buffers
2709  *  @sp: device private variable.
2710  *  Description:
2711  *  This function will free all Rx buffers allocated by host.
2712  *  Return Value:
2713  *  NONE.
2714  */
2715 
2716 static void free_rx_buffers(struct s2io_nic *sp)
2717 {
2718 	struct net_device *dev = sp->dev;
2719 	int i, blk = 0, buf_cnt = 0;
2720 	struct config_param *config = &sp->config;
2721 	struct mac_info *mac_control = &sp->mac_control;
2722 
2723 	for (i = 0; i < config->rx_ring_num; i++) {
2724 		struct ring_info *ring = &mac_control->rings[i];
2725 
2726 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2727 			free_rxd_blk(sp, i, blk);
2728 
2729 		ring->rx_curr_put_info.block_index = 0;
2730 		ring->rx_curr_get_info.block_index = 0;
2731 		ring->rx_curr_put_info.offset = 0;
2732 		ring->rx_curr_get_info.offset = 0;
2733 		ring->rx_bufs_left = 0;
2734 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2735 			  dev->name, buf_cnt, i);
2736 	}
2737 }
2738 
2739 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2740 {
2741 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2742 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2743 			  ring->dev->name);
2744 	}
2745 	return 0;
2746 }
2747 
2748 /**
2749  * s2io_poll_msix - Rx interrupt handler for NAPI support
2750  * @napi : pointer to the napi structure.
2751  * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
2753  * Description:
2754  * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
2756  * also It will process only a given number of packets.
2757  * Return value:
2758  * 0 on success and 1 if there are No Rx packets to be processed.
2759  */
2760 
2761 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2762 {
2763 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2764 	struct net_device *dev = ring->dev;
2765 	int pkts_processed = 0;
2766 	u8 __iomem *addr = NULL;
2767 	u8 val8 = 0;
2768 	struct s2io_nic *nic = netdev_priv(dev);
2769 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2770 	int budget_org = budget;
2771 
2772 	if (unlikely(!is_s2io_card_up(nic)))
2773 		return 0;
2774 
2775 	pkts_processed = rx_intr_handler(ring, budget);
2776 	s2io_chk_rx_buffers(nic, ring);
2777 
2778 	if (pkts_processed < budget_org) {
2779 		napi_complete_done(napi, pkts_processed);
2780 		/*Re Enable MSI-Rx Vector*/
2781 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2782 		addr += 7 - ring->ring_no;
2783 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2784 		writeb(val8, addr);
2785 		val8 = readb(addr);
2786 	}
2787 	return pkts_processed;
2788 }
2789 
2790 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2791 {
2792 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2793 	int pkts_processed = 0;
2794 	int ring_pkts_processed, i;
2795 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2796 	int budget_org = budget;
2797 	struct config_param *config = &nic->config;
2798 	struct mac_info *mac_control = &nic->mac_control;
2799 
2800 	if (unlikely(!is_s2io_card_up(nic)))
2801 		return 0;
2802 
2803 	for (i = 0; i < config->rx_ring_num; i++) {
2804 		struct ring_info *ring = &mac_control->rings[i];
2805 		ring_pkts_processed = rx_intr_handler(ring, budget);
2806 		s2io_chk_rx_buffers(nic, ring);
2807 		pkts_processed += ring_pkts_processed;
2808 		budget -= ring_pkts_processed;
2809 		if (budget <= 0)
2810 			break;
2811 	}
2812 	if (pkts_processed < budget_org) {
2813 		napi_complete_done(napi, pkts_processed);
2814 		/* Re enable the Rx interrupts for the ring */
2815 		writeq(0, &bar0->rx_traffic_mask);
2816 		readl(&bar0->rx_traffic_mask);
2817 	}
2818 	return pkts_processed;
2819 }
2820 
2821 #ifdef CONFIG_NET_POLL_CONTROLLER
2822 /**
2823  * s2io_netpoll - netpoll event handler entry point
2824  * @dev : pointer to the device structure.
2825  * Description:
2826  * 	This function will be called by upper layer to check for events on the
2827  * interface in situations where interrupts are disabled. It is used for
2828  * specific in-kernel networking tasks, such as remote consoles and kernel
2829  * debugging over the network (example netdump in RedHat).
2830  */
2831 static void s2io_netpoll(struct net_device *dev)
2832 {
2833 	struct s2io_nic *nic = netdev_priv(dev);
2834 	const int irq = nic->pdev->irq;
2835 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2836 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2837 	int i;
2838 	struct config_param *config = &nic->config;
2839 	struct mac_info *mac_control = &nic->mac_control;
2840 
2841 	if (pci_channel_offline(nic->pdev))
2842 		return;
2843 
2844 	disable_irq(irq);
2845 
2846 	writeq(val64, &bar0->rx_traffic_int);
2847 	writeq(val64, &bar0->tx_traffic_int);
2848 
2849 	/* we need to free up the transmitted skbufs or else netpoll will
2850 	 * run out of skbs and will fail and eventually netpoll application such
2851 	 * as netdump will fail.
2852 	 */
2853 	for (i = 0; i < config->tx_fifo_num; i++)
2854 		tx_intr_handler(&mac_control->fifos[i]);
2855 
2856 	/* check for received packet and indicate up to network */
2857 	for (i = 0; i < config->rx_ring_num; i++) {
2858 		struct ring_info *ring = &mac_control->rings[i];
2859 
2860 		rx_intr_handler(ring, 0);
2861 	}
2862 
2863 	for (i = 0; i < config->rx_ring_num; i++) {
2864 		struct ring_info *ring = &mac_control->rings[i];
2865 
2866 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2867 			DBG_PRINT(INFO_DBG,
2868 				  "%s: Out of memory in Rx Netpoll!!\n",
2869 				  dev->name);
2870 			break;
2871 		}
2872 	}
2873 	enable_irq(irq);
2874 }
2875 #endif
2876 
2877 /**
2878  *  rx_intr_handler - Rx interrupt handler
2879  *  @ring_data: per ring structure.
2880  *  @budget: budget for napi processing.
2881  *  Description:
2882  *  If the interrupt is because of a received frame or if the
2883  *  receive ring contains fresh as yet un-processed frames,this function is
2884  *  called. It picks out the RxD at which place the last Rx processing had
2885  *  stopped and sends the skb to the OSM's Rx handler and then increments
2886  *  the offset.
2887  *  Return Value:
2888  *  No. of napi packets processed.
2889  */
2890 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2891 {
2892 	int get_block, put_block;
2893 	struct rx_curr_get_info get_info, put_info;
2894 	struct RxD_t *rxdp;
2895 	struct sk_buff *skb;
2896 	int pkt_cnt = 0, napi_pkts = 0;
2897 	int i;
2898 	struct RxD1 *rxdp1;
2899 	struct RxD3 *rxdp3;
2900 
2901 	if (budget <= 0)
2902 		return napi_pkts;
2903 
2904 	get_info = ring_data->rx_curr_get_info;
2905 	get_block = get_info.block_index;
2906 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2907 	put_block = put_info.block_index;
2908 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2909 
2910 	while (RXD_IS_UP2DT(rxdp)) {
2911 		/*
2912 		 * If your are next to put index then it's
2913 		 * FIFO full condition
2914 		 */
2915 		if ((get_block == put_block) &&
2916 		    (get_info.offset + 1) == put_info.offset) {
2917 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2918 				  ring_data->dev->name);
2919 			break;
2920 		}
2921 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2922 		if (skb == NULL) {
2923 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2924 				  ring_data->dev->name);
2925 			return 0;
2926 		}
2927 		if (ring_data->rxd_mode == RXD_MODE_1) {
2928 			rxdp1 = (struct RxD1 *)rxdp;
2929 			dma_unmap_single(&ring_data->pdev->dev,
2930 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2931 					 ring_data->mtu +
2932 					 HEADER_ETHERNET_II_802_3_SIZE +
2933 					 HEADER_802_2_SIZE +
2934 					 HEADER_SNAP_SIZE,
2935 					 DMA_FROM_DEVICE);
2936 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2937 			rxdp3 = (struct RxD3 *)rxdp;
2938 			dma_sync_single_for_cpu(&ring_data->pdev->dev,
2939 						(dma_addr_t)rxdp3->Buffer0_ptr,
2940 						BUF0_LEN, DMA_FROM_DEVICE);
2941 			dma_unmap_single(&ring_data->pdev->dev,
2942 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2943 					 ring_data->mtu + 4, DMA_FROM_DEVICE);
2944 		}
2945 		prefetch(skb->data);
2946 		rx_osm_handler(ring_data, rxdp);
2947 		get_info.offset++;
2948 		ring_data->rx_curr_get_info.offset = get_info.offset;
2949 		rxdp = ring_data->rx_blocks[get_block].
2950 			rxds[get_info.offset].virt_addr;
2951 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2952 			get_info.offset = 0;
2953 			ring_data->rx_curr_get_info.offset = get_info.offset;
2954 			get_block++;
2955 			if (get_block == ring_data->block_count)
2956 				get_block = 0;
2957 			ring_data->rx_curr_get_info.block_index = get_block;
2958 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2959 		}
2960 
2961 		if (ring_data->nic->config.napi) {
2962 			budget--;
2963 			napi_pkts++;
2964 			if (!budget)
2965 				break;
2966 		}
2967 		pkt_cnt++;
2968 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2969 			break;
2970 	}
2971 	if (ring_data->lro) {
2972 		/* Clear all LRO sessions before exiting */
2973 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2974 			struct lro *lro = &ring_data->lro0_n[i];
2975 			if (lro->in_use) {
2976 				update_L3L4_header(ring_data->nic, lro);
2977 				queue_rx_frame(lro->parent, lro->vlan_tag);
2978 				clear_lro_session(lro);
2979 			}
2980 		}
2981 	}
2982 	return napi_pkts;
2983 }
2984 
2985 /**
2986  *  tx_intr_handler - Transmit interrupt handler
2987  *  @fifo_data : fifo data pointer
2988  *  Description:
2989  *  If an interrupt was raised to indicate DMA complete of the
2990  *  Tx packet, this function is called. It identifies the last TxD
2991  *  whose buffer was freed and frees all skbs whose data have already
2992  *  DMA'ed into the NICs internal memory.
2993  *  Return Value:
2994  *  NONE
2995  */
2996 
2997 static void tx_intr_handler(struct fifo_info *fifo_data)
2998 {
2999 	struct s2io_nic *nic = fifo_data->nic;
3000 	struct tx_curr_get_info get_info, put_info;
3001 	struct sk_buff *skb = NULL;
3002 	struct TxD *txdlp;
3003 	int pkt_cnt = 0;
3004 	unsigned long flags = 0;
3005 	u8 err_mask;
3006 	struct stat_block *stats = nic->mac_control.stats_info;
3007 	struct swStat *swstats = &stats->sw_stat;
3008 
3009 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3010 		return;
3011 
3012 	get_info = fifo_data->tx_curr_get_info;
3013 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3014 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3015 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3016 	       (get_info.offset != put_info.offset) &&
3017 	       (txdlp->Host_Control)) {
3018 		/* Check for TxD errors */
3019 		if (txdlp->Control_1 & TXD_T_CODE) {
3020 			unsigned long long err;
3021 			err = txdlp->Control_1 & TXD_T_CODE;
3022 			if (err & 0x1) {
3023 				swstats->parity_err_cnt++;
3024 			}
3025 
3026 			/* update t_code statistics */
3027 			err_mask = err >> 48;
3028 			switch (err_mask) {
3029 			case 2:
3030 				swstats->tx_buf_abort_cnt++;
3031 				break;
3032 
3033 			case 3:
3034 				swstats->tx_desc_abort_cnt++;
3035 				break;
3036 
3037 			case 7:
3038 				swstats->tx_parity_err_cnt++;
3039 				break;
3040 
3041 			case 10:
3042 				swstats->tx_link_loss_cnt++;
3043 				break;
3044 
3045 			case 15:
3046 				swstats->tx_list_proc_err_cnt++;
3047 				break;
3048 			}
3049 		}
3050 
3051 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3052 		if (skb == NULL) {
3053 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3054 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3055 				  __func__);
3056 			return;
3057 		}
3058 		pkt_cnt++;
3059 
3060 		/* Updating the statistics block */
3061 		swstats->mem_freed += skb->truesize;
3062 		dev_consume_skb_irq(skb);
3063 
3064 		get_info.offset++;
3065 		if (get_info.offset == get_info.fifo_len + 1)
3066 			get_info.offset = 0;
3067 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3068 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3069 	}
3070 
3071 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3072 
3073 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3074 }
3075 
3076 /**
3077  *  s2io_mdio_write - Function to write in to MDIO registers
3078  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3079  *  @addr     : address value
3080  *  @value    : data value
3081  *  @dev      : pointer to net_device structure
3082  *  Description:
3083  *  This function is used to write values to the MDIO registers
3084  *  NONE
3085  */
3086 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3087 			    struct net_device *dev)
3088 {
3089 	u64 val64;
3090 	struct s2io_nic *sp = netdev_priv(dev);
3091 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3092 
3093 	/* address transaction */
3094 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3095 		MDIO_MMD_DEV_ADDR(mmd_type) |
3096 		MDIO_MMS_PRT_ADDR(0x0);
3097 	writeq(val64, &bar0->mdio_control);
3098 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3099 	writeq(val64, &bar0->mdio_control);
3100 	udelay(100);
3101 
3102 	/* Data transaction */
3103 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3104 		MDIO_MMD_DEV_ADDR(mmd_type) |
3105 		MDIO_MMS_PRT_ADDR(0x0) |
3106 		MDIO_MDIO_DATA(value) |
3107 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3108 	writeq(val64, &bar0->mdio_control);
3109 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3110 	writeq(val64, &bar0->mdio_control);
3111 	udelay(100);
3112 
3113 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3114 		MDIO_MMD_DEV_ADDR(mmd_type) |
3115 		MDIO_MMS_PRT_ADDR(0x0) |
3116 		MDIO_OP(MDIO_OP_READ_TRANS);
3117 	writeq(val64, &bar0->mdio_control);
3118 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3119 	writeq(val64, &bar0->mdio_control);
3120 	udelay(100);
3121 }
3122 
3123 /**
3124  *  s2io_mdio_read - Function to write in to MDIO registers
3125  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3126  *  @addr     : address value
3127  *  @dev      : pointer to net_device structure
3128  *  Description:
3129  *  This function is used to read values to the MDIO registers
3130  *  NONE
3131  */
3132 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3133 {
3134 	u64 val64 = 0x0;
3135 	u64 rval64 = 0x0;
3136 	struct s2io_nic *sp = netdev_priv(dev);
3137 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3138 
3139 	/* address transaction */
3140 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3141 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3142 			 | MDIO_MMS_PRT_ADDR(0x0));
3143 	writeq(val64, &bar0->mdio_control);
3144 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3145 	writeq(val64, &bar0->mdio_control);
3146 	udelay(100);
3147 
3148 	/* Data transaction */
3149 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3150 		MDIO_MMD_DEV_ADDR(mmd_type) |
3151 		MDIO_MMS_PRT_ADDR(0x0) |
3152 		MDIO_OP(MDIO_OP_READ_TRANS);
3153 	writeq(val64, &bar0->mdio_control);
3154 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3155 	writeq(val64, &bar0->mdio_control);
3156 	udelay(100);
3157 
3158 	/* Read the value from regs */
3159 	rval64 = readq(&bar0->mdio_control);
3160 	rval64 = rval64 & 0xFFFF0000;
3161 	rval64 = rval64 >> 16;
3162 	return rval64;
3163 }
3164 
3165 /**
3166  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3167  *  @counter      : counter value to be updated
3168  *  @regs_stat    : registers status
3169  *  @index        : index
3170  *  @flag         : flag to indicate the status
3171  *  @type         : counter type
3172  *  Description:
3173  *  This function is to check the status of the xpak counters value
3174  *  NONE
3175  */
3176 
3177 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3178 				  u16 flag, u16 type)
3179 {
3180 	u64 mask = 0x3;
3181 	u64 val64;
3182 	int i;
3183 	for (i = 0; i < index; i++)
3184 		mask = mask << 0x2;
3185 
3186 	if (flag > 0) {
3187 		*counter = *counter + 1;
3188 		val64 = *regs_stat & mask;
3189 		val64 = val64 >> (index * 0x2);
3190 		val64 = val64 + 1;
3191 		if (val64 == 3) {
3192 			switch (type) {
3193 			case 1:
3194 				DBG_PRINT(ERR_DBG,
3195 					  "Take Xframe NIC out of service.\n");
3196 				DBG_PRINT(ERR_DBG,
3197 "Excessive temperatures may result in premature transceiver failure.\n");
3198 				break;
3199 			case 2:
3200 				DBG_PRINT(ERR_DBG,
3201 					  "Take Xframe NIC out of service.\n");
3202 				DBG_PRINT(ERR_DBG,
3203 "Excessive bias currents may indicate imminent laser diode failure.\n");
3204 				break;
3205 			case 3:
3206 				DBG_PRINT(ERR_DBG,
3207 					  "Take Xframe NIC out of service.\n");
3208 				DBG_PRINT(ERR_DBG,
3209 "Excessive laser output power may saturate far-end receiver.\n");
3210 				break;
3211 			default:
3212 				DBG_PRINT(ERR_DBG,
3213 					  "Incorrect XPAK Alarm type\n");
3214 			}
3215 			val64 = 0x0;
3216 		}
3217 		val64 = val64 << (index * 0x2);
3218 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3219 
3220 	} else {
3221 		*regs_stat = *regs_stat & (~mask);
3222 	}
3223 }
3224 
3225 /**
3226  *  s2io_updt_xpak_counter - Function to update the xpak counters
3227  *  @dev         : pointer to net_device struct
3228  *  Description:
3229  *  This function is to upate the status of the xpak counters value
3230  *  NONE
3231  */
3232 static void s2io_updt_xpak_counter(struct net_device *dev)
3233 {
3234 	u16 flag  = 0x0;
3235 	u16 type  = 0x0;
3236 	u16 val16 = 0x0;
3237 	u64 val64 = 0x0;
3238 	u64 addr  = 0x0;
3239 
3240 	struct s2io_nic *sp = netdev_priv(dev);
3241 	struct stat_block *stats = sp->mac_control.stats_info;
3242 	struct xpakStat *xstats = &stats->xpak_stat;
3243 
3244 	/* Check the communication with the MDIO slave */
3245 	addr = MDIO_CTRL1;
3246 	val64 = 0x0;
3247 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3248 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3249 		DBG_PRINT(ERR_DBG,
3250 			  "ERR: MDIO slave access failed - Returned %llx\n",
3251 			  (unsigned long long)val64);
3252 		return;
3253 	}
3254 
3255 	/* Check for the expected value of control reg 1 */
3256 	if (val64 != MDIO_CTRL1_SPEED10G) {
3257 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3258 			  "Returned: %llx- Expected: 0x%x\n",
3259 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3260 		return;
3261 	}
3262 
3263 	/* Loading the DOM register to MDIO register */
3264 	addr = 0xA100;
3265 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3266 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3267 
3268 	/* Reading the Alarm flags */
3269 	addr = 0xA070;
3270 	val64 = 0x0;
3271 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3272 
3273 	flag = CHECKBIT(val64, 0x7);
3274 	type = 1;
3275 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3276 			      &xstats->xpak_regs_stat,
3277 			      0x0, flag, type);
3278 
3279 	if (CHECKBIT(val64, 0x6))
3280 		xstats->alarm_transceiver_temp_low++;
3281 
3282 	flag = CHECKBIT(val64, 0x3);
3283 	type = 2;
3284 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3285 			      &xstats->xpak_regs_stat,
3286 			      0x2, flag, type);
3287 
3288 	if (CHECKBIT(val64, 0x2))
3289 		xstats->alarm_laser_bias_current_low++;
3290 
3291 	flag = CHECKBIT(val64, 0x1);
3292 	type = 3;
3293 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3294 			      &xstats->xpak_regs_stat,
3295 			      0x4, flag, type);
3296 
3297 	if (CHECKBIT(val64, 0x0))
3298 		xstats->alarm_laser_output_power_low++;
3299 
3300 	/* Reading the Warning flags */
3301 	addr = 0xA074;
3302 	val64 = 0x0;
3303 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3304 
3305 	if (CHECKBIT(val64, 0x7))
3306 		xstats->warn_transceiver_temp_high++;
3307 
3308 	if (CHECKBIT(val64, 0x6))
3309 		xstats->warn_transceiver_temp_low++;
3310 
3311 	if (CHECKBIT(val64, 0x3))
3312 		xstats->warn_laser_bias_current_high++;
3313 
3314 	if (CHECKBIT(val64, 0x2))
3315 		xstats->warn_laser_bias_current_low++;
3316 
3317 	if (CHECKBIT(val64, 0x1))
3318 		xstats->warn_laser_output_power_high++;
3319 
3320 	if (CHECKBIT(val64, 0x0))
3321 		xstats->warn_laser_output_power_low++;
3322 }
3323 
3324 /**
3325  *  wait_for_cmd_complete - waits for a command to complete.
3326  *  @addr: address
3327  *  @busy_bit: bit to check for busy
3328  *  @bit_state: state to check
3329  *  @may_sleep: parameter indicates if sleeping when waiting for
3330  *  command complete
3331  *  Description: Function that waits for a command to Write into RMAC
3332  *  ADDR DATA registers to be completed and returns either success or
3333  *  error depending on whether the command was complete or not.
3334  *  Return value:
3335  *   SUCCESS on success and FAILURE on failure.
3336  */
3337 
3338 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3339 				 int bit_state, bool may_sleep)
3340 {
3341 	int ret = FAILURE, cnt = 0, delay = 1;
3342 	u64 val64;
3343 
3344 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3345 		return FAILURE;
3346 
3347 	do {
3348 		val64 = readq(addr);
3349 		if (bit_state == S2IO_BIT_RESET) {
3350 			if (!(val64 & busy_bit)) {
3351 				ret = SUCCESS;
3352 				break;
3353 			}
3354 		} else {
3355 			if (val64 & busy_bit) {
3356 				ret = SUCCESS;
3357 				break;
3358 			}
3359 		}
3360 
3361 		if (!may_sleep)
3362 			mdelay(delay);
3363 		else
3364 			msleep(delay);
3365 
3366 		if (++cnt >= 10)
3367 			delay = 50;
3368 	} while (cnt < 20);
3369 	return ret;
3370 }
3371 /**
3372  * check_pci_device_id - Checks if the device id is supported
3373  * @id : device id
3374  * Description: Function to check if the pci device id is supported by driver.
3375  * Return value: Actual device id if supported else PCI_ANY_ID
3376  */
3377 static u16 check_pci_device_id(u16 id)
3378 {
3379 	switch (id) {
3380 	case PCI_DEVICE_ID_HERC_WIN:
3381 	case PCI_DEVICE_ID_HERC_UNI:
3382 		return XFRAME_II_DEVICE;
3383 	case PCI_DEVICE_ID_S2IO_UNI:
3384 	case PCI_DEVICE_ID_S2IO_WIN:
3385 		return XFRAME_I_DEVICE;
3386 	default:
3387 		return PCI_ANY_ID;
3388 	}
3389 }
3390 
3391 /**
3392  *  s2io_reset - Resets the card.
3393  *  @sp : private member of the device structure.
3394  *  Description: Function to Reset the card. This function then also
3395  *  restores the previously saved PCI configuration space registers as
3396  *  the card reset also resets the configuration space.
3397  *  Return value:
3398  *  void.
3399  */
3400 
3401 static void s2io_reset(struct s2io_nic *sp)
3402 {
3403 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3404 	u64 val64;
3405 	u16 subid, pci_cmd;
3406 	int i;
3407 	u16 val16;
3408 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3409 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3410 	struct stat_block *stats;
3411 	struct swStat *swstats;
3412 
3413 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3414 		  __func__, pci_name(sp->pdev));
3415 
3416 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3417 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3418 
3419 	val64 = SW_RESET_ALL;
3420 	writeq(val64, &bar0->sw_reset);
3421 	if (strstr(sp->product_name, "CX4"))
3422 		msleep(750);
3423 	msleep(250);
3424 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3425 
3426 		/* Restore the PCI state saved during initialization. */
3427 		pci_restore_state(sp->pdev);
3428 		pci_save_state(sp->pdev);
3429 		pci_read_config_word(sp->pdev, 0x2, &val16);
3430 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3431 			break;
3432 		msleep(200);
3433 	}
3434 
3435 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3436 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3437 
3438 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3439 
3440 	s2io_init_pci(sp);
3441 
3442 	/* Set swapper to enable I/O register access */
3443 	s2io_set_swapper(sp);
3444 
3445 	/* restore mac_addr entries */
3446 	do_s2io_restore_unicast_mc(sp);
3447 
3448 	/* Restore the MSIX table entries from local variables */
3449 	restore_xmsi_data(sp);
3450 
3451 	/* Clear certain PCI/PCI-X fields after reset */
3452 	if (sp->device_type == XFRAME_II_DEVICE) {
3453 		/* Clear "detected parity error" bit */
3454 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3455 
3456 		/* Clearing PCIX Ecc status register */
3457 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3458 
3459 		/* Clearing PCI_STATUS error reflected here */
3460 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3461 	}
3462 
3463 	/* Reset device statistics maintained by OS */
3464 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3465 
3466 	stats = sp->mac_control.stats_info;
3467 	swstats = &stats->sw_stat;
3468 
3469 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3470 	up_cnt = swstats->link_up_cnt;
3471 	down_cnt = swstats->link_down_cnt;
3472 	up_time = swstats->link_up_time;
3473 	down_time = swstats->link_down_time;
3474 	reset_cnt = swstats->soft_reset_cnt;
3475 	mem_alloc_cnt = swstats->mem_allocated;
3476 	mem_free_cnt = swstats->mem_freed;
3477 	watchdog_cnt = swstats->watchdog_timer_cnt;
3478 
3479 	memset(stats, 0, sizeof(struct stat_block));
3480 
3481 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3482 	swstats->link_up_cnt = up_cnt;
3483 	swstats->link_down_cnt = down_cnt;
3484 	swstats->link_up_time = up_time;
3485 	swstats->link_down_time = down_time;
3486 	swstats->soft_reset_cnt = reset_cnt;
3487 	swstats->mem_allocated = mem_alloc_cnt;
3488 	swstats->mem_freed = mem_free_cnt;
3489 	swstats->watchdog_timer_cnt = watchdog_cnt;
3490 
3491 	/* SXE-002: Configure link and activity LED to turn it off */
3492 	subid = sp->pdev->subsystem_device;
3493 	if (((subid & 0xFF) >= 0x07) &&
3494 	    (sp->device_type == XFRAME_I_DEVICE)) {
3495 		val64 = readq(&bar0->gpio_control);
3496 		val64 |= 0x0000800000000000ULL;
3497 		writeq(val64, &bar0->gpio_control);
3498 		val64 = 0x0411040400000000ULL;
3499 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3500 	}
3501 
3502 	/*
3503 	 * Clear spurious ECC interrupts that would have occurred on
3504 	 * XFRAME II cards after reset.
3505 	 */
3506 	if (sp->device_type == XFRAME_II_DEVICE) {
3507 		val64 = readq(&bar0->pcc_err_reg);
3508 		writeq(val64, &bar0->pcc_err_reg);
3509 	}
3510 
3511 	sp->device_enabled_once = false;
3512 }
3513 
3514 /**
3515  *  s2io_set_swapper - to set the swapper controle on the card
3516  *  @sp : private member of the device structure,
3517  *  pointer to the s2io_nic structure.
3518  *  Description: Function to set the swapper control on the card
3519  *  correctly depending on the 'endianness' of the system.
3520  *  Return value:
3521  *  SUCCESS on success and FAILURE on failure.
3522  */
3523 
3524 static int s2io_set_swapper(struct s2io_nic *sp)
3525 {
3526 	struct net_device *dev = sp->dev;
3527 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3528 	u64 val64, valt, valr;
3529 
3530 	/*
3531 	 * Set proper endian settings and verify the same by reading
3532 	 * the PIF Feed-back register.
3533 	 */
3534 
3535 	val64 = readq(&bar0->pif_rd_swapper_fb);
3536 	if (val64 != 0x0123456789ABCDEFULL) {
3537 		int i = 0;
3538 		static const u64 value[] = {
3539 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3540 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3541 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3542 			0			/* FE=0, SE=0 */
3543 		};
3544 
3545 		while (i < 4) {
3546 			writeq(value[i], &bar0->swapper_ctrl);
3547 			val64 = readq(&bar0->pif_rd_swapper_fb);
3548 			if (val64 == 0x0123456789ABCDEFULL)
3549 				break;
3550 			i++;
3551 		}
3552 		if (i == 4) {
3553 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3554 				  "feedback read %llx\n",
3555 				  dev->name, (unsigned long long)val64);
3556 			return FAILURE;
3557 		}
3558 		valr = value[i];
3559 	} else {
3560 		valr = readq(&bar0->swapper_ctrl);
3561 	}
3562 
3563 	valt = 0x0123456789ABCDEFULL;
3564 	writeq(valt, &bar0->xmsi_address);
3565 	val64 = readq(&bar0->xmsi_address);
3566 
3567 	if (val64 != valt) {
3568 		int i = 0;
3569 		static const u64 value[] = {
3570 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3571 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3572 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3573 			0			/* FE=0, SE=0 */
3574 		};
3575 
3576 		while (i < 4) {
3577 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3578 			writeq(valt, &bar0->xmsi_address);
3579 			val64 = readq(&bar0->xmsi_address);
3580 			if (val64 == valt)
3581 				break;
3582 			i++;
3583 		}
3584 		if (i == 4) {
3585 			unsigned long long x = val64;
3586 			DBG_PRINT(ERR_DBG,
3587 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3588 			return FAILURE;
3589 		}
3590 	}
3591 	val64 = readq(&bar0->swapper_ctrl);
3592 	val64 &= 0xFFFF000000000000ULL;
3593 
3594 #ifdef __BIG_ENDIAN
3595 	/*
3596 	 * The device by default set to a big endian format, so a
3597 	 * big endian driver need not set anything.
3598 	 */
3599 	val64 |= (SWAPPER_CTRL_TXP_FE |
3600 		  SWAPPER_CTRL_TXP_SE |
3601 		  SWAPPER_CTRL_TXD_R_FE |
3602 		  SWAPPER_CTRL_TXD_W_FE |
3603 		  SWAPPER_CTRL_TXF_R_FE |
3604 		  SWAPPER_CTRL_RXD_R_FE |
3605 		  SWAPPER_CTRL_RXD_W_FE |
3606 		  SWAPPER_CTRL_RXF_W_FE |
3607 		  SWAPPER_CTRL_XMSI_FE |
3608 		  SWAPPER_CTRL_STATS_FE |
3609 		  SWAPPER_CTRL_STATS_SE);
3610 	if (sp->config.intr_type == INTA)
3611 		val64 |= SWAPPER_CTRL_XMSI_SE;
3612 	writeq(val64, &bar0->swapper_ctrl);
3613 #else
3614 	/*
3615 	 * Initially we enable all bits to make it accessible by the
3616 	 * driver, then we selectively enable only those bits that
3617 	 * we want to set.
3618 	 */
3619 	val64 |= (SWAPPER_CTRL_TXP_FE |
3620 		  SWAPPER_CTRL_TXP_SE |
3621 		  SWAPPER_CTRL_TXD_R_FE |
3622 		  SWAPPER_CTRL_TXD_R_SE |
3623 		  SWAPPER_CTRL_TXD_W_FE |
3624 		  SWAPPER_CTRL_TXD_W_SE |
3625 		  SWAPPER_CTRL_TXF_R_FE |
3626 		  SWAPPER_CTRL_RXD_R_FE |
3627 		  SWAPPER_CTRL_RXD_R_SE |
3628 		  SWAPPER_CTRL_RXD_W_FE |
3629 		  SWAPPER_CTRL_RXD_W_SE |
3630 		  SWAPPER_CTRL_RXF_W_FE |
3631 		  SWAPPER_CTRL_XMSI_FE |
3632 		  SWAPPER_CTRL_STATS_FE |
3633 		  SWAPPER_CTRL_STATS_SE);
3634 	if (sp->config.intr_type == INTA)
3635 		val64 |= SWAPPER_CTRL_XMSI_SE;
3636 	writeq(val64, &bar0->swapper_ctrl);
3637 #endif
3638 	val64 = readq(&bar0->swapper_ctrl);
3639 
3640 	/*
3641 	 * Verifying if endian settings are accurate by reading a
3642 	 * feedback register.
3643 	 */
3644 	val64 = readq(&bar0->pif_rd_swapper_fb);
3645 	if (val64 != 0x0123456789ABCDEFULL) {
3646 		/* Endian settings are incorrect, calls for another dekko. */
3647 		DBG_PRINT(ERR_DBG,
3648 			  "%s: Endian settings are wrong, feedback read %llx\n",
3649 			  dev->name, (unsigned long long)val64);
3650 		return FAILURE;
3651 	}
3652 
3653 	return SUCCESS;
3654 }
3655 
3656 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3657 {
3658 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3659 	u64 val64;
3660 	int ret = 0, cnt = 0;
3661 
3662 	do {
3663 		val64 = readq(&bar0->xmsi_access);
3664 		if (!(val64 & s2BIT(15)))
3665 			break;
3666 		mdelay(1);
3667 		cnt++;
3668 	} while (cnt < 5);
3669 	if (cnt == 5) {
3670 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3671 		ret = 1;
3672 	}
3673 
3674 	return ret;
3675 }
3676 
/*
 * restore_xmsi_data - write the saved MSI-X address/data pairs back to
 * the device's XMSI table (used after a card reset).  XFRAME I devices
 * have no XMSI table, so this is a no-op for them.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 maps to index 0; entry i>0 maps to (i-1)*8 + 1. */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(7): write command; s2BIT(15): start transaction. */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index))
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
	}
}
3697 
/*
 * store_xmsi_data - read the device's XMSI table and save each valid
 * address/data pair in nic->msix_info[] so restore_xmsi_data() can
 * replay them after a reset.  No-op on XFRAME I devices.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 maps to index 0; entry i>0 maps to (i-1)*8 + 1. */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		/* s2BIT(15): start a read transaction for this index. */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only save entries that are actually populated. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3725 
/*
 * s2io_enable_msi_x - allocate MSI-X bookkeeping tables, program the
 * Rx interrupt steering (rx_mat) and enable MSI-X on the device.
 * Vector 0 is the alarm vector; vectors 1..n serve the Rx rings.
 * Returns 0 on success or -ENOMEM on allocation/enable failure.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0 is the alarm/error vector shared by all FIFOs. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use the sparse table index (i-1)*8 + 1. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupt to its own MSI-X vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3809 
/* Handle software interrupt used during MSI(X) test */
static irqreturn_t s2io_test_intr(int irq, void *dev_id)
{
	struct s2io_nic *sp = dev_id;

	/* Record that the test vector fired and wake the waiting tester. */
	sp->msi_detected = 1;
	wake_up(&sp->msi_wait);

	return IRQ_HANDLED;
}
3820 
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler on MSI-X vector 1. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Program a one-shot scheduled interrupt routed to MSI index 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to 100ms to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the scheduled-interrupt control register. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3863 
3864 static void remove_msix_isr(struct s2io_nic *sp)
3865 {
3866 	int i;
3867 	u16 msi_control;
3868 
3869 	for (i = 0; i < sp->num_entries; i++) {
3870 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3871 			int vector = sp->entries[i].vector;
3872 			void *arg = sp->s2io_entries[i].arg;
3873 			free_irq(vector, arg);
3874 		}
3875 	}
3876 
3877 	kfree(sp->entries);
3878 	kfree(sp->s2io_entries);
3879 	sp->entries = NULL;
3880 	sp->s2io_entries = NULL;
3881 
3882 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3883 	msi_control &= 0xFFFE; /* Disable MSI */
3884 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3885 
3886 	pci_disable_msix(sp->pdev);
3887 }
3888 
/* Release the legacy INTx interrupt line registered for this device. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3893 
3894 /* ********************************************************* *
3895  * Functions defined below concern the OS part of the driver *
3896  * ********************************************************* */
3897 
3898 /**
3899  *  s2io_open - open entry point of the driver
3900  *  @dev : pointer to the device structure.
3901  *  Description:
3902  *  This function is the open entry point of the driver. It mainly calls a
3903  *  function to allocate Rx buffers and inserts them into the buffer
3904  *  descriptors and then enables the Rx part of the NIC.
3905  *  Return value:
3906  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3907  *   file on failure.
3908  */
3909 
3910 static int s2io_open(struct net_device *dev)
3911 {
3912 	struct s2io_nic *sp = netdev_priv(dev);
3913 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3914 	int err = 0;
3915 
3916 	/*
3917 	 * Make sure you have link off by default every time
3918 	 * Nic is initialized
3919 	 */
3920 	netif_carrier_off(dev);
3921 	sp->last_link_state = 0;
3922 
3923 	/* Initialize H/W and enable interrupts */
3924 	err = s2io_card_up(sp);
3925 	if (err) {
3926 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3927 			  dev->name);
3928 		goto hw_init_failed;
3929 	}
3930 
3931 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3932 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3933 		s2io_card_down(sp);
3934 		err = -ENODEV;
3935 		goto hw_init_failed;
3936 	}
3937 	s2io_start_all_tx_queue(sp);
3938 	return 0;
3939 
3940 hw_init_failed:
3941 	if (sp->config.intr_type == MSI_X) {
3942 		if (sp->entries) {
3943 			kfree(sp->entries);
3944 			swstats->mem_freed += sp->num_entries *
3945 				sizeof(struct msix_entry);
3946 		}
3947 		if (sp->s2io_entries) {
3948 			kfree(sp->s2io_entries);
3949 			swstats->mem_freed += sp->num_entries *
3950 				sizeof(struct s2io_msix_entry);
3951 		}
3952 	}
3953 	return err;
3954 }
3955 
3956 /**
3957  *  s2io_close -close entry point of the driver
3958  *  @dev : device pointer.
3959  *  Description:
3960  *  This is the stop entry point of the driver. It needs to undo exactly
3961  *  whatever was done by the open entry point,thus it's usually referred to
3962  *  as the close function.Among other things this function mainly stops the
3963  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3964  *  Return value:
3965  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3966  *  file on failure.
3967  */
3968 
3969 static int s2io_close(struct net_device *dev)
3970 {
3971 	struct s2io_nic *sp = netdev_priv(dev);
3972 	struct config_param *config = &sp->config;
3973 	u64 tmp64;
3974 	int offset;
3975 
3976 	/* Return if the device is already closed               *
3977 	 *  Can happen when s2io_card_up failed in change_mtu    *
3978 	 */
3979 	if (!is_s2io_card_up(sp))
3980 		return 0;
3981 
3982 	s2io_stop_all_tx_queue(sp);
3983 	/* delete all populated mac entries */
3984 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3985 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3986 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3987 			do_s2io_delete_unicast_mc(sp, tmp64);
3988 	}
3989 
3990 	s2io_card_down(sp);
3991 
3992 	return 0;
3993 }
3994 
3995 /**
3996  *  s2io_xmit - Tx entry point of te driver
3997  *  @skb : the socket buffer containing the Tx data.
3998  *  @dev : device pointer.
3999  *  Description :
4000  *  This function is the Tx entry point of the driver. S2IO NIC supports
4001  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4002  *  NOTE: when device can't queue the pkt,just the trans_start variable will
4003  *  not be upadted.
4004  *  Return value:
4005  *  0 on success & 1 on failure.
4006  */
4007 
4008 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4009 {
4010 	struct s2io_nic *sp = netdev_priv(dev);
4011 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4012 	register u64 val64;
4013 	struct TxD *txdp;
4014 	struct TxFIFO_element __iomem *tx_fifo;
4015 	unsigned long flags = 0;
4016 	u16 vlan_tag = 0;
4017 	struct fifo_info *fifo = NULL;
4018 	int offload_type;
4019 	int enable_per_list_interrupt = 0;
4020 	struct config_param *config = &sp->config;
4021 	struct mac_info *mac_control = &sp->mac_control;
4022 	struct stat_block *stats = mac_control->stats_info;
4023 	struct swStat *swstats = &stats->sw_stat;
4024 
4025 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4026 
4027 	if (unlikely(skb->len <= 0)) {
4028 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4029 		dev_kfree_skb_any(skb);
4030 		return NETDEV_TX_OK;
4031 	}
4032 
4033 	if (!is_s2io_card_up(sp)) {
4034 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4035 			  dev->name);
4036 		dev_kfree_skb_any(skb);
4037 		return NETDEV_TX_OK;
4038 	}
4039 
4040 	queue = 0;
4041 	if (skb_vlan_tag_present(skb))
4042 		vlan_tag = skb_vlan_tag_get(skb);
4043 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4044 		if (skb->protocol == htons(ETH_P_IP)) {
4045 			struct iphdr *ip;
4046 			struct tcphdr *th;
4047 			ip = ip_hdr(skb);
4048 
4049 			if (!ip_is_fragment(ip)) {
4050 				th = (struct tcphdr *)(((unsigned char *)ip) +
4051 						       ip->ihl*4);
4052 
4053 				if (ip->protocol == IPPROTO_TCP) {
4054 					queue_len = sp->total_tcp_fifos;
4055 					queue = (ntohs(th->source) +
4056 						 ntohs(th->dest)) &
4057 						sp->fifo_selector[queue_len - 1];
4058 					if (queue >= queue_len)
4059 						queue = queue_len - 1;
4060 				} else if (ip->protocol == IPPROTO_UDP) {
4061 					queue_len = sp->total_udp_fifos;
4062 					queue = (ntohs(th->source) +
4063 						 ntohs(th->dest)) &
4064 						sp->fifo_selector[queue_len - 1];
4065 					if (queue >= queue_len)
4066 						queue = queue_len - 1;
4067 					queue += sp->udp_fifo_idx;
4068 					if (skb->len > 1024)
4069 						enable_per_list_interrupt = 1;
4070 				}
4071 			}
4072 		}
4073 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4074 		/* get fifo number based on skb->priority value */
4075 		queue = config->fifo_mapping
4076 			[skb->priority & (MAX_TX_FIFOS - 1)];
4077 	fifo = &mac_control->fifos[queue];
4078 
4079 	spin_lock_irqsave(&fifo->tx_lock, flags);
4080 
4081 	if (sp->config.multiq) {
4082 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4083 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4084 			return NETDEV_TX_BUSY;
4085 		}
4086 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4087 		if (netif_queue_stopped(dev)) {
4088 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4089 			return NETDEV_TX_BUSY;
4090 		}
4091 	}
4092 
4093 	put_off = (u16)fifo->tx_curr_put_info.offset;
4094 	get_off = (u16)fifo->tx_curr_get_info.offset;
4095 	txdp = fifo->list_info[put_off].list_virt_addr;
4096 
4097 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4098 	/* Avoid "put" pointer going beyond "get" pointer */
4099 	if (txdp->Host_Control ||
4100 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4101 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4102 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4103 		dev_kfree_skb_any(skb);
4104 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4105 		return NETDEV_TX_OK;
4106 	}
4107 
4108 	offload_type = s2io_offload_type(skb);
4109 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4110 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4111 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4112 	}
4113 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4114 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4115 				    TXD_TX_CKO_TCP_EN |
4116 				    TXD_TX_CKO_UDP_EN);
4117 	}
4118 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4119 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4120 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4121 	if (enable_per_list_interrupt)
4122 		if (put_off & (queue_len >> 5))
4123 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4124 	if (vlan_tag) {
4125 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4126 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4127 	}
4128 
4129 	frg_len = skb_headlen(skb);
4130 	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4131 					      frg_len, DMA_TO_DEVICE);
4132 	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4133 		goto pci_map_failed;
4134 
4135 	txdp->Host_Control = (unsigned long)skb;
4136 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4137 
4138 	frg_cnt = skb_shinfo(skb)->nr_frags;
4139 	/* For fragmented SKB. */
4140 	for (i = 0; i < frg_cnt; i++) {
4141 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4142 		/* A '0' length fragment will be ignored */
4143 		if (!skb_frag_size(frag))
4144 			continue;
4145 		txdp++;
4146 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4147 							     frag, 0,
4148 							     skb_frag_size(frag),
4149 							     DMA_TO_DEVICE);
4150 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4151 	}
4152 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4153 
4154 	tx_fifo = mac_control->tx_FIFO_start[queue];
4155 	val64 = fifo->list_info[put_off].list_phy_addr;
4156 	writeq(val64, &tx_fifo->TxDL_Pointer);
4157 
4158 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4159 		 TX_FIFO_LAST_LIST);
4160 	if (offload_type)
4161 		val64 |= TX_FIFO_SPECIAL_FUNC;
4162 
4163 	writeq(val64, &tx_fifo->List_Control);
4164 
4165 	put_off++;
4166 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4167 		put_off = 0;
4168 	fifo->tx_curr_put_info.offset = put_off;
4169 
4170 	/* Avoid "put" pointer going beyond "get" pointer */
4171 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4172 		swstats->fifo_full_cnt++;
4173 		DBG_PRINT(TX_DBG,
4174 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4175 			  put_off, get_off);
4176 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4177 	}
4178 	swstats->mem_allocated += skb->truesize;
4179 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180 
4181 	if (sp->config.intr_type == MSI_X)
4182 		tx_intr_handler(fifo);
4183 
4184 	return NETDEV_TX_OK;
4185 
4186 pci_map_failed:
4187 	swstats->pci_map_fail_cnt++;
4188 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4189 	swstats->mem_freed += skb->truesize;
4190 	dev_kfree_skb_any(skb);
4191 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4192 	return NETDEV_TX_OK;
4193 }
4194 
4195 static void
4196 s2io_alarm_handle(struct timer_list *t)
4197 {
4198 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4199 	struct net_device *dev = sp->dev;
4200 
4201 	s2io_handle_errors(dev);
4202 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4203 }
4204 
/*
 * MSI-X handler for a single Rx ring.  Under NAPI, masks the ring's MSI-X
 * vector and schedules the ring's NAPI poll; otherwise processes the ring
 * and replenishes Rx buffers inline.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/*
		 * Mask this ring's vector in xmsi_mask_reg; the register is
		 * addressed per-byte, one byte per ring (ring 0 uses a
		 * different mask value than the others).
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		/* readback presumably flushes the posted write — NOTE(review) */
		val8 = readb(addr);
		napi_schedule(&ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4231 
/*
 * MSI-X handler shared by all Tx FIFOs.  Masks general interrupts while
 * servicing Tx completions and any TXPIC events, then restores the mask.
 * Returns IRQ_NONE when the interrupt was not raised by this device.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	/* All-ones reads back when the device is gone (e.g. surprise removal) */
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask everything while we service Tx */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/* tx_traffic_int is write-1-to-clear */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the driver's interrupt mask and flush */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4268 
/*
 * Handle TXPIC interrupts, primarily GPIO link up/down events: updates
 * the link state, the adapter LED, and the GPIO interrupt masks so that
 * only the "other" link transition remains unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* result discarded; presumably a read to flush the writes above —
	 * NOTE(review): confirm against hardware spec */
	val64 = readq(&bar0->gpio_int_mask);
}
4328 
4329 /**
4330  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4331  *  @value: alarm bits
4332  *  @addr: address value
4333  *  @cnt: counter variable
4334  *  Description: Check for alarm and increment the counter
4335  *  Return Value:
4336  *  1 - if alarm bit set
4337  *  0 - if alarm bit is not set
4338  */
4339 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4340 				 unsigned long long *cnt)
4341 {
4342 	u64 val64;
4343 	val64 = readq(addr);
4344 	if (val64 & value) {
4345 		writeq(val64, addr);
4346 		(*cnt)++;
4347 		return 1;
4348 	}
4349 	return 0;
4350 
4351 }
4352 
4353 /**
4354  *  s2io_handle_errors - Xframe error indication handler
4355  *  @dev_id: opaque handle to dev
4356  *  Description: Handle alarms such as loss of link, single or
4357  *  double ECC errors, critical and serious errors.
4358  *  Return Value:
4359  *  NONE
4360  */
4361 static void s2io_handle_errors(void *dev_id)
4362 {
4363 	struct net_device *dev = (struct net_device *)dev_id;
4364 	struct s2io_nic *sp = netdev_priv(dev);
4365 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4366 	u64 temp64 = 0, val64 = 0;
4367 	int i = 0;
4368 
4369 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4370 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4371 
4372 	if (!is_s2io_card_up(sp))
4373 		return;
4374 
4375 	if (pci_channel_offline(sp->pdev))
4376 		return;
4377 
4378 	memset(&sw_stat->ring_full_cnt, 0,
4379 	       sizeof(sw_stat->ring_full_cnt));
4380 
4381 	/* Handling the XPAK counters update */
4382 	if (stats->xpak_timer_count < 72000) {
4383 		/* waiting for an hour */
4384 		stats->xpak_timer_count++;
4385 	} else {
4386 		s2io_updt_xpak_counter(dev);
4387 		/* reset the count to zero */
4388 		stats->xpak_timer_count = 0;
4389 	}
4390 
4391 	/* Handling link status change error Intr */
4392 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4393 		val64 = readq(&bar0->mac_rmac_err_reg);
4394 		writeq(val64, &bar0->mac_rmac_err_reg);
4395 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4396 			schedule_work(&sp->set_link_task);
4397 	}
4398 
4399 	/* In case of a serious error, the device will be Reset. */
4400 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4401 				  &sw_stat->serious_err_cnt))
4402 		goto reset;
4403 
4404 	/* Check for data parity error */
4405 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4406 				  &sw_stat->parity_err_cnt))
4407 		goto reset;
4408 
4409 	/* Check for ring full counter */
4410 	if (sp->device_type == XFRAME_II_DEVICE) {
4411 		val64 = readq(&bar0->ring_bump_counter1);
4412 		for (i = 0; i < 4; i++) {
4413 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4414 			temp64 >>= 64 - ((i+1)*16);
4415 			sw_stat->ring_full_cnt[i] += temp64;
4416 		}
4417 
4418 		val64 = readq(&bar0->ring_bump_counter2);
4419 		for (i = 0; i < 4; i++) {
4420 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4421 			temp64 >>= 64 - ((i+1)*16);
4422 			sw_stat->ring_full_cnt[i+4] += temp64;
4423 		}
4424 	}
4425 
4426 	val64 = readq(&bar0->txdma_int_status);
4427 	/*check for pfc_err*/
4428 	if (val64 & TXDMA_PFC_INT) {
4429 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4430 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4431 					  PFC_PCIX_ERR,
4432 					  &bar0->pfc_err_reg,
4433 					  &sw_stat->pfc_err_cnt))
4434 			goto reset;
4435 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4436 				      &bar0->pfc_err_reg,
4437 				      &sw_stat->pfc_err_cnt);
4438 	}
4439 
4440 	/*check for tda_err*/
4441 	if (val64 & TXDMA_TDA_INT) {
4442 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4443 					  TDA_SM0_ERR_ALARM |
4444 					  TDA_SM1_ERR_ALARM,
4445 					  &bar0->tda_err_reg,
4446 					  &sw_stat->tda_err_cnt))
4447 			goto reset;
4448 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4449 				      &bar0->tda_err_reg,
4450 				      &sw_stat->tda_err_cnt);
4451 	}
4452 	/*check for pcc_err*/
4453 	if (val64 & TXDMA_PCC_INT) {
4454 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4455 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4456 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4457 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4458 					  PCC_TXB_ECC_DB_ERR,
4459 					  &bar0->pcc_err_reg,
4460 					  &sw_stat->pcc_err_cnt))
4461 			goto reset;
4462 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4463 				      &bar0->pcc_err_reg,
4464 				      &sw_stat->pcc_err_cnt);
4465 	}
4466 
4467 	/*check for tti_err*/
4468 	if (val64 & TXDMA_TTI_INT) {
4469 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4470 					  &bar0->tti_err_reg,
4471 					  &sw_stat->tti_err_cnt))
4472 			goto reset;
4473 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4474 				      &bar0->tti_err_reg,
4475 				      &sw_stat->tti_err_cnt);
4476 	}
4477 
4478 	/*check for lso_err*/
4479 	if (val64 & TXDMA_LSO_INT) {
4480 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4481 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4482 					  &bar0->lso_err_reg,
4483 					  &sw_stat->lso_err_cnt))
4484 			goto reset;
4485 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4486 				      &bar0->lso_err_reg,
4487 				      &sw_stat->lso_err_cnt);
4488 	}
4489 
4490 	/*check for tpa_err*/
4491 	if (val64 & TXDMA_TPA_INT) {
4492 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4493 					  &bar0->tpa_err_reg,
4494 					  &sw_stat->tpa_err_cnt))
4495 			goto reset;
4496 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4497 				      &bar0->tpa_err_reg,
4498 				      &sw_stat->tpa_err_cnt);
4499 	}
4500 
4501 	/*check for sm_err*/
4502 	if (val64 & TXDMA_SM_INT) {
4503 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4504 					  &bar0->sm_err_reg,
4505 					  &sw_stat->sm_err_cnt))
4506 			goto reset;
4507 	}
4508 
4509 	val64 = readq(&bar0->mac_int_status);
4510 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4511 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4512 					  &bar0->mac_tmac_err_reg,
4513 					  &sw_stat->mac_tmac_err_cnt))
4514 			goto reset;
4515 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4516 				      TMAC_DESC_ECC_SG_ERR |
4517 				      TMAC_DESC_ECC_DB_ERR,
4518 				      &bar0->mac_tmac_err_reg,
4519 				      &sw_stat->mac_tmac_err_cnt);
4520 	}
4521 
4522 	val64 = readq(&bar0->xgxs_int_status);
4523 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4524 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4525 					  &bar0->xgxs_txgxs_err_reg,
4526 					  &sw_stat->xgxs_txgxs_err_cnt))
4527 			goto reset;
4528 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4529 				      &bar0->xgxs_txgxs_err_reg,
4530 				      &sw_stat->xgxs_txgxs_err_cnt);
4531 	}
4532 
4533 	val64 = readq(&bar0->rxdma_int_status);
4534 	if (val64 & RXDMA_INT_RC_INT_M) {
4535 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4536 					  RC_FTC_ECC_DB_ERR |
4537 					  RC_PRCn_SM_ERR_ALARM |
4538 					  RC_FTC_SM_ERR_ALARM,
4539 					  &bar0->rc_err_reg,
4540 					  &sw_stat->rc_err_cnt))
4541 			goto reset;
4542 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4543 				      RC_FTC_ECC_SG_ERR |
4544 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4545 				      &sw_stat->rc_err_cnt);
4546 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4547 					  PRC_PCI_AB_WR_Rn |
4548 					  PRC_PCI_AB_F_WR_Rn,
4549 					  &bar0->prc_pcix_err_reg,
4550 					  &sw_stat->prc_pcix_err_cnt))
4551 			goto reset;
4552 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4553 				      PRC_PCI_DP_WR_Rn |
4554 				      PRC_PCI_DP_F_WR_Rn,
4555 				      &bar0->prc_pcix_err_reg,
4556 				      &sw_stat->prc_pcix_err_cnt);
4557 	}
4558 
4559 	if (val64 & RXDMA_INT_RPA_INT_M) {
4560 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4561 					  &bar0->rpa_err_reg,
4562 					  &sw_stat->rpa_err_cnt))
4563 			goto reset;
4564 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4565 				      &bar0->rpa_err_reg,
4566 				      &sw_stat->rpa_err_cnt);
4567 	}
4568 
4569 	if (val64 & RXDMA_INT_RDA_INT_M) {
4570 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4571 					  RDA_FRM_ECC_DB_N_AERR |
4572 					  RDA_SM1_ERR_ALARM |
4573 					  RDA_SM0_ERR_ALARM |
4574 					  RDA_RXD_ECC_DB_SERR,
4575 					  &bar0->rda_err_reg,
4576 					  &sw_stat->rda_err_cnt))
4577 			goto reset;
4578 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4579 				      RDA_FRM_ECC_SG_ERR |
4580 				      RDA_MISC_ERR |
4581 				      RDA_PCIX_ERR,
4582 				      &bar0->rda_err_reg,
4583 				      &sw_stat->rda_err_cnt);
4584 	}
4585 
4586 	if (val64 & RXDMA_INT_RTI_INT_M) {
4587 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4588 					  &bar0->rti_err_reg,
4589 					  &sw_stat->rti_err_cnt))
4590 			goto reset;
4591 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4592 				      &bar0->rti_err_reg,
4593 				      &sw_stat->rti_err_cnt);
4594 	}
4595 
4596 	val64 = readq(&bar0->mac_int_status);
4597 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4598 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4599 					  &bar0->mac_rmac_err_reg,
4600 					  &sw_stat->mac_rmac_err_cnt))
4601 			goto reset;
4602 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4603 				      RMAC_SINGLE_ECC_ERR |
4604 				      RMAC_DOUBLE_ECC_ERR,
4605 				      &bar0->mac_rmac_err_reg,
4606 				      &sw_stat->mac_rmac_err_cnt);
4607 	}
4608 
4609 	val64 = readq(&bar0->xgxs_int_status);
4610 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4611 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4612 					  &bar0->xgxs_rxgxs_err_reg,
4613 					  &sw_stat->xgxs_rxgxs_err_cnt))
4614 			goto reset;
4615 	}
4616 
4617 	val64 = readq(&bar0->mc_int_status);
4618 	if (val64 & MC_INT_STATUS_MC_INT) {
4619 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4620 					  &bar0->mc_err_reg,
4621 					  &sw_stat->mc_err_cnt))
4622 			goto reset;
4623 
4624 		/* Handling Ecc errors */
4625 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4626 			writeq(val64, &bar0->mc_err_reg);
4627 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4628 				sw_stat->double_ecc_errs++;
4629 				if (sp->device_type != XFRAME_II_DEVICE) {
4630 					/*
4631 					 * Reset XframeI only if critical error
4632 					 */
4633 					if (val64 &
4634 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4635 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4636 						goto reset;
4637 				}
4638 			} else
4639 				sw_stat->single_ecc_errs++;
4640 		}
4641 	}
4642 	return;
4643 
4644 reset:
4645 	s2io_stop_all_tx_queue(sp);
4646 	schedule_work(&sp->rst_timer_task);
4647 	sw_stat->soft_reset_cnt++;
4648 }
4649 
4650 /**
4651  *  s2io_isr - ISR handler of the device .
4652  *  @irq: the irq of the device.
4653  *  @dev_id: a void pointer to the dev structure of the NIC.
4654  *  Description:  This function is the ISR handler of the device. It
4655  *  identifies the reason for the interrupt and calls the relevant
4656  *  service routines. As a contongency measure, this ISR allocates the
4657  *  recv buffers, if their numbers are below the panic value which is
4658  *  presently set to 25% of the original number of rcv buffers allocated.
4659  *  Return value:
4660  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4661  *   IRQ_NONE: will be returned if interrupt is not from our device
4662  */
4663 static irqreturn_t s2io_isr(int irq, void *dev_id)
4664 {
4665 	struct net_device *dev = (struct net_device *)dev_id;
4666 	struct s2io_nic *sp = netdev_priv(dev);
4667 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4668 	int i;
4669 	u64 reason = 0;
4670 	struct mac_info *mac_control;
4671 	struct config_param *config;
4672 
4673 	/* Pretend we handled any irq's from a disconnected card */
4674 	if (pci_channel_offline(sp->pdev))
4675 		return IRQ_NONE;
4676 
4677 	if (!is_s2io_card_up(sp))
4678 		return IRQ_NONE;
4679 
4680 	config = &sp->config;
4681 	mac_control = &sp->mac_control;
4682 
4683 	/*
4684 	 * Identify the cause for interrupt and call the appropriate
4685 	 * interrupt handler. Causes for the interrupt could be;
4686 	 * 1. Rx of packet.
4687 	 * 2. Tx complete.
4688 	 * 3. Link down.
4689 	 */
4690 	reason = readq(&bar0->general_int_status);
4691 
4692 	if (unlikely(reason == S2IO_MINUS_ONE))
4693 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4694 
4695 	if (reason &
4696 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4697 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4698 
4699 		if (config->napi) {
4700 			if (reason & GEN_INTR_RXTRAFFIC) {
4701 				napi_schedule(&sp->napi);
4702 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4703 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4704 				readl(&bar0->rx_traffic_int);
4705 			}
4706 		} else {
4707 			/*
4708 			 * rx_traffic_int reg is an R1 register, writing all 1's
4709 			 * will ensure that the actual interrupt causing bit
4710 			 * get's cleared and hence a read can be avoided.
4711 			 */
4712 			if (reason & GEN_INTR_RXTRAFFIC)
4713 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4714 
4715 			for (i = 0; i < config->rx_ring_num; i++) {
4716 				struct ring_info *ring = &mac_control->rings[i];
4717 
4718 				rx_intr_handler(ring, 0);
4719 			}
4720 		}
4721 
4722 		/*
4723 		 * tx_traffic_int reg is an R1 register, writing all 1's
4724 		 * will ensure that the actual interrupt causing bit get's
4725 		 * cleared and hence a read can be avoided.
4726 		 */
4727 		if (reason & GEN_INTR_TXTRAFFIC)
4728 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4729 
4730 		for (i = 0; i < config->tx_fifo_num; i++)
4731 			tx_intr_handler(&mac_control->fifos[i]);
4732 
4733 		if (reason & GEN_INTR_TXPIC)
4734 			s2io_txpic_intr_handle(sp);
4735 
4736 		/*
4737 		 * Reallocate the buffers from the interrupt handler itself.
4738 		 */
4739 		if (!config->napi) {
4740 			for (i = 0; i < config->rx_ring_num; i++) {
4741 				struct ring_info *ring = &mac_control->rings[i];
4742 
4743 				s2io_chk_rx_buffers(sp, ring);
4744 			}
4745 		}
4746 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4747 		readl(&bar0->general_int_status);
4748 
4749 		return IRQ_HANDLED;
4750 
4751 	} else if (!reason) {
4752 		/* The interrupt was not raised by us */
4753 		return IRQ_NONE;
4754 	}
4755 
4756 	return IRQ_HANDLED;
4757 }
4758 
4759 /*
4760  * s2io_updt_stats -
4761  */
4762 static void s2io_updt_stats(struct s2io_nic *sp)
4763 {
4764 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4765 	u64 val64;
4766 	int cnt = 0;
4767 
4768 	if (is_s2io_card_up(sp)) {
4769 		/* Apprx 30us on a 133 MHz bus */
4770 		val64 = SET_UPDT_CLICKS(10) |
4771 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4772 		writeq(val64, &bar0->stat_cfg);
4773 		do {
4774 			udelay(100);
4775 			val64 = readq(&bar0->stat_cfg);
4776 			if (!(val64 & s2BIT(0)))
4777 				break;
4778 			cnt++;
4779 			if (cnt == 5)
4780 				break; /* Updt failed */
4781 		} while (1);
4782 	}
4783 }
4784 
4785 /**
4786  *  s2io_get_stats - Updates the device statistics structure.
4787  *  @dev : pointer to the device structure.
4788  *  Description:
4789  *  This function updates the device statistics structure in the s2io_nic
4790  *  structure and returns a pointer to the same.
4791  *  Return value:
4792  *  pointer to the updated net_device_stats structure.
4793  */
4794 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4795 {
4796 	struct s2io_nic *sp = netdev_priv(dev);
4797 	struct mac_info *mac_control = &sp->mac_control;
4798 	struct stat_block *stats = mac_control->stats_info;
4799 	u64 delta;
4800 
4801 	/* Configure Stats for immediate updt */
4802 	s2io_updt_stats(sp);
4803 
4804 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4805 	 * This can be done while running by changing the MTU.  To prevent the
4806 	 * system from having the stats zero'ed, the driver keeps a copy of the
4807 	 * last update to the system (which is also zero'ed on reset).  This
4808 	 * enables the driver to accurately know the delta between the last
4809 	 * update and the current update.
4810 	 */
4811 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4812 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4813 	sp->stats.rx_packets += delta;
4814 	dev->stats.rx_packets += delta;
4815 
4816 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4817 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4818 	sp->stats.tx_packets += delta;
4819 	dev->stats.tx_packets += delta;
4820 
4821 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4822 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4823 	sp->stats.rx_bytes += delta;
4824 	dev->stats.rx_bytes += delta;
4825 
4826 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4827 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4828 	sp->stats.tx_bytes += delta;
4829 	dev->stats.tx_bytes += delta;
4830 
4831 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4832 	sp->stats.rx_errors += delta;
4833 	dev->stats.rx_errors += delta;
4834 
4835 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4836 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4837 	sp->stats.tx_errors += delta;
4838 	dev->stats.tx_errors += delta;
4839 
4840 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4841 	sp->stats.rx_dropped += delta;
4842 	dev->stats.rx_dropped += delta;
4843 
4844 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4845 	sp->stats.tx_dropped += delta;
4846 	dev->stats.tx_dropped += delta;
4847 
4848 	/* The adapter MAC interprets pause frames as multicast packets, but
4849 	 * does not pass them up.  This erroneously increases the multicast
4850 	 * packet count and needs to be deducted when the multicast frame count
4851 	 * is queried.
4852 	 */
4853 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4854 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4855 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4856 	delta -= sp->stats.multicast;
4857 	sp->stats.multicast += delta;
4858 	dev->stats.multicast += delta;
4859 
4860 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4861 		le32_to_cpu(stats->rmac_usized_frms)) +
4862 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4863 	sp->stats.rx_length_errors += delta;
4864 	dev->stats.rx_length_errors += delta;
4865 
4866 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4867 	sp->stats.rx_crc_errors += delta;
4868 	dev->stats.rx_crc_errors += delta;
4869 
4870 	return &dev->stats;
4871 }
4872 
4873 /**
4874  *  s2io_set_multicast - entry point for multicast address enable/disable.
4875  *  @dev : pointer to the device structure
4876  *  @may_sleep: parameter indicates if sleeping when waiting for command
4877  *  complete
4878  *  Description:
4879  *  This function is a driver entry point which gets called by the kernel
4880  *  whenever multicast addresses must be enabled/disabled. This also gets
4881  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4882  *  determine, if multicast address must be enabled or if promiscuous mode
4883  *  is to be disabled etc.
4884  *  Return value:
4885  *  void.
4886  */
4887 static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
4888 {
4889 	int i, j, prev_cnt;
4890 	struct netdev_hw_addr *ha;
4891 	struct s2io_nic *sp = netdev_priv(dev);
4892 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4893 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4894 		0xfeffffffffffULL;
4895 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4896 	void __iomem *add;
4897 	struct config_param *config = &sp->config;
4898 
4899 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4900 		/*  Enable all Multicast addresses */
4901 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4902 		       &bar0->rmac_addr_data0_mem);
4903 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4904 		       &bar0->rmac_addr_data1_mem);
4905 		val64 = RMAC_ADDR_CMD_MEM_WE |
4906 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4907 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4908 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4909 		/* Wait till command completes */
4910 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4911 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4912 				      S2IO_BIT_RESET, may_sleep);
4913 
4914 		sp->m_cast_flg = 1;
4915 		sp->all_multi_pos = config->max_mc_addr - 1;
4916 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4917 		/*  Disable all Multicast addresses */
4918 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4919 		       &bar0->rmac_addr_data0_mem);
4920 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4921 		       &bar0->rmac_addr_data1_mem);
4922 		val64 = RMAC_ADDR_CMD_MEM_WE |
4923 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4924 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4925 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4926 		/* Wait till command completes */
4927 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4928 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4929 				      S2IO_BIT_RESET, may_sleep);
4930 
4931 		sp->m_cast_flg = 0;
4932 		sp->all_multi_pos = 0;
4933 	}
4934 
4935 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4936 		/*  Put the NIC into promiscuous mode */
4937 		add = &bar0->mac_cfg;
4938 		val64 = readq(&bar0->mac_cfg);
4939 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4940 
4941 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4942 		writel((u32)val64, add);
4943 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4944 		writel((u32) (val64 >> 32), (add + 4));
4945 
4946 		if (vlan_tag_strip != 1) {
4947 			val64 = readq(&bar0->rx_pa_cfg);
4948 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4949 			writeq(val64, &bar0->rx_pa_cfg);
4950 			sp->vlan_strip_flag = 0;
4951 		}
4952 
4953 		val64 = readq(&bar0->mac_cfg);
4954 		sp->promisc_flg = 1;
4955 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4956 			  dev->name);
4957 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4958 		/*  Remove the NIC from promiscuous mode */
4959 		add = &bar0->mac_cfg;
4960 		val64 = readq(&bar0->mac_cfg);
4961 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4962 
4963 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4964 		writel((u32)val64, add);
4965 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4966 		writel((u32) (val64 >> 32), (add + 4));
4967 
4968 		if (vlan_tag_strip != 0) {
4969 			val64 = readq(&bar0->rx_pa_cfg);
4970 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4971 			writeq(val64, &bar0->rx_pa_cfg);
4972 			sp->vlan_strip_flag = 1;
4973 		}
4974 
4975 		val64 = readq(&bar0->mac_cfg);
4976 		sp->promisc_flg = 0;
4977 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4978 	}
4979 
4980 	/*  Update individual M_CAST address list */
4981 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4982 		if (netdev_mc_count(dev) >
4983 		    (config->max_mc_addr - config->max_mac_addr)) {
4984 			DBG_PRINT(ERR_DBG,
4985 				  "%s: No more Rx filters can be added - "
4986 				  "please enable ALL_MULTI instead\n",
4987 				  dev->name);
4988 			return;
4989 		}
4990 
4991 		prev_cnt = sp->mc_addr_count;
4992 		sp->mc_addr_count = netdev_mc_count(dev);
4993 
4994 		/* Clear out the previous list of Mc in the H/W. */
4995 		for (i = 0; i < prev_cnt; i++) {
4996 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4997 			       &bar0->rmac_addr_data0_mem);
4998 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4999 			       &bar0->rmac_addr_data1_mem);
5000 			val64 = RMAC_ADDR_CMD_MEM_WE |
5001 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5002 				RMAC_ADDR_CMD_MEM_OFFSET
5003 				(config->mc_start_offset + i);
5004 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5005 
5006 			/* Wait for command completes */
5007 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5008 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5009 						  S2IO_BIT_RESET, may_sleep)) {
5010 				DBG_PRINT(ERR_DBG,
5011 					  "%s: Adding Multicasts failed\n",
5012 					  dev->name);
5013 				return;
5014 			}
5015 		}
5016 
5017 		/* Create the new Rx filter list and update the same in H/W. */
5018 		i = 0;
5019 		netdev_for_each_mc_addr(ha, dev) {
5020 			mac_addr = 0;
5021 			for (j = 0; j < ETH_ALEN; j++) {
5022 				mac_addr |= ha->addr[j];
5023 				mac_addr <<= 8;
5024 			}
5025 			mac_addr >>= 8;
5026 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5027 			       &bar0->rmac_addr_data0_mem);
5028 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5029 			       &bar0->rmac_addr_data1_mem);
5030 			val64 = RMAC_ADDR_CMD_MEM_WE |
5031 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5032 				RMAC_ADDR_CMD_MEM_OFFSET
5033 				(i + config->mc_start_offset);
5034 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5035 
5036 			/* Wait for command completes */
5037 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5038 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5039 						  S2IO_BIT_RESET, may_sleep)) {
5040 				DBG_PRINT(ERR_DBG,
5041 					  "%s: Adding Multicasts failed\n",
5042 					  dev->name);
5043 				return;
5044 			}
5045 			i++;
5046 		}
5047 	}
5048 }
5049 
/* NDO wrapper for s2io_set_multicast; passes may_sleep=false so the
 * command-completion waits poll instead of sleeping.
 */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
	s2io_set_multicast(dev, false);
}
5055 
5056 /* read from CAM unicast & multicast addresses and store it in
5057  * def_mac_addr structure
5058  */
5059 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5060 {
5061 	int offset;
5062 	u64 mac_addr = 0x0;
5063 	struct config_param *config = &sp->config;
5064 
5065 	/* store unicast & multicast mac addresses */
5066 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5067 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5068 		/* if read fails disable the entry */
5069 		if (mac_addr == FAILURE)
5070 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5071 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5072 	}
5073 }
5074 
5075 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5076 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5077 {
5078 	int offset;
5079 	struct config_param *config = &sp->config;
5080 	/* restore unicast mac address */
5081 	for (offset = 0; offset < config->max_mac_addr; offset++)
5082 		do_s2io_prog_unicast(sp->dev,
5083 				     sp->def_mac_addr[offset].mac_addr);
5084 
5085 	/* restore multicast mac address */
5086 	for (offset = config->mc_start_offset;
5087 	     offset < config->max_mc_addr; offset++)
5088 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5089 }
5090 
5091 /* add a multicast MAC address to CAM */
5092 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5093 {
5094 	int i;
5095 	u64 mac_addr;
5096 	struct config_param *config = &sp->config;
5097 
5098 	mac_addr = ether_addr_to_u64(addr);
5099 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5100 		return SUCCESS;
5101 
5102 	/* check if the multicast mac already preset in CAM */
5103 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5104 		u64 tmp64;
5105 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5106 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5107 			break;
5108 
5109 		if (tmp64 == mac_addr)
5110 			return SUCCESS;
5111 	}
5112 	if (i == config->max_mc_addr) {
5113 		DBG_PRINT(ERR_DBG,
5114 			  "CAM full no space left for multicast MAC\n");
5115 		return FAILURE;
5116 	}
5117 	/* Update the internal structure with this new mac address */
5118 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5119 
5120 	return do_s2io_add_mac(sp, mac_addr, i);
5121 }
5122 
/* add MAC address to CAM: stage the address in the data register, issue
 * a write-entry command for slot 'off' and wait for it to complete.
 * Returns SUCCESS, or FAILURE if the command times out.
 */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
5145 /* deletes a specified unicast/multicast mac entry from CAM */
5146 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5147 {
5148 	int offset;
5149 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5150 	struct config_param *config = &sp->config;
5151 
5152 	for (offset = 1;
5153 	     offset < config->max_mc_addr; offset++) {
5154 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5155 		if (tmp64 == addr) {
5156 			/* disable the entry by writing  0xffffffffffffULL */
5157 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5158 				return FAILURE;
5159 			/* store the new mac list from CAM */
5160 			do_s2io_store_unicast_mc(sp);
5161 			return SUCCESS;
5162 		}
5163 	}
5164 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5165 		  (unsigned long long)addr);
5166 	return FAILURE;
5167 }
5168 
/* read mac entries from CAM: issue a read-entry command for the given
 * slot and return the 48-bit address from the data register.  Returns
 * FAILURE if the command times out.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* the 48-bit address occupies the upper bytes of the register */
	return tmp64 >> 16;
}
5191 
5192 /*
5193  * s2io_set_mac_addr - driver entry point
5194  */
5195 
5196 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5197 {
5198 	struct sockaddr *addr = p;
5199 
5200 	if (!is_valid_ether_addr(addr->sa_data))
5201 		return -EADDRNOTAVAIL;
5202 
5203 	eth_hw_addr_set(dev, addr->sa_data);
5204 
5205 	/* store the MAC address in CAM */
5206 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5207 }
5208 /**
5209  *  do_s2io_prog_unicast - Programs the Xframe mac address
5210  *  @dev : pointer to the device structure.
5211  *  @addr: a uchar pointer to the new mac address which is to be set.
5212  *  Description : This procedure will program the Xframe to receive
5213  *  frames with new Mac Address
5214  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5215  *  as defined in errno.h file on failure.
5216  */
5217 
5218 static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
5219 {
5220 	struct s2io_nic *sp = netdev_priv(dev);
5221 	register u64 mac_addr, perm_addr;
5222 	int i;
5223 	u64 tmp64;
5224 	struct config_param *config = &sp->config;
5225 
5226 	/*
5227 	 * Set the new MAC address as the new unicast filter and reflect this
5228 	 * change on the device address registered with the OS. It will be
5229 	 * at offset 0.
5230 	 */
5231 	mac_addr = ether_addr_to_u64(addr);
5232 	perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr);
5233 
5234 	/* check if the dev_addr is different than perm_addr */
5235 	if (mac_addr == perm_addr)
5236 		return SUCCESS;
5237 
5238 	/* check if the mac already preset in CAM */
5239 	for (i = 1; i < config->max_mac_addr; i++) {
5240 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5241 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5242 			break;
5243 
5244 		if (tmp64 == mac_addr) {
5245 			DBG_PRINT(INFO_DBG,
5246 				  "MAC addr:0x%llx already present in CAM\n",
5247 				  (unsigned long long)mac_addr);
5248 			return SUCCESS;
5249 		}
5250 	}
5251 	if (i == config->max_mac_addr) {
5252 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5253 		return FAILURE;
5254 	}
5255 	/* Update the internal structure with this new mac address */
5256 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5257 
5258 	return do_s2io_add_mac(sp, mac_addr, i);
5259 }
5260 
5261 /**
5262  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5263  * @dev : pointer to netdev
5264  * @cmd: pointer to the structure with parameters given by ethtool to set
5265  * link information.
5266  * Description:
5267  * The function sets different link parameters provided by the user onto
5268  * the NIC.
5269  * Return value:
5270  * 0 on success.
5271  */
5272 
5273 static int
5274 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5275 				const struct ethtool_link_ksettings *cmd)
5276 {
5277 	struct s2io_nic *sp = netdev_priv(dev);
5278 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5279 	    (cmd->base.speed != SPEED_10000) ||
5280 	    (cmd->base.duplex != DUPLEX_FULL))
5281 		return -EINVAL;
5282 	else {
5283 		s2io_close(sp->dev);
5284 		s2io_open(sp->dev);
5285 	}
5286 
5287 	return 0;
5288 }
5289 
5290 /**
5291  * s2io_ethtool_get_link_ksettings - Return link specific information.
5292  * @dev: pointer to netdev
5293  * @cmd : pointer to the structure with parameters given by ethtool
5294  * to return link information.
5295  * Description:
5296  * Returns link specific information like speed, duplex etc.. to ethtool.
5297  * Return value :
5298  * return 0 on success.
5299  */
5300 
5301 static int
5302 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5303 				struct ethtool_link_ksettings *cmd)
5304 {
5305 	struct s2io_nic *sp = netdev_priv(dev);
5306 
5307 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5308 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5309 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5310 
5311 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5312 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5313 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5314 
5315 	cmd->base.port = PORT_FIBRE;
5316 
5317 	if (netif_carrier_ok(sp->dev)) {
5318 		cmd->base.speed = SPEED_10000;
5319 		cmd->base.duplex = DUPLEX_FULL;
5320 	} else {
5321 		cmd->base.speed = SPEED_UNKNOWN;
5322 		cmd->base.duplex = DUPLEX_UNKNOWN;
5323 	}
5324 
5325 	cmd->base.autoneg = AUTONEG_DISABLE;
5326 	return 0;
5327 }
5328 
5329 /**
5330  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5331  * @dev: pointer to netdev
5332  * @info : pointer to the structure with parameters given by ethtool to
5333  * return driver information.
5334  * Description:
5335  * Returns driver specefic information like name, version etc.. to ethtool.
5336  * Return value:
5337  *  void
5338  */
5339 
5340 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5341 				  struct ethtool_drvinfo *info)
5342 {
5343 	struct s2io_nic *sp = netdev_priv(dev);
5344 
5345 	strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
5346 	strscpy(info->version, s2io_driver_version, sizeof(info->version));
5347 	strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5348 }
5349 
5350 /**
5351  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5352  *  @dev: pointer to netdev
5353  *  @regs : pointer to the structure with parameters given by ethtool for
5354  *          dumping the registers.
5355  *  @space: The input argument into which all the registers are dumped.
5356  *  Description:
5357  *  Dumps the entire register space of xFrame NIC into the user given
5358  *  buffer area.
5359  * Return value :
5360  * void .
5361  */
5362 
5363 static void s2io_ethtool_gregs(struct net_device *dev,
5364 			       struct ethtool_regs *regs, void *space)
5365 {
5366 	int i;
5367 	u64 reg;
5368 	u8 *reg_space = (u8 *)space;
5369 	struct s2io_nic *sp = netdev_priv(dev);
5370 
5371 	regs->len = XENA_REG_SPACE;
5372 	regs->version = sp->pdev->subsystem_device;
5373 
5374 	for (i = 0; i < regs->len; i += 8) {
5375 		reg = readq(sp->bar0 + i);
5376 		memcpy((reg_space + i), &reg, 8);
5377 	}
5378 }
5379 
5380 /*
5381  *  s2io_set_led - control NIC led
5382  */
5383 static void s2io_set_led(struct s2io_nic *sp, bool on)
5384 {
5385 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5386 	u16 subid = sp->pdev->subsystem_device;
5387 	u64 val64;
5388 
5389 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5390 	    ((subid & 0xFF) >= 0x07)) {
5391 		val64 = readq(&bar0->gpio_control);
5392 		if (on)
5393 			val64 |= GPIO_CTRL_GPIO_0;
5394 		else
5395 			val64 &= ~GPIO_CTRL_GPIO_0;
5396 
5397 		writeq(val64, &bar0->gpio_control);
5398 	} else {
5399 		val64 = readq(&bar0->adapter_control);
5400 		if (on)
5401 			val64 |= ADAPTER_LED_ON;
5402 		else
5403 			val64 &= ~ADAPTER_LED_ON;
5404 
5405 		writeq(val64, &bar0->adapter_control);
5406 	}
5407 
5408 }
5409 
5410 /**
5411  * s2io_ethtool_set_led - To physically identify the nic on the system.
5412  * @dev : network device
5413  * @state: led setting
5414  *
5415  * Description: Used to physically identify the NIC on the system.
5416  * The Link LED will blink for a time specified by the user for
5417  * identification.
5418  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5419  * identification is possible only if it's link is up.
5420  */
5421 
5422 static int s2io_ethtool_set_led(struct net_device *dev,
5423 				enum ethtool_phys_id_state state)
5424 {
5425 	struct s2io_nic *sp = netdev_priv(dev);
5426 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5427 	u16 subid = sp->pdev->subsystem_device;
5428 
5429 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5430 		u64 val64 = readq(&bar0->adapter_control);
5431 		if (!(val64 & ADAPTER_CNTL_EN)) {
5432 			pr_err("Adapter Link down, cannot blink LED\n");
5433 			return -EAGAIN;
5434 		}
5435 	}
5436 
5437 	switch (state) {
5438 	case ETHTOOL_ID_ACTIVE:
5439 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5440 		return 1;	/* cycle on/off once per second */
5441 
5442 	case ETHTOOL_ID_ON:
5443 		s2io_set_led(sp, true);
5444 		break;
5445 
5446 	case ETHTOOL_ID_OFF:
5447 		s2io_set_led(sp, false);
5448 		break;
5449 
5450 	case ETHTOOL_ID_INACTIVE:
5451 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5452 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5453 	}
5454 
5455 	return 0;
5456 }
5457 
5458 static void
5459 s2io_ethtool_gringparam(struct net_device *dev,
5460 			struct ethtool_ringparam *ering,
5461 			struct kernel_ethtool_ringparam *kernel_ering,
5462 			struct netlink_ext_ack *extack)
5463 {
5464 	struct s2io_nic *sp = netdev_priv(dev);
5465 	int i, tx_desc_count = 0, rx_desc_count = 0;
5466 
5467 	if (sp->rxd_mode == RXD_MODE_1) {
5468 		ering->rx_max_pending = MAX_RX_DESC_1;
5469 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5470 	} else {
5471 		ering->rx_max_pending = MAX_RX_DESC_2;
5472 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5473 	}
5474 
5475 	ering->tx_max_pending = MAX_TX_DESC;
5476 
5477 	for (i = 0; i < sp->config.rx_ring_num; i++)
5478 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5479 	ering->rx_pending = rx_desc_count;
5480 	ering->rx_jumbo_pending = rx_desc_count;
5481 
5482 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5483 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5484 	ering->tx_pending = tx_desc_count;
5485 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5486 }
5487 
5488 /**
5489  * s2io_ethtool_getpause_data -Pause frame generation and reception.
5490  * @dev: pointer to netdev
5491  * @ep : pointer to the structure with pause parameters given by ethtool.
5492  * Description:
5493  * Returns the Pause frame generation and reception capability of the NIC.
5494  * Return value:
5495  *  void
5496  */
5497 static void s2io_ethtool_getpause_data(struct net_device *dev,
5498 				       struct ethtool_pauseparam *ep)
5499 {
5500 	u64 val64;
5501 	struct s2io_nic *sp = netdev_priv(dev);
5502 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5503 
5504 	val64 = readq(&bar0->rmac_pause_cfg);
5505 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5506 		ep->tx_pause = true;
5507 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5508 		ep->rx_pause = true;
5509 	ep->autoneg = false;
5510 }
5511 
5512 /**
5513  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5514  * @dev: pointer to netdev
5515  * @ep : pointer to the structure with pause parameters given by ethtool.
5516  * Description:
5517  * It can be used to set or reset Pause frame generation or reception
5518  * support of the NIC.
5519  * Return value:
5520  * int, returns 0 on Success
5521  */
5522 
5523 static int s2io_ethtool_setpause_data(struct net_device *dev,
5524 				      struct ethtool_pauseparam *ep)
5525 {
5526 	u64 val64;
5527 	struct s2io_nic *sp = netdev_priv(dev);
5528 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5529 
5530 	val64 = readq(&bar0->rmac_pause_cfg);
5531 	if (ep->tx_pause)
5532 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5533 	else
5534 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5535 	if (ep->rx_pause)
5536 		val64 |= RMAC_PAUSE_RX_ENABLE;
5537 	else
5538 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5539 	writeq(val64, &bar0->rmac_pause_cfg);
5540 	return 0;
5541 }
5542 
#define S2IO_DEV_ID		5
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 *	offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.  Xframe I reaches the EEPROM over I2C, Xframe II over SPI.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Start a 3-byte I2C read at the given EEPROM offset */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 x 50ms for the transaction to finish */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Program the SPI read command, then raise the request bit */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll up to 5 x 50ms for NACK or completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				/* only the low 3 bytes were requested */
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5610 
5611 /**
5612  *  write_eeprom - actually writes the relevant part of the data value.
5613  *  @sp : private member of the device structure, which is a pointer to the
5614  *       s2io_nic structure.
5615  *  @off : offset at which the data must be written
5616  *  @data : The data that is to be written
5617  *  @cnt : Number of bytes of the data that are actually to be written into
5618  *  the Eeprom. (max of 3)
5619  * Description:
5620  *  Actually writes the relevant part of the data value into the Eeprom
5621  *  through the I2C bus.
5622  * Return value:
5623  *  0 on success, -1 on failure.
5624  */
5625 
5626 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5627 {
5628 	int exit_cnt = 0, ret = -1;
5629 	u64 val64;
5630 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5631 
5632 	if (sp->device_type == XFRAME_I_DEVICE) {
5633 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5634 			I2C_CONTROL_ADDR(off) |
5635 			I2C_CONTROL_BYTE_CNT(cnt) |
5636 			I2C_CONTROL_SET_DATA((u32)data) |
5637 			I2C_CONTROL_CNTL_START;
5638 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5639 
5640 		while (exit_cnt < 5) {
5641 			val64 = readq(&bar0->i2c_control);
5642 			if (I2C_CONTROL_CNTL_END(val64)) {
5643 				if (!(val64 & I2C_CONTROL_NACK))
5644 					ret = 0;
5645 				break;
5646 			}
5647 			msleep(50);
5648 			exit_cnt++;
5649 		}
5650 	}
5651 
5652 	if (sp->device_type == XFRAME_II_DEVICE) {
5653 		int write_cnt = (cnt == 8) ? 0 : cnt;
5654 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5655 
5656 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5657 			SPI_CONTROL_BYTECNT(write_cnt) |
5658 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5659 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5660 		val64 |= SPI_CONTROL_REQ;
5661 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5662 		while (exit_cnt < 5) {
5663 			val64 = readq(&bar0->spi_control);
5664 			if (val64 & SPI_CONTROL_NACK) {
5665 				ret = 1;
5666 				break;
5667 			} else if (val64 & SPI_CONTROL_DONE) {
5668 				ret = 0;
5669 				break;
5670 			}
5671 			msleep(50);
5672 			exit_cnt++;
5673 		}
5674 	}
5675 	return ret;
5676 }
/* Read the adapter's 256-byte VPD image through PCI config space and
 * extract the product name and serial number into the nic structure.
 * Defaults are installed first so failure leaves usable strings.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* Default name and VPD capability offset per board type */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Fetch the VPD image 4 bytes per config-space transaction */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		/* Poll the flag byte (up to 5 x 2ms) for read completion */
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* scan for the "SN" keyword; length byte follows it */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] is the length of the product-name string at [3] */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5747 
5748 /**
5749  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5750  *  @dev: pointer to netdev
5751  *  @eeprom : pointer to the user level structure provided by ethtool,
5752  *  containing all relevant information.
5753  *  @data_buf : user defined value to be written into Eeprom.
5754  *  Description: Reads the values stored in the Eeprom at given offset
5755  *  for a given length. Stores these values int the input argument data
5756  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5757  *  Return value:
5758  *  int  0 on success
5759  */
5760 
5761 static int s2io_ethtool_geeprom(struct net_device *dev,
5762 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5763 {
5764 	u32 i, valid;
5765 	u64 data;
5766 	struct s2io_nic *sp = netdev_priv(dev);
5767 
5768 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5769 
5770 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5771 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5772 
5773 	for (i = 0; i < eeprom->len; i += 4) {
5774 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5775 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5776 			return -EFAULT;
5777 		}
5778 		valid = INV(data);
5779 		memcpy((data_buf + i), &valid, 4);
5780 	}
5781 	return 0;
5782 }
5783 
5784 /**
5785  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5786  *  @dev: pointer to netdev
5787  *  @eeprom : pointer to the user level structure provided by ethtool,
5788  *  containing all relevant information.
5789  *  @data_buf : user defined value to be written into Eeprom.
5790  *  Description:
5791  *  Tries to write the user provided value in the Eeprom, at the offset
5792  *  given by the user.
5793  *  Return value:
5794  *  0 on success, -EFAULT on failure.
5795  */
5796 
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The caller must echo back the magic that geeprom reported
	 * (vendor | device << 16) before any write is accepted. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write the buffer out one byte per write_eeprom() call. */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		/* A non-zero byte is positioned in bits 31:24 of the
		 * word handed to write_eeprom(); zero is written as-is.
		 * NOTE(review): presumably this matches the byte lane
		 * write_eeprom() expects for cnt=0 mode — confirm
		 * against write_eeprom()'s implementation. */
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5833 
5834 /**
5835  * s2io_register_test - reads and writes into all clock domains.
5836  * @sp : private member of the device structure, which is a pointer to the
5837  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5840  * Description:
5841  * Read and write into all clock domains. The NIC has 3 clock domains,
5842  * see that registers in all the three regions are accessible.
5843  * Return value:
5844  * 0 on success.
5845  */
5846 
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: each register below is compared against its fixed
	 * expected value; a mismatch means that region of the register
	 * space is not accessible. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* rx_queue_cfg resets differently on Xframe II vs Xframe I. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: write a pattern to xmsi_data and read it back.
	 * 0x5A.. and 0xA5.. together toggle every bit both ways. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	/* 0 = pass, 1 = fail, reported both via *data and the return. */
	*data = fail;
	return fail;
}
5900 
5901 /**
5902  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5903  * @sp : private member of the device structure, which is a pointer to the
5904  * s2io_nic structure.
5905  * @data:variable that returns the result of each of the test conducted by
5906  * the driver.
5907  * Description:
5908  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5909  * register.
5910  * Return value:
5911  * 0 on success.
5912  */
5913 
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	/* Negative test: on Xframe I a write to offset 0 must fail, so a
	 * successful write_eeprom() (returning 0) is itself a failure. */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Positive test: write a known pattern at 0x4F0, read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Negative test: write to protected offset 0x7C must fail. */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Positive test: same write/read-back pattern at 0x7F0. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Remaining negative tests: writes to these protected
		 * offsets must all be rejected on Xframe I. */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 = pass, 1 = fail, reported via *data and the return value. */
	*data = fail;
	return fail;
}
6001 
6002 /**
6003  * s2io_bist_test - invokes the MemBist test of the card .
6004  * @sp : private member of the device structure, which is a pointer to the
6005  * s2io_nic structure.
6006  * @data:variable that returns the result of each of the test conducted by
6007  * the driver.
6008  * Description:
6009  * This invokes the MemBist test of the card. We give around
6010  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6012  * Return value:
6013  * 0 on success and -1 on failure.
6014  */
6015 
6016 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6017 {
6018 	u8 bist = 0;
6019 	int cnt = 0, ret = -1;
6020 
6021 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6022 	bist |= PCI_BIST_START;
6023 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6024 
6025 	while (cnt < 20) {
6026 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6027 		if (!(bist & PCI_BIST_START)) {
6028 			*data = (bist & PCI_BIST_CODE_MASK);
6029 			ret = 0;
6030 			break;
6031 		}
6032 		msleep(100);
6033 		cnt++;
6034 	}
6035 
6036 	return ret;
6037 }
6038 
6039 /**
6040  * s2io_link_test - verifies the link state of the nic
6041  * @sp: private member of the device structure, which is a pointer to the
6042  * s2io_nic structure.
6043  * @data: variable that returns the result of each of the test conducted by
6044  * the driver.
6045  * Description:
6046  * The function verifies the link state of the NIC and updates the input
6047  * argument 'data' appropriately.
6048  * Return value:
6049  * 0 on success.
6050  */
6051 
6052 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6053 {
6054 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6055 	u64 val64;
6056 
6057 	val64 = readq(&bar0->adapter_status);
6058 	if (!(LINK_IS_UP(val64)))
6059 		*data = 1;
6060 	else
6061 		*data = 0;
6062 
6063 	return *data;
6064 }
6065 
6066 /**
6067  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6068  * @sp: private member of the device structure, which is a pointer to the
6069  * s2io_nic structure.
6070  * @data: variable that returns the result of each of the test
6071  * conducted by the driver.
6072  * Description:
6073  *  This is one of the offline test that tests the read and write
6074  *  access to the RldRam chip on the NIC.
6075  * Return value:
6076  *  0 on success.
6077  */
6078 
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while exercising the RLDRAM. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into its dedicated test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second uses the bit-inverted data patterns
	 * (upper 48 bits flipped) to toggle every cell both ways. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transaction. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Launch the write phase and poll (up to 5 x 200ms)
		 * for completion. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out: abort; test_fail stays at its last value. */
		if (cnt == 5)
			break;

		/* Launch the read-back phase and poll (up to 5 x 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* The controller compares what it read against what it
		 * wrote and reports the verdict in the PASS bit. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6161 
6162 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6164  *  @dev: pointer to netdev
6165  *  @ethtest : pointer to a ethtool command specific structure that will be
6166  *  returned to the user.
6167  *  @data : variable that returns the result of each of the test
6168  * conducted by the driver.
6169  * Description:
6170  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6171  *  the health of the card.
6172  * Return value:
6173  *  void
6174  */
6175 
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The card must be quiesced before the destructive
		 * register/RLDRAM/EEPROM/BIST tests can run; it is
		 * reopened afterwards if it was up. */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[0]: register read/write test */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		/* data[3]: RLDRAM access test */
		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		/* data[1]: EEPROM program/verify test */
		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* data[4]: PCI built-in self test */
		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* data[2] (link test) is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are overwritten
			 * below for every slot except data[2] — confirm
			 * whether an early return was intended here. */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		/* data[2]: link state test, the only online test. */
		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only slots report 0 when run online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6229 
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Fill tmp_stats[] positionally; the order here must match
	 * ethtool_xena_stats_keys (then the enhanced and driver key
	 * tables) exactly. Hardware counters that are split into a
	 * 32-bit value plus a 32-bit overflow register are recombined
	 * as (oflow << 32) | value. */
	s2io_updt_stats(sp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	/* Receive MAC counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-queue receive frame and queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* PCI transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver-maintained software statistics. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6506 
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	/* Size in bytes of the register dump returned by get_regs. */
	return XENA_REG_SPACE;
}
6511 
6512 
static int s2io_get_eeprom_len(struct net_device *dev)
{
	/* Size in bytes of the on-board EEPROM exposed via ethtool. */
	return XENA_EEPROM_SPACE;
}
6517 
6518 static int s2io_get_sset_count(struct net_device *dev, int sset)
6519 {
6520 	struct s2io_nic *sp = netdev_priv(dev);
6521 
6522 	switch (sset) {
6523 	case ETH_SS_TEST:
6524 		return S2IO_TEST_LEN;
6525 	case ETH_SS_STATS:
6526 		switch (sp->device_type) {
6527 		case XFRAME_I_DEVICE:
6528 			return XFRAME_I_STAT_LEN;
6529 		case XFRAME_II_DEVICE:
6530 			return XFRAME_II_STAT_LEN;
6531 		default:
6532 			return 0;
6533 		}
6534 	default:
6535 		return -EOPNOTSUPP;
6536 	}
6537 }
6538 
6539 static void s2io_ethtool_get_strings(struct net_device *dev,
6540 				     u32 stringset, u8 *data)
6541 {
6542 	int stat_size = 0;
6543 	struct s2io_nic *sp = netdev_priv(dev);
6544 
6545 	switch (stringset) {
6546 	case ETH_SS_TEST:
6547 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6548 		break;
6549 	case ETH_SS_STATS:
6550 		stat_size = sizeof(ethtool_xena_stats_keys);
6551 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6552 		if (sp->device_type == XFRAME_II_DEVICE) {
6553 			memcpy(data + stat_size,
6554 			       &ethtool_enhanced_stats_keys,
6555 			       sizeof(ethtool_enhanced_stats_keys));
6556 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6557 		}
6558 
6559 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6560 		       sizeof(ethtool_driver_stats_keys));
6561 	}
6562 }
6563 
static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
	struct s2io_nic *sp = netdev_priv(dev);
	/* Only a change of NETIF_F_LRO requires reconfiguration. */
	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;

	if (changed && netif_running(dev)) {
		int rc;

		/* LRO cannot be toggled on the fly: quiesce the card,
		 * apply the new feature set, and bring it back up. */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		dev->features = features;
		rc = s2io_card_up(sp);
		if (rc)
			s2io_reset(sp);
		else
			s2io_start_all_tx_queue(sp);

		/* Return 1 (not 0) on success so the core knows the
		 * features were applied here rather than by it. */
		return rc ? rc : 1;
	}

	return 0;
}
6586 
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6606 
6607 /**
6608  *  s2io_ioctl - Entry point for the Ioctl
6609  *  @dev :  Device pointer.
 *  @rq :  An IOCTL specific structure, that can contain a pointer to
6611  *  a proprietary structure used to pass information to the driver.
6612  *  @cmd :  This is used to distinguish between the different commands that
6613  *  can be passed to the IOCTL functions.
6614  *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns -EOPNOTSUPP.
6617  */
6618 
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented by this driver. */
	return -EOPNOTSUPP;
}
6623 
6624 /**
6625  *  s2io_change_mtu - entry point to change MTU size for the device.
6626  *   @dev : device pointer.
6627  *   @new_mtu : the new MTU size for the device.
6628  *   Description: A driver entry point to change MTU size for the device.
6629  *   Before changing the MTU the device must be stopped.
6630  *  Return value:
6631  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6632  *   file on failure.
6633  */
6634 
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* A running card must be brought down and back up so
		 * the new MTU is programmed during initialization. */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			/* The interface stays down; dev->mtu already
			 * reflects the requested value. */
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the max payload length register directly. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6660 
6661 /**
6662  * s2io_set_link - Set the LInk status
6663  * @work: work struct containing a pointer to device private structure
6664  * Description: Sets the link status for the adapter
6665  */
6666 
6667 static void s2io_set_link(struct work_struct *work)
6668 {
6669 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6670 					    set_link_task);
6671 	struct net_device *dev = nic->dev;
6672 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6673 	register u64 val64;
6674 	u16 subid;
6675 
6676 	rtnl_lock();
6677 
6678 	if (!netif_running(dev))
6679 		goto out_unlock;
6680 
6681 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6682 		/* The card is being reset, no point doing anything */
6683 		goto out_unlock;
6684 	}
6685 
6686 	subid = nic->pdev->subsystem_device;
6687 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6688 		/*
6689 		 * Allow a small delay for the NICs self initiated
6690 		 * cleanup to complete.
6691 		 */
6692 		msleep(100);
6693 	}
6694 
6695 	val64 = readq(&bar0->adapter_status);
6696 	if (LINK_IS_UP(val64)) {
6697 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6698 			if (verify_xena_quiescence(nic)) {
6699 				val64 = readq(&bar0->adapter_control);
6700 				val64 |= ADAPTER_CNTL_EN;
6701 				writeq(val64, &bar0->adapter_control);
6702 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6703 					    nic->device_type, subid)) {
6704 					val64 = readq(&bar0->gpio_control);
6705 					val64 |= GPIO_CTRL_GPIO_0;
6706 					writeq(val64, &bar0->gpio_control);
6707 					val64 = readq(&bar0->gpio_control);
6708 				} else {
6709 					val64 |= ADAPTER_LED_ON;
6710 					writeq(val64, &bar0->adapter_control);
6711 				}
6712 				nic->device_enabled_once = true;
6713 			} else {
6714 				DBG_PRINT(ERR_DBG,
6715 					  "%s: Error: device is not Quiescent\n",
6716 					  dev->name);
6717 				s2io_stop_all_tx_queue(nic);
6718 			}
6719 		}
6720 		val64 = readq(&bar0->adapter_control);
6721 		val64 |= ADAPTER_LED_ON;
6722 		writeq(val64, &bar0->adapter_control);
6723 		s2io_link(nic, LINK_UP);
6724 	} else {
6725 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6726 						      subid)) {
6727 			val64 = readq(&bar0->gpio_control);
6728 			val64 &= ~GPIO_CTRL_GPIO_0;
6729 			writeq(val64, &bar0->gpio_control);
6730 			val64 = readq(&bar0->gpio_control);
6731 		}
6732 		/* turn off LED */
6733 		val64 = readq(&bar0->adapter_control);
6734 		val64 = val64 & (~ADAPTER_LED_ON);
6735 		writeq(val64, &bar0->adapter_control);
6736 		s2io_link(nic, LINK_DOWN);
6737 	}
6738 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6739 
6740 out_unlock:
6741 	rtnl_unlock();
6742 }
6743 
/*
 * set_rxd_buffer_pointer - replenish the buffer(s) behind one Rx descriptor.
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()) to hand a
 * mapped buffer back to the hardware.  *skb and *temp0/1/2 cache the
 * last allocated skb and its DMA addresses so that descriptors whose
 * Host_Control is already set reuse the same mapping instead of
 * allocating a new one.
 *
 * Returns 0 on success, -ENOMEM on skb allocation or DMA mapping failure
 * (already-created mappings are unwound before returning).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously created mappings. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping created above. */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* Common DMA-mapping failure path: account and free the skb. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6844 
6845 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6846 				int size)
6847 {
6848 	struct net_device *dev = sp->dev;
6849 	if (sp->rxd_mode == RXD_MODE_1) {
6850 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6851 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6852 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6853 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6854 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6855 	}
6856 }
6857 
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 *
 * For each RxD in every ring, re-attaches (or reuses) a mapped buffer,
 * programs the buffer sizes for the current ring mode and flips the
 * ownership bit to the NIC.  Used while quiescing the card so the HW
 * always has buffers and the ring does not bump.
 *
 * NOTE(review): always returns 0 - an skb allocation failure inside
 * set_rxd_buffer_pointer() just stops the replenish early and the
 * -ENOMEM is swallowed.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* ensure buffer writes are visible before
				 * ownership is transferred */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6906 
/*
 * s2io_add_isr - register the adapter's interrupt handler(s).
 *
 * When MSI-X is configured, enables it (falling back to INTA on
 * failure), stores the MSI-X table, and registers one handler per
 * vector (ring vectors -> s2io_msix_ring_handle, alarm vectors ->
 * s2io_msix_fifo_handle).  Any registration failure tears down the
 * MSI-X ISRs and falls back to a single shared INTA handler.
 *
 * Returns 0 on success, -1 if INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo all vectors registered so far
					 * and fall back to INTA. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* --msix_rx_cnt excludes the alarm vector from the
			 * RX count printed below. */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7002 
7003 static void s2io_rem_isr(struct s2io_nic *sp)
7004 {
7005 	if (sp->config.intr_type == MSI_X)
7006 		remove_msix_isr(sp);
7007 	else
7008 		remove_inta_isr(sp);
7009 }
7010 
7011 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7012 {
7013 	int cnt = 0;
7014 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7015 	register u64 val64 = 0;
7016 	struct config_param *config;
7017 	config = &sp->config;
7018 
7019 	if (!is_s2io_card_up(sp))
7020 		return;
7021 
7022 	del_timer_sync(&sp->alarm_timer);
7023 	/* If s2io_set_link task is executing, wait till it completes. */
7024 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7025 		msleep(50);
7026 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7027 
7028 	/* Disable napi */
7029 	if (sp->config.napi) {
7030 		int off = 0;
7031 		if (config->intr_type ==  MSI_X) {
7032 			for (; off < sp->config.rx_ring_num; off++)
7033 				napi_disable(&sp->mac_control.rings[off].napi);
7034 		}
7035 		else
7036 			napi_disable(&sp->napi);
7037 	}
7038 
7039 	/* disable Tx and Rx traffic on the NIC */
7040 	if (do_io)
7041 		stop_nic(sp);
7042 
7043 	s2io_rem_isr(sp);
7044 
7045 	/* stop the tx queue, indicate link down */
7046 	s2io_link(sp, LINK_DOWN);
7047 
7048 	/* Check if the device is Quiescent and then Reset the NIC */
7049 	while (do_io) {
7050 		/* As per the HW requirement we need to replenish the
7051 		 * receive buffer to avoid the ring bump. Since there is
7052 		 * no intention of processing the Rx frame at this pointwe are
7053 		 * just setting the ownership bit of rxd in Each Rx
7054 		 * ring to HW and set the appropriate buffer size
7055 		 * based on the ring mode
7056 		 */
7057 		rxd_owner_bit_reset(sp);
7058 
7059 		val64 = readq(&bar0->adapter_status);
7060 		if (verify_xena_quiescence(sp)) {
7061 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7062 				break;
7063 		}
7064 
7065 		msleep(50);
7066 		cnt++;
7067 		if (cnt == 10) {
7068 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7069 				  "adapter status reads 0x%llx\n",
7070 				  (unsigned long long)val64);
7071 			break;
7072 		}
7073 	}
7074 	if (do_io)
7075 		s2io_reset(sp);
7076 
7077 	/* Free all Tx buffers */
7078 	free_tx_buffers(sp);
7079 
7080 	/* Free all Rx buffers */
7081 	free_rx_buffers(sp);
7082 
7083 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7084 }
7085 
/* Bring the card down including hardware I/O (stop traffic, reset). */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7090 
/*
 * s2io_card_up - bring the adapter to an operational state.
 *
 * Initializes the hardware registers, fills the Rx rings, enables NAPI,
 * restores the receive mode, configures LRO aggregation limits, starts
 * the NIC, registers the ISR(s), arms the alarm timer and enables the
 * selected interrupts.
 *
 * Returns 0 on success or a negative errno on failure; the error paths
 * unwind NAPI, reset the card and free any Rx buffers already filled.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			ret = -ENOMEM;
			goto err_fill_buff;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per Rx ring under MSI-X */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev, true);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		ret = -ENODEV;
		goto err_out;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		ret = -ENODEV;
		goto err_out;
	}

	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;

err_out:
	/* Unwind NAPI enabling done above. */
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_disable(&sp->mac_control.rings[i].napi);
		} else {
			napi_disable(&sp->napi);
		}
	}
err_fill_buff:
	s2io_reset(sp);
	free_rx_buffers(sp);
	return ret;
}
7208 
7209 /**
7210  * s2io_restart_nic - Resets the NIC.
7211  * @work : work struct containing a pointer to the device private structure
7212  * Description:
7213  * This function is scheduled to be run by the s2io_tx_watchdog
7214  * function after 0.5 secs to reset the NIC. The idea is to reduce
7215  * the run time of the watch dog routine which is run holding a
7216  * spin lock.
7217  */
7218 
7219 static void s2io_restart_nic(struct work_struct *work)
7220 {
7221 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7222 	struct net_device *dev = sp->dev;
7223 
7224 	rtnl_lock();
7225 
7226 	if (!netif_running(dev))
7227 		goto out_unlock;
7228 
7229 	s2io_card_down(sp);
7230 	if (s2io_card_up(sp)) {
7231 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7232 	}
7233 	s2io_wake_all_tx_queue(sp);
7234 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7235 out_unlock:
7236 	rtnl_unlock();
7237 }
7238 
7239 /**
7240  *  s2io_tx_watchdog - Watchdog for transmit side.
7241  *  @dev : Pointer to net device structure
7242  *  @txqueue: index of the hanging queue
7243  *  Description:
7244  *  This function is triggered if the Tx Queue is stopped
7245  *  for a pre-defined amount of time when the Interface is still up.
7246  *  If the Interface is jammed in such a situation, the hardware is
7247  *  reset (by s2io_close) and restarted again (by s2io_open) to
7248  *  overcome any problem that might have been caused in the hardware.
7249  *  Return value:
7250  *  void
7251  */
7252 
7253 static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7254 {
7255 	struct s2io_nic *sp = netdev_priv(dev);
7256 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7257 
7258 	if (netif_carrier_ok(dev)) {
7259 		swstats->watchdog_timer_cnt++;
7260 		schedule_work(&sp->rst_timer_task);
7261 		swstats->soft_reset_cnt++;
7262 	}
7263 }
7264 
7265 /**
7266  *   rx_osm_handler - To perform some OS related operations on SKB.
7267  *   @ring_data : the ring from which this RxD was extracted.
7268  *   @rxdp: descriptor
7269  *   Description:
7270  *   This function is called by the Rx interrupt serivce routine to perform
7271  *   some OS related operations on the SKB before passing it to the upper
7272  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7273  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7274  *   to the upper layer. If the checksum is wrong, it increments the Rx
7275  *   packet error count, frees the SKB and returns error.
7276  *   Return value:
7277  *   SUCCESS on success and -1 on failure.
7278  */
7279 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7280 {
7281 	struct s2io_nic *sp = ring_data->nic;
7282 	struct net_device *dev = ring_data->dev;
7283 	struct sk_buff *skb = (struct sk_buff *)
7284 		((unsigned long)rxdp->Host_Control);
7285 	int ring_no = ring_data->ring_no;
7286 	u16 l3_csum, l4_csum;
7287 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7288 	struct lro *lro;
7289 	u8 err_mask;
7290 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7291 
7292 	skb->dev = dev;
7293 
7294 	if (err) {
7295 		/* Check for parity error */
7296 		if (err & 0x1)
7297 			swstats->parity_err_cnt++;
7298 
7299 		err_mask = err >> 48;
7300 		switch (err_mask) {
7301 		case 1:
7302 			swstats->rx_parity_err_cnt++;
7303 			break;
7304 
7305 		case 2:
7306 			swstats->rx_abort_cnt++;
7307 			break;
7308 
7309 		case 3:
7310 			swstats->rx_parity_abort_cnt++;
7311 			break;
7312 
7313 		case 4:
7314 			swstats->rx_rda_fail_cnt++;
7315 			break;
7316 
7317 		case 5:
7318 			swstats->rx_unkn_prot_cnt++;
7319 			break;
7320 
7321 		case 6:
7322 			swstats->rx_fcs_err_cnt++;
7323 			break;
7324 
7325 		case 7:
7326 			swstats->rx_buf_size_err_cnt++;
7327 			break;
7328 
7329 		case 8:
7330 			swstats->rx_rxd_corrupt_cnt++;
7331 			break;
7332 
7333 		case 15:
7334 			swstats->rx_unkn_err_cnt++;
7335 			break;
7336 		}
7337 		/*
7338 		 * Drop the packet if bad transfer code. Exception being
7339 		 * 0x5, which could be due to unsupported IPv6 extension header.
7340 		 * In this case, we let stack handle the packet.
7341 		 * Note that in this case, since checksum will be incorrect,
7342 		 * stack will validate the same.
7343 		 */
7344 		if (err_mask != 0x5) {
7345 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7346 				  dev->name, err_mask);
7347 			dev->stats.rx_crc_errors++;
7348 			swstats->mem_freed
7349 				+= skb->truesize;
7350 			dev_kfree_skb(skb);
7351 			ring_data->rx_bufs_left -= 1;
7352 			rxdp->Host_Control = 0;
7353 			return 0;
7354 		}
7355 	}
7356 
7357 	rxdp->Host_Control = 0;
7358 	if (sp->rxd_mode == RXD_MODE_1) {
7359 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7360 
7361 		skb_put(skb, len);
7362 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7363 		int get_block = ring_data->rx_curr_get_info.block_index;
7364 		int get_off = ring_data->rx_curr_get_info.offset;
7365 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7366 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7367 
7368 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7369 		skb_put_data(skb, ba->ba_0, buf0_len);
7370 		skb_put(skb, buf2_len);
7371 	}
7372 
7373 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7374 	    ((!ring_data->lro) ||
7375 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7376 	    (dev->features & NETIF_F_RXCSUM)) {
7377 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7378 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7379 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7380 			/*
7381 			 * NIC verifies if the Checksum of the received
7382 			 * frame is Ok or not and accordingly returns
7383 			 * a flag in the RxD.
7384 			 */
7385 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7386 			if (ring_data->lro) {
7387 				u32 tcp_len = 0;
7388 				u8 *tcp;
7389 				int ret = 0;
7390 
7391 				ret = s2io_club_tcp_session(ring_data,
7392 							    skb->data, &tcp,
7393 							    &tcp_len, &lro,
7394 							    rxdp, sp);
7395 				switch (ret) {
7396 				case 3: /* Begin anew */
7397 					lro->parent = skb;
7398 					goto aggregate;
7399 				case 1: /* Aggregate */
7400 					lro_append_pkt(sp, lro, skb, tcp_len);
7401 					goto aggregate;
7402 				case 4: /* Flush session */
7403 					lro_append_pkt(sp, lro, skb, tcp_len);
7404 					queue_rx_frame(lro->parent,
7405 						       lro->vlan_tag);
7406 					clear_lro_session(lro);
7407 					swstats->flush_max_pkts++;
7408 					goto aggregate;
7409 				case 2: /* Flush both */
7410 					lro->parent->data_len = lro->frags_len;
7411 					swstats->sending_both++;
7412 					queue_rx_frame(lro->parent,
7413 						       lro->vlan_tag);
7414 					clear_lro_session(lro);
7415 					goto send_up;
7416 				case 0: /* sessions exceeded */
7417 				case -1: /* non-TCP or not L2 aggregatable */
7418 				case 5: /*
7419 					 * First pkt in session not
7420 					 * L3/L4 aggregatable
7421 					 */
7422 					break;
7423 				default:
7424 					DBG_PRINT(ERR_DBG,
7425 						  "%s: Samadhana!!\n",
7426 						  __func__);
7427 					BUG();
7428 				}
7429 			}
7430 		} else {
7431 			/*
7432 			 * Packet with erroneous checksum, let the
7433 			 * upper layers deal with it.
7434 			 */
7435 			skb_checksum_none_assert(skb);
7436 		}
7437 	} else
7438 		skb_checksum_none_assert(skb);
7439 
7440 	swstats->mem_freed += skb->truesize;
7441 send_up:
7442 	skb_record_rx_queue(skb, ring_no);
7443 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7444 aggregate:
7445 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7446 	return SUCCESS;
7447 }
7448 
7449 /**
7450  *  s2io_link - stops/starts the Tx queue.
7451  *  @sp : private member of the device structure, which is a pointer to the
7452  *  s2io_nic structure.
7453  *  @link : inidicates whether link is UP/DOWN.
7454  *  Description:
7455  *  This function stops/starts the Tx queue depending on whether the link
7456  *  status of the NIC is down or up. This is called by the Alarm
7457  *  interrupt handler whenever a link change interrupt comes up.
7458  *  Return value:
7459  *  void.
7460  */
7461 
7462 static void s2io_link(struct s2io_nic *sp, int link)
7463 {
7464 	struct net_device *dev = sp->dev;
7465 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7466 
7467 	if (link != sp->last_link_state) {
7468 		init_tti(sp, link, false);
7469 		if (link == LINK_DOWN) {
7470 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7471 			s2io_stop_all_tx_queue(sp);
7472 			netif_carrier_off(dev);
7473 			if (swstats->link_up_cnt)
7474 				swstats->link_up_time =
7475 					jiffies - sp->start_time;
7476 			swstats->link_down_cnt++;
7477 		} else {
7478 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7479 			if (swstats->link_down_cnt)
7480 				swstats->link_down_time =
7481 					jiffies - sp->start_time;
7482 			swstats->link_up_cnt++;
7483 			netif_carrier_on(dev);
7484 			s2io_wake_all_tx_queue(sp);
7485 		}
7486 	}
7487 	sp->last_link_state = link;
7488 	sp->start_time = jiffies;
7489 }
7490 
7491 /**
7492  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7493  *  @sp : private member of the device structure, which is a pointer to the
7494  *  s2io_nic structure.
7495  *  Description:
7496  *  This function initializes a few of the PCI and PCI-X configuration registers
7497  *  with recommended values.
7498  *  Return value:
7499  *  void
7500  */
7501 
7502 static void s2io_init_pci(struct s2io_nic *sp)
7503 {
7504 	u16 pci_cmd = 0, pcix_cmd = 0;
7505 
7506 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7507 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7508 			     &(pcix_cmd));
7509 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7510 			      (pcix_cmd | 1));
7511 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7512 			     &(pcix_cmd));
7513 
7514 	/* Set the PErr Response bit in PCI command register. */
7515 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7516 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7517 			      (pci_cmd | PCI_COMMAND_PARITY));
7518 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7519 }
7520 
7521 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7522 			    u8 *dev_multiq)
7523 {
7524 	int i;
7525 
7526 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7527 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7528 			  "(%d) not supported\n", tx_fifo_num);
7529 
7530 		if (tx_fifo_num < 1)
7531 			tx_fifo_num = 1;
7532 		else
7533 			tx_fifo_num = MAX_TX_FIFOS;
7534 
7535 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7536 	}
7537 
7538 	if (multiq)
7539 		*dev_multiq = multiq;
7540 
7541 	if (tx_steering_type && (1 == tx_fifo_num)) {
7542 		if (tx_steering_type != TX_DEFAULT_STEERING)
7543 			DBG_PRINT(ERR_DBG,
7544 				  "Tx steering is not supported with "
7545 				  "one fifo. Disabling Tx steering.\n");
7546 		tx_steering_type = NO_STEERING;
7547 	}
7548 
7549 	if ((tx_steering_type < NO_STEERING) ||
7550 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7551 		DBG_PRINT(ERR_DBG,
7552 			  "Requested transmit steering not supported\n");
7553 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7554 		tx_steering_type = NO_STEERING;
7555 	}
7556 
7557 	if (rx_ring_num > MAX_RX_RINGS) {
7558 		DBG_PRINT(ERR_DBG,
7559 			  "Requested number of rx rings not supported\n");
7560 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7561 			  MAX_RX_RINGS);
7562 		rx_ring_num = MAX_RX_RINGS;
7563 	}
7564 
7565 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7566 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7567 			  "Defaulting to INTA\n");
7568 		*dev_intr_type = INTA;
7569 	}
7570 
7571 	if ((*dev_intr_type == MSI_X) &&
7572 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7573 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7574 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7575 			  "Defaulting to INTA\n");
7576 		*dev_intr_type = INTA;
7577 	}
7578 
7579 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7580 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7581 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7582 		rx_ring_mode = 1;
7583 	}
7584 
7585 	for (i = 0; i < MAX_RX_RINGS; i++)
7586 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7587 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7588 				  "supported\nDefaulting to %d\n",
7589 				  MAX_RX_BLOCKS_PER_RING);
7590 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7591 		}
7592 
7593 	return SUCCESS;
7594 }
7595 
7596 /**
7597  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7598  * @nic: device private variable
7599  * @ds_codepoint: data
7600  * @ring: ring index
7601  * Description: The function configures the receive steering to
7602  * desired receive ring.
7603  * Return Value:  SUCCESS on success and
7604  * '-1' on failure (endian settings incorrect).
7605  */
7606 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7607 {
7608 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7609 	register u64 val64 = 0;
7610 
7611 	if (ds_codepoint > 63)
7612 		return FAILURE;
7613 
7614 	val64 = RTS_DS_MEM_DATA(ring);
7615 	writeq(val64, &bar0->rts_ds_mem_data);
7616 
7617 	val64 = RTS_DS_MEM_CTRL_WE |
7618 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7619 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7620 
7621 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7622 
7623 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7624 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7625 				     S2IO_BIT_RESET, true);
7626 }
7627 
/* Entry points through which the network core drives this adapter. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open	        = s2io_open,
	.ndo_stop	        = s2io_close,
	.ndo_get_stats	        = s2io_get_stats,
	.ndo_start_xmit    	= s2io_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= s2io_ndo_set_multicast,
	.ndo_eth_ioctl		= s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu	   	= s2io_change_mtu,
	.ndo_set_features	= s2io_set_features,
	.ndo_tx_timeout	   	= s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
7644 
7645 /**
7646  *  s2io_init_nic - Initialization of the adapter .
7647  *  @pdev : structure containing the PCI related information of the device.
7648  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7649  *  Description:
7650  *  The function initializes an adapter identified by the pci_dec structure.
7651  *  All OS related initialization including memory and device structure and
7652  *  initlaization of the device private variable is done. Also the swapper
7653  *  control register is initialized to enable read and write into the I/O
7654  *  registers of the device.
7655  *  Return value:
7656  *  returns 0 on success and negative on failure.
7657  */
7658 
7659 static int
7660 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7661 {
7662 	struct s2io_nic *sp;
7663 	struct net_device *dev;
7664 	int i, j, ret;
7665 	u32 mac_up, mac_down;
7666 	u64 val64 = 0, tmp64 = 0;
7667 	struct XENA_dev_config __iomem *bar0 = NULL;
7668 	u16 subid;
7669 	struct config_param *config;
7670 	struct mac_info *mac_control;
7671 	int mode;
7672 	u8 dev_intr_type = intr_type;
7673 	u8 dev_multiq = 0;
7674 
7675 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7676 	if (ret)
7677 		return ret;
7678 
7679 	ret = pci_enable_device(pdev);
7680 	if (ret) {
7681 		DBG_PRINT(ERR_DBG,
7682 			  "%s: pci_enable_device failed\n", __func__);
7683 		return ret;
7684 	}
7685 
7686 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
7687 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7688 	} else {
7689 		pci_disable_device(pdev);
7690 		return -ENOMEM;
7691 	}
7692 	ret = pci_request_regions(pdev, s2io_driver_name);
7693 	if (ret) {
7694 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7695 			  __func__, ret);
7696 		pci_disable_device(pdev);
7697 		return -ENODEV;
7698 	}
7699 	if (dev_multiq)
7700 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7701 	else
7702 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7703 	if (dev == NULL) {
7704 		pci_disable_device(pdev);
7705 		pci_release_regions(pdev);
7706 		return -ENODEV;
7707 	}
7708 
7709 	pci_set_master(pdev);
7710 	pci_set_drvdata(pdev, dev);
7711 	SET_NETDEV_DEV(dev, &pdev->dev);
7712 
7713 	/*  Private member variable initialized to s2io NIC structure */
7714 	sp = netdev_priv(dev);
7715 	sp->dev = dev;
7716 	sp->pdev = pdev;
7717 	sp->device_enabled_once = false;
7718 	if (rx_ring_mode == 1)
7719 		sp->rxd_mode = RXD_MODE_1;
7720 	if (rx_ring_mode == 2)
7721 		sp->rxd_mode = RXD_MODE_3B;
7722 
7723 	sp->config.intr_type = dev_intr_type;
7724 
7725 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7726 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7727 		sp->device_type = XFRAME_II_DEVICE;
7728 	else
7729 		sp->device_type = XFRAME_I_DEVICE;
7730 
7731 
7732 	/* Initialize some PCI/PCI-X fields of the NIC. */
7733 	s2io_init_pci(sp);
7734 
7735 	/*
7736 	 * Setting the device configuration parameters.
7737 	 * Most of these parameters can be specified by the user during
7738 	 * module insertion as they are module loadable parameters. If
7739 	 * these parameters are not specified during load time, they
7740 	 * are initialized with default values.
7741 	 */
7742 	config = &sp->config;
7743 	mac_control = &sp->mac_control;
7744 
7745 	config->napi = napi;
7746 	config->tx_steering_type = tx_steering_type;
7747 
7748 	/* Tx side parameters. */
7749 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7750 		config->tx_fifo_num = MAX_TX_FIFOS;
7751 	else
7752 		config->tx_fifo_num = tx_fifo_num;
7753 
7754 	/* Initialize the fifos used for tx steering */
7755 	if (config->tx_fifo_num < 5) {
7756 		if (config->tx_fifo_num  == 1)
7757 			sp->total_tcp_fifos = 1;
7758 		else
7759 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7760 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7761 		sp->total_udp_fifos = 1;
7762 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7763 	} else {
7764 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7765 				       FIFO_OTHER_MAX_NUM);
7766 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7767 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7768 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7769 	}
7770 
7771 	config->multiq = dev_multiq;
7772 	for (i = 0; i < config->tx_fifo_num; i++) {
7773 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7774 
7775 		tx_cfg->fifo_len = tx_fifo_len[i];
7776 		tx_cfg->fifo_priority = i;
7777 	}
7778 
7779 	/* mapping the QoS priority to the configured fifos */
7780 	for (i = 0; i < MAX_TX_FIFOS; i++)
7781 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7782 
7783 	/* map the hashing selector table to the configured fifos */
7784 	for (i = 0; i < config->tx_fifo_num; i++)
7785 		sp->fifo_selector[i] = fifo_selector[i];
7786 
7787 
7788 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7789 	for (i = 0; i < config->tx_fifo_num; i++) {
7790 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7791 
7792 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7793 		if (tx_cfg->fifo_len < 65) {
7794 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7795 			break;
7796 		}
7797 	}
7798 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7799 	config->max_txds = MAX_SKB_FRAGS + 2;
7800 
7801 	/* Rx side parameters. */
7802 	config->rx_ring_num = rx_ring_num;
7803 	for (i = 0; i < config->rx_ring_num; i++) {
7804 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7805 		struct ring_info *ring = &mac_control->rings[i];
7806 
7807 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7808 		rx_cfg->ring_priority = i;
7809 		ring->rx_bufs_left = 0;
7810 		ring->rxd_mode = sp->rxd_mode;
7811 		ring->rxd_count = rxd_count[sp->rxd_mode];
7812 		ring->pdev = sp->pdev;
7813 		ring->dev = sp->dev;
7814 	}
7815 
7816 	for (i = 0; i < rx_ring_num; i++) {
7817 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7818 
7819 		rx_cfg->ring_org = RING_ORG_BUFF1;
7820 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7821 	}
7822 
7823 	/*  Setting Mac Control parameters */
7824 	mac_control->rmac_pause_time = rmac_pause_time;
7825 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7826 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7827 
7828 
7829 	/*  initialize the shared memory used by the NIC and the host */
7830 	if (init_shared_mem(sp)) {
7831 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7832 		ret = -ENOMEM;
7833 		goto mem_alloc_failed;
7834 	}
7835 
7836 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7837 	if (!sp->bar0) {
7838 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7839 			  dev->name);
7840 		ret = -ENOMEM;
7841 		goto bar0_remap_failed;
7842 	}
7843 
7844 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7845 	if (!sp->bar1) {
7846 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7847 			  dev->name);
7848 		ret = -ENOMEM;
7849 		goto bar1_remap_failed;
7850 	}
7851 
7852 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7853 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7854 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7855 	}
7856 
7857 	/*  Driver entry points */
7858 	dev->netdev_ops = &s2io_netdev_ops;
7859 	dev->ethtool_ops = &netdev_ethtool_ops;
7860 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7861 		NETIF_F_TSO | NETIF_F_TSO6 |
7862 		NETIF_F_RXCSUM | NETIF_F_LRO;
7863 	dev->features |= dev->hw_features |
7864 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
7865 		NETIF_F_HIGHDMA;
7866 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7867 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7868 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7869 
7870 	pci_save_state(sp->pdev);
7871 
7872 	/* Setting swapper control on the NIC, for proper reset operation */
7873 	if (s2io_set_swapper(sp)) {
7874 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7875 			  dev->name);
7876 		ret = -EAGAIN;
7877 		goto set_swap_failed;
7878 	}
7879 
7880 	/* Verify if the Herc works on the slot its placed into */
7881 	if (sp->device_type & XFRAME_II_DEVICE) {
7882 		mode = s2io_verify_pci_mode(sp);
7883 		if (mode < 0) {
7884 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7885 				  __func__);
7886 			ret = -EBADSLT;
7887 			goto set_swap_failed;
7888 		}
7889 	}
7890 
7891 	if (sp->config.intr_type == MSI_X) {
7892 		sp->num_entries = config->rx_ring_num + 1;
7893 		ret = s2io_enable_msi_x(sp);
7894 
7895 		if (!ret) {
7896 			ret = s2io_test_msi(sp);
7897 			/* rollback MSI-X, will re-enable during add_isr() */
7898 			remove_msix_isr(sp);
7899 		}
7900 		if (ret) {
7901 
7902 			DBG_PRINT(ERR_DBG,
7903 				  "MSI-X requested but failed to enable\n");
7904 			sp->config.intr_type = INTA;
7905 		}
7906 	}
7907 
7908 	if (config->intr_type ==  MSI_X) {
7909 		for (i = 0; i < config->rx_ring_num ; i++) {
7910 			struct ring_info *ring = &mac_control->rings[i];
7911 
7912 			netif_napi_add(dev, &ring->napi, s2io_poll_msix);
7913 		}
7914 	} else {
7915 		netif_napi_add(dev, &sp->napi, s2io_poll_inta);
7916 	}
7917 
7918 	/* Not needed for Herc */
7919 	if (sp->device_type & XFRAME_I_DEVICE) {
7920 		/*
7921 		 * Fix for all "FFs" MAC address problems observed on
7922 		 * Alpha platforms
7923 		 */
7924 		fix_mac_address(sp);
7925 		s2io_reset(sp);
7926 	}
7927 
7928 	/*
7929 	 * MAC address initialization.
7930 	 * For now only one mac address will be read and used.
7931 	 */
7932 	bar0 = sp->bar0;
7933 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7934 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7935 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7936 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7937 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7938 			      S2IO_BIT_RESET, true);
7939 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7940 	mac_down = (u32)tmp64;
7941 	mac_up = (u32) (tmp64 >> 32);
7942 
7943 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7944 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7945 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7946 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7947 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7948 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7949 
7950 	/*  Set the factory defined MAC address initially   */
7951 	dev->addr_len = ETH_ALEN;
7952 	eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
7953 
7954 	/* initialize number of multicast & unicast MAC entries variables */
7955 	if (sp->device_type == XFRAME_I_DEVICE) {
7956 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7957 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7958 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7959 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7960 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7961 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7962 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7963 	}
7964 
7965 	/* MTU range: 46 - 9600 */
7966 	dev->min_mtu = MIN_MTU;
7967 	dev->max_mtu = S2IO_JUMBO_SIZE;
7968 
7969 	/* store mac addresses from CAM to s2io_nic structure */
7970 	do_s2io_store_unicast_mc(sp);
7971 
7972 	/* Configure MSIX vector for number of rings configured plus one */
7973 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7974 	    (config->intr_type == MSI_X))
7975 		sp->num_entries = config->rx_ring_num + 1;
7976 
7977 	/* Store the values of the MSIX table in the s2io_nic structure */
7978 	store_xmsi_data(sp);
7979 	/* reset Nic and bring it to known state */
7980 	s2io_reset(sp);
7981 
7982 	/*
7983 	 * Initialize link state flags
7984 	 * and the card state parameter
7985 	 */
7986 	sp->state = 0;
7987 
7988 	/* Initialize spinlocks */
7989 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7990 		struct fifo_info *fifo = &mac_control->fifos[i];
7991 
7992 		spin_lock_init(&fifo->tx_lock);
7993 	}
7994 
7995 	/*
7996 	 * SXE-002: Configure link and activity LED to init state
7997 	 * on driver load.
7998 	 */
7999 	subid = sp->pdev->subsystem_device;
8000 	if ((subid & 0xFF) >= 0x07) {
8001 		val64 = readq(&bar0->gpio_control);
8002 		val64 |= 0x0000800000000000ULL;
8003 		writeq(val64, &bar0->gpio_control);
8004 		val64 = 0x0411040400000000ULL;
8005 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8006 		val64 = readq(&bar0->gpio_control);
8007 	}
8008 
8009 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8010 
8011 	if (register_netdev(dev)) {
8012 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8013 		ret = -ENODEV;
8014 		goto register_failed;
8015 	}
8016 	s2io_vpd_read(sp);
8017 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8018 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8019 		  sp->product_name, pdev->revision);
8020 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8021 		  s2io_driver_version);
8022 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8023 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8024 	if (sp->device_type & XFRAME_II_DEVICE) {
8025 		mode = s2io_print_pci_mode(sp);
8026 		if (mode < 0) {
8027 			ret = -EBADSLT;
8028 			unregister_netdev(dev);
8029 			goto set_swap_failed;
8030 		}
8031 	}
8032 	switch (sp->rxd_mode) {
8033 	case RXD_MODE_1:
8034 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8035 			  dev->name);
8036 		break;
8037 	case RXD_MODE_3B:
8038 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8039 			  dev->name);
8040 		break;
8041 	}
8042 
8043 	switch (sp->config.napi) {
8044 	case 0:
8045 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8046 		break;
8047 	case 1:
8048 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8049 		break;
8050 	}
8051 
8052 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8053 		  sp->config.tx_fifo_num);
8054 
8055 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8056 		  sp->config.rx_ring_num);
8057 
8058 	switch (sp->config.intr_type) {
8059 	case INTA:
8060 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8061 		break;
8062 	case MSI_X:
8063 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8064 		break;
8065 	}
8066 	if (sp->config.multiq) {
8067 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8068 			struct fifo_info *fifo = &mac_control->fifos[i];
8069 
8070 			fifo->multiq = config->multiq;
8071 		}
8072 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8073 			  dev->name);
8074 	} else
8075 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8076 			  dev->name);
8077 
8078 	switch (sp->config.tx_steering_type) {
8079 	case NO_STEERING:
8080 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8081 			  dev->name);
8082 		break;
8083 	case TX_PRIORITY_STEERING:
8084 		DBG_PRINT(ERR_DBG,
8085 			  "%s: Priority steering enabled for transmit\n",
8086 			  dev->name);
8087 		break;
8088 	case TX_DEFAULT_STEERING:
8089 		DBG_PRINT(ERR_DBG,
8090 			  "%s: Default steering enabled for transmit\n",
8091 			  dev->name);
8092 	}
8093 
8094 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8095 		  dev->name);
8096 	/* Initialize device name */
8097 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8098 		 sp->product_name);
8099 
8100 	if (vlan_tag_strip)
8101 		sp->vlan_strip_flag = 1;
8102 	else
8103 		sp->vlan_strip_flag = 0;
8104 
8105 	/*
8106 	 * Make Link state as off at this point, when the Link change
8107 	 * interrupt comes the state will be automatically changed to
8108 	 * the right state.
8109 	 */
8110 	netif_carrier_off(dev);
8111 
8112 	return 0;
8113 
8114 register_failed:
8115 set_swap_failed:
8116 	iounmap(sp->bar1);
8117 bar1_remap_failed:
8118 	iounmap(sp->bar0);
8119 bar0_remap_failed:
8120 mem_alloc_failed:
8121 	free_shared_mem(sp);
8122 	pci_disable_device(pdev);
8123 	pci_release_regions(pdev);
8124 	free_netdev(dev);
8125 
8126 	return ret;
8127 }
8128 
8129 /**
8130  * s2io_rem_nic - Free the PCI device
8131  * @pdev: structure containing the PCI related information of the device.
8132  * Description: This function is called by the Pci subsystem to release a
8133  * PCI device and free up all resource held up by the device. This could
8134  * be in response to a Hot plug event or when the driver is to be removed
8135  * from memory.
8136  */
8137 
8138 static void s2io_rem_nic(struct pci_dev *pdev)
8139 {
8140 	struct net_device *dev = pci_get_drvdata(pdev);
8141 	struct s2io_nic *sp;
8142 
8143 	if (dev == NULL) {
8144 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8145 		return;
8146 	}
8147 
8148 	sp = netdev_priv(dev);
8149 
8150 	cancel_work_sync(&sp->rst_timer_task);
8151 	cancel_work_sync(&sp->set_link_task);
8152 
8153 	unregister_netdev(dev);
8154 
8155 	free_shared_mem(sp);
8156 	iounmap(sp->bar0);
8157 	iounmap(sp->bar1);
8158 	pci_release_regions(pdev);
8159 	free_netdev(dev);
8160 	pci_disable_device(pdev);
8161 }
8162 
/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(s2io_driver);
8164 
8165 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8166 				struct tcphdr **tcp, struct RxD_t *rxdp,
8167 				struct s2io_nic *sp)
8168 {
8169 	int ip_off;
8170 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8171 
8172 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8173 		DBG_PRINT(INIT_DBG,
8174 			  "%s: Non-TCP frames not supported for LRO\n",
8175 			  __func__);
8176 		return -1;
8177 	}
8178 
8179 	/* Checking for DIX type or DIX type with VLAN */
8180 	if ((l2_type == 0) || (l2_type == 4)) {
8181 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8182 		/*
8183 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8184 		 * shift the offset by the VLAN header size bytes.
8185 		 */
8186 		if ((!sp->vlan_strip_flag) &&
8187 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8188 			ip_off += HEADER_VLAN_SIZE;
8189 	} else {
8190 		/* LLC, SNAP etc are considered non-mergeable */
8191 		return -1;
8192 	}
8193 
8194 	*ip = (struct iphdr *)(buffer + ip_off);
8195 	ip_len = (u8)((*ip)->ihl);
8196 	ip_len <<= 2;
8197 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8198 
8199 	return 0;
8200 }
8201 
8202 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8203 				  struct tcphdr *tcp)
8204 {
8205 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8206 	if ((lro->iph->saddr != ip->saddr) ||
8207 	    (lro->iph->daddr != ip->daddr) ||
8208 	    (lro->tcph->source != tcp->source) ||
8209 	    (lro->tcph->dest != tcp->dest))
8210 		return -1;
8211 	return 0;
8212 }
8213 
8214 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8215 {
8216 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8217 }
8218 
8219 static void initiate_new_session(struct lro *lro, u8 *l2h,
8220 				 struct iphdr *ip, struct tcphdr *tcp,
8221 				 u32 tcp_pyld_len, u16 vlan_tag)
8222 {
8223 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8224 	lro->l2h = l2h;
8225 	lro->iph = ip;
8226 	lro->tcph = tcp;
8227 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8228 	lro->tcp_ack = tcp->ack_seq;
8229 	lro->sg_num = 1;
8230 	lro->total_len = ntohs(ip->tot_len);
8231 	lro->frags_len = 0;
8232 	lro->vlan_tag = vlan_tag;
8233 	/*
8234 	 * Check if we saw TCP timestamp.
8235 	 * Other consistency checks have already been done.
8236 	 */
8237 	if (tcp->doff == 8) {
8238 		__be32 *ptr;
8239 		ptr = (__be32 *)(tcp+1);
8240 		lro->saw_ts = 1;
8241 		lro->cur_tsval = ntohl(*(ptr+1));
8242 		lro->cur_tsecr = *(ptr+2);
8243 	}
8244 	lro->in_use = 1;
8245 }
8246 
8247 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8248 {
8249 	struct iphdr *ip = lro->iph;
8250 	struct tcphdr *tcp = lro->tcph;
8251 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8252 
8253 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8254 
8255 	/* Update L3 header */
8256 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8257 	ip->tot_len = htons(lro->total_len);
8258 
8259 	/* Update L4 header */
8260 	tcp->ack_seq = lro->tcp_ack;
8261 	tcp->window = lro->window;
8262 
8263 	/* Update tsecr field if this session has timestamps enabled */
8264 	if (lro->saw_ts) {
8265 		__be32 *ptr = (__be32 *)(tcp + 1);
8266 		*(ptr+2) = lro->cur_tsecr;
8267 	}
8268 
8269 	/* Update counters required for calculation of
8270 	 * average no. of packets aggregated.
8271 	 */
8272 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8273 	swstats->num_aggregations++;
8274 }
8275 
8276 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8277 			     struct tcphdr *tcp, u32 l4_pyld)
8278 {
8279 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8280 	lro->total_len += l4_pyld;
8281 	lro->frags_len += l4_pyld;
8282 	lro->tcp_next_seq += l4_pyld;
8283 	lro->sg_num++;
8284 
8285 	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8286 	lro->tcp_ack = tcp->ack_seq;
8287 	lro->window = tcp->window;
8288 
8289 	if (lro->saw_ts) {
8290 		__be32 *ptr;
8291 		/* Update tsecr and tsval from this packet */
8292 		ptr = (__be32 *)(tcp+1);
8293 		lro->cur_tsval = ntohl(*(ptr+1));
8294 		lro->cur_tsecr = *(ptr + 2);
8295 	}
8296 }
8297 
/*
 * verify_l3_l4_lro_capable - check whether a TCP segment's L3/L4 headers
 * allow it to be aggregated (into @l_lro if non-NULL, or a new session).
 * Returns 0 if mergeable, -1 otherwise.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip leading NOP padding to reach the timestamp option.
		 * NOTE(review): assumes the 12-byte option area is NOPs
		 * followed by a timestamp; a malformed header could walk
		 * past it here - confirm earlier validation covers this.
		 */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8355 
/*
 * s2io_club_tcp_session - decide how an incoming TCP segment interacts
 * with the ring's LRO sessions.
 *
 * Return codes (consumed by the Rx path):
 *   0 - all LRO session slots in use; *lro set to NULL
 *   1 - segment aggregated into an existing session
 *   2 - flush: out-of-sequence, or not L3/L4 mergeable with its session
 *   3 - a new session was initiated for this segment
 *   4 - session hit lro_max_aggr_per_sess; headers updated, flush it
 *   5 - segment not aggregatable at all; send it up as-is
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out if the frame can't do LRO. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this flow. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for this flow. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Flushing: finalize the session's headers first. */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8453 
8454 static void clear_lro_session(struct lro *lro)
8455 {
8456 	static u16 lro_struct_size = sizeof(struct lro);
8457 
8458 	memset(lro, 0, lro_struct_size);
8459 }
8460 
8461 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8462 {
8463 	struct net_device *dev = skb->dev;
8464 	struct s2io_nic *sp = netdev_priv(dev);
8465 
8466 	skb->protocol = eth_type_trans(skb, dev);
8467 	if (vlan_tag && sp->vlan_strip_flag)
8468 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8469 	if (sp->config.napi)
8470 		netif_receive_skb(skb);
8471 	else
8472 		netif_rx(skb);
8473 }
8474 
8475 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8476 			   struct sk_buff *skb, u32 tcp_len)
8477 {
8478 	struct sk_buff *first = lro->parent;
8479 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8480 
8481 	first->len += tcp_len;
8482 	first->data_len = lro->frags_len;
8483 	skb_pull(skb, (skb->len - tcp_len));
8484 	if (skb_shinfo(first)->frag_list)
8485 		lro->last_frag->next = skb;
8486 	else
8487 		skb_shinfo(first)->frag_list = skb;
8488 	first->truesize += skb->truesize;
8489 	lro->last_frag = skb;
8490 	swstats->clubbed_frms_cnt++;
8491 }
8492 
8493 /**
8494  * s2io_io_error_detected - called when PCI error is detected
8495  * @pdev: Pointer to PCI device
8496  * @state: The current pci connection state
8497  *
8498  * This function is called after a PCI bus error affecting
8499  * this device has been detected.
8500  */
8501 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8502 					       pci_channel_state_t state)
8503 {
8504 	struct net_device *netdev = pci_get_drvdata(pdev);
8505 	struct s2io_nic *sp = netdev_priv(netdev);
8506 
8507 	netif_device_detach(netdev);
8508 
8509 	if (state == pci_channel_io_perm_failure)
8510 		return PCI_ERS_RESULT_DISCONNECT;
8511 
8512 	if (netif_running(netdev)) {
8513 		/* Bring down the card, while avoiding PCI I/O */
8514 		do_s2io_card_down(sp, 0);
8515 	}
8516 	pci_disable_device(pdev);
8517 
8518 	return PCI_ERS_RESULT_NEED_RESET;
8519 }
8520 
8521 /**
8522  * s2io_io_slot_reset - called after the pci bus has been reset.
8523  * @pdev: Pointer to PCI device
8524  *
8525  * Restart the card from scratch, as if from a cold-boot.
8526  * At this point, the card has exprienced a hard reset,
8527  * followed by fixups by BIOS, and has its config space
8528  * set up identically to what it was at cold boot.
8529  */
8530 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8531 {
8532 	struct net_device *netdev = pci_get_drvdata(pdev);
8533 	struct s2io_nic *sp = netdev_priv(netdev);
8534 
8535 	if (pci_enable_device(pdev)) {
8536 		pr_err("Cannot re-enable PCI device after reset.\n");
8537 		return PCI_ERS_RESULT_DISCONNECT;
8538 	}
8539 
8540 	pci_set_master(pdev);
8541 	s2io_reset(sp);
8542 
8543 	return PCI_ERS_RESULT_RECOVERED;
8544 }
8545 
8546 /**
8547  * s2io_io_resume - called when traffic can start flowing again.
8548  * @pdev: Pointer to PCI device
8549  *
8550  * This callback is called when the error recovery driver tells
8551  * us that its OK to resume normal operation.
8552  */
8553 static void s2io_io_resume(struct pci_dev *pdev)
8554 {
8555 	struct net_device *netdev = pci_get_drvdata(pdev);
8556 	struct s2io_nic *sp = netdev_priv(netdev);
8557 
8558 	if (netif_running(netdev)) {
8559 		if (s2io_card_up(sp)) {
8560 			pr_err("Can't bring device back up after reset.\n");
8561 			return;
8562 		}
8563 
8564 		if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8565 			s2io_card_down(sp);
8566 			pr_err("Can't restore mac addr after reset.\n");
8567 			return;
8568 		}
8569 	}
8570 
8571 	netif_device_attach(netdev);
8572 	netif_tx_wake_all_queues(netdev);
8573 }
8574