xref: /linux/drivers/net/ethernet/tehuti/tehuti.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  * Tehuti Networks(R) Network Driver
3  * ethtool interface implementation
4  * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11 
12 /*
13  * RX HW/SW interaction overview
14  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15  * There are 2 types of RX communication channels between driver and NIC.
16  * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
17  * traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds
18  * info about the buffer's location, size and ID. An ID field is used to identify
19  * a buffer when it's returned with data via the RXD Fifo (see below).
20  * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
21  * filled by HW and is read by SW. Each descriptor holds status and ID.
22  * HW pops a descriptor from the RXF Fifo, stores its ID, fills the buffer with
23  * incoming data, moves it into host memory via DMA, builds a new RXD descriptor
24  * with the same ID, pushes it into the RXD Fifo and raises an RX interrupt.
25  *
26  * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos.
27  * One holds 1.5K packets and the other 26K packets. Depending on the incoming
28  * packet size, HW decides which RXF Fifo to pop a buffer from. When the packet
29  * is filled with data, HW builds a new RXD descriptor for it and pushes it into
30  * the single RXD Fifo.
31  *
32  * RX SW Data Structures
33  * ~~~~~~~~~~~~~~~~~~~~~
34  * skb db - used to keep track of all skbs owned by SW and their dma addresses.
35  * For RX case, ownership lasts from allocating new empty skb for RXF until
36  * accepting full skb from RXD and passing it to OS. Each RXF Fifo has its own
37  * skb db. Implemented as an array plus a stack of free indices.
38  * fifo - keeps info about fifo's size and location, relevant HW registers,
39  * usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
40  * Implemented as simple struct.
41  *
42  * RX SW Execution Flow
43  * ~~~~~~~~~~~~~~~~~~~~
44  * Upon initialization (ifconfig up) driver creates RX fifos and initializes
45  * relevant registers. At the end of init phase, driver enables interrupts.
46  * The NIC sees that there are no RXF buffers and raises the
47  * RD_INTR interrupt; the isr fills skbs and Rx begins.
48  * Driver has two receive operation modes:
49  *    NAPI - interrupt-driven mixed with polling
50  *    interrupt-driven only
51  *
52  * The interrupt-driven only flow is as follows: when a buffer is ready, HW
53  * raises an interrupt and the isr is called. The isr collects all available
54  * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
55  *
56  * Rx buffer allocation note
57  * ~~~~~~~~~~~~~~~~~~~~~~~~~
58  * The driver feeds only as many RxF descriptors as ensures that the respective
59  * amount of RxD descriptors cannot fill the entire RxD fifo. The main reason is
60  * the lack of an overflow check in Bordeaux for the RxD fifo free/used size.
61  * FIXME: this is NOT fully implemented, more work should be done
62  *
63  */
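/*
 * Illustrative sketch of the RXF/RXD ID round-trip described above (field
 * names are simplified; see struct rxf_desc / struct rxd_desc for the real
 * layouts):
 *
 *   SW: pick free slot idx in skb db, map skb, push RXF desc {ID=idx, pa, len}
 *   HW: pop RXF desc, DMA the incoming packet to pa
 *   HW: push RXD desc {ID=idx, status, len} and raise an interrupt
 *   SW: skb = skb db[idx]; pass skb to the stack and recycle slot idx
 */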
64 
65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66 
67 #include "tehuti.h"
68 
69 static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
70 	{ PCI_VDEVICE(TEHUTI, 0x3009), },
71 	{ PCI_VDEVICE(TEHUTI, 0x3010), },
72 	{ PCI_VDEVICE(TEHUTI, 0x3014), },
73 	{ 0 }
74 };
75 
76 MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
77 
78 /* Definitions needed by ISR or NAPI functions */
79 static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
80 static void bdx_tx_cleanup(struct bdx_priv *priv);
81 static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);
82 
83 /* Definitions needed by FW loading */
84 static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);
85 
86 /* Definitions needed by hw_start */
87 static int bdx_tx_init(struct bdx_priv *priv);
88 static int bdx_rx_init(struct bdx_priv *priv);
89 
90 /* Definitions needed by bdx_close */
91 static void bdx_rx_free(struct bdx_priv *priv);
92 static void bdx_tx_free(struct bdx_priv *priv);
93 
94 /* Definitions needed by bdx_probe */
95 static void bdx_set_ethtool_ops(struct net_device *netdev);
96 
97 /*************************************************************************
98  *    Print Info                                                         *
99  *************************************************************************/
100 
101 static void print_hw_id(struct pci_dev *pdev)
102 {
103 	struct pci_nic *nic = pci_get_drvdata(pdev);
104 	u16 pci_link_status = 0;
105 	u16 pci_ctrl = 0;
106 
107 	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
108 	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
109 
110 	pr_info("%s%s\n", BDX_NIC_NAME,
111 		nic->port_num == 1 ? "" : ", 2-Port");
112 	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
113 		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
114 		readl(nic->regs + FPGA_SEED),
115 		GET_LINK_STATUS_LANES(pci_link_status),
116 		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
117 }
118 
119 static void print_fw_id(struct pci_nic *nic)
120 {
121 	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
122 }
123 
124 static void print_eth_id(struct net_device *ndev)
125 {
126 	netdev_info(ndev, "%s, Port %c\n",
127 		    BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
128 
129 }
130 
131 /*************************************************************************
132  *    Code                                                               *
133  *************************************************************************/
134 
135 #define bdx_enable_interrupts(priv)	\
136 	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
137 #define bdx_disable_interrupts(priv)	\
138 	do { WRITE_REG(priv, regIMR, 0); } while (0)
139 
140 /* bdx_fifo_init
141  * create TX/RX descriptor fifo for host-NIC communication.
142  * 1K extra space is allocated at the end of the fifo to simplify
143  * processing of descriptors that wrap around the fifo's end
144  * @priv - NIC private structure
145  * @f - fifo to initialize
146  * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
147  * @reg_XXX - offsets of registers relative to base address
148  *
149  * Returns 0 on success, negative value on failure
150  *
151  */
152 static int
153 bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
154 	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
155 {
156 	u16 memsz = FIFO_SIZE * (1 << fsz_type);
157 
158 	memset(f, 0, sizeof(struct fifo));
159 	/* pci_alloc_consistent gives us 4k-aligned memory */
160 	f->va = pci_alloc_consistent(priv->pdev,
161 				     memsz + FIFO_EXTRA_SPACE, &f->da);
162 	if (!f->va) {
163 		pr_err("pci_alloc_consistent failed\n");
164 		RET(-ENOMEM);
165 	}
166 	f->reg_CFG0 = reg_CFG0;
167 	f->reg_CFG1 = reg_CFG1;
168 	f->reg_RPTR = reg_RPTR;
169 	f->reg_WPTR = reg_WPTR;
170 	f->rptr = 0;
171 	f->wptr = 0;
172 	f->memsz = memsz;
173 	f->size_mask = memsz - 1;
174 	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
175 	WRITE_REG(priv, reg_CFG1, H32_64(f->da));
176 
177 	RET(0);
178 }
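/*
 * Standalone illustration (not driver code, hypothetical sizes) of what the
 * extra space after the fifo buys: a descriptor that crosses the fifo end is
 * copied into the spare region just past memsz so it can be parsed with one
 * contiguous access, and the pointer then wraps to the leftover.
 */
#if 0	/* example only */
#include <string.h>

#define MEMSZ	4096
#define EXTRA	1024

static char fifo[MEMSZ + EXTRA];

/* Advance a read pointer past a descriptor of desc_len bytes, with wrap. */
static int fifo_advance(int rptr, int desc_len)
{
	int tail = rptr + desc_len - MEMSZ;

	if (tail >= 0) {		/* descriptor hits or crosses the end */
		if (tail > 0)		/* make it readable contiguously */
			memcpy(fifo + MEMSZ, fifo, tail);
		return tail;		/* wrapped pointer, inside [0, MEMSZ) */
	}
	return rptr + desc_len;
}
#endif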
179 
180 /* bdx_fifo_free - free all resources used by fifo
181  * @priv - NIC private structure
182  * @f - fifo to release
183  */
184 static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
185 {
186 	ENTER;
187 	if (f->va) {
188 		pci_free_consistent(priv->pdev,
189 				    f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
190 		f->va = NULL;
191 	}
192 	RET();
193 }
194 
195 /*
196  * bdx_link_changed - notifies OS about hw link state.
197  * @bdx_priv - hw adapter structure
198  */
199 static void bdx_link_changed(struct bdx_priv *priv)
200 {
201 	u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
202 
203 	if (!link) {
204 		if (netif_carrier_ok(priv->ndev)) {
205 			netif_stop_queue(priv->ndev);
206 			netif_carrier_off(priv->ndev);
207 			netdev_err(priv->ndev, "Link Down\n");
208 		}
209 	} else {
210 		if (!netif_carrier_ok(priv->ndev)) {
211 			netif_wake_queue(priv->ndev);
212 			netif_carrier_on(priv->ndev);
213 			netdev_err(priv->ndev, "Link Up\n");
214 		}
215 	}
216 }
217 
218 static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
219 {
220 	if (isr & IR_RX_FREE_0) {
221 		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
222 		DBG("RX_FREE_0\n");
223 	}
224 
225 	if (isr & IR_LNKCHG0)
226 		bdx_link_changed(priv);
227 
228 	if (isr & IR_PCIE_LINK)
229 		netdev_err(priv->ndev, "PCI-E Link Fault\n");
230 
231 	if (isr & IR_PCIE_TOUT)
232 		netdev_err(priv->ndev, "PCI-E Time Out\n");
233 
234 }
235 
236 /* bdx_isr - Interrupt Service Routine for Bordeaux NIC
237  * @irq - interrupt number
238  * @ndev - network device
239  * @regs - CPU registers
240  *
241  * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
242  *
243  * It reads the ISR register to learn interrupt reasons, and processes them one by one.
244  * Reasons of interest are:
245  *    RX_DESC - new packet has arrived and RXD fifo holds its descriptor
246  *    RX_FREE - number of free Rx buffers in RXF fifo gets low
247  *    TX_FREE - packet was transmitted and TXF fifo holds its descriptor
248  */
249 
250 static irqreturn_t bdx_isr_napi(int irq, void *dev)
251 {
252 	struct net_device *ndev = dev;
253 	struct bdx_priv *priv = netdev_priv(ndev);
254 	u32 isr;
255 
256 	ENTER;
257 	isr = (READ_REG(priv, regISR) & IR_RUN);
258 	if (unlikely(!isr)) {
259 		bdx_enable_interrupts(priv);
260 		return IRQ_NONE;	/* Not our interrupt */
261 	}
262 
263 	if (isr & IR_EXTRA)
264 		bdx_isr_extra(priv, isr);
265 
266 	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
267 		if (likely(napi_schedule_prep(&priv->napi))) {
268 			__napi_schedule(&priv->napi);
269 			RET(IRQ_HANDLED);
270 		} else {
271 			/* NOTE: we get here if intr has slipped into window
272 			 * between these lines in bdx_poll:
273 			 *    bdx_enable_interrupts(priv);
274 			 *    return 0;
275 			 * currently intrs are disabled (since we read ISR),
276 			 * and we have failed to register next poll.
277 			 * so we read the regs to trigger chip
278 			 * and allow further interrupts. */
279 			READ_REG(priv, regTXF_WPTR_0);
280 			READ_REG(priv, regRXD_WPTR_0);
281 		}
282 	}
283 
284 	bdx_enable_interrupts(priv);
285 	RET(IRQ_HANDLED);
286 }
287 
288 static int bdx_poll(struct napi_struct *napi, int budget)
289 {
290 	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
291 	int work_done;
292 
293 	ENTER;
294 	bdx_tx_cleanup(priv);
295 	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
296 	if ((work_done < budget) ||
297 	    (priv->napi_stop++ >= 30)) {
298 		DBG("rx poll is done. backing to isr-driven\n");
299 
300 		/* from time to time we exit to let NAPI layer release
301 		 * device lock and allow waiting tasks (eg rmmod) to advance */
302 		priv->napi_stop = 0;
303 
304 		napi_complete(napi);
305 		bdx_enable_interrupts(priv);
306 	}
307 	return work_done;
308 }
309 
310 /* bdx_fw_load - loads firmware to NIC
311  * @priv - NIC private structure
312  * Firmware is loaded via TXD fifo, so it must be initialized first.
313  * Firmware must be loaded once per NIC, not per PCI device provided by the NIC
314  * (a NIC can have several of them). So all drivers use a semaphore register to
315  * choose the one that will actually load FW to the NIC.
316  */
317 
318 static int bdx_fw_load(struct bdx_priv *priv)
319 {
320 	const struct firmware *fw = NULL;
321 	int master, i;
322 	int rc;
323 
324 	ENTER;
325 	master = READ_REG(priv, regINIT_SEMAPHORE);
326 	if (!READ_REG(priv, regINIT_STATUS) && master) {
327 		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
328 		if (rc)
329 			goto out;
330 		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
331 		mdelay(100);
332 	}
333 	for (i = 0; i < 200; i++) {
334 		if (READ_REG(priv, regINIT_STATUS)) {
335 			rc = 0;
336 			goto out;
337 		}
338 		mdelay(2);
339 	}
340 	rc = -EIO;
341 out:
342 	if (master)
343 		WRITE_REG(priv, regINIT_SEMAPHORE, 1);
344 	if (fw)
345 		release_firmware(fw);
346 
347 	if (rc) {
348 		netdev_err(priv->ndev, "firmware loading failed\n");
349 		if (rc == -EIO)
350 			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
351 			    READ_REG(priv, regVPC),
352 			    READ_REG(priv, regVIC),
353 			    READ_REG(priv, regINIT_STATUS), i);
354 		RET(rc);
355 	} else {
356 		DBG("%s: firmware loading success\n", priv->ndev->name);
357 		RET(0);
358 	}
359 }
360 
361 static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
362 {
363 	u32 val;
364 
365 	ENTER;
366 	DBG("mac0=%x mac1=%x mac2=%x\n",
367 	    READ_REG(priv, regUNC_MAC0_A),
368 	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
369 
370 	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
371 	WRITE_REG(priv, regUNC_MAC2_A, val);
372 	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
373 	WRITE_REG(priv, regUNC_MAC1_A, val);
374 	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
375 	WRITE_REG(priv, regUNC_MAC0_A, val);
376 
377 	DBG("mac0=%x mac1=%x mac2=%x\n",
378 	    READ_REG(priv, regUNC_MAC0_A),
379 	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
380 	RET();
381 }
382 
383 /* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
384  * @priv - NIC private structure
385  */
386 static int bdx_hw_start(struct bdx_priv *priv)
387 {
388 	int rc = -EIO;
389 	struct net_device *ndev = priv->ndev;
390 
391 	ENTER;
392 	bdx_link_changed(priv);
393 
394 	/* 10G overall max length (vlan, eth&ip header, ip payload, crc) */
395 	WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
396 	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
397 	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
398 	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
399 	WRITE_REG(priv, regRX_FULLNESS, 0);
400 	WRITE_REG(priv, regTX_FULLNESS, 0);
401 	WRITE_REG(priv, regCTRLST,
402 		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);
403 
404 	WRITE_REG(priv, regVGLB, 0);
405 	WRITE_REG(priv, regMAX_FRAME_A,
406 		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);
407 
408 	DBG("RDINTCM=%08x\n", priv->rdintcm);	/*NOTE: test script uses this */
409 	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
410 	WRITE_REG(priv, regRDINTCM2, 0);	/*cpu_to_le32(rcm.val)); */
411 
412 	DBG("TDINTCM=%08x\n", priv->tdintcm);	/*NOTE: test script uses this */
413 	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);	/* old val = 0x300064 */
414 
415 	/* Enable timer interrupt once in 2 secs. */
416 	/*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */
417 	bdx_restore_mac(priv->ndev, priv);
418 
419 	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
420 		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
421 
422 #define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
423 
424 	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
425 			 ndev->name, ndev);
426 	if (rc)
427 		goto err_irq;
428 	bdx_enable_interrupts(priv);
429 
430 	RET(0);
431 
432 err_irq:
433 	RET(rc);
434 }
435 
436 static void bdx_hw_stop(struct bdx_priv *priv)
437 {
438 	ENTER;
439 	bdx_disable_interrupts(priv);
440 	free_irq(priv->pdev->irq, priv->ndev);
441 
442 	netif_carrier_off(priv->ndev);
443 	netif_stop_queue(priv->ndev);
444 
445 	RET();
446 }
447 
448 static int bdx_hw_reset_direct(void __iomem *regs)
449 {
450 	u32 val, i;
451 	ENTER;
452 
453 	/* reset sequences: read, write 1, read, write 0 */
454 	val = readl(regs + regCLKPLL);
455 	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
456 	udelay(50);
457 	val = readl(regs + regCLKPLL);
458 	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);
459 
460 	/* check that the PLLs are locked and reset ended */
461 	for (i = 0; i < 70; i++, mdelay(10))
462 		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
463 			/* do any PCI-E read transaction */
464 			readl(regs + regRXD_CFG0_0);
465 			return 0;
466 		}
467 	pr_err("HW reset failed\n");
468 	return 1;		/* failure */
469 }
470 
471 static int bdx_hw_reset(struct bdx_priv *priv)
472 {
473 	u32 val, i;
474 	ENTER;
475 
476 	if (priv->port == 0) {
477 		/* reset sequences: read, write 1, read, write 0 */
478 		val = READ_REG(priv, regCLKPLL);
479 		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
480 		udelay(50);
481 		val = READ_REG(priv, regCLKPLL);
482 		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
483 	}
484 	/* check that the PLLs are locked and reset ended */
485 	for (i = 0; i < 70; i++, mdelay(10))
486 		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
487 			/* do any PCI-E read transaction */
488 			READ_REG(priv, regRXD_CFG0_0);
489 			return 0;
490 		}
491 	pr_err("HW reset failed\n");
492 	return 1;		/* failure */
493 }
494 
495 static int bdx_sw_reset(struct bdx_priv *priv)
496 {
497 	int i;
498 
499 	ENTER;
500 	/* 1. load MAC (obsolete) */
501 	/* 2. disable Rx (and Tx) */
502 	WRITE_REG(priv, regGMAC_RXF_A, 0);
503 	mdelay(100);
504 	/* 3. disable port */
505 	WRITE_REG(priv, regDIS_PORT, 1);
506 	/* 4. disable queue */
507 	WRITE_REG(priv, regDIS_QU, 1);
508 	/* 5. wait until hw is disabled */
509 	for (i = 0; i < 50; i++) {
510 		if (READ_REG(priv, regRST_PORT) & 1)
511 			break;
512 		mdelay(10);
513 	}
514 	if (i == 50)
515 		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
516 
517 	/* 6. disable intrs */
518 	WRITE_REG(priv, regRDINTCM0, 0);
519 	WRITE_REG(priv, regTDINTCM0, 0);
520 	WRITE_REG(priv, regIMR, 0);
521 	READ_REG(priv, regISR);
522 
523 	/* 7. reset queue */
524 	WRITE_REG(priv, regRST_QU, 1);
525 	/* 8. reset port */
526 	WRITE_REG(priv, regRST_PORT, 1);
527 	/* 9. zero all read and write pointers */
528 	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
529 		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
530 	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
531 		WRITE_REG(priv, i, 0);
532 	/* 10. unset port disable */
533 	WRITE_REG(priv, regDIS_PORT, 0);
534 	/* 11. unset queue disable */
535 	WRITE_REG(priv, regDIS_QU, 0);
536 	/* 12. unset queue reset */
537 	WRITE_REG(priv, regRST_QU, 0);
538 	/* 13. unset port reset */
539 	WRITE_REG(priv, regRST_PORT, 0);
540 	/* 14. enable Rx */
541 	/* skipped, will be done later */
542 	/* 15. save MAC (obsolete) */
543 	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
544 		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
545 
546 	RET(0);
547 }
548 
549 /* bdx_reset - performs right type of reset depending on hw type */
550 static int bdx_reset(struct bdx_priv *priv)
551 {
552 	ENTER;
553 	RET((priv->pdev->device == 0x3009)
554 	    ? bdx_hw_reset(priv)
555 	    : bdx_sw_reset(priv));
556 }
557 
558 /**
559  * bdx_close - Disables a network interface
560  * @netdev: network interface device structure
561  *
562  * Returns 0, this is not allowed to fail
563  *
564  * The close entry point is called when an interface is de-activated
565  * by the OS.  The hardware is still under the driver's control, but
566  * needs to be disabled.  A global MAC reset is issued to stop the
567  * hardware, and all transmit and receive resources are freed.
568  **/
569 static int bdx_close(struct net_device *ndev)
570 {
571 	struct bdx_priv *priv = NULL;
572 
573 	ENTER;
574 	priv = netdev_priv(ndev);
575 
576 	napi_disable(&priv->napi);
577 
578 	bdx_reset(priv);
579 	bdx_hw_stop(priv);
580 	bdx_rx_free(priv);
581 	bdx_tx_free(priv);
582 	RET(0);
583 }
584 
585 /**
586  * bdx_open - Called when a network interface is made active
587  * @netdev: network interface device structure
588  *
589  * Returns 0 on success, negative value on failure
590  *
591  * The open entry point is called when a network interface is made
592  * active by the system (IFF_UP).  At this point all resources needed
593  * for transmit and receive operations are allocated, the interrupt
594  * handler is registered with the OS, the watchdog timer is started,
595  * and the stack is notified that the interface is ready.
596  **/
597 static int bdx_open(struct net_device *ndev)
598 {
599 	struct bdx_priv *priv;
600 	int rc;
601 
602 	ENTER;
603 	priv = netdev_priv(ndev);
604 	bdx_reset(priv);
605 	if (netif_running(ndev))
606 		netif_stop_queue(priv->ndev);
607 
608 	if ((rc = bdx_tx_init(priv)) ||
609 	    (rc = bdx_rx_init(priv)) ||
610 	    (rc = bdx_fw_load(priv)))
611 		goto err;
612 
613 	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
614 
615 	rc = bdx_hw_start(priv);
616 	if (rc)
617 		goto err;
618 
619 	napi_enable(&priv->napi);
620 
621 	print_fw_id(priv->nic);
622 
623 	RET(0);
624 
625 err:
626 	bdx_close(ndev);
627 	RET(rc);
628 }
629 
630 static int bdx_range_check(struct bdx_priv *priv, u32 offset)
631 {
632 	return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
633 		-EINVAL : 0;
634 }
635 
636 static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
637 {
638 	struct bdx_priv *priv = netdev_priv(ndev);
639 	u32 data[3];
640 	int error;
641 
642 	ENTER;
643 
644 	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
645 	if (cmd != SIOCDEVPRIVATE) {
646 		error = copy_from_user(data, ifr->ifr_data, sizeof(data));
647 		if (error) {
648 			pr_err("can't copy from user\n");
649 			RET(-EFAULT);
650 		}
651 		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
652 	}
653 
654 	if (!capable(CAP_SYS_RAWIO))
655 		return -EPERM;
656 
657 	switch (data[0]) {
658 
659 	case BDX_OP_READ:
660 		error = bdx_range_check(priv, data[1]);
661 		if (error < 0)
662 			return error;
663 		data[2] = READ_REG(priv, data[1]);
664 		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
665 		    data[2]);
666 		error = copy_to_user(ifr->ifr_data, data, sizeof(data));
667 		if (error)
668 			RET(-EFAULT);
669 		break;
670 
671 	case BDX_OP_WRITE:
672 		error = bdx_range_check(priv, data[1]);
673 		if (error < 0)
674 			return error;
675 		WRITE_REG(priv, data[1], data[2]);
676 		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
677 		break;
678 
679 	default:
680 		RET(-EOPNOTSUPP);
681 	}
682 	return 0;
683 }
684 
685 static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
686 {
687 	ENTER;
688 	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
689 		RET(bdx_ioctl_priv(ndev, ifr, cmd));
690 	else
691 		RET(-EOPNOTSUPP);
692 }
693 
694 /*
695  * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
696  *                     by passing VLAN filter table to hardware
697  * @ndev network device
698  * @vid  VLAN vid
699  * @op   add or kill operation
700  */
701 static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
702 {
703 	struct bdx_priv *priv = netdev_priv(ndev);
704 	u32 reg, bit, val;
705 
706 	ENTER;
707 	DBG2("vid=%d value=%d\n", (int)vid, enable);
708 	if (unlikely(vid >= 4096)) {
709 		pr_err("invalid VID: %u (>= 4096)\n", vid);
710 		RET();
711 	}
712 	reg = regVLAN_0 + (vid / 32) * 4;
713 	bit = 1 << vid % 32;
714 	val = READ_REG(priv, reg);
715 	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
716 	if (enable)
717 		val |= bit;
718 	else
719 		val &= ~bit;
720 	DBG2("new val %x\n", val);
721 	WRITE_REG(priv, reg, val);
722 	RET();
723 }
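/*
 * Worked example of the indexing above: the 4096 possible VIDs map onto
 * 128 consecutive 32-bit registers starting at regVLAN_0, so e.g. vid 100
 * lands in regVLAN_0 + (100 / 32) * 4 = regVLAN_0 + 0xc, bit 100 % 32 = 4.
 */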
724 
725 /*
726  * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
727  * @ndev network device
728  * @vid  VLAN vid to add
729  */
730 static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
731 {
732 	__bdx_vlan_rx_vid(ndev, vid, 1);
733 	return 0;
734 }
735 
736 /*
737  * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
738  * @ndev network device
739  * @vid  VLAN vid to kill
740  */
741 static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
742 {
743 	__bdx_vlan_rx_vid(ndev, vid, 0);
744 	return 0;
745 }
746 
747 /**
748  * bdx_change_mtu - Change the Maximum Transfer Unit
749  * @netdev: network interface device structure
750  * @new_mtu: new value for maximum frame size
751  *
752  * Returns 0 on success, negative on failure
753  */
754 static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
755 {
756 	ENTER;
757 
758 	if (new_mtu == ndev->mtu)
759 		RET(0);
760 
761 	/* enforce minimum frame size */
762 	if (new_mtu < ETH_ZLEN) {
763 		netdev_err(ndev, "mtu %d is less than minimal %d\n",
764 			   new_mtu, ETH_ZLEN);
765 		RET(-EINVAL);
766 	}
767 
768 	ndev->mtu = new_mtu;
769 	if (netif_running(ndev)) {
770 		bdx_close(ndev);
771 		bdx_open(ndev);
772 	}
773 	RET(0);
774 }
775 
776 static void bdx_setmulti(struct net_device *ndev)
777 {
778 	struct bdx_priv *priv = netdev_priv(ndev);
779 
780 	u32 rxf_val =
781 	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
782 	int i;
783 
784 	ENTER;
785 	/* IMF - imperfect (hash) rx multicast filter */
786 	/* PMF - perfect rx multicast filter */
787 
788 	/* FIXME: RXE(OFF) */
789 	if (ndev->flags & IFF_PROMISC) {
790 		rxf_val |= GMAC_RX_FILTER_PRM;
791 	} else if (ndev->flags & IFF_ALLMULTI) {
792 		/* set IMF to accept all multicast frames */
793 		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
794 			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
795 	} else if (!netdev_mc_empty(ndev)) {
796 		u8 hash;
797 		struct netdev_hw_addr *ha;
798 		u32 reg, val;
799 
800 		/* set IMF to deny all multicast frames */
801 		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
802 			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
803 		/* set PMF to deny all multicast frames */
804 		for (i = 0; i < MAC_MCST_NUM; i++) {
805 			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
806 			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
807 		}
808 
809 		/* use PMF to accept first MAC_MCST_NUM (15) addresses */
810 		/* TBD: sort addresses and write them in ascending order
811 		 * into RX_MAC_MCST regs. We skip this phase now and accept ALL
812 		 * multicast frames through IMF */
813 		/* accept the rest of the addresses through IMF */
814 		netdev_for_each_mc_addr(ha, ndev) {
815 			hash = 0;
816 			for (i = 0; i < ETH_ALEN; i++)
817 				hash ^= ha->addr[i];
818 			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
819 			val = READ_REG(priv, reg);
820 			val |= (1 << (hash % 32));
821 			WRITE_REG(priv, reg, val);
822 		}
823 
824 	} else {
825 		DBG("only own mac %d\n", netdev_mc_count(ndev));
826 		rxf_val |= GMAC_RX_FILTER_AB;
827 	}
828 	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
829 	/* enable RX */
830 	/* FIXME: RXE(ON) */
831 	RET();
832 }
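/*
 * Standalone sketch (not driver code) of the IMF hash computed above: the
 * XOR of the six MAC bytes yields an 8-bit hash whose top bits select the
 * hash register and whose low 5 bits select the bit inside it. The sample
 * address is arbitrary.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned char hash = 0;
	int i;

	for (i = 0; i < 6; i++)
		hash ^= mac[i];

	/* same math as the loop above: register offset, then bit number */
	printf("reg = regRX_MCST_HASH0 + 0x%x, bit = %u\n",
	       (hash >> 5) << 2, hash % 32);
	return 0;
}
#endif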
833 
834 static int bdx_set_mac(struct net_device *ndev, void *p)
835 {
836 	struct bdx_priv *priv = netdev_priv(ndev);
837 	struct sockaddr *addr = p;
838 
839 	ENTER;
840 	/*
841 	   if (netif_running(dev))
842 	   return -EBUSY
843 	 */
844 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
845 	bdx_restore_mac(ndev, priv);
846 	RET(0);
847 }
848 
849 static int bdx_read_mac(struct bdx_priv *priv)
850 {
851 	u16 macAddress[3], i;
852 	ENTER;
853 
854 	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
855 	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
856 	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
857 	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
858 	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
859 	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
860 	for (i = 0; i < 3; i++) {
861 		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
862 		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
863 	}
864 	RET(0);
865 }
866 
867 static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
868 {
869 	u64 val;
870 
871 	val = READ_REG(priv, reg);
872 	val |= ((u64) READ_REG(priv, reg + 8)) << 32;
873 	return val;
874 }
875 
876 /* Do the statistics-update work */
877 static void bdx_update_stats(struct bdx_priv *priv)
878 {
879 	struct bdx_stats *stats = &priv->hw_stats;
880 	u64 *stats_vector = (u64 *) stats;
881 	int i;
882 	int addr;
883 
884 	/*Fill HW structure */
885 	addr = 0x7200;
886 	/*First 12 statistics - 0x7200 - 0x72B0 */
887 	for (i = 0; i < 12; i++) {
888 		stats_vector[i] = bdx_read_l2stat(priv, addr);
889 		addr += 0x10;
890 	}
891 	BDX_ASSERT(addr != 0x72C0);
892 	/* 0x72C0-0x72E0 RSRV */
893 	addr = 0x72F0;
894 	for (; i < 16; i++) {
895 		stats_vector[i] = bdx_read_l2stat(priv, addr);
896 		addr += 0x10;
897 	}
898 	BDX_ASSERT(addr != 0x7330);
899 	/* 0x7330-0x7360 RSRV */
900 	addr = 0x7370;
901 	for (; i < 19; i++) {
902 		stats_vector[i] = bdx_read_l2stat(priv, addr);
903 		addr += 0x10;
904 	}
905 	BDX_ASSERT(addr != 0x73A0);
906 	/* 0x73A0-0x73B0 RSRV */
907 	addr = 0x73C0;
908 	for (; i < 23; i++) {
909 		stats_vector[i] = bdx_read_l2stat(priv, addr);
910 		addr += 0x10;
911 	}
912 	BDX_ASSERT(addr != 0x7400);
913 	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
914 }
915 
916 static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
917 		       u16 rxd_vlan);
918 static void print_rxfd(struct rxf_desc *rxfd);
919 
920 /*************************************************************************
921  *     Rx DB                                                             *
922  *************************************************************************/
923 
924 static void bdx_rxdb_destroy(struct rxdb *db)
925 {
926 	vfree(db);
927 }
928 
929 static struct rxdb *bdx_rxdb_create(int nelem)
930 {
931 	struct rxdb *db;
932 	int i;
933 
934 	db = vmalloc(sizeof(struct rxdb)
935 		     + (nelem * sizeof(int))
936 		     + (nelem * sizeof(struct rx_map)));
937 	if (likely(db != NULL)) {
938 		db->stack = (int *)(db + 1);
939 		db->elems = (void *)(db->stack + nelem);
940 		db->nelem = nelem;
941 		db->top = nelem;
942 		for (i = 0; i < nelem; i++)
943 			db->stack[i] = nelem - i - 1;	/* to make first allocs
944 							   close to db struct*/
945 	}
946 
947 	return db;
948 }
949 
950 static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
951 {
952 	BDX_ASSERT(db->top <= 0);
953 	return db->stack[--(db->top)];
954 }
955 
956 static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
957 {
958 	BDX_ASSERT((n < 0) || (n >= db->nelem));
959 	return db->elems + n;
960 }
961 
962 static inline int bdx_rxdb_available(struct rxdb *db)
963 {
964 	return db->top;
965 }
966 
967 static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
968 {
969 	BDX_ASSERT((n >= db->nelem) || (n < 0));
970 	db->stack[(db->top)++] = n;
971 }
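/*
 * Minimal userspace model (illustration only) of the rxdb free list above:
 * free indices live on a stack, alloc pops, free pushes, and "available"
 * is simply the stack depth.
 */
#if 0	/* example only */
#include <stdio.h>

#define NELEM 4

int main(void)
{
	int stack[NELEM], top = NELEM, i, a, b;

	for (i = 0; i < NELEM; i++)
		stack[i] = NELEM - i - 1;	/* 3,2,1,0: index 0 pops first */

	a = stack[--top];			/* alloc -> 0 */
	b = stack[--top];			/* alloc -> 1 */
	stack[top++] = a;			/* free 0: it is reused next */
	printf("a=%d b=%d avail=%d next=%d\n", a, b, top, stack[top - 1]);
	return 0;
}
#endif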
972 
973 /*************************************************************************
974  *     Rx Init                                                           *
975  *************************************************************************/
976 
977 /* bdx_rx_init - initialize RX all related HW and SW resources
978  * @priv - NIC private structure
979  *
980  * Returns 0 on success, negative value on failure
981  *
982  * It creates rxf and rxd fifos, updates relevant HW registers and preallocates
983  * skbs for rx. It assumes that Rx is disabled in HW.
984  * Functions are grouped for better cache usage.
985  *
986  * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
987  * filled and packets will be dropped by the nic without getting into the host
988  * or causing an interrupt. In that condition the host has no chance to process
989  * all packets anyway, and dropping in the nic is cheaper: it takes 0 cpu cycles
990  */
991 
992 /* TBD: ensure proper packet size */
993 
994 static int bdx_rx_init(struct bdx_priv *priv)
995 {
996 	ENTER;
997 
998 	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
999 			  regRXD_CFG0_0, regRXD_CFG1_0,
1000 			  regRXD_RPTR_0, regRXD_WPTR_0))
1001 		goto err_mem;
1002 	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
1003 			  regRXF_CFG0_0, regRXF_CFG1_0,
1004 			  regRXF_RPTR_0, regRXF_WPTR_0))
1005 		goto err_mem;
1006 	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
1007 				     sizeof(struct rxf_desc));
1008 	if (!priv->rxdb)
1009 		goto err_mem;
1010 
1011 	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
1012 	return 0;
1013 
1014 err_mem:
1015 	netdev_err(priv->ndev, "Rx init failed\n");
1016 	return -ENOMEM;
1017 }
1018 
1019 /* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
1020  * @priv - NIC private structure
1021  * @f - RXF fifo
1022  */
1023 static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1024 {
1025 	struct rx_map *dm;
1026 	struct rxdb *db = priv->rxdb;
1027 	u16 i;
1028 
1029 	ENTER;
1030 	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
1031 	    db->nelem - bdx_rxdb_available(db));
1032 	while (bdx_rxdb_available(db) > 0) {
1033 		i = bdx_rxdb_alloc_elem(db);
1034 		dm = bdx_rxdb_addr_elem(db, i);
1035 		dm->dma = 0;
1036 	}
1037 	for (i = 0; i < db->nelem; i++) {
1038 		dm = bdx_rxdb_addr_elem(db, i);
1039 		if (dm->dma) {
1040 			pci_unmap_single(priv->pdev,
1041 					 dm->dma, f->m.pktsz,
1042 					 PCI_DMA_FROMDEVICE);
1043 			dev_kfree_skb(dm->skb);
1044 		}
1045 	}
1046 }
1047 
1048 /* bdx_rx_free - release all Rx resources
1049  * @priv - NIC private structure
1050  * It assumes that Rx is disabled in HW
1051  */
1052 static void bdx_rx_free(struct bdx_priv *priv)
1053 {
1054 	ENTER;
1055 	if (priv->rxdb) {
1056 		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
1057 		bdx_rxdb_destroy(priv->rxdb);
1058 		priv->rxdb = NULL;
1059 	}
1060 	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
1061 	bdx_fifo_free(priv, &priv->rxd_fifo0.m);
1062 
1063 	RET();
1064 }
1065 
1066 /*************************************************************************
1067  *     Rx Engine                                                         *
1068  *************************************************************************/
1069 
1070 /* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
1071  * @priv - nic's private structure
1072  * @f - RXF fifo that needs skbs
1073  * It allocates skbs, builds rxf descriptors and pushes them into the rxf fifo.
1074  * skb's virtual and physical addresses are stored in the skb db.
1075  * To calculate free space, the func uses cached values of RPTR and WPTR.
1076  * When needed, it also updates RPTR and WPTR.
1077  */
1078 
1079 /* TBD: do not update WPTR if no desc were written */
1080 
1081 static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1082 {
1083 	struct sk_buff *skb;
1084 	struct rxf_desc *rxfd;
1085 	struct rx_map *dm;
1086 	int dno, delta, idx;
1087 	struct rxdb *db = priv->rxdb;
1088 
1089 	ENTER;
1090 	dno = bdx_rxdb_available(db) - 1;
1091 	while (dno > 0) {
1092 		skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
1093 		if (!skb) {
1094 			pr_err("NO MEM: dev_alloc_skb failed\n");
1095 			break;
1096 		}
1097 		skb->dev = priv->ndev;
1098 		skb_reserve(skb, NET_IP_ALIGN);
1099 
1100 		idx = bdx_rxdb_alloc_elem(db);
1101 		dm = bdx_rxdb_addr_elem(db, idx);
1102 		dm->dma = pci_map_single(priv->pdev,
1103 					 skb->data, f->m.pktsz,
1104 					 PCI_DMA_FROMDEVICE);
1105 		dm->skb = skb;
1106 		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1107 		rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
1108 		rxfd->va_lo = idx;
1109 		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1110 		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1111 		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1112 		print_rxfd(rxfd);
1113 
1114 		f->m.wptr += sizeof(struct rxf_desc);
1115 		delta = f->m.wptr - f->m.memsz;
1116 		if (unlikely(delta >= 0)) {
1117 			f->m.wptr = delta;
1118 			if (delta > 0) {
1119 				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1120 				DBG("wrapped descriptor\n");
1121 			}
1122 		}
1123 		dno--;
1124 	}
1125 	/*TBD: to do - delayed rxf wptr like in txd */
1126 	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1127 	RET();
1128 }
1129 
1130 static inline void
1131 NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
1132 	     struct sk_buff *skb)
1133 {
1134 	ENTER;
1135 	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
1136 	if (GET_RXD_VTAG(rxd_val1)) {
1137 		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
1138 		    priv->ndev->name,
1139 		    GET_RXD_VLAN_ID(rxd_vlan),
1140 		    GET_RXD_VTAG(rxd_val1));
1141 		__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
1142 	}
1143 	netif_receive_skb(skb);
1144 }
1145 
1146 static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1147 {
1148 	struct rxf_desc *rxfd;
1149 	struct rx_map *dm;
1150 	struct rxf_fifo *f;
1151 	struct rxdb *db;
1152 	struct sk_buff *skb;
1153 	int delta;
1154 
1155 	ENTER;
1156 	DBG("priv=%p rxdd=%p\n", priv, rxdd);
1157 	f = &priv->rxf_fifo0;
1158 	db = priv->rxdb;
1159 	DBG("db=%p f=%p\n", db, f);
1160 	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1161 	DBG("dm=%p\n", dm);
1162 	skb = dm->skb;
1163 	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1164 	rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
1165 	rxfd->va_lo = rxdd->va_lo;
1166 	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1167 	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1168 	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1169 	print_rxfd(rxfd);
1170 
1171 	f->m.wptr += sizeof(struct rxf_desc);
1172 	delta = f->m.wptr - f->m.memsz;
1173 	if (unlikely(delta >= 0)) {
1174 		f->m.wptr = delta;
1175 		if (delta > 0) {
1176 			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1177 			DBG("wrapped descriptor\n");
1178 		}
1179 	}
1180 	RET();
1181 }
1182 
1183 /* bdx_rx_receive - receives full packets from RXD fifo and passes them to OS
1184  * NOTE: special treatment is given to non-contiguous descriptors that
1185  * start near the end, wrap around and continue at the beginning. The second
1186  * part is copied right after the first, and then the descriptor is interpreted
1187  * as normal. The fifo has extra space to allow such operations.
1188  * @priv - nic's private structure
1189  * @f - RXD fifo to receive packets from
1190  */
1191 
1192 /* TBD: replace memcpy func call by explicit inline asm */
1193 
1194 static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1195 {
1196 	struct net_device *ndev = priv->ndev;
1197 	struct sk_buff *skb, *skb2;
1198 	struct rxd_desc *rxdd;
1199 	struct rx_map *dm;
1200 	struct rxf_fifo *rxf_fifo;
1201 	int tmp_len, size;
1202 	int done = 0;
1203 	int max_done = BDX_MAX_RX_DONE;
1204 	struct rxdb *db = NULL;
1205 	/* Unmarshalled descriptor - copy of descriptor in host order */
1206 	u32 rxd_val1;
1207 	u16 len;
1208 	u16 rxd_vlan;
1209 
1210 	ENTER;
1211 	max_done = budget;
1212 
1213 	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;
1214 
1215 	size = f->m.wptr - f->m.rptr;
1216 	if (size < 0)
1217 		size = f->m.memsz + size;	/* size is negative :-) */
1218 
1219 	while (size > 0) {
1220 
1221 		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
1222 		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
1223 
1224 		len = CPU_CHIP_SWAP16(rxdd->len);
1225 
1226 		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);
1227 
1228 		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);
1229 
1230 		tmp_len = GET_RXD_BC(rxd_val1) << 3;
1231 		BDX_ASSERT(tmp_len <= 0);
1232 		size -= tmp_len;
1233 		if (size < 0)	/* test for partially arrived descriptor */
1234 			break;
1235 
1236 		f->m.rptr += tmp_len;
1237 
1238 		tmp_len = f->m.rptr - f->m.memsz;
1239 		if (unlikely(tmp_len >= 0)) {
1240 			f->m.rptr = tmp_len;
1241 			if (tmp_len > 0) {
1242 				DBG("wrapped desc rptr=%d tmp_len=%d\n",
1243 				    f->m.rptr, tmp_len);
1244 				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
1245 			}
1246 		}
1247 
1248 		if (unlikely(GET_RXD_ERR(rxd_val1))) {
1249 			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
1250 			ndev->stats.rx_errors++;
1251 			bdx_recycle_skb(priv, rxdd);
1252 			continue;
1253 		}
1254 
1255 		rxf_fifo = &priv->rxf_fifo0;
1256 		db = priv->rxdb;
1257 		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1258 		skb = dm->skb;
1259 
1260 		if (len < BDX_COPYBREAK &&
1261 		    (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
1262 			skb_reserve(skb2, NET_IP_ALIGN);
1263 			/*skb_put(skb2, len); */
1264 			pci_dma_sync_single_for_cpu(priv->pdev,
1265 						    dm->dma, rxf_fifo->m.pktsz,
1266 						    PCI_DMA_FROMDEVICE);
1267 			memcpy(skb2->data, skb->data, len);
1268 			bdx_recycle_skb(priv, rxdd);
1269 			skb = skb2;
1270 		} else {
1271 			pci_unmap_single(priv->pdev,
1272 					 dm->dma, rxf_fifo->m.pktsz,
1273 					 PCI_DMA_FROMDEVICE);
1274 			bdx_rxdb_free_elem(db, rxdd->va_lo);
1275 		}
1276 
1277 		ndev->stats.rx_bytes += len;
1278 
1279 		skb_put(skb, len);
1280 		skb->protocol = eth_type_trans(skb, ndev);
1281 
1282 		/* Non-IP packets aren't checksum-offloaded */
1283 		if (GET_RXD_PKT_ID(rxd_val1) == 0)
1284 			skb_checksum_none_assert(skb);
1285 		else
1286 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1287 
1288 		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
1289 
1290 		if (++done >= max_done)
1291 			break;
1292 	}
1293 
1294 	ndev->stats.rx_packets += done;
1295 
1296 	/* FIXME: do smth to minimize pci accesses    */
1297 	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1298 
1299 	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
1300 
1301 	RET(done);
1302 }
1303 
1304 /*************************************************************************
1305  * Debug / Temporary Code                                                *
1306  *************************************************************************/
1307 static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1308 		       u16 rxd_vlan)
1309 {
1310 	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
1311 	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
1312 	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
1313 	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
1314 	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
1315 	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
1316 	    rxdd->va_hi);
1317 }
1318 
1319 static void print_rxfd(struct rxf_desc *rxfd)
1320 {
1321 	DBG("=== RxF desc CHIP ORDER/ENDIANNESS ============\n"
1322 	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1323 	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1324 }
1325 
1326 /*
1327  * TX HW/SW interaction overview
1328  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1329  * There are 2 types of TX communication channels between driver and NIC.
1330  * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
1331  * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
1332  *
1333  * Currently the NIC supports TSO, checksumming and gather DMA;
1334  * UFO and IP fragmentation are on the way.
1335  *
1336  * TX SW Data Structures
1337  * ~~~~~~~~~~~~~~~~~~~~~
1338  * txdb - used to keep track of all skbs owned by SW and their dma addresses.
1339  * For TX case, ownership lasts from getting the packet via hard_xmit until HW
1340  * acknowledges it via a TXF descriptor.
1341  * Implemented as cyclic buffer.
1342  * fifo - keeps info about fifo's size and location, relevant HW registers,
1343  * usage and skb db. Each TXD and TXF Fifo has its own fifo structure.
1344  * Implemented as simple struct.
1345  *
1346  * TX SW Execution Flow
1347  * ~~~~~~~~~~~~~~~~~~~~
1348  * OS calls driver's hard_xmit method with a packet to send.
1349  * Driver creates DMA mappings, builds TXD descriptors and kicks HW
1350  * by updating TXD WPTR.
1351  * When a packet is sent, HW writes us a TXF descriptor and SW frees the
1352  * original skb. To prevent TXD fifo overflow without reading HW registers
1353  * every time, SW deploys the "tx level" technique.
1354  * Upon start up, the tx level is initialized to the TXD fifo length.
1355  * For every sent packet, SW gets its TXD descriptor size
1356  * (from a precalculated array) and subtracts it from the tx level.
1357  * The size is also stored in the txdb. When a TXF ack arrives, SW fetches the
1358  * size of the original TXD descriptor from the txdb and adds it to the tx level.
1359  * When the tx level drops under some predefined threshold, the driver
1360  * stops the TX queue. When the tx level rises above that threshold,
1361  * the tx queue is enabled again.
1362  *
1363  * This technique avoids excessive reading of RPTR and WPTR registers.
1364  * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
1365  */
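/*
 * Minimal model (standalone, hypothetical sizes) of the "tx level"
 * accounting described above.
 */
#if 0	/* example only */
#include <stdio.h>

#define FIFO_LEN	16384	/* tx level starts at TXD fifo length */
#define MIN_LEVEL	256	/* stop/wake threshold */

int main(void)
{
	int level = FIFO_LEN;
	int desc_sz = 56;	/* e.g. txd_sizes[nr_frags].bytes */

	level -= desc_sz;	/* hard_xmit: TXD descriptor written */
	if (level < MIN_LEVEL)
		printf("netif_stop_queue()\n");

	level += desc_sz;	/* TXF ack: size fetched back from txdb */
	if (level >= MIN_LEVEL)
		printf("netif_wake_queue()\n");
	return 0;
}
#endif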
1366 
1367 /*************************************************************************
1368  *     Tx DB                                                             *
1369  *************************************************************************/
1370 static inline int bdx_tx_db_size(struct txdb *db)
1371 {
1372 	int taken = db->wptr - db->rptr;
1373 	if (taken < 0)
1374 		taken = db->size + 1 + taken;	/* (size + 1) equals memsz */
1375 
1376 	return db->size - taken;
1377 }
1378 
1379 /* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
1380  * @d   - tx data base
1381  * @ptr - read or write pointer
1382  */
1383 static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1384 {
1385 	BDX_ASSERT(db == NULL || pptr == NULL);	/* sanity */
1386 
1387 	BDX_ASSERT(*pptr != db->rptr &&	/* expect either read */
1388 		   *pptr != db->wptr);	/* or write pointer */
1389 
1390 	BDX_ASSERT(*pptr < db->start ||	/* pointer has to be */
1391 		   *pptr >= db->end);	/* in range */
1392 
1393 	++*pptr;
1394 	if (unlikely(*pptr == db->end))
1395 		*pptr = db->start;
1396 }
1397 
1398 /* bdx_tx_db_inc_rptr - increment read pointer
1399  * @d   - tx data base
1400  */
1401 static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1402 {
1403 	BDX_ASSERT(db->rptr == db->wptr);	/* can't read from empty db */
1404 	__bdx_tx_db_ptr_next(db, &db->rptr);
1405 }
1406 
1407 /* bdx_tx_db_inc_wptr - increment write pointer
1408  * @d   - tx data base
1409  */
1410 static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1411 {
1412 	__bdx_tx_db_ptr_next(db, &db->wptr);
1413 	BDX_ASSERT(db->rptr == db->wptr);	/* we can not get empty db as
1414 						   a result of write */
1415 }
1416 
1417 /* bdx_tx_db_init - creates and initializes tx db
1418  * @d       - tx data base
1419  * @sz_type - size of tx fifo
1420  * Returns 0 on success, error code otherwise
1421  */
1422 static int bdx_tx_db_init(struct txdb *d, int sz_type)
1423 {
1424 	int memsz = FIFO_SIZE * (1 << (sz_type + 1));
1425 
1426 	d->start = vmalloc(memsz);
1427 	if (!d->start)
1428 		return -ENOMEM;
1429 
1430 	/*
1431 	 * In order to differentiate between the "db is empty" and "db is full"
1432 	 * states, at least one element should always stay unused: rptr == wptr
1433 	 * is then unambiguous and always means the db is empty
1434 	 */
1435 	d->size = memsz / sizeof(struct tx_map) - 1;
1436 	d->end = d->start + d->size + 1;	/* just after last element */
1437 
1438 	/* all dbs are created equally empty */
1439 	d->rptr = d->start;
1440 	d->wptr = d->start;
1441 
1442 	return 0;
1443 }
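/*
 * Standalone check (illustration only) of the "one slot always empty"
 * invariant above: with room for 4 tx_map elements, size = 3, and the
 * free-space math distinguishes empty (rptr == wptr) from full.
 */
#if 0	/* example only */
#include <stdio.h>

#define SIZE 3				/* usable elements; memsz holds SIZE + 1 */

static int db_free(int wptr, int rptr)
{
	int taken = wptr - rptr;

	if (taken < 0)
		taken += SIZE + 1;
	return SIZE - taken;
}

int main(void)
{
	printf("empty: %d free\n", db_free(0, 0));	/* 3 */
	printf("full:  %d free\n", db_free(3, 0));	/* 0, yet wptr != rptr */
	return 0;
}
#endif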
1444 
1445 /* bdx_tx_db_close - closes tx db and frees all memory
1446  * @d - tx data base
1447  */
1448 static void bdx_tx_db_close(struct txdb *d)
1449 {
1450 	BDX_ASSERT(d == NULL);
1451 
1452 	vfree(d->start);
1453 	d->start = NULL;
1454 }
1455 
1456 /*************************************************************************
1457  *     Tx Engine                                                         *
1458  *************************************************************************/
1459 
1460 /* sizes of tx desc (including padding if needed) as function
1461  * of skb's frag number */
1462 static struct {
1463 	u16 bytes;
1464 	u16 qwords;		/* qword = 64 bit */
1465 } txd_sizes[MAX_SKB_FRAGS + 1];
1466 
1467 /* bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
1468  * @priv - NIC private structure
1469  * @skb  - socket buffer to map
1470  *
1471  * It makes dma mappings for skb's data blocks and writes them to PBL of
1472  * new tx descriptor. It also stores them in the tx db, so they could be
1473  * unmapped after the data is sent. It is the responsibility of the caller to
1474  * make sure that there is enough space in the tx db. The last element holds a
1475  * pointer to the skb itself and is marked with a negative length
1476  */
1477 static inline void
1478 bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
1479 	       struct txd_desc *txdd)
1480 {
1481 	struct txdb *db = &priv->txdb;
1482 	struct pbl *pbl = &txdd->pbl[0];
1483 	int nr_frags = skb_shinfo(skb)->nr_frags;
1484 	int i;
1485 
1486 	db->wptr->len = skb_headlen(skb);
1487 	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
1488 					    db->wptr->len, PCI_DMA_TODEVICE);
1489 	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1490 	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1491 	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1492 	DBG("=== pbl   len: 0x%x ================\n", pbl->len);
1493 	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
1494 	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
1495 	bdx_tx_db_inc_wptr(db);
1496 
1497 	for (i = 0; i < nr_frags; i++) {
1498 		const struct skb_frag_struct *frag;
1499 
1500 		frag = &skb_shinfo(skb)->frags[i];
1501 		db->wptr->len = skb_frag_size(frag);
1502 		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
1503 						      0, skb_frag_size(frag),
1504 						      DMA_TO_DEVICE);
1505 
1506 		pbl++;
1507 		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1508 		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1509 		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1510 		bdx_tx_db_inc_wptr(db);
1511 	}
1512 
1513 	/* add skb clean up info. */
1514 	db->wptr->len = -txd_sizes[nr_frags].bytes;
1515 	db->wptr->addr.skb = skb;
1516 	bdx_tx_db_inc_wptr(db);
1517 }
1518 
1519 /* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags
1520  * number of frags is used as index to fetch the correct descriptor size,
1521  * instead of calculating it each time */
1522 static void __init init_txd_sizes(void)
1523 {
1524 	int i, lwords;
1525 
1526 	/* 7 is the number of lwords in a txd with one phys buffer,
1527 	 * 3 is the number of lwords used for every additional phys buffer */
1528 	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1529 		lwords = 7 + (i * 3);
1530 		if (lwords & 1)
1531 			lwords++;	/* pad it with 1 lword */
1532 		txd_sizes[i].qwords = lwords >> 1;
1533 		txd_sizes[i].bytes = lwords << 2;
1534 	}
1535 }
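/*
 * Worked example of the table above: for nr_frags = 2,
 * lwords = 7 + 2 * 3 = 13, padded to 14, so
 * txd_sizes[2] = { .bytes = 56, .qwords = 7 }.
 */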
1536 
1537 /* bdx_tx_init - initialize all Tx related stuff.
1538  * Namely, TXD and TXF fifos, database etc */
1539 static int bdx_tx_init(struct bdx_priv *priv)
1540 {
1541 	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
1542 			  regTXD_CFG0_0,
1543 			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
1544 		goto err_mem;
1545 	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
1546 			  regTXF_CFG0_0,
1547 			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
1548 		goto err_mem;
1549 
1550 	/* The TX db has to keep mappings for all packets sent (on TxD)
1551 	 * and not yet reclaimed (on TxF) */
1552 	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
1553 		goto err_mem;
1554 
1555 	priv->tx_level = BDX_MAX_TX_LEVEL;
1556 #ifdef BDX_DELAY_WPTR
1557 	priv->tx_update_mark = priv->tx_level - 1024;
1558 #endif
1559 	return 0;
1560 
1561 err_mem:
1562 	netdev_err(priv->ndev, "Tx init failed\n");
1563 	return -ENOMEM;
1564 }
1565 
1566 /*
1567  * bdx_tx_space - calculates available space in TX fifo
1568  * @priv - NIC private structure
1569  * Returns available space in TX fifo in bytes
1570  */
1571 static inline int bdx_tx_space(struct bdx_priv *priv)
1572 {
1573 	struct txd_fifo *f = &priv->txd_fifo0;
1574 	int fsize;
1575 
1576 	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
1577 	fsize = f->m.rptr - f->m.wptr;
1578 	if (fsize <= 0)
1579 		fsize = f->m.memsz + fsize;
1580 	return fsize;
1581 }
1582 
1583 /* bdx_tx_transmit - send packet to NIC
1584  * @skb - packet to send
1585  * @ndev - network device assigned to NIC
1586  * Return codes:
1587  * o NETDEV_TX_OK everything ok.
1588  * o NETDEV_TX_BUSY Cannot transmit packet, try later
1589  *   Usually a bug, means queue start/stop flow control is broken in
1590  *   the driver. Note: the driver must NOT put the skb in its DMA ring.
1591  * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
1592  */
1593 static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1594 				   struct net_device *ndev)
1595 {
1596 	struct bdx_priv *priv = netdev_priv(ndev);
1597 	struct txd_fifo *f = &priv->txd_fifo0;
1598 	int txd_checksum = 7;	/* full checksum */
1599 	int txd_lgsnd = 0;
1600 	int txd_vlan_id = 0;
1601 	int txd_vtag = 0;
1602 	int txd_mss = 0;
1603 
1604 	int nr_frags = skb_shinfo(skb)->nr_frags;
1605 	struct txd_desc *txdd;
1606 	int len;
1607 	unsigned long flags;
1608 
1609 	ENTER;
1610 	local_irq_save(flags);
1611 	if (!spin_trylock(&priv->tx_lock)) {
1612 		local_irq_restore(flags);
1613 		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
1614 		    BDX_DRV_NAME, ndev->name);
1615 		return NETDEV_TX_LOCKED;
1616 	}
1617 
1618 	/* build tx descriptor */
1619 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
1620 	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
1621 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
1622 		txd_checksum = 0;
1623 
1624 	if (skb_shinfo(skb)->gso_size) {
1625 		txd_mss = skb_shinfo(skb)->gso_size;
1626 		txd_lgsnd = 1;
1627 		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
1628 		    txd_mss);
1629 	}
1630 
1631 	if (vlan_tx_tag_present(skb)) {
1632 		/* Cut VLAN ID to 12 bits */
1633 		txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
1634 		txd_vtag = 1;
1635 	}
1636 
1637 	txdd->length = CPU_CHIP_SWAP16(skb->len);
1638 	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
1639 	txdd->txd_val1 =
1640 	    CPU_CHIP_SWAP32(TXD_W1_VAL
1641 			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
1642 			     txd_lgsnd, txd_vlan_id));
1643 	DBG("=== TxD desc =====================\n");
1644 	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
1645 	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);
1646 
1647 	bdx_tx_map_skb(priv, skb, txdd);
1648 
1649 	/* increment TXD write pointer. In case of
1650 	   fifo wrapping, copy the remainder of the descriptor
1651 	   to the beginning */
1652 	f->m.wptr += txd_sizes[nr_frags].bytes;
1653 	len = f->m.wptr - f->m.memsz;
1654 	if (unlikely(len >= 0)) {
1655 		f->m.wptr = len;
1656 		if (len > 0) {
1657 			BDX_ASSERT(len > f->m.memsz);
1658 			memcpy(f->m.va, f->m.va + f->m.memsz, len);
1659 		}
1660 	}
1661 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* finished with valid wptr */
1662 
1663 	priv->tx_level -= txd_sizes[nr_frags].bytes;
1664 	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1665 #ifdef BDX_DELAY_WPTR
1666 	if (priv->tx_level > priv->tx_update_mark) {
1667 		/* Force memory writes to complete before letting h/w
1668 		   know there are new descriptors to fetch.
1669 		   (might be needed on platforms like IA64)
1670 		   wmb(); */
1671 		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1672 	} else {
1673 		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
1674 			priv->tx_noupd = 0;
1675 			WRITE_REG(priv, f->m.reg_WPTR,
1676 				  f->m.wptr & TXF_WPTR_WR_PTR);
1677 		}
1678 	}
1679 #else
1680 	/* Force memory writes to complete before letting h/w
1681 	   know there are new descriptors to fetch.
1682 	   (might be needed on platforms like IA64)
1683 	   wmb(); */
1684 	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1685 
1686 #endif
1687 #ifdef BDX_LLTX
1688 	ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1689 #endif
1690 	ndev->stats.tx_packets++;
1691 	ndev->stats.tx_bytes += skb->len;
1692 
1693 	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
1694 		DBG("%s: %s: TX Q STOP level %d\n",
1695 		    BDX_DRV_NAME, ndev->name, priv->tx_level);
1696 		netif_stop_queue(ndev);
1697 	}
1698 
1699 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1700 	return NETDEV_TX_OK;
1701 }
1702 
1703 /* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
1704  * @priv - bdx adapter
1705  * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
1706  * that those packets were sent
1707  */
1708 static void bdx_tx_cleanup(struct bdx_priv *priv)
1709 {
1710 	struct txf_fifo *f = &priv->txf_fifo0;
1711 	struct txdb *db = &priv->txdb;
1712 	int tx_level = 0;
1713 
1714 	ENTER;
1715 	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
1716 	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* started with valid rptr */
1717 
1718 	while (f->m.wptr != f->m.rptr) {
1719 		f->m.rptr += BDX_TXF_DESC_SZ;
1720 		f->m.rptr &= f->m.size_mask;
1721 
1722 		/* unmap all the fragments */
1723 		/* tx_maps containing dma must come first */
1724 		BDX_ASSERT(db->rptr->len == 0);
1725 		do {
1726 			BDX_ASSERT(db->rptr->addr.dma == 0);
1727 			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1728 				       db->rptr->len, PCI_DMA_TODEVICE);
1729 			bdx_tx_db_inc_rptr(db);
1730 		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;	/* len is negative here, so this adds */
1732 
1733 		/* now should come skb pointer - free it */
1734 		dev_kfree_skb_irq(db->rptr->addr.skb);
1735 		bdx_tx_db_inc_rptr(db);
1736 	}
1737 
1738 	/* let h/w know which TXF descriptors were cleaned */
1739 	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
1740 	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1741 
	/* We reclaimed resources, so in case the Q is stopped by xmit callback,
	 * we resume the transmission and use tx_lock to synchronize with xmit.*/
1744 	spin_lock(&priv->tx_lock);
1745 	priv->tx_level += tx_level;
1746 	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1747 #ifdef BDX_DELAY_WPTR
1748 	if (priv->tx_noupd) {
1749 		priv->tx_noupd = 0;
1750 		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
1751 			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
1752 	}
1753 #endif
1754 
1755 	if (unlikely(netif_queue_stopped(priv->ndev) &&
1756 		     netif_carrier_ok(priv->ndev) &&
1757 		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
1758 		DBG("%s: %s: TX Q WAKE level %d\n",
1759 		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
1760 		netif_wake_queue(priv->ndev);
1761 	}
1762 	spin_unlock(&priv->tx_lock);
1763 }
1764 
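/*
 * Illustrative sketch (not part of the driver; names are hypothetical):
 * the software tx db walked by bdx_tx_cleanup() stores, per packet, one
 * record per DMA-mapped fragment (len > 0), followed by a terminator
 * record that carries the skb pointer and a negative len equal to minus
 * the TxD bytes the packet consumed.  That is why the loop above unmaps
 * while len > 0 and then does "tx_level -= rptr->len" to credit the
 * space back.
 */
#include <stdint.h>

struct toy_db_entry {
	int len;		/* > 0: fragment length; < 0: -(TxD bytes) */
	union {
		uint64_t dma;	/* valid while len > 0 */
		void *skb;	/* valid on the terminator record */
	} addr;
};

/* returns the TxD bytes reclaimed for one packet, advancing *idx */
static int toy_reclaim_one(const struct toy_db_entry *db, int *idx)
{
	while (db[*idx].len > 0)	/* a real driver would unmap here */
		(*idx)++;
	return -db[(*idx)++].len;	/* terminator: len is negative */
}
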
/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
 * It gets called when the OS stops this dev, e.g. upon "ifconfig down" or rmmod
 */
1768 static void bdx_tx_free_skbs(struct bdx_priv *priv)
1769 {
1770 	struct txdb *db = &priv->txdb;
1771 
1772 	ENTER;
1773 	while (db->rptr != db->wptr) {
		if (likely(db->rptr->len > 0))	/* DMA fragment record */
1775 			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1776 				       db->rptr->len, PCI_DMA_TODEVICE);
1777 		else
1778 			dev_kfree_skb(db->rptr->addr.skb);
1779 		bdx_tx_db_inc_rptr(db);
1780 	}
1781 	RET();
1782 }
1783 
1784 /* bdx_tx_free - frees all Tx resources */
1785 static void bdx_tx_free(struct bdx_priv *priv)
1786 {
1787 	ENTER;
1788 	bdx_tx_free_skbs(priv);
1789 	bdx_fifo_free(priv, &priv->txd_fifo0.m);
1790 	bdx_fifo_free(priv, &priv->txf_fifo0.m);
1791 	bdx_tx_db_close(&priv->txdb);
1792 }
1793 
/* bdx_tx_push_desc - push descriptor to TxD fifo
 * @priv - NIC private structure
 * @data - desc's data
 * @size - desc's size
 *
 * Pushes desc to TxD fifo, wrapping it around the fifo end if needed.
 * NOTE: this func does not check for available space; that is the
 *    responsibility of the caller. Neither does it check that data size is
 *    smaller than fifo size.
 */
1804 static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1805 {
1806 	struct txd_fifo *f = &priv->txd_fifo0;
1807 	int i = f->m.memsz - f->m.wptr;
1808 
1809 	if (size == 0)
1810 		return;
1811 
1812 	if (i > size) {
1813 		memcpy(f->m.va + f->m.wptr, data, size);
1814 		f->m.wptr += size;
1815 	} else {
1816 		memcpy(f->m.va + f->m.wptr, data, i);
1817 		f->m.wptr = size - i;
1818 		memcpy(f->m.va, data + i, f->m.wptr);
1819 	}
1820 	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1821 }
1822 
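/*
 * Illustrative sketch (not part of the driver; names are hypothetical):
 * the split copy used by bdx_tx_push_desc() above.  When the descriptor
 * does not fit in the space left before the fifo end, the head is copied
 * up to the end and the remainder restarts at offset 0.  Returns the new
 * write offset.
 */
#include <string.h>

static int toy_push(char *fifo, int memsz, int wptr,
		    const void *data, int size)
{
	int room = memsz - wptr;	/* contiguous bytes up to the end */

	if (room > size) {
		memcpy(fifo + wptr, data, size);
		return wptr + size;
	}
	memcpy(fifo + wptr, data, room);	/* head, up to the fifo end */
	memcpy(fifo, (const char *)data + room, size - room);	/* tail */
	return size - room;
}
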
1823 /* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
1824  * @priv - NIC private structure
1825  * @data - desc's data
1826  * @size - desc's size
1827  *
 * NOTE: this func does check for available space and, if necessary, waits for
 *   the NIC to read existing data before writing new data.
1830  */
1831 static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1832 {
1833 	int timer = 0;
1834 	ENTER;
1835 
1836 	while (size > 0) {
		/* subtract 8 because when the fifo is full, rptr == wptr,
		   which is the same condition as an empty fifo; SW can tell
		   the two apart, but HW cannot, so keep 8 bytes unused */
1840 		int avail = bdx_tx_space(priv) - 8;
1841 		if (avail <= 0) {
1842 			if (timer++ > 300) {	/* prevent endless loop */
1843 				DBG("timeout while writing desc to TxD fifo\n");
1844 				break;
1845 			}
1846 			udelay(50);	/* give hw a chance to clean fifo */
1847 			continue;
1848 		}
1849 		avail = min(avail, size);
		DBG("about to push %d bytes starting %p size %d\n", avail,
		    data, size);
1852 		bdx_tx_push_desc(priv, data, avail);
1853 		size -= avail;
1854 		data += avail;
1855 	}
1856 	RET();
1857 }
1858 
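/*
 * Illustrative sketch (hypothetical helper): why bdx_tx_push_desc_safe()
 * subtracts 8 from the free space.  With both pointers wrapping over the
 * same range, rptr == wptr describes both the empty and the completely
 * full fifo.  The driver therefore stops writing once only 8 bytes would
 * remain, so rptr == wptr can only ever mean "empty".
 */
static int toy_tx_space(int memsz, int rptr, int wptr)
{
	int used = wptr - rptr;

	if (used < 0)
		used += memsz;		/* wptr already wrapped past the end */
	return memsz - used;		/* caller keeps 8 bytes of this unused */
}
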
1859 static const struct net_device_ops bdx_netdev_ops = {
1860 	.ndo_open		= bdx_open,
1861 	.ndo_stop		= bdx_close,
1862 	.ndo_start_xmit		= bdx_tx_transmit,
1863 	.ndo_validate_addr	= eth_validate_addr,
1864 	.ndo_do_ioctl		= bdx_ioctl,
1865 	.ndo_set_rx_mode	= bdx_setmulti,
1866 	.ndo_change_mtu		= bdx_change_mtu,
1867 	.ndo_set_mac_address	= bdx_set_mac,
1868 	.ndo_vlan_rx_add_vid	= bdx_vlan_rx_add_vid,
1869 	.ndo_vlan_rx_kill_vid	= bdx_vlan_rx_kill_vid,
1870 };
1871 
1872 /**
1873  * bdx_probe - Device Initialization Routine
1874  * @pdev: PCI device information struct
1875  * @ent: entry in bdx_pci_tbl
1876  *
1877  * Returns 0 on success, negative on failure
1878  *
1879  * bdx_probe initializes an adapter identified by a pci_dev structure.
1880  * The OS initialization, configuring of the adapter private structure,
1881  * and a hardware reset occur.
1882  *
 * The functions and their order of use are as explained in
 * /usr/src/linux/Documentation/DMA-{API,mapping}.txt
1885  *
1886  */
1887 
/* TBD: netif_msg should be checked and implemented; it is disabled for now */
1889 static int __devinit
1890 bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1891 {
1892 	struct net_device *ndev;
1893 	struct bdx_priv *priv;
1894 	int err, pci_using_dac, port;
1895 	unsigned long pciaddr;
	u32 region_size;
1897 	struct pci_nic *nic;
1898 
1899 	ENTER;
1900 
1901 	nic = vmalloc(sizeof(*nic));
1902 	if (!nic)
1903 		RET(-ENOMEM);
1904 
1905     /************** pci *****************/
1906 	err = pci_enable_device(pdev);
	if (err)			/* may raise a spurious interrupt; */
		goto err_pci;		/* reason unknown, but it is harmless */
1909 
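	/* prefer 64-bit DMA; fall back to 32-bit addressing (and no
	 * NETIF_F_HIGHDMA) when the platform cannot provide it */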
1910 	if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1911 	    !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1912 		pci_using_dac = 1;
1913 	} else {
1914 		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1915 		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1916 			pr_err("No usable DMA configuration, aborting\n");
1917 			goto err_dma;
1918 		}
1919 		pci_using_dac = 0;
1920 	}
1921 
1922 	err = pci_request_regions(pdev, BDX_DRV_NAME);
1923 	if (err)
1924 		goto err_dma;
1925 
1926 	pci_set_master(pdev);
1927 
1928 	pciaddr = pci_resource_start(pdev, 0);
1929 	if (!pciaddr) {
1930 		err = -EIO;
1931 		pr_err("no MMIO resource\n");
1932 		goto err_out_res;
1933 	}
	region_size = pci_resource_len(pdev, 0);
	if (region_size < BDX_REGS_SIZE) {
		err = -EIO;
		pr_err("MMIO resource (%x) too small\n", region_size);
		goto err_out_res;
	}
1940 
	nic->regs = ioremap(pciaddr, region_size);
1942 	if (!nic->regs) {
1943 		err = -EIO;
1944 		pr_err("ioremap failed\n");
1945 		goto err_out_res;
1946 	}
1947 
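	/* irq numbers below 2 cannot belong to a PCI device; treat as fatal */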
1948 	if (pdev->irq < 2) {
1949 		err = -EIO;
1950 		pr_err("invalid irq (%d)\n", pdev->irq);
1951 		goto err_out_iomap;
1952 	}
1953 	pci_set_drvdata(pdev, nic);
1954 
1955 	if (pdev->device == 0x3014)
1956 		nic->port_num = 2;
1957 	else
1958 		nic->port_num = 1;
1959 
1960 	print_hw_id(pdev);
1961 
1962 	bdx_hw_reset_direct(nic->regs);
1963 
1964 	nic->irq_type = IRQ_INTX;
1965 #ifdef BDX_MSI
1966 	if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1967 		err = pci_enable_msi(pdev);
1968 		if (err)
			pr_err("Can't enable MSI, error %d\n", err);
1970 		else
1971 			nic->irq_type = IRQ_MSI;
1972 	} else
1973 		DBG("HW does not support MSI\n");
1974 #endif
1975 
1976     /************** netdev **************/
1977 	for (port = 0; port < nic->port_num; port++) {
1978 		ndev = alloc_etherdev(sizeof(struct bdx_priv));
1979 		if (!ndev) {
1980 			err = -ENOMEM;
1981 			pr_err("alloc_etherdev failed\n");
1982 			goto err_out_iomap;
1983 		}
1984 
1985 		ndev->netdev_ops = &bdx_netdev_ops;
1986 		ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
1987 
1988 		bdx_set_ethtool_ops(ndev);	/* ethtool interface */
1989 
		/* these fields are used for info purposes only,
		 * so they can be the same for all ports of the board */
1992 		ndev->if_port = port;
1993 		ndev->base_addr = pciaddr;
1994 		ndev->mem_start = pciaddr;
		ndev->mem_end = pciaddr + region_size;
1996 		ndev->irq = pdev->irq;
1997 		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1998 		    | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1999 		    NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
2000 		    /*| NETIF_F_FRAGLIST */
2001 		    ;
2002 		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2003 			NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
2004 
2005 		if (pci_using_dac)
2006 			ndev->features |= NETIF_F_HIGHDMA;
2007 
2008 	/************** priv ****************/
2009 		priv = nic->priv[port] = netdev_priv(ndev);
2010 
2011 		priv->pBdxRegs = nic->regs + port * 0x8000;
2012 		priv->port = port;
2013 		priv->pdev = pdev;
2014 		priv->ndev = ndev;
2015 		priv->nic = nic;
2016 		priv->msg_enable = BDX_DEF_MSG_ENABLE;
2017 
2018 		netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2019 
2020 		if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2021 			DBG("HW statistics not supported\n");
2022 			priv->stats_flag = 0;
2023 		} else {
2024 			priv->stats_flag = 1;
2025 		}
2026 
2027 		/* Initialize fifo sizes. */
2028 		priv->txd_size = 2;
2029 		priv->txf_size = 2;
2030 		priv->rxd_size = 2;
2031 		priv->rxf_size = 3;
2032 
		/* Set the initial interrupt coalescing values. */
2034 		priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2035 		priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2036 
		/* ndev->xmit_lock spinlock is not used.
		 * Private priv->tx_lock is used for synchronization
		 * between transmit and TX irq cleanup.  In addition,
		 * the set-multicast-list callback has to take priv->tx_lock.
		 */
2042 #ifdef BDX_LLTX
2043 		ndev->features |= NETIF_F_LLTX;
2044 #endif
2045 		spin_lock_init(&priv->tx_lock);
2046 
2047 		/*bdx_hw_reset(priv); */
		if (bdx_read_mac(priv)) {
			pr_err("load MAC address failed\n");
			err = -EFAULT;
			goto err_out_iomap;
		}
2052 		SET_NETDEV_DEV(ndev, &pdev->dev);
2053 		err = register_netdev(ndev);
2054 		if (err) {
2055 			pr_err("register_netdev failed\n");
2056 			goto err_out_free;
2057 		}
2058 		netif_carrier_off(ndev);
2059 		netif_stop_queue(ndev);
2060 
2061 		print_eth_id(ndev);
2062 	}
2063 	RET(0);
2064 
2065 err_out_free:
2066 	free_netdev(ndev);
2067 err_out_iomap:
2068 	iounmap(nic->regs);
2069 err_out_res:
2070 	pci_release_regions(pdev);
2071 err_dma:
2072 	pci_disable_device(pdev);
2073 err_pci:
2074 	vfree(nic);
2075 
2076 	RET(err);
2077 }
2078 
2079 /****************** Ethtool interface *********************/
2080 /* get strings for statistics counters */
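/* note: these names are exported to userspace via "ethtool -S", so the
 * historical spelling (e.g. "Octects") is kept verbatim */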
static const char bdx_stat_names[][ETH_GSTRING_LEN] = {
2083 	"InUCast",		/* 0x7200 */
2084 	"InMCast",		/* 0x7210 */
2085 	"InBCast",		/* 0x7220 */
2086 	"InPkts",		/* 0x7230 */
2087 	"InErrors",		/* 0x7240 */
2088 	"InDropped",		/* 0x7250 */
2089 	"FrameTooLong",		/* 0x7260 */
2090 	"FrameSequenceErrors",	/* 0x7270 */
2091 	"InVLAN",		/* 0x7280 */
2092 	"InDroppedDFE",		/* 0x7290 */
2093 	"InDroppedIntFull",	/* 0x72A0 */
2094 	"InFrameAlignErrors",	/* 0x72B0 */
2095 
2096 	/* 0x72C0-0x72E0 RSRV */
2097 
2098 	"OutUCast",		/* 0x72F0 */
2099 	"OutMCast",		/* 0x7300 */
2100 	"OutBCast",		/* 0x7310 */
2101 	"OutPkts",		/* 0x7320 */
2102 
2103 	/* 0x7330-0x7360 RSRV */
2104 
2105 	"OutVLAN",		/* 0x7370 */
2106 	"InUCastOctects",	/* 0x7380 */
2107 	"OutUCastOctects",	/* 0x7390 */
2108 
2109 	/* 0x73A0-0x73B0 RSRV */
2110 
2111 	"InBCastOctects",	/* 0x73C0 */
2112 	"OutBCastOctects",	/* 0x73D0 */
2113 	"InOctects",		/* 0x73E0 */
2114 	"OutOctects",		/* 0x73F0 */
2115 };
2116 
2117 /*
2118  * bdx_get_settings - get device-specific settings
2119  * @netdev
2120  * @ecmd
2121  */
2122 static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2123 {
2124 	u32 rdintcm;
2125 	u32 tdintcm;
2126 	struct bdx_priv *priv = netdev_priv(netdev);
2127 
2128 	rdintcm = priv->rdintcm;
2129 	tdintcm = priv->tdintcm;
2130 
2131 	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2132 	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2133 	ethtool_cmd_speed_set(ecmd, SPEED_10000);
2134 	ecmd->duplex = DUPLEX_FULL;
2135 	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;	/* external transceiver */
2137 	ecmd->autoneg = AUTONEG_DISABLE;
2138 
	/* PCK_TH is measured in multiples of FIFO bytes;
	   translate it to packets */
2141 	ecmd->maxtxpkt =
2142 	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2143 	ecmd->maxrxpkt =
2144 	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2145 
2146 	return 0;
2147 }
2148 
2149 /*
2150  * bdx_get_drvinfo - report driver information
2151  * @netdev
2152  * @drvinfo
2153  */
2154 static void
2155 bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2156 {
2157 	struct bdx_priv *priv = netdev_priv(netdev);
2158 
	strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
2164 
2165 	drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2166 	drvinfo->testinfo_len = 0;
2167 	drvinfo->regdump_len = 0;
2168 	drvinfo->eedump_len = 0;
2169 }
2170 
2171 /*
2172  * bdx_get_coalesce - get interrupt coalescing parameters
2173  * @netdev
2174  * @ecoal
2175  */
2176 static int
2177 bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2178 {
2179 	u32 rdintcm;
2180 	u32 tdintcm;
2181 	struct bdx_priv *priv = netdev_priv(netdev);
2182 
2183 	rdintcm = priv->rdintcm;
2184 	tdintcm = priv->tdintcm;
2185 
	/* PCK_TH is measured in multiples of FIFO bytes;
	   translate it to packets */
2188 	ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
2189 	ecoal->rx_max_coalesced_frames =
2190 	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2191 
2192 	ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
2193 	ecoal->tx_max_coalesced_frames =
2194 	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2195 
2196 	/* adaptive parameters ignored */
2197 	return 0;
2198 }
2199 
2200 /*
2201  * bdx_set_coalesce - set interrupt coalescing parameters
2202  * @netdev
2203  * @ecoal
2204  */
2205 static int
2206 bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2207 {
2208 	u32 rdintcm;
2209 	u32 tdintcm;
2210 	struct bdx_priv *priv = netdev_priv(netdev);
2211 	int rx_coal;
2212 	int tx_coal;
2213 	int rx_max_coal;
2214 	int tx_max_coal;
2215 
2216 	/* Check for valid input */
2217 	rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
2218 	tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
2219 	rx_max_coal = ecoal->rx_max_coalesced_frames;
2220 	tx_max_coal = ecoal->tx_max_coalesced_frames;
2221 
2222 	/* Translate from packets to multiples of FIFO bytes */
2223 	rx_max_coal =
2224 	    (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
2225 	     / PCK_TH_MULT);
2226 	tx_max_coal =
2227 	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
2228 	     / PCK_TH_MULT);
2229 
2230 	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
2231 	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
2232 		return -EINVAL;
2233 
2234 	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
2235 			      GET_RXF_TH(priv->rdintcm), rx_max_coal);
2236 	tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
2237 			      tx_max_coal);
2238 
2239 	priv->rdintcm = rdintcm;
2240 	priv->tdintcm = tdintcm;
2241 
2242 	WRITE_REG(priv, regRDINTCM0, rdintcm);
2243 	WRITE_REG(priv, regTDINTCM0, tdintcm);
2244 
2245 	return 0;
2246 }
2247 
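/*
 * Illustrative arithmetic (hypothetical values): the packet-to-threshold
 * conversion in bdx_set_coalesce() above is a ceiling division, so the
 * programmed threshold never represents fewer frames than requested.
 * For example, with a descriptor size of 16 bytes and a PCK_TH_MULT of
 * 128, 10 frames give (10 * 16 + 127) / 128 = 2 threshold units.
 */
static int toy_frames_to_th(int frames, int desc_sz, int mult)
{
	return (frames * desc_sz + mult - 1) / mult;	/* round up */
}
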
2248 /* Convert RX fifo size to number of pending packets */
2249 static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2250 {
2251 	return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2252 }
2253 
2254 /* Convert TX fifo size to number of pending packets */
2255 static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2256 {
2257 	return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2258 }
2259 
2260 /*
2261  * bdx_get_ringparam - report ring sizes
2262  * @netdev
2263  * @ring
2264  */
2265 static void
2266 bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2267 {
2268 	struct bdx_priv *priv = netdev_priv(netdev);
2269 
	/* max_pending - the maximum-sized FIFO we allow */
2271 	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
2272 	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
2273 	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
2274 	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
2275 }
2276 
2277 /*
2278  * bdx_set_ringparam - set ring sizes
2279  * @netdev
2280  * @ring
2281  */
2282 static int
2283 bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2284 {
2285 	struct bdx_priv *priv = netdev_priv(netdev);
2286 	int rx_size = 0;
2287 	int tx_size = 0;
2288 
2289 	for (; rx_size < 4; rx_size++) {
2290 		if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2291 			break;
2292 	}
2293 	if (rx_size == 4)
2294 		rx_size = 3;
2295 
2296 	for (; tx_size < 4; tx_size++) {
2297 		if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2298 			break;
2299 	}
2300 	if (tx_size == 4)
2301 		tx_size = 3;
2302 
	/* Is there anything to do? */
2304 	if ((rx_size == priv->rxf_size) &&
2305 	    (tx_size == priv->txd_size))
2306 		return 0;
2307 
2308 	priv->rxf_size = rx_size;
2309 	if (rx_size > 1)
2310 		priv->rxd_size = rx_size - 1;
2311 	else
2312 		priv->rxd_size = rx_size;
2313 
2314 	priv->txf_size = priv->txd_size = tx_size;
2315 
2316 	if (netif_running(netdev)) {
2317 		bdx_close(netdev);
2318 		bdx_open(netdev);
2319 	}
2320 	return 0;
2321 }
2322 
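/*
 * Illustrative sketch (hypothetical helper): the loops in
 * bdx_set_ringparam() above pick the smallest fifo size code (0..3)
 * whose capacity covers the requested number of pending packets,
 * clamping to 3 when even the largest fifo is too small.  E.g.
 * toy_pick_size(ring->rx_pending, bdx_rx_fifo_size_to_packets).
 */
static int toy_pick_size(int requested, int (*capacity)(int))
{
	int size;

	for (size = 0; size < 4; size++)
		if (capacity(size) >= requested)
			return size;
	return 3;	/* largest size code available */
}
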
2323 /*
2324  * bdx_get_strings - return a set of strings that describe the requested objects
2325  * @netdev
2326  * @data
2327  */
2328 static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2329 {
2330 	switch (stringset) {
2331 	case ETH_SS_STATS:
2332 		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2333 		break;
2334 	}
2335 }
2336 
2337 /*
2338  * bdx_get_sset_count - return number of statistics or tests
2339  * @netdev
2340  */
2341 static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2342 {
2343 	struct bdx_priv *priv = netdev_priv(netdev);
2344 
2345 	switch (stringset) {
2346 	case ETH_SS_STATS:
2347 		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2348 			   != sizeof(struct bdx_stats) / sizeof(u64));
		return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
2350 	}
2351 
2352 	return -EINVAL;
2353 }
2354 
2355 /*
2356  * bdx_get_ethtool_stats - return device's hardware L2 statistics
2357  * @netdev
2358  * @stats
2359  * @data
2360  */
2361 static void bdx_get_ethtool_stats(struct net_device *netdev,
2362 				  struct ethtool_stats *stats, u64 *data)
2363 {
2364 	struct bdx_priv *priv = netdev_priv(netdev);
2365 
2366 	if (priv->stats_flag) {
2367 
2368 		/* Update stats from HW */
2369 		bdx_update_stats(priv);
2370 
2371 		/* Copy data to user buffer */
2372 		memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
2373 	}
2374 }
2375 
2376 /*
2377  * bdx_set_ethtool_ops - ethtool interface implementation
2378  * @netdev
2379  */
2380 static void bdx_set_ethtool_ops(struct net_device *netdev)
2381 {
2382 	static const struct ethtool_ops bdx_ethtool_ops = {
2383 		.get_settings = bdx_get_settings,
2384 		.get_drvinfo = bdx_get_drvinfo,
2385 		.get_link = ethtool_op_get_link,
2386 		.get_coalesce = bdx_get_coalesce,
2387 		.set_coalesce = bdx_set_coalesce,
2388 		.get_ringparam = bdx_get_ringparam,
2389 		.set_ringparam = bdx_set_ringparam,
2390 		.get_strings = bdx_get_strings,
2391 		.get_sset_count = bdx_get_sset_count,
2392 		.get_ethtool_stats = bdx_get_ethtool_stats,
2393 	};
2394 
2395 	SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
2396 }
2397 
2398 /**
2399  * bdx_remove - Device Removal Routine
2400  * @pdev: PCI device information struct
2401  *
2402  * bdx_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
2404  * Hot-Plug event, or because the driver is going to be removed from
2405  * memory.
2406  **/
2407 static void __devexit bdx_remove(struct pci_dev *pdev)
2408 {
2409 	struct pci_nic *nic = pci_get_drvdata(pdev);
2410 	struct net_device *ndev;
2411 	int port;
2412 
2413 	for (port = 0; port < nic->port_num; port++) {
2414 		ndev = nic->priv[port]->ndev;
2415 		unregister_netdev(ndev);
2416 		free_netdev(ndev);
2417 	}
2418 
2419 	/*bdx_hw_reset_direct(nic->regs); */
2420 #ifdef BDX_MSI
2421 	if (nic->irq_type == IRQ_MSI)
2422 		pci_disable_msi(pdev);
2423 #endif
2424 
2425 	iounmap(nic->regs);
2426 	pci_release_regions(pdev);
2427 	pci_disable_device(pdev);
2428 	pci_set_drvdata(pdev, NULL);
2429 	vfree(nic);
2430 
2431 	RET();
2432 }
2433 
2434 static struct pci_driver bdx_pci_driver = {
2435 	.name = BDX_DRV_NAME,
2436 	.id_table = bdx_pci_tbl,
2437 	.probe = bdx_probe,
2438 	.remove = __devexit_p(bdx_remove),
2439 };
2440 
2441 /*
2442  * print_driver_id - print parameters of the driver build
2443  */
2444 static void __init print_driver_id(void)
2445 {
2446 	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
2447 	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
2448 }
2449 
2450 static int __init bdx_module_init(void)
2451 {
2452 	ENTER;
2453 	init_txd_sizes();
2454 	print_driver_id();
2455 	RET(pci_register_driver(&bdx_pci_driver));
2456 }
2457 
2458 module_init(bdx_module_init);
2459 
2460 static void __exit bdx_module_exit(void)
2461 {
2462 	ENTER;
2463 	pci_unregister_driver(&bdx_pci_driver);
2464 	RET();
2465 }
2466 
2467 module_exit(bdx_module_exit);
2468 
2469 MODULE_LICENSE("GPL");
2470 MODULE_AUTHOR(DRIVER_AUTHOR);
2471 MODULE_DESCRIPTION(BDX_DRV_DESC);
2472 MODULE_FIRMWARE("tehuti/bdx.bin");
2473