xref: /linux/drivers/net/fddi/defxx.c (revision 132db93572821ec2fdf81e354cc40f558faf7e4f)
1 /*
2  * File Name:
3  *   defxx.c
4  *
5  * Copyright Information:
6  *   Copyright Digital Equipment Corporation 1996.
7  *
8  *   This software may be used and distributed according to the terms of
9  *   the GNU General Public License, incorporated herein by reference.
10  *
11  * Abstract:
12  *   A Linux device driver supporting the Digital Equipment Corporation
13  *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
14  *   adapters include:
15  *
16  *		DEC FDDIcontroller/TURBOchannel (DEFTA)
17  *		DEC FDDIcontroller/EISA         (DEFEA)
18  *		DEC FDDIcontroller/PCI          (DEFPA)
19  *
20  * The original author:
21  *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
22  *
23  * Maintainers:
24  *   macro	Maciej W. Rozycki <macro@linux-mips.org>
25  *
26  * Credits:
27  *   I'd like to thank Patricia Cross for helping me get started with
28  *   Linux, David Davies for a lot of help upgrading and configuring
29  *   my development system and for answering many OS and driver
30  *   development questions, and Alan Cox for recommendations and
31  *   integration help on getting FDDI support into Linux.  LVS
32  *
33  * Driver Architecture:
34  *   The driver architecture is largely based on previous driver work
35  *   for other operating systems.  The upper edge interface and
36  *   functions were largely taken from existing Linux device drivers
37  *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38  *   driver.
39  *
40  *   Adapter Probe -
41  *		The driver scans for supported EISA adapters by reading the
42  *		SLOT ID register for each EISA slot and making a match
43  *		against the expected value.
44  *
45  *   Bus-Specific Initialization -
 46  *		This driver currently supports the TURBOchannel, EISA and PCI
 47  *		controller families.  While the custom DMA chip and FDDI logic are
 48  *		similar or identical, the bus logic is very different.  After
 49  *		initialization, the only bus-specific difference is in how the
50  *		driver enables and disables interrupts.  Other than that, the
51  *		run-time critical code behaves the same on both families.
 52  *		It's important to note that the EISA and PCI adapters are configured
 53  *		to I/O map, rather than memory map, the adapter registers unless
 *		CONFIG_DEFXX_MMIO is set; TURBOchannel always uses memory mapping.
54  *
55  *   Driver Open/Close -
56  *		In the driver open routine, the driver ISR (interrupt service
57  *		routine) is registered and the adapter is brought to an
58  *		operational state.  In the driver close routine, the opposite
59  *		occurs; the driver ISR is deregistered and the adapter is
60  *		brought to a safe, but closed state.  Users may use consecutive
61  *		commands to bring the adapter up and down as in the following
62  *		example:
63  *					ifconfig fddi0 up
64  *					ifconfig fddi0 down
65  *					ifconfig fddi0 up
66  *
67  *   Driver Shutdown -
68  *		Apparently, there is no shutdown or halt routine support under
69  *		Linux.  This routine would be called during "reboot" or
70  *		"shutdown" to allow the driver to place the adapter in a safe
71  *		state before a warm reboot occurs.  To be really safe, the user
 72  *		should close the adapter before shutdown (e.g. ifconfig fddi0 down)
 73  *		to ensure that the adapter DMA engine is taken off-line.  However,
 74  *		the current driver code anticipates this problem and always issues
 75  *		a soft reset of the adapter at the beginning of driver initialization.
76  *		A future driver enhancement in this area may occur in 2.1.X where
77  *		Alan indicated that a shutdown handler may be implemented.
78  *
79  *   Interrupt Service Routine -
80  *		The driver supports shared interrupts, so the ISR is registered for
81  *		each board with the appropriate flag and the pointer to that board's
82  *		device structure.  This provides the context during interrupt
83  *		processing to support shared interrupts and multiple boards.
84  *
85  *		Interrupt enabling/disabling can occur at many levels.  At the host
86  *		end, you can disable system interrupts, or disable interrupts at the
87  *		PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
88  *		have a bus-logic chip interrupt enable/disable as well as a DMA
89  *		controller interrupt enable/disable.
90  *
91  *		The driver currently enables and disables adapter interrupts at the
92  *		bus-logic chip and assumes that Linux will take care of clearing or
93  *		acknowledging any host-based interrupt chips.
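 *
 *		As a minimal sketch of that registration (the real call, with
 *		exactly these arguments, is made in dfx_open() below; err is a
 *		local illustrative variable):
 *
 *			int err;
 *
 *			err = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *					  dev->name, dev);
 *			if (err)
 *				return err;	// IRQ busy or unavailable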
94  *
95  *   Control Functions -
96  *		Control functions are those used to support functions such as adding
97  *		or deleting multicast addresses, enabling or disabling packet
98  *		reception filters, or other custom/proprietary commands.  Presently,
99  *		the driver supports the "get statistics", "set multicast list", and
100  *		"set mac address" functions defined by Linux.  A list of possible
 101  *		enhancements includes:
102  *
103  *				- Custom ioctl interface for executing port interface commands
104  *				- Custom ioctl interface for adding unicast addresses to
105  *				  adapter CAM (to support bridge functions).
106  *				- Custom ioctl interface for supporting firmware upgrades.
107  *
108  *   Hardware (port interface) Support Routines -
109  *		The driver function names that start with "dfx_hw_" represent
110  *		low-level port interface routines that are called frequently.  They
111  *		include issuing a DMA or port control command to the adapter,
112  *		resetting the adapter, or reading the adapter state.  Since the
113  *		driver initialization and run-time code must make calls into the
114  *		port interface, these routines were written to be as generic and
115  *		usable as possible.
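 *
 *		As an illustration, a port control request goes through this
 *		layer as below (this mirrors the call made later in
 *		dfx_bus_config_check(); shown only as an example):
 *
 *			PI_UINT32 host_data;
 *
 *			if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_SUB_CMD,
 *						 PI_SUB_CMD_K_PDQ_REV_GET,
 *						 0, &host_data) != DFX_K_SUCCESS)
 *				return DFX_K_FAILURE;	// command not accepted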
116  *
117  *   Receive Path -
118  *		The adapter DMA engine supports a 256 entry receive descriptor block
119  *		of which up to 255 entries can be used at any given time.  The
120  *		architecture is a standard producer, consumer, completion model in
121  *		which the driver "produces" receive buffers to the adapter, the
122  *		adapter "consumes" the receive buffers by DMAing incoming packet data,
123  *		and the driver "completes" the receive buffers by servicing the
124  *		incoming packet, then "produces" a new buffer and starts the cycle
125  *		again.  Receive buffers can be fragmented in up to 16 fragments
 126  *		(descriptor entries).  For simplicity, this driver posts
127  *		single-fragment receive buffers of 4608 bytes, then allocates a
128  *		sk_buff, copies the data, then reposts the buffer.  To reduce CPU
129  *		utilization, a better approach would be to pass up the receive
130  *		buffer (no extra copy) then allocate and post a replacement buffer.
131  *		This is a performance enhancement that should be looked into at
132  *		some point.
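 *
 *		A hedged sketch of that copy-and-repost completion step (the
 *		real logic lives in dfx_rcv_queue_process(); pkt_len and
 *		rcv_buf are illustrative names and error handling is omitted):
 *
 *			struct sk_buff *skb;
 *
 *			skb = netdev_alloc_skb(bp->dev, pkt_len + 3);
 *			skb_reserve(skb, 3);		// keep the data longword aligned
 *			skb_put_data(skb, rcv_buf, pkt_len);
 *			skb->protocol = fddi_type_trans(skb, bp->dev);
 *			netif_rx(skb);
 *			// ...then the original 4608-byte buffer is produced
 *			// back to the adapter unchanged.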
133  *
134  *   Transmit Path -
135  *		Like the receive path, the adapter DMA engine supports a 256 entry
136  *		transmit descriptor block of which up to 255 entries can be used at
 137  *		any given time.  Transmit buffers can be fragmented in up to 255
138  *		fragments (descriptor entries).  This driver always posts one
139  *		fragment per transmit packet request.
140  *
141  *		The fragment contains the entire packet from FC to end of data.
142  *		Before posting the buffer to the adapter, the driver sets a three-byte
143  *		packet request header (PRH) which is required by the Motorola MAC chip
144  *		used on the adapters.  The PRH tells the MAC the type of token to
145  *		receive/send, whether or not to generate and append the CRC, whether
146  *		synchronous or asynchronous framing is used, etc.  Since the PRH
147  *		definition is not necessarily consistent across all FDDI chipsets,
148  *		the driver, rather than the common FDDI packet handler routines,
149  *		sets these bytes.
150  *
 151  *		To reduce the number of descriptor fetches needed per transmit request,
152  *		the driver takes advantage of the fact that there are at least three
153  *		bytes available before the skb->data field on the outgoing transmit
154  *		request.  This is guaranteed by having fddi_setup() in net_init.c set
155  *		dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
156  *		header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
157  *		bytes which we'll use to store the PRH.
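 *
 *		A simplified sketch of how those three pad bytes are claimed
 *		for the PRH at transmit time (see dfx_xmt_queue_pkt() for the
 *		real code; the DFX_PRH*_BYTE values are defined in defxx.h):
 *
 *			skb_push(skb, 3);		// step back over the pad bytes
 *			skb->data[0] = DFX_PRH0_BYTE;	// PRH byte 0
 *			skb->data[1] = DFX_PRH1_BYTE;	// PRH byte 1
 *			skb->data[2] = DFX_PRH2_BYTE;	// PRH byte 2
 *			// The single fragment posted to the adapter now spans
 *			// PRH + FC + the remainder of the frame.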
158  *
159  *		There's a subtle advantage to adding these pad bytes to the
 160  *		hard_header_len: it ensures that the data portion of the packet for
161  *		an 802.2 SNAP frame is longword aligned.  Other FDDI driver
162  *		implementations may not need the extra padding and can start copying
163  *		or DMAing directly from the FC byte which starts at skb->data.  Should
164  *		another driver implementation need ADDITIONAL padding, the net_init.c
165  *		module should be updated and dev->hard_header_len should be increased.
166  *		NOTE: To maintain the alignment on the data portion of the packet,
167  *		dev->hard_header_len should always be evenly divisible by 4 and at
168  *		least 24 bytes in size.
169  *
170  * Modification History:
171  *		Date		Name	Description
172  *		16-Aug-96	LVS		Created.
173  *		20-Aug-96	LVS		Updated dfx_probe so that version information
174  *							string is only displayed if 1 or more cards are
175  *							found.  Changed dfx_rcv_queue_process to copy
176  *							3 NULL bytes before FC to ensure that data is
177  *							longword aligned in receive buffer.
178  *		09-Sep-96	LVS		Updated dfx_ctl_set_multicast_list to enable
179  *							LLC group promiscuous mode if multicast list
180  *							is too large.  LLC individual/group promiscuous
181  *							mode is now disabled if IFF_PROMISC flag not set.
182  *							dfx_xmt_queue_pkt no longer checks for NULL skb
183  *							on Alan Cox recommendation.  Added node address
184  *							override support.
185  *		12-Sep-96	LVS		Reset current address to factory address during
186  *							device open.  Updated transmit path to post a
187  *							single fragment which includes PRH->end of data.
188  *		Mar 2000	AC		Did various cleanups for 2.3.x
189  *		Jun 2000	jgarzik		PCI and resource alloc cleanups
190  *		Jul 2000	tjeerd		Much cleanup and some bug fixes
191  *		Sep 2000	tjeerd		Fix leak on unload, cosmetic code cleanup
192  *		Feb 2001			Skb allocation fixes
193  *		Feb 2001	davej		PCI enable cleanups.
194  *		04 Aug 2003	macro		Converted to the DMA API.
195  *		14 Aug 2004	macro		Fix device names reported.
196  *		14 Jun 2005	macro		Use irqreturn_t.
197  *		23 Oct 2006	macro		Big-endian host support.
198  *		14 Dec 2006	macro		TURBOchannel support.
199  *		01 Jul 2014	macro		Fixes for DMA on 64-bit hosts.
200  */
201 
202 /* Include files */
203 #include <linux/bitops.h>
204 #include <linux/compiler.h>
205 #include <linux/delay.h>
206 #include <linux/dma-mapping.h>
207 #include <linux/eisa.h>
208 #include <linux/errno.h>
209 #include <linux/fddidevice.h>
210 #include <linux/interrupt.h>
211 #include <linux/ioport.h>
212 #include <linux/kernel.h>
213 #include <linux/module.h>
214 #include <linux/netdevice.h>
215 #include <linux/pci.h>
216 #include <linux/skbuff.h>
217 #include <linux/slab.h>
218 #include <linux/string.h>
219 #include <linux/tc.h>
220 
221 #include <asm/byteorder.h>
222 #include <asm/io.h>
223 
224 #include "defxx.h"
225 
226 /* Version information string should be updated prior to each new release!  */
227 #define DRV_NAME "defxx"
228 #define DRV_VERSION "v1.11"
229 #define DRV_RELDATE "2014/07/01"
230 
231 static const char version[] =
232 	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 	"  Lawrence V. Stefani and others\n";
234 
235 #define DYNAMIC_BUFFERS 1
236 
237 #define SKBUFF_RX_COPYBREAK 200
238 /*
239  * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
240  * alignment for compatibility with old EISA boards.
241  */
242 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
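
/*
 * A hedged sketch of how a copybreak threshold such as the one above is
 * typically applied on receive (illustrative only; the driver's actual
 * decision is made in dfx_rcv_queue_process(), and pkt_len is a
 * hypothetical name here):
 *
 *	if (pkt_len < SKBUFF_RX_COPYBREAK) {
 *		// Small frame: copy it into a freshly allocated skb and
 *		// leave the originally posted receive buffer in place.
 *	} else {
 *		// Large frame: pass the posted buffer up the stack and
 *		// post a newly allocated replacement buffer instead.
 *	}
 */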
243 
244 #ifdef CONFIG_EISA
245 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
246 #else
247 #define DFX_BUS_EISA(dev) 0
248 #endif
249 
250 #ifdef CONFIG_TC
251 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
252 #else
253 #define DFX_BUS_TC(dev) 0
254 #endif
255 
256 #ifdef CONFIG_DEFXX_MMIO
257 #define DFX_MMIO 1
258 #else
259 #define DFX_MMIO 0
260 #endif
261 
262 /* Define module-wide (static) routines */
263 
264 static void		dfx_bus_init(struct net_device *dev);
265 static void		dfx_bus_uninit(struct net_device *dev);
266 static void		dfx_bus_config_check(DFX_board_t *bp);
267 
268 static int		dfx_driver_init(struct net_device *dev,
269 					const char *print_name,
270 					resource_size_t bar_start);
271 static int		dfx_adap_init(DFX_board_t *bp, int get_buffers);
272 
273 static int		dfx_open(struct net_device *dev);
274 static int		dfx_close(struct net_device *dev);
275 
276 static void		dfx_int_pr_halt_id(DFX_board_t *bp);
277 static void		dfx_int_type_0_process(DFX_board_t *bp);
278 static void		dfx_int_common(struct net_device *dev);
279 static irqreturn_t	dfx_interrupt(int irq, void *dev_id);
280 
281 static struct		net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
282 static void		dfx_ctl_set_multicast_list(struct net_device *dev);
283 static int		dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
284 static int		dfx_ctl_update_cam(DFX_board_t *bp);
285 static int		dfx_ctl_update_filters(DFX_board_t *bp);
286 
287 static int		dfx_hw_dma_cmd_req(DFX_board_t *bp);
288 static int		dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32	command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
289 static void		dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
290 static int		dfx_hw_adap_state_rd(DFX_board_t *bp);
291 static int		dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
292 
293 static int		dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294 static void		dfx_rcv_queue_process(DFX_board_t *bp);
295 #ifdef DYNAMIC_BUFFERS
296 static void		dfx_rcv_flush(DFX_board_t *bp);
297 #else
298 static inline void	dfx_rcv_flush(DFX_board_t *bp) {}
299 #endif
300 
301 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
302 				     struct net_device *dev);
303 static int		dfx_xmt_done(DFX_board_t *bp);
304 static void		dfx_xmt_flush(DFX_board_t *bp);
305 
306 /* Define module-wide (static) variables */
307 
308 static struct pci_driver dfx_pci_driver;
309 static struct eisa_driver dfx_eisa_driver;
310 static struct tc_driver dfx_tc_driver;
311 
312 
313 /*
314  * =======================
315  * = dfx_port_write_long =
316  * = dfx_port_read_long  =
317  * =======================
318  *
319  * Overview:
320  *   Routines for reading and writing values from/to adapter
321  *
322  * Returns:
323  *   None
324  *
325  * Arguments:
326  *   bp		- pointer to board information
327  *   offset	- register offset from base I/O address
328  *   data	- for dfx_port_write_long, this is a value to write;
329  *		  for dfx_port_read_long, this is a pointer to store
330  *		  the read value
331  *
332  * Functional Description:
333  *   These routines perform the correct operation to read or write
334  *   the adapter register.
335  *
336  *   EISA port block base addresses are based on the slot number in which the
337  *   controller is installed.  For example, if the EISA controller is installed
338  *   in slot 4, the port block base address is 0x4000.  If the controller is
339  *   installed in slot 2, the port block base address is 0x2000, and so on.
340  *   This port block can be used to access PDQ, ESIC, and DEFEA on-board
341  *   registers using the register offsets defined in DEFXX.H.
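 *
 *   In other words (illustrative arithmetic only; the names used here are
 *   hypothetical):
 *
 *		port_block_base = eisa_slot_number << 12;	// slot 4 -> 0x4000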
342  *
343  *   PCI port block base addresses are assigned by the PCI BIOS or system
344  *   firmware.  There is one 128 byte port block which can be accessed.  It
345  *   allows for I/O mapping of both PDQ and PFI registers using the register
346  *   offsets defined in DEFXX.H.
347  *
348  * Return Codes:
349  *   None
350  *
351  * Assumptions:
352  *   bp->base is a valid base I/O address for this adapter.
353  *   offset is a valid register offset for this adapter.
354  *
355  * Side Effects:
356  *   Rather than produce macros for these functions, these routines
357  *   are defined using "inline" to ensure that the compiler will
358  *   generate inline code and not waste a procedure call and return.
359  *   This provides all the benefits of macros, but with the
360  *   advantage of strict data type checking.
361  */
362 
363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
364 {
365 	writel(data, bp->base.mem + offset);
366 	mb();
367 }
368 
369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
370 {
371 	outl(data, bp->base.port + offset);
372 }
373 
374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
375 {
376 	struct device __maybe_unused *bdev = bp->bus_dev;
377 	int dfx_bus_tc = DFX_BUS_TC(bdev);
378 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
379 
380 	if (dfx_use_mmio)
381 		dfx_writel(bp, offset, data);
382 	else
383 		dfx_outl(bp, offset, data);
384 }
385 
386 
387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
388 {
389 	mb();
390 	*data = readl(bp->base.mem + offset);
391 }
392 
393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
394 {
395 	*data = inl(bp->base.port + offset);
396 }
397 
398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
399 {
400 	struct device __maybe_unused *bdev = bp->bus_dev;
401 	int dfx_bus_tc = DFX_BUS_TC(bdev);
402 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
403 
404 	if (dfx_use_mmio)
405 		dfx_readl(bp, offset, data);
406 	else
407 		dfx_inl(bp, offset, data);
408 }
409 
410 
411 /*
412  * ================
413  * = dfx_get_bars =
414  * ================
415  *
416  * Overview:
417  *   Retrieves the address ranges used to access control and status
418  *   registers.
419  *
420  * Returns:
421  *   None
422  *
423  * Arguments:
424  *   bdev	- pointer to device information
425  *   bar_start	- pointer to store the start addresses
426  *   bar_len	- pointer to store the lengths of the areas
427  *
428  * Assumptions:
429  *   I am sure there are some.
430  *
431  * Side Effects:
432  *   None
433  */
434 static void dfx_get_bars(struct device *bdev,
435 			 resource_size_t *bar_start, resource_size_t *bar_len)
436 {
437 	int dfx_bus_pci = dev_is_pci(bdev);
438 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
439 	int dfx_bus_tc = DFX_BUS_TC(bdev);
440 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
441 
442 	if (dfx_bus_pci) {
443 		int num = dfx_use_mmio ? 0 : 1;
444 
445 		bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
446 		bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
447 		bar_start[2] = bar_start[1] = 0;
448 		bar_len[2] = bar_len[1] = 0;
449 	}
450 	if (dfx_bus_eisa) {
451 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
452 		resource_size_t bar_lo;
453 		resource_size_t bar_hi;
454 
455 		if (dfx_use_mmio) {
456 			bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
457 			bar_lo <<= 8;
458 			bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
459 			bar_lo <<= 8;
460 			bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
461 			bar_lo <<= 8;
462 			bar_start[0] = bar_lo;
463 			bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
464 			bar_hi <<= 8;
465 			bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
466 			bar_hi <<= 8;
467 			bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
468 			bar_hi <<= 8;
469 			bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
470 				     1;
471 		} else {
472 			bar_start[0] = base_addr;
473 			bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
474 		}
475 		bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
476 		bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
477 		bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
478 		bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
479 	}
480 	if (dfx_bus_tc) {
481 		bar_start[0] = to_tc_dev(bdev)->resource.start +
482 			       PI_TC_K_CSR_OFFSET;
483 		bar_len[0] = PI_TC_K_CSR_LEN;
484 		bar_start[2] = bar_start[1] = 0;
485 		bar_len[2] = bar_len[1] = 0;
486 	}
487 }
488 
489 static const struct net_device_ops dfx_netdev_ops = {
490 	.ndo_open		= dfx_open,
491 	.ndo_stop		= dfx_close,
492 	.ndo_start_xmit		= dfx_xmt_queue_pkt,
493 	.ndo_get_stats		= dfx_ctl_get_stats,
494 	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
495 	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
496 };
497 
498 /*
499  * ================
500  * = dfx_register =
501  * ================
502  *
503  * Overview:
504  *   Initializes a supported FDDI controller
505  *
506  * Returns:
507  *   Condition code
508  *
509  * Arguments:
510  *   bdev - pointer to device information
511  *
512  * Functional Description:
 513  *   Allocates the net_device, reserves and maps the bus resources,
 *   initializes the driver and adapter, and registers the FDDI interface
 *   with the kernel.
 *
514  * Return Codes:
515  *   0		 - This device (fddi0, fddi1, etc) configured successfully
 516  *   < 0         - Failed to allocate or enable the device, reserve its
 *                     resources, or initialize the driver (negative errno)
517  *
518  * Assumptions:
519  *   It compiles so it should work :-( (PCI cards do :-)
520  *
521  * Side Effects:
522  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
523  *   initialized and the board resources are read and stored in
524  *   the device structure.
525  */
526 static int dfx_register(struct device *bdev)
527 {
528 	static int version_disp;
529 	int dfx_bus_pci = dev_is_pci(bdev);
530 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
531 	int dfx_bus_tc = DFX_BUS_TC(bdev);
532 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
533 	const char *print_name = dev_name(bdev);
534 	struct net_device *dev;
535 	DFX_board_t	  *bp;			/* board pointer */
 536 	resource_size_t bar_start[3] = {0};	/* resource start addresses */
 537 	resource_size_t bar_len[3] = {0};	/* resource lengths */
538 	int alloc_size;				/* total buffer size used */
539 	struct resource *region;
540 	int err = 0;
541 
542 	if (!version_disp) {	/* display version info if adapter is found */
543 		version_disp = 1;	/* set display flag to TRUE so that */
544 		printk(version);	/* we only display this string ONCE */
545 	}
546 
547 	dev = alloc_fddidev(sizeof(*bp));
548 	if (!dev) {
549 		printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
550 		       print_name);
551 		return -ENOMEM;
552 	}
553 
554 	/* Enable PCI device. */
555 	if (dfx_bus_pci) {
556 		err = pci_enable_device(to_pci_dev(bdev));
557 		if (err) {
558 			pr_err("%s: Cannot enable PCI device, aborting\n",
559 			       print_name);
560 			goto err_out;
561 		}
562 	}
563 
564 	SET_NETDEV_DEV(dev, bdev);
565 
566 	bp = netdev_priv(dev);
567 	bp->bus_dev = bdev;
568 	dev_set_drvdata(bdev, dev);
569 
570 	dfx_get_bars(bdev, bar_start, bar_len);
571 	if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
572 		pr_err("%s: Cannot use MMIO, no address set, aborting\n",
573 		       print_name);
574 		pr_err("%s: Run ECU and set adapter's MMIO location\n",
575 		       print_name);
576 		pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
577 		       "\n", print_name);
578 		err = -ENXIO;
579 		goto err_out;
580 	}
581 
582 	if (dfx_use_mmio)
583 		region = request_mem_region(bar_start[0], bar_len[0],
584 					    print_name);
585 	else
586 		region = request_region(bar_start[0], bar_len[0], print_name);
587 	if (!region) {
588 		pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
 589 		       "aborting\n", print_name, dfx_use_mmio ? "MMIO" : "I/O",
590 		       (long)bar_len[0], (long)bar_start[0]);
591 		err = -EBUSY;
592 		goto err_out_disable;
593 	}
594 	if (bar_start[1] != 0) {
595 		region = request_region(bar_start[1], bar_len[1], print_name);
596 		if (!region) {
597 			pr_err("%s: Cannot reserve I/O resource "
598 			       "0x%lx @ 0x%lx, aborting\n", print_name,
599 			       (long)bar_len[1], (long)bar_start[1]);
600 			err = -EBUSY;
601 			goto err_out_csr_region;
602 		}
603 	}
604 	if (bar_start[2] != 0) {
605 		region = request_region(bar_start[2], bar_len[2], print_name);
606 		if (!region) {
607 			pr_err("%s: Cannot reserve I/O resource "
608 			       "0x%lx @ 0x%lx, aborting\n", print_name,
609 			       (long)bar_len[2], (long)bar_start[2]);
610 			err = -EBUSY;
611 			goto err_out_bh_region;
612 		}
613 	}
614 
615 	/* Set up I/O base address. */
616 	if (dfx_use_mmio) {
617 		bp->base.mem = ioremap(bar_start[0], bar_len[0]);
618 		if (!bp->base.mem) {
619 			printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
620 			err = -ENOMEM;
621 			goto err_out_esic_region;
622 		}
623 	} else {
624 		bp->base.port = bar_start[0];
625 		dev->base_addr = bar_start[0];
626 	}
627 
628 	/* Initialize new device structure */
629 	dev->netdev_ops			= &dfx_netdev_ops;
630 
631 	if (dfx_bus_pci)
632 		pci_set_master(to_pci_dev(bdev));
633 
634 	if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
635 		err = -ENODEV;
636 		goto err_out_unmap;
637 	}
638 
639 	err = register_netdev(dev);
640 	if (err)
641 		goto err_out_kfree;
642 
643 	printk("%s: registered as %s\n", print_name, dev->name);
644 	return 0;
645 
646 err_out_kfree:
647 	alloc_size = sizeof(PI_DESCR_BLOCK) +
648 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
649 #ifndef DYNAMIC_BUFFERS
650 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
651 #endif
652 		     sizeof(PI_CONSUMER_BLOCK) +
653 		     (PI_ALIGN_K_DESC_BLK - 1);
654 	if (bp->kmalloced)
655 		dma_free_coherent(bdev, alloc_size,
656 				  bp->kmalloced, bp->kmalloced_dma);
657 
658 err_out_unmap:
659 	if (dfx_use_mmio)
660 		iounmap(bp->base.mem);
661 
662 err_out_esic_region:
663 	if (bar_start[2] != 0)
664 		release_region(bar_start[2], bar_len[2]);
665 
666 err_out_bh_region:
667 	if (bar_start[1] != 0)
668 		release_region(bar_start[1], bar_len[1]);
669 
670 err_out_csr_region:
671 	if (dfx_use_mmio)
672 		release_mem_region(bar_start[0], bar_len[0]);
673 	else
674 		release_region(bar_start[0], bar_len[0]);
675 
676 err_out_disable:
677 	if (dfx_bus_pci)
678 		pci_disable_device(to_pci_dev(bdev));
679 
680 err_out:
681 	free_netdev(dev);
682 	return err;
683 }
684 
685 
686 /*
687  * ================
688  * = dfx_bus_init =
689  * ================
690  *
691  * Overview:
692  *   Initializes the bus-specific controller logic.
693  *
694  * Returns:
695  *   None
696  *
697  * Arguments:
698  *   dev - pointer to device information
699  *
700  * Functional Description:
701  *   Determine and save adapter IRQ in device table,
702  *   then perform bus-specific logic initialization.
703  *
704  * Return Codes:
705  *   None
706  *
707  * Assumptions:
708  *   bp->base has already been set with the proper
709  *	 base I/O address for this device.
710  *
711  * Side Effects:
712  *   Interrupts are enabled at the adapter bus-specific logic.
713  *   Note:  Interrupts at the DMA engine (PDQ chip) are not
714  *   enabled yet.
715  */
716 
717 static void dfx_bus_init(struct net_device *dev)
718 {
719 	DFX_board_t *bp = netdev_priv(dev);
720 	struct device *bdev = bp->bus_dev;
721 	int dfx_bus_pci = dev_is_pci(bdev);
722 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
723 	int dfx_bus_tc = DFX_BUS_TC(bdev);
724 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
725 	u8 val;
726 
727 	DBG_printk("In dfx_bus_init...\n");
728 
729 	/* Initialize a pointer back to the net_device struct */
730 	bp->dev = dev;
731 
732 	/* Initialize adapter based on bus type */
733 
734 	if (dfx_bus_tc)
735 		dev->irq = to_tc_dev(bdev)->interrupt;
736 	if (dfx_bus_eisa) {
737 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
738 
739 		/* Disable the board before fiddling with the decoders.  */
740 		outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
741 
742 		/* Get the interrupt level from the ESIC chip.  */
743 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
744 		val &= PI_CONFIG_STAT_0_M_IRQ;
745 		val >>= PI_CONFIG_STAT_0_V_IRQ;
746 
747 		switch (val) {
748 		case PI_CONFIG_STAT_0_IRQ_K_9:
749 			dev->irq = 9;
750 			break;
751 
752 		case PI_CONFIG_STAT_0_IRQ_K_10:
753 			dev->irq = 10;
754 			break;
755 
756 		case PI_CONFIG_STAT_0_IRQ_K_11:
757 			dev->irq = 11;
758 			break;
759 
760 		case PI_CONFIG_STAT_0_IRQ_K_15:
761 			dev->irq = 15;
762 			break;
763 		}
764 
765 		/*
766 		 * Enable memory decoding (MEMCS1) and/or port decoding
767 		 * (IOCS1/IOCS0) as appropriate in Function Control
768 		 * Register.  MEMCS1 or IOCS0 is used for PDQ registers,
769 		 * taking 16 32-bit words, while IOCS1 is used for the
770 		 * Burst Holdoff register, taking a single 32-bit word
771 		 * only.  We use the slot-specific I/O range as per the
 772 		 * ESIC spec, that is, we set bits 15:12 in the mask registers
773 		 * to mask them out.
774 		 */
775 
776 		/* Set the decode range of the board.  */
777 		val = 0;
778 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
779 		val = PI_DEFEA_K_CSR_IO;
780 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
781 
782 		val = PI_IO_CMP_M_SLOT;
783 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
784 		val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
785 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
786 
787 		val = 0;
788 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
789 		val = PI_DEFEA_K_BURST_HOLDOFF;
790 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
791 
792 		val = PI_IO_CMP_M_SLOT;
793 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
794 		val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
795 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
796 
797 		/* Enable the decoders.  */
798 		val = PI_FUNCTION_CNTRL_M_IOCS1;
799 		if (dfx_use_mmio)
800 			val |= PI_FUNCTION_CNTRL_M_MEMCS1;
801 		else
802 			val |= PI_FUNCTION_CNTRL_M_IOCS0;
803 		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
804 
805 		/*
806 		 * Enable access to the rest of the module
807 		 * (including PDQ and packet memory).
808 		 */
809 		val = PI_SLOT_CNTRL_M_ENB;
810 		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
811 
812 		/*
813 		 * Map PDQ registers into memory or port space.  This is
814 		 * done with a bit in the Burst Holdoff register.
815 		 */
816 		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
817 		if (dfx_use_mmio)
818 			val |= PI_BURST_HOLDOFF_M_MEM_MAP;
819 		else
820 			val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
821 		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
822 
823 		/* Enable interrupts at EISA bus interface chip (ESIC) */
824 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
825 		val |= PI_CONFIG_STAT_0_M_INT_ENB;
826 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
827 	}
828 	if (dfx_bus_pci) {
829 		struct pci_dev *pdev = to_pci_dev(bdev);
830 
831 		/* Get the interrupt level from the PCI Configuration Table */
832 
833 		dev->irq = pdev->irq;
834 
835 		/* Check Latency Timer and set if less than minimal */
836 
837 		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
838 		if (val < PFI_K_LAT_TIMER_MIN) {
839 			val = PFI_K_LAT_TIMER_DEF;
840 			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
841 		}
842 
843 		/* Enable interrupts at PCI bus interface chip (PFI) */
844 		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
845 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
846 	}
847 }
848 
849 /*
850  * ==================
851  * = dfx_bus_uninit =
852  * ==================
853  *
854  * Overview:
855  *   Uninitializes the bus-specific controller logic.
856  *
857  * Returns:
858  *   None
859  *
860  * Arguments:
861  *   dev - pointer to device information
862  *
863  * Functional Description:
864  *   Perform bus-specific logic uninitialization.
865  *
866  * Return Codes:
867  *   None
868  *
869  * Assumptions:
870  *   bp->base has already been set with the proper
871  *	 base I/O address for this device.
872  *
873  * Side Effects:
874  *   Interrupts are disabled at the adapter bus-specific logic.
875  */
876 
877 static void dfx_bus_uninit(struct net_device *dev)
878 {
879 	DFX_board_t *bp = netdev_priv(dev);
880 	struct device *bdev = bp->bus_dev;
881 	int dfx_bus_pci = dev_is_pci(bdev);
882 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
883 	u8 val;
884 
885 	DBG_printk("In dfx_bus_uninit...\n");
886 
887 	/* Uninitialize adapter based on bus type */
888 
889 	if (dfx_bus_eisa) {
890 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
891 
892 		/* Disable interrupts at EISA bus interface chip (ESIC) */
893 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
894 		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
895 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
896 
897 		/* Disable the board.  */
898 		outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
899 
900 		/* Disable memory and port decoders.  */
901 		outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
902 	}
903 	if (dfx_bus_pci) {
904 		/* Disable interrupts at PCI bus interface chip (PFI) */
905 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
906 	}
907 }
908 
909 
910 /*
911  * ========================
912  * = dfx_bus_config_check =
913  * ========================
914  *
915  * Overview:
 916  *   Checks the configuration (burst size, full-duplex, etc.).  If any parameters
917  *   are illegal, then this routine will set new defaults.
918  *
919  * Returns:
920  *   None
921  *
922  * Arguments:
923  *   bp - pointer to board information
924  *
925  * Functional Description:
926  *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
927  *   PDQ, and all FDDI PCI controllers, all values are legal.
928  *
929  * Return Codes:
930  *   None
931  *
932  * Assumptions:
933  *   dfx_adap_init has NOT been called yet so burst size and other items have
934  *   not been set.
935  *
936  * Side Effects:
937  *   None
938  */
939 
940 static void dfx_bus_config_check(DFX_board_t *bp)
941 {
942 	struct device __maybe_unused *bdev = bp->bus_dev;
943 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
944 	int	status;				/* return code from adapter port control call */
945 	u32	host_data;			/* LW data returned from port control call */
946 
947 	DBG_printk("In dfx_bus_config_check...\n");
948 
949 	/* Configuration check only valid for EISA adapter */
950 
951 	if (dfx_bus_eisa) {
952 		/*
953 		 * First check if revision 2 EISA controller.  Rev. 1 cards used
954 		 * PDQ revision B, so no workaround needed in this case.  Rev. 3
955 		 * cards used PDQ revision E, so no workaround needed in this
956 		 * case, either.  Only Rev. 2 cards used either Rev. D or E
957 		 * chips, so we must verify the chip revision on Rev. 2 cards.
958 		 */
959 		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
960 			/*
961 			 * Revision 2 FDDI EISA controller found,
962 			 * so let's check PDQ revision of adapter.
963 			 */
964 			status = dfx_hw_port_ctrl_req(bp,
965 											PI_PCTRL_M_SUB_CMD,
966 											PI_SUB_CMD_K_PDQ_REV_GET,
967 											0,
968 											&host_data);
969 			if ((status != DFX_K_SUCCESS) || (host_data == 2))
970 				{
971 				/*
972 				 * Either we couldn't determine the PDQ revision, or
973 				 * we determined that it is at revision D.  In either case,
974 				 * we need to implement the workaround.
975 				 */
976 
977 				/* Ensure that the burst size is set to 8 longwords or less */
978 
979 				switch (bp->burst_size)
980 					{
981 					case PI_PDATA_B_DMA_BURST_SIZE_32:
982 					case PI_PDATA_B_DMA_BURST_SIZE_16:
983 						bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
984 						break;
985 
986 					default:
987 						break;
988 					}
989 
990 				/* Ensure that full-duplex mode is not enabled */
991 
992 				bp->full_duplex_enb = PI_SNMP_K_FALSE;
993 				}
994 			}
995 		}
996 	}
997 
998 
999 /*
1000  * ===================
1001  * = dfx_driver_init =
1002  * ===================
1003  *
1004  * Overview:
1005  *   Initializes remaining adapter board structure information
1006  *   and makes sure adapter is in a safe state prior to dfx_open().
1007  *
1008  * Returns:
1009  *   Condition code
1010  *
1011  * Arguments:
1012  *   dev - pointer to device information
1013  *   print_name - printable device name
1014  *
1015  * Functional Description:
1016  *   This function allocates additional resources such as the host memory
1017  *   blocks needed by the adapter (eg. descriptor and consumer blocks).
1018  *	 Remaining bus initialization steps are also completed.  The adapter
1019  *   is also reset so that it is in the DMA_UNAVAILABLE state.  The OS
1020  *   must call dfx_open() to open the adapter and bring it on-line.
1021  *
1022  * Return Codes:
1023  *   DFX_K_SUCCESS	- initialization succeeded
1024  *   DFX_K_FAILURE	- initialization failed - could not allocate memory
1025  *						or read adapter MAC address
1026  *
1027  * Assumptions:
 1028  *   Memory allocated from the dma_alloc_coherent() call is physically
1029  *   contiguous, locked memory.
1030  *
1031  * Side Effects:
1032  *   Adapter is reset and should be in DMA_UNAVAILABLE state before
1033  *   returning from this routine.
1034  */
1035 
1036 static int dfx_driver_init(struct net_device *dev, const char *print_name,
1037 			   resource_size_t bar_start)
1038 {
1039 	DFX_board_t *bp = netdev_priv(dev);
1040 	struct device *bdev = bp->bus_dev;
1041 	int dfx_bus_pci = dev_is_pci(bdev);
1042 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1043 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1044 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
1045 	int alloc_size;			/* total buffer size needed */
1046 	char *top_v, *curr_v;		/* virtual addrs into memory block */
1047 	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
1048 	u32 data;			/* host data register value */
1049 	__le32 le32;
1050 	char *board_name = NULL;
1051 
1052 	DBG_printk("In dfx_driver_init...\n");
1053 
1054 	/* Initialize bus-specific hardware registers */
1055 
1056 	dfx_bus_init(dev);
1057 
1058 	/*
1059 	 * Initialize default values for configurable parameters
1060 	 *
1061 	 * Note: All of these parameters are ones that a user may
1062 	 *       want to customize.  It'd be nice to break these
1063 	 *		 out into Space.c or someplace else that's more
1064 	 *		 accessible/understandable than this file.
1065 	 */
1066 
1067 	bp->full_duplex_enb		= PI_SNMP_K_FALSE;
1068 	bp->req_ttrt			= 8 * 12500;		/* 8ms in 80 nanosec units */
1069 	bp->burst_size			= PI_PDATA_B_DMA_BURST_SIZE_DEF;
1070 	bp->rcv_bufs_to_post	= RCV_BUFS_DEF;
1071 
1072 	/*
1073 	 * Ensure that HW configuration is OK
1074 	 *
1075 	 * Note: Depending on the hardware revision, we may need to modify
1076 	 *       some of the configurable parameters to workaround hardware
1077 	 *       limitations.  We'll perform this configuration check AFTER
1078 	 *       setting the parameters to their default values.
1079 	 */
1080 
1081 	dfx_bus_config_check(bp);
1082 
1083 	/* Disable PDQ interrupts first */
1084 
1085 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1086 
1087 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1088 
1089 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1090 
1091 	/*  Read the factory MAC address from the adapter then save it */
1092 
1093 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1094 				 &data) != DFX_K_SUCCESS) {
1095 		printk("%s: Could not read adapter factory MAC address!\n",
1096 		       print_name);
1097 		return DFX_K_FAILURE;
1098 	}
1099 	le32 = cpu_to_le32(data);
1100 	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1101 
1102 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1103 				 &data) != DFX_K_SUCCESS) {
1104 		printk("%s: Could not read adapter factory MAC address!\n",
1105 		       print_name);
1106 		return DFX_K_FAILURE;
1107 	}
1108 	le32 = cpu_to_le32(data);
1109 	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1110 
1111 	/*
1112 	 * Set current address to factory address
1113 	 *
1114 	 * Note: Node address override support is handled through
1115 	 *       dfx_ctl_set_mac_address.
1116 	 */
1117 
1118 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1119 	if (dfx_bus_tc)
1120 		board_name = "DEFTA";
1121 	if (dfx_bus_eisa)
1122 		board_name = "DEFEA";
1123 	if (dfx_bus_pci)
1124 		board_name = "DEFPA";
1125 	pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1126 		print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
1127 		(long long)bar_start, dev->irq, dev->dev_addr);
1128 
1129 	/*
1130 	 * Get memory for descriptor block, consumer block, and other buffers
1131 	 * that need to be DMA read or written to by the adapter.
1132 	 */
1133 
1134 	alloc_size = sizeof(PI_DESCR_BLOCK) +
1135 					PI_CMD_REQ_K_SIZE_MAX +
1136 					PI_CMD_RSP_K_SIZE_MAX +
1137 #ifndef DYNAMIC_BUFFERS
1138 					(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1139 #endif
1140 					sizeof(PI_CONSUMER_BLOCK) +
1141 					(PI_ALIGN_K_DESC_BLK - 1);
1142 	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1143 						   &bp->kmalloced_dma,
1144 						   GFP_ATOMIC);
1145 	if (top_v == NULL)
1146 		return DFX_K_FAILURE;
1147 
1148 	top_p = bp->kmalloced_dma;	/* get physical address of buffer */
1149 
1150 	/*
1151 	 *  To guarantee the 8K alignment required for the descriptor block, 8K - 1
1152 	 *  plus the amount of memory needed was allocated.  The physical address
1153 	 *	is now 8K aligned.  By carving up the memory in a specific order,
1154 	 *  we'll guarantee the alignment requirements for all other structures.
1155 	 *
1156 	 *  Note: If the assumptions change regarding the non-paged, non-cached,
1157 	 *		  physically contiguous nature of the memory block or the address
1158 	 *		  alignments, then we'll need to implement a different algorithm
1159 	 *		  for allocating the needed memory.
1160 	 */
1161 
1162 	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1163 	curr_v = top_v + (curr_p - top_p);
1164 
1165 	/* Reserve space for descriptor block */
1166 
1167 	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1168 	bp->descr_block_phys = curr_p;
1169 	curr_v += sizeof(PI_DESCR_BLOCK);
1170 	curr_p += sizeof(PI_DESCR_BLOCK);
1171 
1172 	/* Reserve space for command request buffer */
1173 
1174 	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1175 	bp->cmd_req_phys = curr_p;
1176 	curr_v += PI_CMD_REQ_K_SIZE_MAX;
1177 	curr_p += PI_CMD_REQ_K_SIZE_MAX;
1178 
1179 	/* Reserve space for command response buffer */
1180 
1181 	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1182 	bp->cmd_rsp_phys = curr_p;
1183 	curr_v += PI_CMD_RSP_K_SIZE_MAX;
1184 	curr_p += PI_CMD_RSP_K_SIZE_MAX;
1185 
1186 	/* Reserve space for the LLC host receive queue buffers */
1187 
1188 	bp->rcv_block_virt = curr_v;
1189 	bp->rcv_block_phys = curr_p;
1190 
1191 #ifndef DYNAMIC_BUFFERS
1192 	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1193 	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1194 #endif
1195 
1196 	/* Reserve space for the consumer block */
1197 
1198 	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1199 	bp->cons_block_phys = curr_p;
1200 
1201 	/* Display virtual and physical addresses if debug driver */
1202 
1203 	DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1204 		   print_name, bp->descr_block_virt, &bp->descr_block_phys);
1205 	DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1206 		   print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1207 	DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1208 		   print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1209 	DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1210 		   print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1211 	DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1212 		   print_name, bp->cons_block_virt, &bp->cons_block_phys);
1213 
1214 	return DFX_K_SUCCESS;
1215 }
1216 
1217 
1218 /*
1219  * =================
1220  * = dfx_adap_init =
1221  * =================
1222  *
1223  * Overview:
1224  *   Brings the adapter to the link avail/link unavailable state.
1225  *
1226  * Returns:
1227  *   Condition code
1228  *
1229  * Arguments:
1230  *   bp - pointer to board information
1231  *   get_buffers - non-zero if buffers to be allocated
1232  *
1233  * Functional Description:
1234  *   Issues the low-level firmware/hardware calls necessary to bring
1235  *   the adapter up, or to properly reset and restore adapter during
1236  *   run-time.
1237  *
1238  * Return Codes:
1239  *   DFX_K_SUCCESS - Adapter brought up successfully
1240  *   DFX_K_FAILURE - Adapter initialization failed
1241  *
1242  * Assumptions:
1243  *   bp->reset_type should be set to a valid reset type value before
1244  *   calling this routine.
1245  *
1246  * Side Effects:
1247  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1248  *   upon a successful return of this routine.
1249  */
1250 
1251 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1252 	{
1253 	DBG_printk("In dfx_adap_init...\n");
1254 
1255 	/* Disable PDQ interrupts first */
1256 
1257 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1258 
1259 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1260 
1261 	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1262 		{
1263 		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1264 		return DFX_K_FAILURE;
1265 		}
1266 
1267 	/*
1268 	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1269 	 * so we'll acknowledge all Type 0 interrupts now before continuing.
1270 	 */
1271 
1272 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1273 
1274 	/*
1275 	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1276 	 *
1277 	 * Note: We only need to clear host copies of these registers.  The PDQ reset
1278 	 *       takes care of the on-board register values.
1279 	 */
1280 
1281 	bp->cmd_req_reg.lword	= 0;
1282 	bp->cmd_rsp_reg.lword	= 0;
1283 	bp->rcv_xmt_reg.lword	= 0;
1284 
1285 	/* Clear consumer block before going to DMA_AVAILABLE state */
1286 
1287 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1288 
1289 	/* Initialize the DMA Burst Size */
1290 
1291 	if (dfx_hw_port_ctrl_req(bp,
1292 							PI_PCTRL_M_SUB_CMD,
1293 							PI_SUB_CMD_K_BURST_SIZE_SET,
1294 							bp->burst_size,
1295 							NULL) != DFX_K_SUCCESS)
1296 		{
1297 		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1298 		return DFX_K_FAILURE;
1299 		}
1300 
1301 	/*
1302 	 * Set base address of Consumer Block
1303 	 *
1304 	 * Assumption: 32-bit physical address of consumer block is 64 byte
1305 	 *			   aligned.  That is, bits 0-5 of the address must be zero.
1306 	 */
1307 
1308 	if (dfx_hw_port_ctrl_req(bp,
1309 							PI_PCTRL_M_CONS_BLOCK,
1310 							bp->cons_block_phys,
1311 							0,
1312 							NULL) != DFX_K_SUCCESS)
1313 		{
1314 		printk("%s: Could not set consumer block address!\n", bp->dev->name);
1315 		return DFX_K_FAILURE;
1316 		}
1317 
1318 	/*
1319 	 * Set the base address of Descriptor Block and bring adapter
1320 	 * to DMA_AVAILABLE state.
1321 	 *
1322 	 * Note: We also set the literal and data swapping requirements
1323 	 *       in this command.
1324 	 *
1325 	 * Assumption: 32-bit physical address of descriptor block
1326 	 *       is 8Kbyte aligned.
1327 	 */
1328 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1329 				 (u32)(bp->descr_block_phys |
1330 				       PI_PDATA_A_INIT_M_BSWAP_INIT),
1331 				 0, NULL) != DFX_K_SUCCESS) {
1332 		printk("%s: Could not set descriptor block address!\n",
1333 		       bp->dev->name);
1334 		return DFX_K_FAILURE;
1335 	}
1336 
1337 	/* Set transmit flush timeout value */
1338 
1339 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1340 	bp->cmd_req_virt->char_set.item[0].item_code	= PI_ITEM_K_FLUSH_TIME;
1341 	bp->cmd_req_virt->char_set.item[0].value		= 3;	/* 3 seconds */
1342 	bp->cmd_req_virt->char_set.item[0].item_index	= 0;
1343 	bp->cmd_req_virt->char_set.item[1].item_code	= PI_ITEM_K_EOL;
1344 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1345 		{
1346 		printk("%s: DMA command request failed!\n", bp->dev->name);
1347 		return DFX_K_FAILURE;
1348 		}
1349 
1350 	/* Set the initial values for eFDXEnable and MACTReq MIB objects */
1351 
1352 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1353 	bp->cmd_req_virt->snmp_set.item[0].item_code	= PI_ITEM_K_FDX_ENB_DIS;
1354 	bp->cmd_req_virt->snmp_set.item[0].value		= bp->full_duplex_enb;
1355 	bp->cmd_req_virt->snmp_set.item[0].item_index	= 0;
1356 	bp->cmd_req_virt->snmp_set.item[1].item_code	= PI_ITEM_K_MAC_T_REQ;
1357 	bp->cmd_req_virt->snmp_set.item[1].value		= bp->req_ttrt;
1358 	bp->cmd_req_virt->snmp_set.item[1].item_index	= 0;
1359 	bp->cmd_req_virt->snmp_set.item[2].item_code	= PI_ITEM_K_EOL;
1360 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1361 		{
1362 		printk("%s: DMA command request failed!\n", bp->dev->name);
1363 		return DFX_K_FAILURE;
1364 		}
1365 
1366 	/* Initialize adapter CAM */
1367 
1368 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1369 		{
1370 		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1371 		return DFX_K_FAILURE;
1372 		}
1373 
1374 	/* Initialize adapter filters */
1375 
1376 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1377 		{
1378 		printk("%s: Adapter filters update failed!\n", bp->dev->name);
1379 		return DFX_K_FAILURE;
1380 		}
1381 
1382 	/*
1383 	 * Remove any existing dynamic buffers (i.e. if the adapter is being
1384 	 * reinitialized)
1385 	 */
1386 
1387 	if (get_buffers)
1388 		dfx_rcv_flush(bp);
1389 
1390 	/* Initialize receive descriptor block and produce buffers */
1391 
1392 	if (dfx_rcv_init(bp, get_buffers))
1393 	        {
1394 		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1395 		if (get_buffers)
1396 			dfx_rcv_flush(bp);
1397 		return DFX_K_FAILURE;
1398 		}
1399 
1400 	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1401 
1402 	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1403 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1404 		{
1405 		printk("%s: Start command failed\n", bp->dev->name);
1406 		if (get_buffers)
1407 			dfx_rcv_flush(bp);
1408 		return DFX_K_FAILURE;
1409 		}
1410 
1411 	/* Initialization succeeded, reenable PDQ interrupts */
1412 
1413 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1414 	return DFX_K_SUCCESS;
1415 	}
1416 
1417 
1418 /*
1419  * ============
1420  * = dfx_open =
1421  * ============
1422  *
1423  * Overview:
1424  *   Opens the adapter
1425  *
1426  * Returns:
1427  *   Condition code
1428  *
1429  * Arguments:
1430  *   dev - pointer to device information
1431  *
1432  * Functional Description:
1433  *   This function brings the adapter to an operational state.
1434  *
1435  * Return Codes:
1436  *   0		 - Adapter was successfully opened
 1437  *   -EAGAIN - Adapter initialization failed (a failed IRQ registration
 *             returns the request_irq() error code instead)
1438  *
1439  * Assumptions:
1440  *   This routine should only be called for a device that was
1441  *   initialized successfully.
1442  *
1443  * Side Effects:
1444  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1445  *   if the open is successful.
1446  */
1447 
1448 static int dfx_open(struct net_device *dev)
1449 {
1450 	DFX_board_t *bp = netdev_priv(dev);
1451 	int ret;
1452 
1453 	DBG_printk("In dfx_open...\n");
1454 
1455 	/* Register IRQ - support shared interrupts by passing device ptr */
1456 
1457 	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1458 			  dev);
1459 	if (ret) {
1460 		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1461 		return ret;
1462 	}
1463 
1464 	/*
1465 	 * Set current address to factory MAC address
1466 	 *
1467 	 * Note: We've already done this step in dfx_driver_init.
1468 	 *       However, it's possible that a user has set a node
1469 	 *		 address override, then closed and reopened the
1470 	 *		 adapter.  Unless we reset the device address field
1471 	 *		 now, we'll continue to use the existing modified
1472 	 *		 address.
1473 	 */
1474 
1475 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1476 
1477 	/* Clear local unicast/multicast address tables and counts */
1478 
1479 	memset(bp->uc_table, 0, sizeof(bp->uc_table));
1480 	memset(bp->mc_table, 0, sizeof(bp->mc_table));
1481 	bp->uc_count = 0;
1482 	bp->mc_count = 0;
1483 
1484 	/* Disable promiscuous filter settings */
1485 
1486 	bp->ind_group_prom	= PI_FSTATE_K_BLOCK;
1487 	bp->group_prom		= PI_FSTATE_K_BLOCK;
1488 
1489 	spin_lock_init(&bp->lock);
1490 
1491 	/* Reset and initialize adapter */
1492 
1493 	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
1494 	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1495 	{
1496 		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1497 		free_irq(dev->irq, dev);
1498 		return -EAGAIN;
1499 	}
1500 
1501 	/* Set device structure info */
1502 	netif_start_queue(dev);
1503 	return 0;
1504 }
1505 
1506 
1507 /*
1508  * =============
1509  * = dfx_close =
1510  * =============
1511  *
1512  * Overview:
1513  *   Closes the device/module.
1514  *
1515  * Returns:
1516  *   Condition code
1517  *
1518  * Arguments:
1519  *   dev - pointer to device information
1520  *
1521  * Functional Description:
1522  *   This routine closes the adapter and brings it to a safe state.
1523  *   The interrupt service routine is deregistered with the OS.
1524  *   The adapter can be opened again with another call to dfx_open().
1525  *
1526  * Return Codes:
1527  *   Always return 0.
1528  *
1529  * Assumptions:
1530  *   No further requests for this adapter are made after this routine is
1531  *   called.  dfx_open() can be called to reset and reinitialize the
1532  *   adapter.
1533  *
1534  * Side Effects:
1535  *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
1536  *   routine.
1537  */
1538 
1539 static int dfx_close(struct net_device *dev)
1540 {
1541 	DFX_board_t *bp = netdev_priv(dev);
1542 
1543 	DBG_printk("In dfx_close...\n");
1544 
1545 	/* Disable PDQ interrupts first */
1546 
1547 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1548 
1549 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1550 
1551 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1552 
1553 	/*
1554 	 * Flush any pending transmit buffers
1555 	 *
1556 	 * Note: It's important that we flush the transmit buffers
1557 	 *		 BEFORE we clear our copy of the Type 2 register.
1558 	 *		 Otherwise, we'll have no idea how many buffers
1559 	 *		 we need to free.
1560 	 */
1561 
1562 	dfx_xmt_flush(bp);
1563 
1564 	/*
1565 	 * Clear Type 1 and Type 2 registers after adapter reset
1566 	 *
1567 	 * Note: Even though we're closing the adapter, it's
1568 	 *       possible that an interrupt will occur after
1569 	 *		 dfx_close is called.  Without some assurance to
1570 	 *		 the contrary we want to make sure that we don't
1571 	 *		 process receive and transmit LLC frames and update
1572 	 *		 the Type 2 register with bad information.
1573 	 */
1574 
1575 	bp->cmd_req_reg.lword	= 0;
1576 	bp->cmd_rsp_reg.lword	= 0;
1577 	bp->rcv_xmt_reg.lword	= 0;
1578 
1579 	/* Clear consumer block for the same reason given above */
1580 
1581 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1582 
 1583 	/* Release all dynamically allocated skbs in the receive ring. */
1584 
1585 	dfx_rcv_flush(bp);
1586 
1587 	/* Clear device structure flags */
1588 
1589 	netif_stop_queue(dev);
1590 
1591 	/* Deregister (free) IRQ */
1592 
1593 	free_irq(dev->irq, dev);
1594 
1595 	return 0;
1596 }
1597 
1598 
1599 /*
1600  * ======================
1601  * = dfx_int_pr_halt_id =
1602  * ======================
1603  *
1604  * Overview:
 1605  *   Displays halt IDs in string form.
1606  *
1607  * Returns:
1608  *   None
1609  *
1610  * Arguments:
1611  *   bp - pointer to board information
1612  *
1613  * Functional Description:
1614  *   Determine current halt id and display appropriate string.
1615  *
1616  * Return Codes:
1617  *   None
1618  *
1619  * Assumptions:
1620  *   None
1621  *
1622  * Side Effects:
1623  *   None
1624  */
1625 
1626 static void dfx_int_pr_halt_id(DFX_board_t	*bp)
1627 	{
1628 	PI_UINT32	port_status;			/* PDQ port status register value */
1629 	PI_UINT32	halt_id;				/* PDQ port status halt ID */
1630 
1631 	/* Read the latest port status */
1632 
1633 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1634 
1635 	/* Display halt state transition information */
1636 
1637 	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1638 	switch (halt_id)
1639 		{
1640 		case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1641 			printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1642 			break;
1643 
1644 		case PI_HALT_ID_K_PARITY_ERROR:
1645 			printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1646 			break;
1647 
1648 		case PI_HALT_ID_K_HOST_DIR_HALT:
1649 			printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1650 			break;
1651 
1652 		case PI_HALT_ID_K_SW_FAULT:
1653 			printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1654 			break;
1655 
1656 		case PI_HALT_ID_K_HW_FAULT:
1657 			printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1658 			break;
1659 
1660 		case PI_HALT_ID_K_PC_TRACE:
1661 			printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1662 			break;
1663 
1664 		case PI_HALT_ID_K_DMA_ERROR:
1665 			printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1666 			break;
1667 
1668 		case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1669 			printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1670 			break;
1671 
1672 		case PI_HALT_ID_K_BUS_EXCEPTION:
1673 			printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1674 			break;
1675 
1676 		default:
1677 			printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1678 			break;
1679 		}
1680 	}
1681 
1682 
1683 /*
1684  * ==========================
1685  * = dfx_int_type_0_process =
1686  * ==========================
1687  *
1688  * Overview:
1689  *   Processes Type 0 interrupts.
1690  *
1691  * Returns:
1692  *   None
1693  *
1694  * Arguments:
1695  *   bp - pointer to board information
1696  *
1697  * Functional Description:
1698  *   Processes all enabled Type 0 interrupts.  If the reason for the interrupt
1699  *   is a serious fault on the adapter, then an error message is displayed
1700  *   and the adapter is reset.
1701  *
1702  *   One tricky potential timing window is a rapid succession of "link avail"/
1703  *   "link unavail" state change interrupts.  The Type 0 interrupt must be
1704  *   acknowledged before the state is read from the Port Status register,
1705  *   because a state change could occur after the state is read but before
1706  *   the interrupt is acknowledged.  If that happened, the change would be
1707  *   lost: the driver would keep using the old state and would never learn of
1708  *   the new one, since the acknowledgement discards the pending state change
1709  *   interrupt.
1710  *
1711  *          INCORRECT                                      CORRECT
1712  *      read type 0 int reasons                   read type 0 int reasons
1713  *      read adapter state                        ack type 0 interrupts
1714  *      ack type 0 interrupts                     read adapter state
1715  *      ... process interrupt ...                 ... process interrupt ...
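 *
 *   In terms of the register accesses in the routine below, the CORRECT
 *   column corresponds to (a condensed sketch of the code that follows,
 *   not additional logic):
 *
 *      dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
 *      dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
 *      state = dfx_hw_adap_state_rd(bp);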
1716  *
1717  * Return Codes:
1718  *   None
1719  *
1720  * Assumptions:
1721  *   None
1722  *
1723  * Side Effects:
1724  *   An adapter reset may occur if the adapter has any Type 0 error interrupts
1725  *   or if the port status indicates that the adapter is halted.  The driver
1726  *   is responsible for reinitializing the adapter with the current CAM
1727  *   contents and adapter filter settings.
1728  */
1729 
1730 static void dfx_int_type_0_process(DFX_board_t	*bp)
1731 
1732 	{
1733 	PI_UINT32	type_0_status;		/* Host Interrupt Type 0 register */
1734 	PI_UINT32	state;				/* current adap state (from port status) */
1735 
1736 	/*
1737 	 * Read host interrupt Type 0 register to determine which Type 0
1738 	 * interrupts are pending.  Immediately write it back out to clear
1739 	 * those interrupts.
1740 	 */
1741 
1742 	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1743 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1744 
1745 	/* Check for Type 0 error interrupts */
1746 
1747 	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1748 							PI_TYPE_0_STAT_M_PM_PAR_ERR |
1749 							PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1750 		{
1751 		/* Check for Non-Existent Memory error */
1752 
1753 		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1754 			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1755 
1756 		/* Check for Packet Memory Parity error */
1757 
1758 		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1759 			printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1760 
1761 		/* Check for Host Bus Parity error */
1762 
1763 		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1764 			printk("%s: Host Bus Parity Error\n", bp->dev->name);
1765 
1766 		/* Reset adapter and bring it back on-line */
1767 
1768 		bp->link_available = PI_K_FALSE;	/* link is no longer available */
1769 		bp->reset_type = 0;					/* rerun on-board diagnostics */
1770 		printk("%s: Resetting adapter...\n", bp->dev->name);
1771 		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1772 			{
1773 			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1774 			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1775 			return;
1776 			}
1777 		printk("%s: Adapter reset successful!\n", bp->dev->name);
1778 		return;
1779 		}
1780 
1781 	/* Check for transmit flush interrupt */
1782 
1783 	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1784 		{
1785 		/* Flush any pending xmt's and acknowledge the flush interrupt */
1786 
1787 		bp->link_available = PI_K_FALSE;		/* link is no longer available */
1788 		dfx_xmt_flush(bp);						/* flush any outstanding packets */
1789 		(void) dfx_hw_port_ctrl_req(bp,
1790 									PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1791 									0,
1792 									0,
1793 									NULL);
1794 		}
1795 
1796 	/* Check for adapter state change */
1797 
1798 	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1799 		{
1800 		/* Get latest adapter state */
1801 
1802 		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
1803 		if (state == PI_STATE_K_HALTED)
1804 			{
1805 			/*
1806 			 * Adapter has transitioned to HALTED state, try to reset
1807 			 * adapter to bring it back on-line.  If reset fails,
1808 			 * leave the adapter in the broken state.
1809 			 */
1810 
1811 			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1812 			dfx_int_pr_halt_id(bp);			/* display halt id as string */
1813 
1814 			/* Reset adapter and bring it back on-line */
1815 
1816 			bp->link_available = PI_K_FALSE;	/* link is no longer available */
1817 			bp->reset_type = 0;					/* rerun on-board diagnostics */
1818 			printk("%s: Resetting adapter...\n", bp->dev->name);
1819 			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1820 				{
1821 				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1822 				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1823 				return;
1824 				}
1825 			printk("%s: Adapter reset successful!\n", bp->dev->name);
1826 			}
1827 		else if (state == PI_STATE_K_LINK_AVAIL)
1828 			{
1829 			bp->link_available = PI_K_TRUE;		/* set link available flag */
1830 			}
1831 		}
1832 	}
1833 
1834 
1835 /*
1836  * ==================
1837  * = dfx_int_common =
1838  * ==================
1839  *
1840  * Overview:
1841  *   Interrupt service routine (ISR)
1842  *
1843  * Returns:
1844  *   None
1845  *
1846  * Arguments:
1847  *   bp - pointer to board information
1848  *
1849  * Functional Description:
1850  *   This is the ISR which processes incoming adapter interrupts.
1851  *
1852  * Return Codes:
1853  *   None
1854  *
1855  * Assumptions:
1856  *   This routine assumes PDQ interrupts have not been disabled.
1857  *   When interrupts are disabled at the PDQ, the Port Status register
1858  *   is automatically cleared.  This routine uses the Port Status
1859  *   register value to determine whether a Type 0 interrupt occurred,
1860  *   so it's important that adapter interrupts are not normally
1861  *   enabled/disabled at the PDQ.
1862  *
1863  *   It's vital that this routine is NOT reentered for the
1864  *   same board and that the OS is not in another section of
1865  *   code (eg. dfx_xmt_queue_pkt) for the same board on a
1866  *   different thread.
1867  *
1868  * Side Effects:
1869  *   Pending interrupts are serviced.  Depending on the type of
1870  *   interrupt, acknowledging and clearing the interrupt at the
1871  *   PDQ involves writing a register to clear the interrupt bit
1872  *   or updating completion indices.
1873  */
1874 
1875 static void dfx_int_common(struct net_device *dev)
1876 {
1877 	DFX_board_t *bp = netdev_priv(dev);
1878 	PI_UINT32	port_status;		/* Port Status register */
1879 
1880 	/* Process xmt interrupts - frequent case, so always call this routine */
1881 
1882 	if(dfx_xmt_done(bp))				/* free consumed xmt packets */
1883 		netif_wake_queue(dev);
1884 
1885 	/* Process rcv interrupts - frequent case, so always call this routine */
1886 
1887 	dfx_rcv_queue_process(bp);		/* service received LLC frames */
1888 
1889 	/*
1890 	 * Transmit and receive producer and completion indices are updated on the
1891 	 * adapter by writing to the Type 2 Producer register.  Since the frequent
1892 	 * case is that we'll be processing either LLC transmit or receive buffers,
1893 	 * we'll optimize I/O writes by doing a single register write here.
1894 	 */
1895 
1896 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1897 
1898 	/* Read PDQ Port Status register to find out which interrupts need processing */
1899 
1900 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1901 
1902 	/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1903 
1904 	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1905 		dfx_int_type_0_process(bp);	/* process Type 0 interrupts */
1906 	}
1907 
1908 
1909 /*
1910  * =================
1911  * = dfx_interrupt =
1912  * =================
1913  *
1914  * Overview:
1915  *   Interrupt processing routine
1916  *
1917  * Returns:
1918  *   Whether a valid interrupt was seen.
1919  *
1920  * Arguments:
1921  *   irq	- interrupt vector
1922  *   dev_id	- pointer to device information
1923  *
1924  * Functional Description:
1925  *   This routine calls the interrupt processing routine for this adapter.  It
1926  *   disables and reenables adapter interrupts, as appropriate.  We can support
1927  *   shared interrupts since the incoming dev_id pointer provides our device
1928  *   structure context.
1929  *
1930  * Return Codes:
1931  *   IRQ_HANDLED - an IRQ was handled.
1932  *   IRQ_NONE    - no IRQ was handled.
1933  *
1934  * Assumptions:
1935  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1936  *   on Intel-based systems) is done by the operating system outside this
1937  *   routine.
1938  *
1939  *	 System interrupts are enabled through this call.
1940  *
1941  * Side Effects:
1942  *   Interrupts are disabled, then reenabled at the adapter.
1943  */
1944 
1945 static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1946 {
1947 	struct net_device *dev = dev_id;
1948 	DFX_board_t *bp = netdev_priv(dev);
1949 	struct device *bdev = bp->bus_dev;
1950 	int dfx_bus_pci = dev_is_pci(bdev);
1951 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1952 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1953 
1954 	/* Service adapter interrupts */
1955 
1956 	if (dfx_bus_pci) {
1957 		u32 status;
1958 
1959 		dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1960 		if (!(status & PFI_STATUS_M_PDQ_INT))
1961 			return IRQ_NONE;
1962 
1963 		spin_lock(&bp->lock);
1964 
1965 		/* Disable PDQ-PFI interrupts at PFI */
1966 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1967 				    PFI_MODE_M_DMA_ENB);
1968 
1969 		/* Call interrupt service routine for this adapter */
1970 		dfx_int_common(dev);
1971 
1972 		/* Clear PDQ interrupt status bit and reenable interrupts */
1973 		dfx_port_write_long(bp, PFI_K_REG_STATUS,
1974 				    PFI_STATUS_M_PDQ_INT);
1975 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1976 				    (PFI_MODE_M_PDQ_INT_ENB |
1977 				     PFI_MODE_M_DMA_ENB));
1978 
1979 		spin_unlock(&bp->lock);
1980 	}
1981 	if (dfx_bus_eisa) {
1982 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1983 		u8 status;
1984 
1985 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1986 		if (!(status & PI_CONFIG_STAT_0_M_PEND))
1987 			return IRQ_NONE;
1988 
1989 		spin_lock(&bp->lock);
1990 
1991 		/* Disable interrupts at the ESIC */
1992 		status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1993 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1994 
1995 		/* Call interrupt service routine for this adapter */
1996 		dfx_int_common(dev);
1997 
1998 		/* Reenable interrupts at the ESIC */
1999 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2000 		status |= PI_CONFIG_STAT_0_M_INT_ENB;
2001 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2002 
2003 		spin_unlock(&bp->lock);
2004 	}
2005 	if (dfx_bus_tc) {
2006 		u32 status;
2007 
2008 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
2009 		if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
2010 				PI_PSTATUS_M_XMT_DATA_PENDING |
2011 				PI_PSTATUS_M_SMT_HOST_PENDING |
2012 				PI_PSTATUS_M_UNSOL_PENDING |
2013 				PI_PSTATUS_M_CMD_RSP_PENDING |
2014 				PI_PSTATUS_M_CMD_REQ_PENDING |
2015 				PI_PSTATUS_M_TYPE_0_PENDING)))
2016 			return IRQ_NONE;
2017 
2018 		spin_lock(&bp->lock);
2019 
2020 		/* Call interrupt service routine for this adapter */
2021 		dfx_int_common(dev);
2022 
2023 		spin_unlock(&bp->lock);
2024 	}
2025 
2026 	return IRQ_HANDLED;
2027 }
2028 
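/*
 * Illustrative sketch (not part of the driver): roughly how the handler
 * above would be registered, presumably from dfx_open(), so that dev_id
 * carries the net_device pointer recovered in dfx_interrupt().  The
 * function name and the IRQF_SHARED flag are assumptions made for this
 * example only; the flags actually used by the driver may differ.
 */
static int dfx_register_isr_sketch(struct net_device *dev)
{
	/* dev doubles as dev_id, matching free_irq(dev->irq, dev) in dfx_close() */
	return request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
			   dev->name, dev);
}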
2029 
2030 /*
2031  * =====================
2032  * = dfx_ctl_get_stats =
2033  * =====================
2034  *
2035  * Overview:
2036  *   Get statistics for FDDI adapter
2037  *
2038  * Returns:
2039  *   Pointer to FDDI statistics structure
2040  *
2041  * Arguments:
2042  *   dev - pointer to device information
2043  *
2044  * Functional Description:
2045  *   Gets current MIB objects from adapter, then
2046  *   returns FDDI statistics structure as defined
2047  *   in if_fddi.h.
2048  *
2049  *   Note: Since the FDDI statistics structure is
2050  *   still new and the device structure doesn't
2051  *   have an FDDI-specific get statistics handler,
2052  *   we'll return the FDDI statistics structure as
2053  *   a pointer to an Ethernet statistics structure.
2054  *   That way, at least the first part of the statistics
2055  *   structure can be decoded properly, and it allows
2056  *   "smart" applications to perform a second cast to
2057  *   decode the FDDI-specific statistics.
2058  *
2059  *   We'll have to pay attention to this routine as the
2060  *   device structure becomes more mature and LAN media
2061  *   independent.
2062  *
2063  * Return Codes:
2064  *   None
2065  *
2066  * Assumptions:
2067  *   None
2068  *
2069  * Side Effects:
2070  *   None
2071  */
2072 
2073 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2074 	{
2075 	DFX_board_t *bp = netdev_priv(dev);
2076 
2077 	/* Fill the bp->stats structure with driver-maintained counters */
2078 
2079 	bp->stats.gen.rx_packets = bp->rcv_total_frames;
2080 	bp->stats.gen.tx_packets = bp->xmt_total_frames;
2081 	bp->stats.gen.rx_bytes   = bp->rcv_total_bytes;
2082 	bp->stats.gen.tx_bytes   = bp->xmt_total_bytes;
2083 	bp->stats.gen.rx_errors  = bp->rcv_crc_errors +
2084 				   bp->rcv_frame_status_errors +
2085 				   bp->rcv_length_errors;
2086 	bp->stats.gen.tx_errors  = bp->xmt_length_errors;
2087 	bp->stats.gen.rx_dropped = bp->rcv_discards;
2088 	bp->stats.gen.tx_dropped = bp->xmt_discards;
2089 	bp->stats.gen.multicast  = bp->rcv_multicast_frames;
2090 	bp->stats.gen.collisions = 0;		/* always zero (0) for FDDI */
2091 
2092 	/* Get FDDI SMT MIB objects */
2093 
2094 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2095 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2096 		return (struct net_device_stats *)&bp->stats;
2097 
2098 	/* Fill the bp->stats structure with the SMT MIB object values */
2099 
2100 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2101 	bp->stats.smt_op_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2102 	bp->stats.smt_hi_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2103 	bp->stats.smt_lo_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2104 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2105 	bp->stats.smt_mib_version_id				= bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2106 	bp->stats.smt_mac_cts						= bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2107 	bp->stats.smt_non_master_cts				= bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2108 	bp->stats.smt_master_cts					= bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2109 	bp->stats.smt_available_paths				= bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2110 	bp->stats.smt_config_capabilities			= bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2111 	bp->stats.smt_config_policy					= bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2112 	bp->stats.smt_connection_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2113 	bp->stats.smt_t_notify						= bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2114 	bp->stats.smt_stat_rpt_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2115 	bp->stats.smt_trace_max_expiration			= bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2116 	bp->stats.smt_bypass_present				= bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2117 	bp->stats.smt_ecm_state						= bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2118 	bp->stats.smt_cf_state						= bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2119 	bp->stats.smt_remote_disconnect_flag		= bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2120 	bp->stats.smt_station_status				= bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2121 	bp->stats.smt_peer_wrap_flag				= bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2122 	bp->stats.smt_time_stamp					= bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2123 	bp->stats.smt_transition_time_stamp			= bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2124 	bp->stats.mac_frame_status_functions		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2125 	bp->stats.mac_t_max_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2126 	bp->stats.mac_tvx_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2127 	bp->stats.mac_available_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2128 	bp->stats.mac_current_path					= bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2129 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2130 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2131 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2132 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2133 	bp->stats.mac_dup_address_test				= bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2134 	bp->stats.mac_requested_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2135 	bp->stats.mac_downstream_port_type			= bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2136 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2137 	bp->stats.mac_t_req							= bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2138 	bp->stats.mac_t_neg							= bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2139 	bp->stats.mac_t_max							= bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2140 	bp->stats.mac_tvx_value						= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2141 	bp->stats.mac_frame_error_threshold			= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2142 	bp->stats.mac_frame_error_ratio				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2143 	bp->stats.mac_rmt_state						= bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2144 	bp->stats.mac_da_flag						= bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2145 	bp->stats.mac_una_da_flag					= bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2146 	bp->stats.mac_frame_error_flag				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2147 	bp->stats.mac_ma_unitdata_available			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2148 	bp->stats.mac_hardware_present				= bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2149 	bp->stats.mac_ma_unitdata_enable			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2150 	bp->stats.path_tvx_lower_bound				= bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2151 	bp->stats.path_t_max_lower_bound			= bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2152 	bp->stats.path_max_t_req					= bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2153 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2154 	bp->stats.port_my_type[0]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2155 	bp->stats.port_my_type[1]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2156 	bp->stats.port_neighbor_type[0]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2157 	bp->stats.port_neighbor_type[1]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2158 	bp->stats.port_connection_policies[0]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2159 	bp->stats.port_connection_policies[1]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2160 	bp->stats.port_mac_indicated[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2161 	bp->stats.port_mac_indicated[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2162 	bp->stats.port_current_path[0]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2163 	bp->stats.port_current_path[1]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2164 	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2165 	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2166 	bp->stats.port_mac_placement[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2167 	bp->stats.port_mac_placement[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2168 	bp->stats.port_available_paths[0]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2169 	bp->stats.port_available_paths[1]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2170 	bp->stats.port_pmd_class[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2171 	bp->stats.port_pmd_class[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2172 	bp->stats.port_connection_capabilities[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2173 	bp->stats.port_connection_capabilities[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2174 	bp->stats.port_bs_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2175 	bp->stats.port_bs_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2176 	bp->stats.port_ler_estimate[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2177 	bp->stats.port_ler_estimate[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2178 	bp->stats.port_ler_cutoff[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2179 	bp->stats.port_ler_cutoff[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2180 	bp->stats.port_ler_alarm[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2181 	bp->stats.port_ler_alarm[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2182 	bp->stats.port_connect_state[0]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2183 	bp->stats.port_connect_state[1]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2184 	bp->stats.port_pcm_state[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2185 	bp->stats.port_pcm_state[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2186 	bp->stats.port_pc_withhold[0]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2187 	bp->stats.port_pc_withhold[1]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2188 	bp->stats.port_ler_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2189 	bp->stats.port_ler_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2190 	bp->stats.port_hardware_present[0]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2191 	bp->stats.port_hardware_present[1]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2192 
2193 	/* Get FDDI counters */
2194 
2195 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2196 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2197 		return (struct net_device_stats *)&bp->stats;
2198 
2199 	/* Fill the bp->stats structure with the FDDI counter values */
2200 
2201 	bp->stats.mac_frame_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2202 	bp->stats.mac_copied_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2203 	bp->stats.mac_transmit_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2204 	bp->stats.mac_error_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2205 	bp->stats.mac_lost_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2206 	bp->stats.port_lct_fail_cts[0]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2207 	bp->stats.port_lct_fail_cts[1]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2208 	bp->stats.port_lem_reject_cts[0]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2209 	bp->stats.port_lem_reject_cts[1]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2210 	bp->stats.port_lem_cts[0]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2211 	bp->stats.port_lem_cts[1]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2212 
2213 	return (struct net_device_stats *)&bp->stats;
2214 	}
2215 
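/*
 * Illustrative sketch of the "second cast" described above (assumption:
 * bp->stats has the FDDI statistics type from if_fddi.h, here taken to be
 * struct fddi_statistics, whose first member is the generic
 * net_device_stats block).  A hypothetical caller aware of this driver
 * could recover the FDDI-specific fields like this:
 */
static int dfx_peer_wrap_sketch(struct net_device *dev)
{
	struct fddi_statistics *fs =
		(struct fddi_statistics *)dfx_ctl_get_stats(dev);

	/* Fields beyond the generic counters are reachable after the cast. */
	return fs->smt_peer_wrap_flag != 0;
}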
2216 
2217 /*
2218  * ==============================
2219  * = dfx_ctl_set_multicast_list =
2220  * ==============================
2221  *
2222  * Overview:
2223  *   Enable/Disable LLC frame promiscuous mode reception
2224  *   on the adapter and/or update multicast address table.
2225  *
2226  * Returns:
2227  *   None
2228  *
2229  * Arguments:
2230  *   dev - pointer to device information
2231  *
2232  * Functional Description:
2233  *   This routine follows a fairly simple algorithm for setting the
2234  *   adapter filters and CAM:
2235  *
2236  *		if IFF_PROMISC flag is set
2237  *			enable LLC individual/group promiscuous mode
2238  *		else
2239  *			disable LLC individual/group promiscuous mode
2240  *			if number of incoming multicast addresses >
2241  *					(CAM max size - number of unicast addresses in CAM)
2242  *				enable LLC group promiscuous mode
2243  *				set driver-maintained multicast address count to zero
2244  *			else
2245  *				disable LLC group promiscuous mode
2246  *				set driver-maintained multicast address count to incoming count
2247  *			update adapter CAM
2248  *		update adapter filters
2249  *
2250  * Return Codes:
2251  *   None
2252  *
2253  * Assumptions:
2254  *   Multicast addresses are presented in canonical (LSB) format.
2255  *
2256  * Side Effects:
2257  *   On-board adapter CAM and filters are updated.
2258  */
2259 
2260 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2261 {
2262 	DFX_board_t *bp = netdev_priv(dev);
2263 	int					i;			/* used as index in for loop */
2264 	struct netdev_hw_addr *ha;
2265 
2266 	/* Enable LLC frame promiscuous mode, if necessary */
2267 
2268 	if (dev->flags & IFF_PROMISC)
2269 		bp->ind_group_prom = PI_FSTATE_K_PASS;		/* Enable LLC ind/group prom mode */
2270 
2271 	/* Else, update multicast address table */
2272 
2273 	else
2274 		{
2275 		bp->ind_group_prom = PI_FSTATE_K_BLOCK;		/* Disable LLC ind/group prom mode */
2276 		/*
2277 		 * Check whether incoming multicast address count exceeds table size
2278 		 *
2279 		 * Note: The adapters use an on-board 64-entry CAM to provide
2280 		 *       perfect filtering of multicast packets, as well as
2281 		 *		 bridging functions when unicast addresses are added.
2282 		 *		 There is no hash function available.  To support
2283 		 *		 additional multicast addresses, the all multicast
2284 		 *		 filter (LLC group promiscuous mode) must be enabled.
2285 		 *
2286 		 *		 The firmware reserves two CAM entries for SMT-related
2287 		 *		 multicast addresses, which leaves 62 entries available.
2288 		 *		 The following code ensures that we're not being asked
2289 		 *		 to add more than 62 addresses to the CAM.  If we are,
2290 		 *		 the driver will enable the all multicast filter.
2291 		 *		 Should the number of multicast addresses drop below
2292 		 *		 the high water mark, the filter will be disabled and
2293 		 *		 perfect filtering will be used.
2294 		 */
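		/*
		 * For example, with a single unicast override in the CAM
		 * (uc_count == 1), up to PI_CMD_ADDR_FILTER_K_SIZE - 1
		 * multicast addresses can still be perfect-filtered; one more
		 * and the test below switches to LLC group promiscuous mode
		 * instead of adding anything to the CAM.
		 */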
2295 
2296 		if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2297 			{
2298 			bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2299 			bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2300 			}
2301 		else
2302 			{
2303 			bp->group_prom	= PI_FSTATE_K_BLOCK;	/* Disable LLC group prom mode */
2304 			bp->mc_count	= netdev_mc_count(dev);		/* Add mc addrs to CAM */
2305 			}
2306 
2307 		/* Copy addresses to multicast address table, then update adapter CAM */
2308 
2309 		i = 0;
2310 		netdev_for_each_mc_addr(ha, dev)
2311 			memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2312 			       ha->addr, FDDI_K_ALEN);
2313 
2314 		if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2315 			{
2316 			DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2317 			}
2318 		else
2319 			{
2320 			DBG_printk("%s: Multicast address table updated!  Added %d addresses.\n", dev->name, bp->mc_count);
2321 			}
2322 		}
2323 
2324 	/* Update adapter filters */
2325 
2326 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2327 		{
2328 		DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2329 		}
2330 	else
2331 		{
2332 		DBG_printk("%s: Adapter filters updated!\n", dev->name);
2333 		}
2334 	}
2335 
2336 
2337 /*
2338  * ===========================
2339  * = dfx_ctl_set_mac_address =
2340  * ===========================
2341  *
2342  * Overview:
2343  *   Add node address override (unicast address) to adapter
2344  *   CAM and update dev_addr field in device table.
2345  *
2346  * Returns:
2347  *   None
2348  *
2349  * Arguments:
2350  *   dev  - pointer to device information
2351  *   addr - pointer to sockaddr structure containing unicast address to add
2352  *
2353  * Functional Description:
2354  *   The adapter supports node address overrides by adding one or more
2355  *   unicast addresses to the adapter CAM.  This is similar to adding
2356  *   multicast addresses.  In this routine we'll update the driver and
2357  *   device structures with the new address, then update the adapter CAM
2358  *   to ensure that the adapter will copy and strip frames destined and
2359  *   sourced by that address.
2360  *
2361  * Return Codes:
2362  *   Always returns zero.
2363  *
2364  * Assumptions:
2365  *   The address pointed to by addr->sa_data is a valid unicast
2366  *   address and is presented in canonical (LSB) format.
2367  *
2368  * Side Effects:
2369  *   On-board adapter CAM is updated.  On-board adapter filters
2370  *   may be updated.
2371  */
2372 
2373 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2374 	{
2375 	struct sockaddr	*p_sockaddr = (struct sockaddr *)addr;
2376 	DFX_board_t *bp = netdev_priv(dev);
2377 
2378 	/* Copy unicast address to driver-maintained structs and update count */
2379 
2380 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);	/* update device struct */
2381 	memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);	/* update driver struct */
2382 	bp->uc_count = 1;
2383 
2384 	/*
2385 	 * Verify we're not exceeding the CAM size by adding unicast address
2386 	 *
2387 	 * Note: It's possible that before entering this routine we've
2388 	 *       already filled the CAM with 62 multicast addresses.
2389 	 *		 Since we need to place the node address override into
2390 	 *		 the CAM, we have to check to see that we're not
2391 	 *		 exceeding the CAM size.  If we are, we have to enable
2392 	 *		 the LLC group (multicast) promiscuous mode filter as
2393 	 *		 in dfx_ctl_set_multicast_list.
2394 	 */
2395 
2396 	if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2397 		{
2398 		bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2399 		bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2400 
2401 		/* Update adapter filters */
2402 
2403 		if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2404 			{
2405 			DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2406 			}
2407 		else
2408 			{
2409 			DBG_printk("%s: Adapter filters updated!\n", dev->name);
2410 			}
2411 		}
2412 
2413 	/* Update adapter CAM with new unicast address */
2414 
2415 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2416 		{
2417 		DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2418 		}
2419 	else
2420 		{
2421 		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2422 		}
2423 	return 0;			/* always return zero */
2424 	}
2425 
2426 
2427 /*
2428  * ======================
2429  * = dfx_ctl_update_cam =
2430  * ======================
2431  *
2432  * Overview:
2433  *   Procedure to update adapter CAM (Content Addressable Memory)
2434  *   with desired unicast and multicast address entries.
2435  *
2436  * Returns:
2437  *   Condition code
2438  *
2439  * Arguments:
2440  *   bp - pointer to board information
2441  *
2442  * Functional Description:
2443  *   Updates adapter CAM with current contents of board structure
2444  *   unicast and multicast address tables.  Since there are only 62
2445  *   free entries in CAM, this routine ensures that the command
2446  *   request buffer is not overrun.
2447  *
2448  * Return Codes:
2449  *   DFX_K_SUCCESS - Request succeeded
2450  *   DFX_K_FAILURE - Request failed
2451  *
2452  * Assumptions:
2453  *   All addresses being added (unicast and multicast) are in canonical
2454  *   order.
2455  *
2456  * Side Effects:
2457  *   On-board adapter CAM is updated.
2458  */
2459 
2460 static int dfx_ctl_update_cam(DFX_board_t *bp)
2461 	{
2462 	int			i;				/* used as index */
2463 	PI_LAN_ADDR	*p_addr;		/* pointer to CAM entry */
2464 
2465 	/*
2466 	 * Fill in command request information
2467 	 *
2468 	 * Note: Even though both the unicast and multicast address
2469 	 *       table entries are stored as contiguous 6 byte entries,
2470 	 *		 the firmware address filter set command expects each
2471 	 *		 entry to be two longwords (8 bytes total).  We must be
2472 	 *		 careful to only copy the six bytes of each unicast and
2473 	 *		 multicast table entry into each command entry.  This
2474 	 *		 is also why we must first clear the entire command
2475 	 *		 request buffer.
2476 	 */
2477 
2478 	memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);	/* first clear buffer */
2479 	bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2480 	p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2481 
2482 	/* Now add unicast addresses to command request buffer, if any */
2483 
2484 	for (i=0; i < (int)bp->uc_count; i++)
2485 		{
2486 		if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2487 			{
2488 			memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2489 			p_addr++;			/* point to next command entry */
2490 			}
2491 		}
2492 
2493 	/* Now add multicast addresses to command request buffer, if any */
2494 
2495 	for (i=0; i < (int)bp->mc_count; i++)
2496 		{
2497 		if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2498 			{
2499 			memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2500 			p_addr++;			/* point to next command entry */
2501 			}
2502 		}
2503 
2504 	/* Issue command to update adapter CAM, then return */
2505 
2506 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2507 		return DFX_K_FAILURE;
2508 	return DFX_K_SUCCESS;
2509 	}
2510 
2511 
2512 /*
2513  * ==========================
2514  * = dfx_ctl_update_filters =
2515  * ==========================
2516  *
2517  * Overview:
2518  *   Procedure to update adapter filters with desired
2519  *   filter settings.
2520  *
2521  * Returns:
2522  *   Condition code
2523  *
2524  * Arguments:
2525  *   bp - pointer to board information
2526  *
2527  * Functional Description:
2528  *   Enables or disables filter using current filter settings.
2529  *
2530  * Return Codes:
2531  *   DFX_K_SUCCESS - Request succeeded.
2532  *   DFX_K_FAILURE - Request failed.
2533  *
2534  * Assumptions:
2535  *   We must always pass up packets destined to the broadcast
2536  *   address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2537  *   broadcast filter enabled.
2538  *
2539  * Side Effects:
2540  *   On-board adapter filters are updated.
2541  */
2542 
2543 static int dfx_ctl_update_filters(DFX_board_t *bp)
2544 	{
2545 	int	i = 0;					/* used as index */
2546 
2547 	/* Fill in command request information */
2548 
2549 	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2550 
2551 	/* Initialize Broadcast filter - * ALWAYS ENABLED * */
2552 
2553 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_BROADCAST;
2554 	bp->cmd_req_virt->filter_set.item[i++].value	= PI_FSTATE_K_PASS;
2555 
2556 	/* Initialize LLC Individual/Group Promiscuous filter */
2557 
2558 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_IND_GROUP_PROM;
2559 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->ind_group_prom;
2560 
2561 	/* Initialize LLC Group Promiscuous filter */
2562 
2563 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_GROUP_PROM;
2564 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->group_prom;
2565 
2566 	/* Terminate the item code list */
2567 
2568 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_EOL;
2569 
2570 	/* Issue command to update adapter filters, then return */
2571 
2572 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2573 		return DFX_K_FAILURE;
2574 	return DFX_K_SUCCESS;
2575 	}
2576 
2577 
2578 /*
2579  * ======================
2580  * = dfx_hw_dma_cmd_req =
2581  * ======================
2582  *
2583  * Overview:
2584  *   Sends PDQ DMA command to adapter firmware
2585  *
2586  * Returns:
2587  *   Condition code
2588  *
2589  * Arguments:
2590  *   bp - pointer to board information
2591  *
2592  * Functional Description:
2593  *   The command request and response buffers are posted to the adapter in the manner
2594  *   described in the PDQ Port Specification:
2595  *
2596  *		1. Command Response Buffer is posted to adapter.
2597  *		2. Command Request Buffer is posted to adapter.
2598  *		3. Command Request consumer index is polled until it indicates that request
2599  *         buffer has been DMA'd to adapter.
2600  *		4. Command Response consumer index is polled until it indicates that response
2601  *         buffer has been DMA'd from adapter.
2602  *
2603  *   This ordering ensures that a response buffer is already available for the firmware
2604  *   to use once it's done processing the request buffer.
2605  *
2606  * Return Codes:
2607  *   DFX_K_SUCCESS	  - DMA command succeeded
2608  *   DFX_K_OUTSTATE  - Adapter is NOT in proper state
2609  *   DFX_K_HW_TIMEOUT - DMA command timed out
2610  *
2611  * Assumptions:
2612  *   Command request buffer has already been filled with desired DMA command.
2613  *
2614  * Side Effects:
2615  *   None
2616  */
2617 
2618 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2619 	{
2620 	int status;			/* adapter status */
2621 	int timeout_cnt;	/* used in for loops */
2622 
2623 	/* Make sure the adapter is in a state in which we can issue the DMA command */
2624 
2625 	status = dfx_hw_adap_state_rd(bp);
2626 	if ((status == PI_STATE_K_RESET)		||
2627 		(status == PI_STATE_K_HALTED)		||
2628 		(status == PI_STATE_K_DMA_UNAVAIL)	||
2629 		(status == PI_STATE_K_UPGRADE))
2630 		return DFX_K_OUTSTATE;
2631 
2632 	/* Put response buffer on the command response queue */
2633 
2634 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2635 			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2636 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2637 
2638 	/* Bump (and wrap) the producer index and write out to register */
2639 
2640 	bp->cmd_rsp_reg.index.prod += 1;
2641 	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2642 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2643 
2644 	/* Put request buffer on the command request queue */
2645 
2646 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2647 			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2648 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2649 
2650 	/* Bump (and wrap) the producer index and write out to register */
2651 
2652 	bp->cmd_req_reg.index.prod += 1;
2653 	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2654 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2655 
2656 	/*
2657 	 * Here we wait for the command request consumer index to be equal
2658 	 * to the producer, indicating that the adapter has DMAed the request.
2659 	 */
2660 
2661 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2662 		{
2663 		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2664 			break;
2665 		udelay(100);			/* wait for 100 microseconds */
2666 		}
2667 	if (timeout_cnt == 0)
2668 		return DFX_K_HW_TIMEOUT;
2669 
2670 	/* Bump (and wrap) the completion index and write out to register */
2671 
2672 	bp->cmd_req_reg.index.comp += 1;
2673 	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2674 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2675 
2676 	/*
2677 	 * Here we wait for the command response consumer index to be equal
2678 	 * to the producer, indicating that the adapter has DMAed the response.
2679 	 */
2680 
2681 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2682 		{
2683 		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2684 			break;
2685 		udelay(100);			/* wait for 100 microseconds */
2686 		}
2687 	if (timeout_cnt == 0)
2688 		return DFX_K_HW_TIMEOUT;
2689 
2690 	/* Bump (and wrap) the completion index and write out to register */
2691 
2692 	bp->cmd_rsp_reg.index.comp += 1;
2693 	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2694 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2695 	return DFX_K_SUCCESS;
2696 	}
2697 
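/*
 * Minimal usage sketch for the routine above, mirroring what
 * dfx_ctl_get_stats() and dfx_ctl_update_filters() already do: fill in the
 * request buffer, then post it and wait for the matching response.  With
 * the 20000 x 100 us polling loops above, each wait is bounded at roughly
 * 2 seconds.  The function name here is purely illustrative.
 */
static int dfx_get_cntrs_sketch(DFX_board_t *bp)
{
	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return DFX_K_FAILURE;

	/* The response is now valid in bp->cmd_rsp_virt->cntrs_get. */
	return DFX_K_SUCCESS;
}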
2698 
2699 /*
2700  * ========================
2701  * = dfx_hw_port_ctrl_req =
2702  * ========================
2703  *
2704  * Overview:
2705  *   Sends PDQ port control command to adapter firmware
2706  *
2707  * Returns:
2708  *   Host data register value in host_data if ptr is not NULL
2709  *
2710  * Arguments:
2711  *   bp			- pointer to board information
2712  *	 command	- port control command
2713  *	 data_a		- port data A register value
2714  *	 data_b		- port data B register value
2715  *	 host_data	- ptr to host data register value
2716  *
2717  * Functional Description:
2718  *   Send generic port control command to adapter by writing
2719  *   to various PDQ port registers, then polling for completion.
2720  *
2721  * Return Codes:
2722  *   DFX_K_SUCCESS	  - port control command succeeded
2723  *   DFX_K_HW_TIMEOUT - port control command timed out
2724  *
2725  * Assumptions:
2726  *   None
2727  *
2728  * Side Effects:
2729  *   None
2730  */
2731 
2732 static int dfx_hw_port_ctrl_req(
2733 	DFX_board_t	*bp,
2734 	PI_UINT32	command,
2735 	PI_UINT32	data_a,
2736 	PI_UINT32	data_b,
2737 	PI_UINT32	*host_data
2738 	)
2739 
2740 	{
2741 	PI_UINT32	port_cmd;		/* Port Control command register value */
2742 	int			timeout_cnt;	/* used in for loops */
2743 
2744 	/* Set Command Error bit in command longword */
2745 
2746 	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2747 
2748 	/* Issue port command to the adapter */
2749 
2750 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2751 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2752 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2753 
2754 	/* Now wait for command to complete */
2755 
2756 	if (command == PI_PCTRL_M_BLAST_FLASH)
2757 		timeout_cnt = 600000;	/* set command timeout count to 60 seconds */
2758 	else
2759 		timeout_cnt = 20000;	/* set command timeout count to 2 seconds */
2760 
2761 	for (; timeout_cnt > 0; timeout_cnt--)
2762 		{
2763 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2764 		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2765 			break;
2766 		udelay(100);			/* wait for 100 microseconds */
2767 		}
2768 	if (timeout_cnt == 0)
2769 		return DFX_K_HW_TIMEOUT;
2770 
2771 	/*
2772 	 * If the caller supplied a non-NULL host_data pointer, return the
2773 	 * contents of the HOST_DATA register through it; otherwise the
2774 	 * register is simply not read back.
2775 	 */
2776 
2777 	if (host_data != NULL)
2778 		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2779 	return DFX_K_SUCCESS;
2780 	}
2781 
2782 
2783 /*
2784  * =====================
2785  * = dfx_hw_adap_reset =
2786  * =====================
2787  *
2788  * Overview:
2789  *   Resets adapter
2790  *
2791  * Returns:
2792  *   None
2793  *
2794  * Arguments:
2795  *   bp   - pointer to board information
2796  *   type - type of reset to perform
2797  *
2798  * Functional Description:
2799  *   Issue soft reset to adapter by writing to PDQ Port Reset
2800  *   register.  Use incoming reset type to tell adapter what
2801  *   kind of reset operation to perform.
2802  *
2803  * Return Codes:
2804  *   None
2805  *
2806  * Assumptions:
2807  *   This routine merely issues a soft reset to the adapter.
2808  *   It is expected that after this routine returns, the caller
2809  *   will appropriately poll the Port Status register for the
2810  *   adapter to enter the proper state.
2811  *
2812  * Side Effects:
2813  *   Internal adapter registers are cleared.
2814  */
2815 
2816 static void dfx_hw_adap_reset(
2817 	DFX_board_t	*bp,
2818 	PI_UINT32	type
2819 	)
2820 
2821 	{
2822 	/* Set Reset type and assert reset */
2823 
2824 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);	/* tell adapter type of reset */
2825 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2826 
2827 	/* Wait for at least 1 microsecond per the spec; we wait 20 just to be safe. */
2828 
2829 	udelay(20);
2830 
2831 	/* Deassert reset */
2832 
2833 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2834 	}
2835 
2836 
2837 /*
2838  * ========================
2839  * = dfx_hw_adap_state_rd =
2840  * ========================
2841  *
2842  * Overview:
2843  *   Returns current adapter state
2844  *
2845  * Returns:
2846  *   Adapter state per PDQ Port Specification
2847  *
2848  * Arguments:
2849  *   bp - pointer to board information
2850  *
2851  * Functional Description:
2852  *   Reads PDQ Port Status register and returns adapter state.
2853  *
2854  * Return Codes:
2855  *   None
2856  *
2857  * Assumptions:
2858  *   None
2859  *
2860  * Side Effects:
2861  *   None
2862  */
2863 
2864 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2865 	{
2866 	PI_UINT32 port_status;		/* Port Status register value */
2867 
2868 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2869 	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2870 	}
2871 
2872 
2873 /*
2874  * =====================
2875  * = dfx_hw_dma_uninit =
2876  * =====================
2877  *
2878  * Overview:
2879  *   Brings adapter to DMA_UNAVAILABLE state
2880  *
2881  * Returns:
2882  *   Condition code
2883  *
2884  * Arguments:
2885  *   bp   - pointer to board information
2886  *   type - type of reset to perform
2887  *
2888  * Functional Description:
2889  *   Bring adapter to DMA_UNAVAILABLE state by performing the following:
2890  *		1. Set reset type bit in Port Data A Register then reset adapter.
2891  *		2. Check that adapter is in DMA_UNAVAILABLE state.
2892  *
2893  * Return Codes:
2894  *   DFX_K_SUCCESS	  - adapter is in DMA_UNAVAILABLE state
2895  *   DFX_K_HW_TIMEOUT - adapter did not reset properly
2896  *
2897  * Assumptions:
2898  *   None
2899  *
2900  * Side Effects:
2901  *   Internal adapter registers are cleared.
2902  */
2903 
2904 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2905 	{
2906 	int timeout_cnt;	/* used in for loops */
2907 
2908 	/* Set reset type bit and reset adapter */
2909 
2910 	dfx_hw_adap_reset(bp, type);
2911 
2912 	/* Now wait for adapter to enter DMA_UNAVAILABLE state */
2913 
2914 	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2915 		{
2916 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2917 			break;
2918 		udelay(100);					/* wait for 100 microseconds */
2919 		}
2920 	if (timeout_cnt == 0)
2921 		return DFX_K_HW_TIMEOUT;
2922 	return DFX_K_SUCCESS;
2923 	}
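
/*
 * Worked timing note for the polling loop above: 100000 iterations of a
 * 100 us delay bound the wait at roughly 10 seconds before
 * DFX_K_HW_TIMEOUT is returned.
 */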
2924 
2925 /*
2926  *	Align an sk_buff's data area to a power-of-2 boundary
2927  *
2928  */
2929 #ifdef DYNAMIC_BUFFERS
2930 static void my_skb_align(struct sk_buff *skb, int n)
2931 {
2932 	unsigned long x = (unsigned long)skb->data;
2933 	unsigned long v;
2934 
2935 	v = ALIGN(x, n);	/* Where we want to be */
2936 
2937 	skb_reserve(skb, v - x);
2938 }
2939 #endif
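
/*
 * Worked example for my_skb_align() above: with skb->data at, say, 0x10c8
 * and n == 128, ALIGN() rounds up to 0x1100, so skb_reserve(skb, 0x38)
 * advances skb->data by 56 bytes onto the next 128-byte boundary.  This is
 * how the receive paths below obtain 128-byte aligned buffer addresses.
 */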
2940 
2941 /*
2942  * ================
2943  * = dfx_rcv_init =
2944  * ================
2945  *
2946  * Overview:
2947  *   Produces buffers to adapter LLC Host receive descriptor block
2948  *
2949  * Returns:
2950  *   None
2951  *
2952  * Arguments:
2953  *   bp - pointer to board information
2954  *   get_buffers - non-zero if buffers to be allocated
2955  *
2956  * Functional Description:
2957  *   This routine can be called during dfx_adap_init() or during an adapter
2958  *	 reset.  It initializes the descriptor block and produces all allocated
2959  *   LLC Host queue receive buffers.
2960  *
2961  * Return Codes:
2962  *   Return 0 on success or -ENOMEM if buffer allocation failed (when using
2963  *   dynamic buffer allocation).  If buffer allocation fails, any buffers
2964  *   already allocated are not released; releasing them is the caller's
2965  *   responsibility.
2966  *
2967  * Assumptions:
2968  *   The PDQ has been reset and the adapter and driver maintained Type 2
2969  *   register indices are cleared.
2970  *
2971  * Side Effects:
2972  *   Receive buffers are posted to the adapter LLC queue and the adapter
2973  *   is notified.
2974  */
2975 
2976 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2977 	{
2978 	int	i, j;					/* used in for loop */
2979 
2980 	/*
2981 	 *  Since each receive buffer is a single fragment of the same length, initialize
2982 	 *  first longword in each receive descriptor for entire LLC Host descriptor
2983 	 *  block.  Also initialize second longword in each receive descriptor with
2984 	 *  physical address of receive buffer.  We'll always allocate receive
2985 	 *  buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2986 	 *  block and produce new receive buffers by simply updating the receive
2987 	 *  producer index.
2988 	 *
2989 	 * 	Assumptions:
2990 	 *		To support all shipping versions of PDQ, the receive buffer size
2991 	 *		must be a multiple of 128 bytes in length and the physical address must be 128-byte
2992 	 *		aligned.  In other words, bits 0-6 of the length and address must
2993 	 *		be zero for the following descriptor field entries to be correct on
2994 	 *		all PDQ-based boards.  We guaranteed both requirements during
2995 	 *		driver initialization when we allocated memory for the receive buffers.
2996 	 */
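	/*
	 * For example, with rcv_bufs_to_post == 32 the two nested loops below
	 * touch each of the 256 descriptor entries exactly once; in the
	 * non-DYNAMIC_BUFFERS case, entry n is then backed by buffer (n mod 32)
	 * of the contiguous receive block, so 32 physical buffers cover the
	 * whole descriptor block.
	 */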
2997 
2998 	if (get_buffers) {
2999 #ifdef DYNAMIC_BUFFERS
3000 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3001 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3002 		{
3003 			struct sk_buff *newskb;
3004 			dma_addr_t dma_addr;
3005 
3006 			newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
3007 						    GFP_NOIO);
3008 			if (!newskb)
3009 				return -ENOMEM;
3010 			/*
3011 			 * align to 128 bytes for compatibility with
3012 			 * the old EISA boards.
3013 			 */
3014 
3015 			my_skb_align(newskb, 128);
3016 			dma_addr = dma_map_single(bp->bus_dev,
3017 						  newskb->data,
3018 						  PI_RCV_DATA_K_SIZE_MAX,
3019 						  DMA_FROM_DEVICE);
3020 			if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3021 				dev_kfree_skb(newskb);
3022 				return -ENOMEM;
3023 			}
3024 			bp->descr_block_virt->rcv_data[i + j].long_0 =
3025 				(u32)(PI_RCV_DESCR_M_SOP |
3026 				      ((PI_RCV_DATA_K_SIZE_MAX /
3027 					PI_ALIGN_K_RCV_DATA_BUFF) <<
3028 				       PI_RCV_DESCR_V_SEG_LEN));
3029 			bp->descr_block_virt->rcv_data[i + j].long_1 =
3030 				(u32)dma_addr;
3031 
3032 			/*
3033 			 * p_rcv_buff_va is only used inside the
3034 			 * kernel so we put the skb pointer here.
3035 			 */
3036 			bp->p_rcv_buff_va[i+j] = (char *) newskb;
3037 		}
3038 #else
3039 	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
3040 		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3041 			{
3042 			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
3043 				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
3044 			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
3045 			bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
3046 			}
3047 #endif
3048 	}
3049 
3050 	/* Update receive producer and Type 2 register */
3051 
3052 	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
3053 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3054 	return 0;
3055 	}
3056 
3057 
3058 /*
3059  * =========================
3060  * = dfx_rcv_queue_process =
3061  * =========================
3062  *
3063  * Overview:
3064  *   Process received LLC frames.
3065  *
3066  * Returns:
3067  *   None
3068  *
3069  * Arguments:
3070  *   bp - pointer to board information
3071  *
3072  * Functional Description:
3073  *   Received LLC frames are processed until there are no more consumed frames.
3074  *   Once all frames are processed, the receive buffers are returned to the
3075  *   adapter.  Note that this algorithm bounds the time spent in this routine:
3076  *   there is a fixed number of receive buffers to process, and new buffers are
3077  *   not produced to the adapter until this routine exits and returns to the
3078  *   ISR.
3079  *
3080  * Return Codes:
3081  *   None
3082  *
3083  * Assumptions:
3084  *   None
3085  *
3086  * Side Effects:
3087  *   None
3088  */
3089 
3090 static void dfx_rcv_queue_process(
3091 	DFX_board_t *bp
3092 	)
3093 
3094 	{
3095 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3096 	char				*p_buff;			/* ptr to start of packet receive buffer (FMC descriptor) */
3097 	u32					descr, pkt_len;		/* FMC descriptor field and packet length */
3098 	struct sk_buff		*skb = NULL;			/* pointer to a sk_buff to hold incoming packet data */
3099 
3100 	/* Service all consumed LLC receive frames */
3101 
3102 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3103 	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3104 		{
3105 		/* Process any errors */
3106 		dma_addr_t dma_addr;
3107 		int entry;
3108 
3109 		entry = bp->rcv_xmt_reg.index.rcv_comp;
3110 #ifdef DYNAMIC_BUFFERS
3111 		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3112 #else
3113 		p_buff = bp->p_rcv_buff_va[entry];
3114 #endif
3115 		dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
3116 		dma_sync_single_for_cpu(bp->bus_dev,
3117 					dma_addr + RCV_BUFF_K_DESCR,
3118 					sizeof(u32),
3119 					DMA_FROM_DEVICE);
3120 		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3121 
3122 		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3123 			{
3124 			if (descr & PI_FMC_DESCR_M_RCC_CRC)
3125 				bp->rcv_crc_errors++;
3126 			else
3127 				bp->rcv_frame_status_errors++;
3128 			}
3129 		else
3130 		{
3131 			int rx_in_place = 0;
3132 
3133 			/* The frame was received without errors - verify packet length */
3134 
3135 			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3136 			pkt_len -= 4;				/* subtract 4 byte CRC */
3137 			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3138 				bp->rcv_length_errors++;
3139 			else{
3140 #ifdef DYNAMIC_BUFFERS
3141 				struct sk_buff *newskb = NULL;
3142 
3143 				if (pkt_len > SKBUFF_RX_COPYBREAK) {
3144 					dma_addr_t new_dma_addr;
3145 
3146 					newskb = netdev_alloc_skb(bp->dev,
3147 								  NEW_SKB_SIZE);
3148 					if (newskb) {
3149 						my_skb_align(newskb, 128);
3150 						new_dma_addr = dma_map_single(
3151 								bp->bus_dev,
3152 								newskb->data,
3153 								PI_RCV_DATA_K_SIZE_MAX,
3154 								DMA_FROM_DEVICE);
3155 						if (dma_mapping_error(
3156 								bp->bus_dev,
3157 								new_dma_addr)) {
3158 							dev_kfree_skb(newskb);
3159 							newskb = NULL;
3160 						}
3161 					}
3162 					if (newskb) {
3163 						rx_in_place = 1;
3164 
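						/* Pass the original full-sized buffer up the
						 * stack and slot the freshly mapped replacement
						 * skb into this ring entry in its place.
						 */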
3165 						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3166 						dma_unmap_single(bp->bus_dev,
3167 							dma_addr,
3168 							PI_RCV_DATA_K_SIZE_MAX,
3169 							DMA_FROM_DEVICE);
3170 						skb_reserve(skb, RCV_BUFF_K_PADDING);
3171 						bp->p_rcv_buff_va[entry] = (char *)newskb;
3172 						bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
3173 					}
3174 				}
3175 				if (!newskb)
3176 #endif
3177 					/* Alloc new buffer to pass up,
3178 					 * add room for PRH. */
3179 					skb = netdev_alloc_skb(bp->dev,
3180 							       pkt_len + 3);
3181 				if (skb == NULL)
3182 					{
3183 					printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
3184 					bp->rcv_discards++;
3185 					break;
3186 					}
3187 				else {
3188 					if (!rx_in_place) {
3189 						/* Receive buffer allocated, pass receive packet up */
3190 						dma_sync_single_for_cpu(
3191 							bp->bus_dev,
3192 							dma_addr +
3193 							RCV_BUFF_K_PADDING,
3194 							pkt_len + 3,
3195 							DMA_FROM_DEVICE);
3196 
3197 						skb_copy_to_linear_data(skb,
3198 							       p_buff + RCV_BUFF_K_PADDING,
3199 							       pkt_len + 3);
3200 					}
3201 
3202 					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
3203 					skb_put(skb, pkt_len);		/* pass up packet length, NOT including CRC */
3204 					skb->protocol = fddi_type_trans(skb, bp->dev);
3205 					bp->rcv_total_bytes += skb->len;
3206 					netif_rx(skb);
3207 
3208 					/* Update the rcv counters */
3209 					bp->rcv_total_frames++;
3210 					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3211 						bp->rcv_multicast_frames++;
3212 				}
3213 			}
3214 			}
3215 
3216 		/*
3217 		 * Advance the producer (for recycling) and advance the completion
3218 		 * (for servicing received frames).  Note that it is okay to
3219 		 * advance the producer without checking that it passes the
3220 		 * completion index because they are both advanced at the same
3221 		 * rate.
3222 		 */
3223 
3224 		bp->rcv_xmt_reg.index.rcv_prod += 1;
3225 		bp->rcv_xmt_reg.index.rcv_comp += 1;
3226 		}
3227 	}
3228 
3229 
3230 /*
3231  * =====================
3232  * = dfx_xmt_queue_pkt =
3233  * =====================
3234  *
3235  * Overview:
3236  *   Queues packets for transmission
3237  *
3238  * Returns:
3239  *   Condition code
3240  *
3241  * Arguments:
3242  *   skb - pointer to sk_buff to queue for transmission
3243  *   dev - pointer to device information
3244  *
3245  * Functional Description:
3246  *   Here we assume that an incoming skb transmit request
3247  *   is contained in a single physically contiguous buffer
3248  *   in which the virtual address of the start of packet
3249  *   (skb->data) can be converted to a physical address
3250  *   by using dma_map_single().
3251  *
3252  *   Since the adapter architecture requires a three byte
3253  *   packet request header to prepend the start of packet,
3254  *   we'll write the three byte field immediately prior to
3255  *   the FC byte.  This assumption is valid because we've
3256  *   ensured that dev->hard_header_len includes three pad
3257  *   bytes.  By posting a single fragment to the adapter,
3258  *   we'll reduce the number of descriptor fetches and
3259  *   bus traffic needed to send the request.
3260  *
3261  *   Also, we can't free the skb until after it's been DMA'd
3262  *   out by the adapter, so we'll queue it in the driver and
3263  *   return it in dfx_xmt_done.
3264  *
3265  * Return Codes:
3266  *   NETDEV_TX_OK   - driver queued packet, link is unavailable, or skbuff was bad
3267  *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3268  *
3269  * Assumptions:
3270  *	 First and foremost, we assume the incoming skb pointer
3271  *   is NOT NULL and is pointing to a valid sk_buff structure.
3272  *
3273  *   The outgoing packet is complete, starting with the
3274  *   frame control byte including the last byte of data,
3275  *   but NOT including the 4 byte CRC.  We'll let the
3276  *   adapter hardware generate and append the CRC.
3277  *
3278  *   The entire packet is stored in one physically
3279  *   contiguous buffer which is not cached and whose
3280  *   32-bit physical address can be determined.
3281  *
3282  *   It's vital that this routine is NOT reentered for the
3283  *   same board and that the OS is not in another section of
3284  *   code (eg. dfx_int_common) for the same board on a
3285  *   different thread.
3286  *
3287  * Side Effects:
3288  *   None
3289  */
3290 
3291 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3292 				     struct net_device *dev)
3293 	{
3294 	DFX_board_t		*bp = netdev_priv(dev);
3295 	u8			prod;				/* local transmit producer index */
3296 	PI_XMT_DESCR		*p_xmt_descr;		/* ptr to transmit descriptor block entry */
3297 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3298 	dma_addr_t		dma_addr;
3299 	unsigned long		flags;
3300 
3301 	netif_stop_queue(dev);
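	/* Pause the transmit queue while this frame is validated and staged;
	 * it is re-enabled further down once the frame has been handed to
	 * the adapter or dropped.
	 */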
3302 
3303 	/*
3304 	 * Verify that incoming transmit request is OK
3305 	 *
3306 	 * Note: The packet size check is consistent with other
3307 	 *		 Linux device drivers, although the correct packet
3308 	 *		 size should be verified before calling the
3309 	 *		 transmit routine.
3310 	 */
3311 
3312 	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3313 	{
3314 		printk("%s: Invalid packet length - %u bytes\n",
3315 			dev->name, skb->len);
3316 		bp->xmt_length_errors++;		/* bump error counter */
3317 		netif_wake_queue(dev);
3318 		dev_kfree_skb(skb);
3319 		return NETDEV_TX_OK;			/* return "success" */
3320 	}
3321 	/*
3322 	 * See if adapter link is available, if not, free buffer
3323 	 *
3324 	 * Note: If the link isn't available, free the buffer and return
3325 	 *		 NETDEV_TX_OK rather than telling the upper layer to
3326 	 *		 requeue the packet.  The reasoning is that by the time
3327 	 *		 the link becomes available, the packet to be sent will
3328 	 *		 be fairly stale.  By simply dropping the packet, the
3329 	 *		 higher layer protocols will eventually time out
3330 	 *		 waiting for response packets that they will never receive.
3331 	 */
3332 
3333 	if (bp->link_available == PI_K_FALSE)
3334 		{
3335 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)	/* is link really available? */
3336 			bp->link_available = PI_K_TRUE;		/* if so, set flag and continue */
3337 		else
3338 			{
3339 			bp->xmt_discards++;					/* bump error counter */
3340 			dev_kfree_skb(skb);		/* free sk_buff now */
3341 			netif_wake_queue(dev);
3342 			return NETDEV_TX_OK;		/* return "success" */
3343 			}
3344 		}
3345 
3346 	/* Write the three PRH bytes immediately before the FC byte */
3347 
3348 	skb_push(skb, 3);
3349 	skb->data[0] = DFX_PRH0_BYTE;	/* these byte values are defined */
3350 	skb->data[1] = DFX_PRH1_BYTE;	/* in the Motorola FDDI MAC chip */
3351 	skb->data[2] = DFX_PRH2_BYTE;	/* specification */
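	/*
	 * Illustrative layout of the DMA buffer after the skb_push() above
	 * (a sketch implied by this routine, not an additional data copy):
	 *
	 *	skb->data: | PRH0 | PRH1 | PRH2 | FC | DA | SA | ... data |
	 *
	 * skb->len now covers the PRH through the last data byte; the
	 * adapter generates and appends the 4-byte CRC itself.
	 */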
3352 
3353 	dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
3354 				  DMA_TO_DEVICE);
3355 	if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3356 		skb_pull(skb, 3);
3357 		return NETDEV_TX_BUSY;
3358 	}
3359 
3360 	spin_lock_irqsave(&bp->lock, flags);
3361 
3362 	/* Get the current producer and the next free xmt data descriptor */
3363 
3364 	prod		= bp->rcv_xmt_reg.index.xmt_prod;
3365 	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3366 
3367 	/*
3368 	 * Get pointer to auxiliary queue entry to contain information
3369 	 * for this packet.
3370 	 *
3371 	 * Note: The current xmt producer index will become the
3372 	 *	 current xmt completion index when we complete this
3373 	 *	 packet later on.  So, we'll get the pointer to the
3374 	 *	 next auxiliary queue entry now before we bump the
3375 	 *	 producer index.
3376 	 */
3377 
3378 	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);	/* also bump producer index */
3379 
3380 	/*
3381 	 * Write the descriptor with buffer info and bump producer
3382 	 *
3383 	 * Note: Since we need to start DMA from the packet request
3384 	 *		 header, we'll add 3 bytes to the DMA buffer length,
3385 	 *		 and we'll determine the physical address of the
3386 	 *		 buffer from the PRH, not skb->data.
3387 	 *
3388 	 * Assumptions:
3389 	 *		 1. Packet starts with the frame control (FC) byte
3390 	 *		    at skb->data.
3391 	 *		 2. The 4-byte CRC is not appended to the buffer or
3392 	 *			included in the length.
3393 	 *		 3. Packet length (skb->len) is from FC to end of
3394 	 *			data, inclusive.
3395 	 *		 4. The packet length does not exceed the maximum
3396 	 *			FDDI LLC frame length of 4491 bytes.
3397 	 *		 5. The entire packet is contained in a physically
3398 	 *			contiguous, non-cached, locked memory space
3399 	 *			comprised of a single buffer pointed to by
3400 	 *			skb->data.
3401 	 *		 6. The physical address of the start of packet
3402 	 *			can be determined from the virtual address
3403 	 *			by using dma_map_single() and is only 32-bits
3404 	 *			wide.
3405 	 */
3406 
3407 	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3408 	p_xmt_descr->long_1 = (u32)dma_addr;
3409 
3410 	/*
3411 	 * Verify that descriptor is actually available
3412 	 *
3413 	 * Note: If a descriptor isn't available, return NETDEV_TX_BUSY,
3414 	 *	 which tells the upper layer to requeue the packet for later
3415 	 *	 transmission.
3416 	 *
3417 	 *       We need to ensure that the producer never reaches the
3418 	 *	 completion, except to indicate that the queue is empty.
3419 	 */
3420 
3421 	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3422 	{
3423 		skb_pull(skb,3);
3424 		spin_unlock_irqrestore(&bp->lock, flags);
3425 		return NETDEV_TX_BUSY;	/* requeue packet for later */
3426 	}
3427 
3428 	/*
3429 	 * Save info for this packet for xmt done indication routine
3430 	 *
3431 	 * Normally, we'd save the producer index in the p_xmt_drv_descr
3432 	 * structure so that we'd have it handy when we complete this
3433 	 * packet later (in dfx_xmt_done).  However, since the current
3434 	 * transmit architecture guarantees a single fragment for the
3435 	 * entire packet, we can simply bump the completion index by
3436 	 * one (1) for each completed packet.
3437 	 *
3438 	 * Note: If this assumption changes and we're presented with
3439 	 *	 an inconsistent number of transmit fragments for packet
3440 	 *	 data, we'll need to modify this code to save the current
3441 	 *	 transmit producer index.
3442 	 */
3443 
3444 	p_xmt_drv_descr->p_skb = skb;
3445 
3446 	/* Update Type 2 register */
3447 
3448 	bp->rcv_xmt_reg.index.xmt_prod = prod;
3449 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3450 	spin_unlock_irqrestore(&bp->lock, flags);
3451 	netif_wake_queue(dev);
3452 	return NETDEV_TX_OK;	/* packet queued to adapter */
3453 	}
3454 
3455 
3456 /*
3457  * ================
3458  * = dfx_xmt_done =
3459  * ================
3460  *
3461  * Overview:
3462  *   Processes all frames that have been transmitted.
3463  *
3464  * Returns:
3465  *   The number of transmit buffers freed
3466  *
3467  * Arguments:
3468  *   bp - pointer to board information
3469  *
3470  * Functional Description:
3471  *   For all consumed transmit descriptors that have not
3472  *   yet been completed, we'll free the skb we were holding
3473  *   onto using dev_kfree_skb and bump the appropriate
3474  *   counters.
3475  *
3476  * Return Codes:
3477  *   Number of freed transmit buffers
3478  *
3479  * Assumptions:
3480  *   The Type 2 register is not updated in this routine.  It is
3481  *   assumed that it will be updated in the ISR when dfx_xmt_done
3482  *   returns.
3483  *
3484  * Side Effects:
3485  *   None
3486  */
3487 
3488 static int dfx_xmt_done(DFX_board_t *bp)
3489 	{
3490 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3491 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3492 	u8			comp;			/* local transmit completion index */
3493 	int 			freed = 0;		/* buffers freed */
3494 
3495 	/* Service all consumed transmit frames */
3496 
3497 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3498 	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3499 		{
3500 		/* Get pointer to the transmit driver descriptor block information */
3501 
3502 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3503 
3504 		/* Increment transmit counters */
3505 
3506 		bp->xmt_total_frames++;
3507 		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3508 
3509 		/* Return skb to operating system */
3510 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3511 		dma_unmap_single(bp->bus_dev,
3512 				 bp->descr_block_virt->xmt_data[comp].long_1,
3513 				 p_xmt_drv_descr->p_skb->len,
3514 				 DMA_TO_DEVICE);
3515 		dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3516 
3517 		/*
3518 		 * Move to start of next packet by updating completion index
3519 		 *
3520 		 * Here we assume that a transmit packet request is always
3521 		 * serviced by posting one fragment.  We can therefore
3522 		 * simplify the completion code by incrementing the
3523 		 * completion index by one.  This code will need to be
3524 		 * modified if this assumption changes.  See comments
3525 		 * in dfx_xmt_queue_pkt for more details.
3526 		 */
3527 
3528 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3529 		freed++;
3530 		}
3531 	return freed;
3532 	}
3533 
3534 
3535 /*
3536  * =================
3537  * = dfx_rcv_flush =
3538  * =================
3539  *
3540  * Overview:
3541  *   Remove all skb's in the receive ring.
3542  *
3543  * Returns:
3544  *   None
3545  *
3546  * Arguments:
3547  *   bp - pointer to board information
3548  *
3549  * Functional Description:
3550  *   Frees all the dynamically allocated skbs that are
3551  *   currently attached to the device receive ring. This
3552  *   function is typically only used when the device is
3553  *   initialized or reinitialized.
3554  *
3555  * Return Codes:
3556  *   None
3557  *
3558  * Side Effects:
3559  *   None
3560  */
3561 #ifdef DYNAMIC_BUFFERS
3562 static void dfx_rcv_flush( DFX_board_t *bp )
3563 	{
3564 	int i, j;
3565 
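	/* Walk the receive ring in the same i/j posting order used by
	 * dfx_rcv_init(), unmapping and freeing any skb still attached
	 * to a slot.
	 */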
3566 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3567 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3568 		{
3569 			struct sk_buff *skb;
3570 			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3571 			if (skb) {
3572 				dma_unmap_single(bp->bus_dev,
3573 						 bp->descr_block_virt->rcv_data[i+j].long_1,
3574 						 PI_RCV_DATA_K_SIZE_MAX,
3575 						 DMA_FROM_DEVICE);
3576 				dev_kfree_skb(skb);
3577 			}
3578 			bp->p_rcv_buff_va[i+j] = NULL;
3579 		}
3580 
3581 	}
3582 #endif /* DYNAMIC_BUFFERS */
3583 
3584 /*
3585  * =================
3586  * = dfx_xmt_flush =
3587  * =================
3588  *
3589  * Overview:
3590  *   Processes all frames whether they've been transmitted
3591  *   or not.
3592  *
3593  * Returns:
3594  *   None
3595  *
3596  * Arguments:
3597  *   bp - pointer to board information
3598  *
3599  * Functional Description:
3600  *   For all produced transmit descriptors that have not
3601  *   yet been completed, we'll free the skb we were holding
3602  *   onto using dev_kfree_skb and bump the appropriate
3603  *   counters.  Of course, it's possible that some of
3604  *   these transmit requests actually did go out, but we
3605  *   won't make that distinction here.  Finally, we'll
3606  *   update the consumer index to match the producer.
3607  *
3608  * Return Codes:
3609  *   None
3610  *
3611  * Assumptions:
3612  *   This routine does NOT update the Type 2 register.  It
3613  *   is assumed that this routine is being called during a
3614  *   transmit flush interrupt, or a shutdown or close routine.
3615  *
3616  * Side Effects:
3617  *   None
3618  */
3619 
3620 static void dfx_xmt_flush( DFX_board_t *bp )
3621 	{
3622 	u32			prod_cons;		/* rcv/xmt consumer block longword */
3623 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3624 	u8			comp;			/* local transmit completion index */
3625 
3626 	/* Flush all outstanding transmit frames */
3627 
3628 	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3629 		{
3630 		/* Get pointer to the transmit driver descriptor block information */
3631 
3632 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3633 
3634 		/* Return skb to operating system */
3635 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3636 		dma_unmap_single(bp->bus_dev,
3637 				 bp->descr_block_virt->xmt_data[comp].long_1,
3638 				 p_xmt_drv_descr->p_skb->len,
3639 				 DMA_TO_DEVICE);
3640 		dev_kfree_skb(p_xmt_drv_descr->p_skb);
3641 
3642 		/* Increment transmit error counter */
3643 
3644 		bp->xmt_discards++;
3645 
3646 		/*
3647 		 * Move to start of next packet by updating completion index
3648 		 *
3649 		 * Here we assume that a transmit packet request is always
3650 		 * serviced by posting one fragment.  We can therefore
3651 		 * simplify the completion code by incrementing the
3652 		 * completion index by one.  This code will need to be
3653 		 * modified if this assumption changes.  See comments
3654 		 * in dfx_xmt_queue_pkt for more details.
3655 		 */
3656 
3657 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3658 		}
3659 
3660 	/* Update the transmit consumer index in the consumer block */
3661 
3662 	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3663 	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3664 	bp->cons_block_virt->xmt_rcv_data = prod_cons;
3665 	}
3666 
3667 /*
3668  * ==================
3669  * = dfx_unregister =
3670  * ==================
3671  *
3672  * Overview:
3673  *   Shuts down an FDDI controller
3674  *
3675  * Returns:
3676  *   None
3677  *
3678  * Arguments:
3679  *   bdev - pointer to device information
3680  *
3681  * Functional Description:
3682  *   Undoes dfx_register(): unregisters the net device, frees the
 *   DMA-coherent descriptor block, and releases the bus resources.
 *
3683  * Return Codes:
3684  *   None
3685  *
3686  * Assumptions:
3687  *   It compiles so it should work :-( (PCI cards do :-)
3688  *
3689  * Side Effects:
3690  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
3691  *   freed.
3692  */
3693 static void dfx_unregister(struct device *bdev)
3694 {
3695 	struct net_device *dev = dev_get_drvdata(bdev);
3696 	DFX_board_t *bp = netdev_priv(dev);
3697 	int dfx_bus_pci = dev_is_pci(bdev);
3698 	int dfx_bus_tc = DFX_BUS_TC(bdev);
3699 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3700 	resource_size_t bar_start[3] = {0};	/* pointers to ports */
3701 	resource_size_t bar_len[3] = {0};	/* resource lengths */
3702 	int		alloc_size;		/* total buffer size used */
3703 
3704 	unregister_netdev(dev);
3705 
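	/* Recompute the descriptor/consumer block size exactly as it was
	 * computed at allocation time during driver initialization, so
	 * dma_free_coherent() is called with a matching length.
	 */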
3706 	alloc_size = sizeof(PI_DESCR_BLOCK) +
3707 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3708 #ifndef DYNAMIC_BUFFERS
3709 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3710 #endif
3711 		     sizeof(PI_CONSUMER_BLOCK) +
3712 		     (PI_ALIGN_K_DESC_BLK - 1);
3713 	if (bp->kmalloced)
3714 		dma_free_coherent(bdev, alloc_size,
3715 				  bp->kmalloced, bp->kmalloced_dma);
3716 
3717 	dfx_bus_uninit(dev);
3718 
3719 	dfx_get_bars(bdev, bar_start, bar_len);
3720 	if (bar_start[2] != 0)
3721 		release_region(bar_start[2], bar_len[2]);
3722 	if (bar_start[1] != 0)
3723 		release_region(bar_start[1], bar_len[1]);
3724 	if (dfx_use_mmio) {
3725 		iounmap(bp->base.mem);
3726 		release_mem_region(bar_start[0], bar_len[0]);
3727 	} else
3728 		release_region(bar_start[0], bar_len[0]);
3729 
3730 	if (dfx_bus_pci)
3731 		pci_disable_device(to_pci_dev(bdev));
3732 
3733 	free_netdev(dev);
3734 }
3735 
3736 
3737 static int __maybe_unused dfx_dev_register(struct device *);
3738 static int __maybe_unused dfx_dev_unregister(struct device *);
3739 
3740 #ifdef CONFIG_PCI
3741 static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3742 static void dfx_pci_unregister(struct pci_dev *);
3743 
3744 static const struct pci_device_id dfx_pci_table[] = {
3745 	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3746 	{ }
3747 };
3748 MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3749 
3750 static struct pci_driver dfx_pci_driver = {
3751 	.name		= "defxx",
3752 	.id_table	= dfx_pci_table,
3753 	.probe		= dfx_pci_register,
3754 	.remove		= dfx_pci_unregister,
3755 };
3756 
3757 static int dfx_pci_register(struct pci_dev *pdev,
3758 			    const struct pci_device_id *ent)
3759 {
3760 	return dfx_register(&pdev->dev);
3761 }
3762 
3763 static void dfx_pci_unregister(struct pci_dev *pdev)
3764 {
3765 	dfx_unregister(&pdev->dev);
3766 }
3767 #endif /* CONFIG_PCI */
3768 
3769 #ifdef CONFIG_EISA
3770 static const struct eisa_device_id dfx_eisa_table[] = {
3771         { "DEC3001", DEFEA_PROD_ID_1 },
3772         { "DEC3002", DEFEA_PROD_ID_2 },
3773         { "DEC3003", DEFEA_PROD_ID_3 },
3774         { "DEC3004", DEFEA_PROD_ID_4 },
3775         { }
3776 };
3777 MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3778 
3779 static struct eisa_driver dfx_eisa_driver = {
3780 	.id_table	= dfx_eisa_table,
3781 	.driver		= {
3782 		.name	= "defxx",
3783 		.bus	= &eisa_bus_type,
3784 		.probe	= dfx_dev_register,
3785 		.remove	= dfx_dev_unregister,
3786 	},
3787 };
3788 #endif /* CONFIG_EISA */
3789 
3790 #ifdef CONFIG_TC
3791 static struct tc_device_id const dfx_tc_table[] = {
3792 	{ "DEC     ", "PMAF-FA " },
3793 	{ "DEC     ", "PMAF-FD " },
3794 	{ "DEC     ", "PMAF-FS " },
3795 	{ "DEC     ", "PMAF-FU " },
3796 	{ }
3797 };
3798 MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3799 
3800 static struct tc_driver dfx_tc_driver = {
3801 	.id_table	= dfx_tc_table,
3802 	.driver		= {
3803 		.name	= "defxx",
3804 		.bus	= &tc_bus_type,
3805 		.probe	= dfx_dev_register,
3806 		.remove	= dfx_dev_unregister,
3807 	},
3808 };
3809 #endif /* CONFIG_TC */
3810 
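/*
 * Probe/remove wrappers used by the EISA and TC bus drivers above: hold
 * a reference on the bus device for as long as the interface is
 * registered and drop it again on removal.
 */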
3811 static int __maybe_unused dfx_dev_register(struct device *dev)
3812 {
3813 	int status;
3814 
3815 	status = dfx_register(dev);
3816 	if (!status)
3817 		get_device(dev);
3818 	return status;
3819 }
3820 
3821 static int __maybe_unused dfx_dev_unregister(struct device *dev)
3822 {
3823 	put_device(dev);
3824 	dfx_unregister(dev);
3825 	return 0;
3826 }
3827 
3828 
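/* Register the PCI, EISA and TC bus drivers in turn; registration of a
 * later bus type is only attempted if the earlier ones succeeded.
 */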
3829 static int dfx_init(void)
3830 {
3831 	int status;
3832 
3833 	status = pci_register_driver(&dfx_pci_driver);
3834 	if (!status)
3835 		status = eisa_driver_register(&dfx_eisa_driver);
3836 	if (!status)
3837 		status = tc_register_driver(&dfx_tc_driver);
3838 	return status;
3839 }
3840 
3841 static void dfx_cleanup(void)
3842 {
3843 	tc_unregister_driver(&dfx_tc_driver);
3844 	eisa_driver_unregister(&dfx_eisa_driver);
3845 	pci_unregister_driver(&dfx_pci_driver);
3846 }
3847 
3848 module_init(dfx_init);
3849 module_exit(dfx_cleanup);
3850 MODULE_AUTHOR("Lawrence V. Stefani");
3851 MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3852 		   DRV_VERSION " " DRV_RELDATE);
3853 MODULE_LICENSE("GPL");
3854