xref: /linux/drivers/net/fddi/defza.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*	FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
3  *
4  *	Copyright (c) 2018  Maciej W. Rozycki
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  *	References:
12  *
13  *	Dave Sawyer & Phil Weeks & Frank Itkowsky,
14  *	"DEC FDDIcontroller 700 Port Specification",
15  *	Revision 1.1, Digital Equipment Corporation
16  */
17 
18 /* ------------------------------------------------------------------------- */
19 /* FZA configurable parameters.                                              */
20 
21 /* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024.  */
22 #define FZA_RING_TX_MODE 0
23 
24 /* The number of receive ring descriptors; from 2 up to 256.  */
25 #define FZA_RING_RX_SIZE 256
26 
27 /* End of FZA configurable parameters.  No need to change anything below.    */
28 /* ------------------------------------------------------------------------- */
29 
30 #include <linux/delay.h>
31 #include <linux/device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/init.h>
34 #include <linux/interrupt.h>
35 #include <linux/io.h>
36 #include <linux/io-64-nonatomic-lo-hi.h>
37 #include <linux/ioport.h>
38 #include <linux/kernel.h>
39 #include <linux/list.h>
40 #include <linux/module.h>
41 #include <linux/netdevice.h>
42 #include <linux/fddidevice.h>
43 #include <linux/sched.h>
44 #include <linux/skbuff.h>
45 #include <linux/spinlock.h>
46 #include <linux/stat.h>
47 #include <linux/tc.h>
48 #include <linux/timer.h>
49 #include <linux/types.h>
50 #include <linux/wait.h>
51 
52 #include <asm/barrier.h>
53 
54 #include "defza.h"
55 
56 #define DRV_NAME "defza"
57 #define DRV_VERSION "v.1.1.4"
58 #define DRV_RELDATE "Oct  6 2018"
59 
60 static const char version[] =
61 	DRV_NAME ": " DRV_VERSION "  " DRV_RELDATE "  Maciej W. Rozycki\n";
62 
63 MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>");
64 MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
65 MODULE_LICENSE("GPL");
66 
67 static int loopback;
68 module_param(loopback, int, 0644);
69 
70 /* Ring Purger Multicast */
71 static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
72 /* Directed Beacon Multicast */
73 static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
74 
75 /* Shorthands for MMIO accesses that we require to be strongly ordered
76  * WRT preceding MMIO accesses.
77  */
78 #define readw_o readw_relaxed
79 #define readl_o readl_relaxed
80 
81 #define writew_o writew_relaxed
82 #define writel_o writel_relaxed
83 
84 /* Shorthands for MMIO accesses that we are happy with being weakly ordered
85  * WRT preceding MMIO accesses.
86  */
87 #define readw_u readw_relaxed
88 #define readl_u readl_relaxed
89 #define readq_u readq_relaxed
90 
91 #define writew_u writew_relaxed
92 #define writel_u writel_relaxed
93 #define writeq_u writeq_relaxed
94 
95 static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
96 						unsigned int length)
97 {
98 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
99 }
100 
101 static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
102 					    unsigned int length)
103 {
104 	return __netdev_alloc_skb(dev, length, GFP_KERNEL);
105 }
106 
107 static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
108 {
109 	unsigned long x, y;
110 
111 	x = (unsigned long)skb->data;
112 	y = ALIGN(x, v);
113 
114 	skb_reserve(skb, y - x);
115 }
116 
/* Copy "size" bytes (rounded up to a whole number of 32-bit words, as
 * only word accesses are permitted to card memory) from MMIO space to
 * host memory.  On 64-bit hosts, 64-bit reads are used for the bulk of
 * the transfer with a single 32-bit read for a trailing odd word.
 */
static inline void fza_reads(const void __iomem *from, void *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 *dst = to;
		u32 *dst_trail;

		/* "size" is converted from bytes to 32-bit words here. */
		for (size = (size + 3) / 4; size > 1; size -= 2)
			*dst++ = readq_u(src++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 *)dst;
			*dst_trail = readl_u(src_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 *dst = to;

		for (size = (size + 3) / 4; size; size--)
			*dst++ = readl_u(src++);
	}
}
141 
/* Copy "size" bytes (rounded up to a whole number of 32-bit words, as
 * only word accesses are permitted to card memory) from host memory to
 * MMIO space.  On 64-bit hosts, 64-bit writes are used for the bulk of
 * the transfer with a single 32-bit write for a trailing odd word.
 */
static inline void fza_writes(const void *from, void __iomem *to,
			      unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 *src = from;
		const u32 *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		/* "size" is converted from bytes to 32-bit words here. */
		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(*src++, dst++);
		if (size) {
			src_trail = (u32 *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(*src_trail, dst_trail);
		}
	} else {
		const u32 *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(*src++, dst++);
	}
}
166 
/* Copy "size" bytes (rounded up to a whole number of 32-bit words, as
 * only word accesses are permitted to card memory) between two MMIO
 * locations.  On 64-bit hosts, 64-bit accesses are used for the bulk
 * of the transfer with a single 32-bit access for a trailing odd word.
 */
static inline void fza_moves(const void __iomem *from, void __iomem *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		/* "size" is converted from bytes to 32-bit words here. */
		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(readq_u(src++), dst++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(readl_u(src_trail), dst_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(readl_u(src++), dst++);
	}
}
191 
192 static inline void fza_zeros(void __iomem *to, unsigned long size)
193 {
194 	if (sizeof(unsigned long) == 8) {
195 		u64 __iomem *dst = to;
196 		u32 __iomem *dst_trail;
197 
198 		for (size = (size + 3) / 4; size > 1; size -= 2)
199 			writeq_u(0, dst++);
200 		if (size) {
201 			dst_trail = (u32 __iomem *)dst;
202 			writel_u(0, dst_trail);
203 		}
204 	} else {
205 		u32 __iomem *dst = to;
206 
207 		for (size = (size + 3) / 4; size; size--)
208 			writel_u(0, dst++);
209 	}
210 }
211 
/* Dump the adapter's CSRs for diagnostics.  The first access uses the
 * strongly-ordered accessor so the dump is ordered after any preceding
 * MMIO activity; the remaining reads may be weakly ordered.
 */
static inline void fza_regs_dump(struct fza_private *fp)
{
	pr_debug("%s: iomem registers:\n", fp->name);
	pr_debug(" reset:           0x%04x\n", readw_o(&fp->regs->reset));
	pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
	pr_debug(" status:          0x%04x\n", readw_u(&fp->regs->status));
	pr_debug(" interrupt mask:  0x%04x\n", readw_u(&fp->regs->int_mask));
	pr_debug(" control A:       0x%04x\n", readw_u(&fp->regs->control_a));
	pr_debug(" control B:       0x%04x\n", readw_u(&fp->regs->control_b));
}
222 
/* Pulse the board reset and then re-enable the interrupt events the
 * driver handles (fp->int_mask).  All accesses are strongly ordered so
 * the sequence reaches the hardware exactly as written.
 */
static inline void fza_do_reset(struct fza_private *fp)
{
	/* Reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	readw_o(&fp->regs->reset);	/* Read it back for a small delay. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);

	/* Enable all interrupt events we handle. */
	writew_o(fp->int_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
}
235 
/* Take the interface out of driver mode and hold the board in reset.
 * Unlike fza_do_reset() no interrupts are re-enabled afterwards.
 */
static inline void fza_do_shutdown(struct fza_private *fp)
{
	/* Disable the driver mode. */
	writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);

	/* And reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
}
247 
/* Reset the board and wait (up to 45 s) for the state-change interrupt
 * that signals completion; the board must then be in the UNINITIALIZED
 * state.  Returns 0 on success or -EIO on timeout/failure.
 */
static int fza_reset(struct fza_private *fp)
{
	unsigned long flags;
	uint status, state;
	long t;

	pr_info("%s: resetting the board...\n", fp->name);

	spin_lock_irqsave(&fp->lock, flags);
	/* Cleared under the lock so the interrupt handler's setting of the
	 * flag cannot be lost between the reset and the wait below.
	 */
	fp->state_chg_flag = 0;
	fza_do_reset(fp);
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says RESET needs up to 30 seconds to complete.  My DEFZA-AA
	 * rev. C03 happily finishes in 9.7 seconds. :-)  But we need to
	 * be on the safe side...
	 */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       45 * HZ);
	status = readw_u(&fp->regs->status);
	state = FZA_STATUS_GET_STATE(status);
	if (fp->state_chg_flag == 0) {
		pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: RESET failed!, state %x, failure ID %x\n",
		       fp->name, state, FZA_STATUS_GET_TEST(status));
		return -EIO;
	}
	pr_info("%s: OK\n", fp->name);
	pr_debug("%s: RESET: %lums elapsed\n", fp->name,
		 (45 * HZ - t) * 1000 / HZ);

	return 0;
}
284 
/* Fill in the next command-ring descriptor's buffer for "command" and
 * hand the descriptor to the firmware.  Called with fp->lock held (or
 * from interrupt context).  The state-change interrupt is masked for
 * the duration so command setup is not preempted by a state transition.
 * Returns the ring descriptor used (for the caller to read the status
 * from), or NULL if no descriptor is available.
 */
static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
						 int command)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
	unsigned int old_mask, new_mask;
	union fza_cmd_buf __iomem *buf;
	struct netdev_hw_addr *ha;
	int i;

	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_STATE_CHG;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);			/* Synchronize. */
	fp->int_mask = new_mask;

	/* The command buffer lives in card memory at the offset recorded
	 * in the descriptor.
	 */
	buf = fp->mmio + readl_u(&ring->buffer);

	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
	    FZA_RING_OWN_HOST) {
		/* NOTE(review): this error path returns with the state-change
		 * interrupt still masked (old_mask is not restored) — verify
		 * whether that is intentional.
		 */
		pr_warn("%s: command buffer full, command: %u!\n", fp->name,
			command);
		return NULL;
	}

	switch (command) {
	case FZA_RING_CMD_INIT:
		writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
		writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
		fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
		break;

	case FZA_RING_CMD_MODCAM:
		/* CAM slots 0 and 1 always hold the Ring Purger and the
		 * Directed Beacon multicast addresses; the remaining slots
		 * are filled from the device's multicast list and any
		 * leftover slots are cleared.
		 */
		i = 0;
		fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= FZA_CMD_CAM_SIZE)
				break;
			fza_writes(ha->addr, &buf->cam.hw_addr[i++],
				   sizeof(*buf->cam.hw_addr));
		}
		while (i < FZA_CMD_CAM_SIZE)
			fza_zeros(&buf->cam.hw_addr[i++],
				  sizeof(*buf->cam.hw_addr));
		break;

	case FZA_RING_CMD_PARAM:
		writel_u(loopback, &buf->param.loop_mode);
		writel_u(fp->t_max, &buf->param.t_max);
		writel_u(fp->t_req, &buf->param.t_req);
		writel_u(fp->tvx, &buf->param.tvx);
		writel_u(fp->lem_threshold, &buf->param.lem_threshold);
		fza_writes(&fp->station_id, &buf->param.station_id,
			   sizeof(buf->param.station_id));
		/* Convert to milliseconds due to buggy firmware. */
		writel_u(fp->rtoken_timeout / 12500,
			 &buf->param.rtoken_timeout);
		writel_u(fp->ring_purger, &buf->param.ring_purger);
		break;

	case FZA_RING_CMD_MODPROM:
		if (dev->flags & IFF_PROMISC) {
			writel_u(1, &buf->modprom.llc_prom);
			writel_u(1, &buf->modprom.smt_prom);
		} else {
			writel_u(0, &buf->modprom.llc_prom);
			writel_u(0, &buf->modprom.smt_prom);
		}
		/* Fall back to receive-all-multicast when the CAM cannot
		 * hold the full list (two slots are reserved above).
		 */
		if (dev->flags & IFF_ALLMULTI ||
		    netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
			writel_u(1, &buf->modprom.llc_multi);
		else
			writel_u(0, &buf->modprom.llc_multi);
		writel_u(1, &buf->modprom.llc_bcast);
		break;
	}

	/* Trigger the command. */
	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
	writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;

	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	return ring;
}
376 
/* Issue the INIT command and wait (up to 3 s) for its completion
 * interrupt.  On success, if "init" is non-NULL, it is pointed at the
 * INIT response buffer in card memory.  Returns 0 on success or a
 * negative errno on failure.
 */
static int fza_init_send(struct net_device *dev,
			 struct fza_cmd_init *__iomem *init)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	unsigned long flags;
	u32 stat;
	long t;

	spin_lock_irqsave(&fp->lock, flags);
	/* Cleared under the lock so the completion cannot be missed. */
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		/* This should never happen in the uninitialized state,
		 * so do not try to recover and just consider it fatal.
		 */
		return -ENOBUFS;

	/* INIT may take quite a long time (160ms for my C03). */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: INIT command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: INIT command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: INIT: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	if (init)
		*init = fp->mmio + readl_u(&ring->buffer);
	return 0;
}
417 
/* Hand all host receive descriptors (with their preallocated DMA
 * buffers) over to the firmware.  Buffer addresses are encoded in
 * 512-byte units (hence the >> 9); buffer1 refers to the portion of
 * the buffer 0x1000 bytes in — presumably its second 4 KiB page
 * (TODO: confirm against the port specification).
 */
static void fza_rx_init(struct fza_private *fp)
{
	int i;

	/* Fill the host receive descriptor ring. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		writel_o((fp->rx_dma[i] + 0x1000) >> 9,
			 &fp->ring_hst_rx[i].buffer1);
		writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
			 &fp->ring_hst_rx[i].buf0_own);
	}
}
431 
/* ndo_set_rx_mode handler: reload the address CAM first, then update
 * the promiscuous/multicast mode to match the device flags.
 */
static void fza_set_rx_mode(struct net_device *dev)
{
	fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
	fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}
437 
/* A transmit source pointer that refers either to host memory (data_ptr)
 * or to card MMIO space (mmio_ptr); which member is valid is selected by
 * the "smt" argument of fza_do_xmit().
 */
union fza_buffer_txp {
	struct fza_buffer_tx *data_ptr;
	struct fza_buffer_tx __iomem *mmio_ptr;
};
442 
/* Copy a frame of "len" bytes into the RMC transmit ring, split across
 * as many FZA_TX_BUFFER_SIZE fragments as needed, and kick the RMC.
 * "smt" selects whether the source is card memory (SMT forwarding) or
 * host memory.  Returns 1 if the ring lacks space for the frame (the
 * caller must retry later), 0 on success.
 */
static int fza_do_xmit(union fza_buffer_txp ub, int len,
		       struct net_device *dev, int smt)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *rmc_tx_ptr;
	int i, first, frag_len, left_len;
	u32 own, rmc;

	/* Check that enough descriptors are free for the whole frame. */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < len)
		return 1;

	first = fp->ring_rmc_tx_index;

	left_len = len;
	frag_len = FZA_TX_BUFFER_SIZE;
	/* First descriptor is relinquished last. */
	own = FZA_RING_TX_OWN_HOST;
	/* First descriptor carries frame length; we don't use cut-through. */
	rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
	do {
		i = fp->ring_rmc_tx_index;
		rmc_tx_ptr = &fp->buffer_tx[i];

		if (left_len < FZA_TX_BUFFER_SIZE)
			frag_len = left_len;
		left_len -= frag_len;

		/* Length must be a multiple of 4 as only word writes are
		 * permitted!
		 */
		frag_len = (frag_len + 3) & ~3;
		if (smt)
			fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
		else
			fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);

		if (left_len == 0)
			rmc |= FZA_RING_TX_EOP;		/* Mark last frag. */

		writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
		writel_o(own, &fp->ring_rmc_tx[i].own);

		/* Advancing data_ptr also advances mmio_ptr (same union). */
		ub.data_ptr++;
		fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
					fp->ring_rmc_tx_size;

		/* Settings for intermediate frags. */
		own = FZA_RING_TX_OWN_RMC;
		rmc = 0;
	} while (left_len > 0);

	/* Stop the queue if a maximum-sized frame would no longer fit. */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
	}

	/* Hand the first descriptor over only now that all the others are
	 * set up, making the whole frame visible to the RMC at once.
	 */
	writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);

	/* Go, go, go! */
	writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);

	return 0;
}
510 
/* Queue a received SMT frame to the SMT receive ring for the firmware
 * to process.  Returns 1 if the ring is full (frame dropped by the
 * caller), 0 on success.
 */
static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
			   u32 rmc, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_rx_ptr;
	u32 own;
	int i;

	i = fp->ring_smt_rx_index;
	own = readl_o(&fp->ring_smt_rx[i].own);
	if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
		return 1;

	/* The descriptor's buffer is an offset into card memory. */
	smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);

	/* Length must be a multiple of 4 as only word writes are permitted! */
	fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);

	writel_o(rmc, &fp->ring_smt_rx[i].rmc);
	writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);

	fp->ring_smt_rx_index =
		(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;

	/* Grab it! */
	writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);

	return 0;
}
540 
/* Reclaim completed RMC transmit descriptors, update the transmit
 * statistics from the first descriptor of each frame, and wake the
 * queue once enough ring space has been recovered.
 */
static void fza_tx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own, rmc;
	int i;

	while (1) {
		i = fp->ring_rmc_txd_index;
		if (i == fp->ring_rmc_tx_index)
			break;			/* Ring fully reclaimed. */
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
			break;			/* Still owned by the RMC. */

		rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
		/* Only process the first descriptor. */
		if ((rmc & FZA_RING_TX_SOP) != 0) {
			if ((rmc & FZA_RING_TX_DCC_MASK) ==
			    FZA_RING_TX_DCC_SUCCESS) {
				int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
								/* Omit PRH. */

				fp->stats.tx_packets++;
				fp->stats.tx_bytes += pkt_len;
			} else {
				fp->stats.tx_errors++;
				switch (rmc & FZA_RING_TX_DCC_MASK) {
				case FZA_RING_TX_DCC_DTP_SOP:
				case FZA_RING_TX_DCC_DTP:
				case FZA_RING_TX_DCC_ABORT:
					fp->stats.tx_aborted_errors++;
					break;
				case FZA_RING_TX_DCC_UNDRRUN:
					fp->stats.tx_fifo_errors++;
					break;
				case FZA_RING_TX_DCC_PARITY:
					/* fall through -- counted as a
					 * generic tx_error only.
					 */
				default:
					break;
				}
			}
		}

		fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
					 fp->ring_rmc_tx_size;
	}

	/* Restart the queue once a maximum-sized frame fits again. */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
		if (fp->queue_active) {
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
		}
	}
}
596 
/* Validate a received frame from its RMC descriptor word and FC octet.
 * Updates the error statistics as appropriate and may halt the
 * interface (triggering a reset) on conditions indicating a hardware
 * fault.  Returns 1 if the frame must be dropped, 0 if it is good.
 */
static inline int fza_rx_err(struct fza_private *fp,
			     const u32 rmc, const u8 fc)
{
	int len, min_len, max_len;

	len = rmc & FZA_RING_PBC_MASK;

	if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
		fp->stats.rx_errors++;

		/* Check special status codes. */
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		     (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		      FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
			if (len >= 8190)
				fp->stats.rx_length_errors++;
			return 1;
		}
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		     (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		      FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		}

		/* Check the MAC status. */
		switch (rmc & FZA_RING_RX_RRR_MASK) {
		case FZA_RING_RX_RRR_OK:
			if ((rmc & FZA_RING_RX_CRC) != 0)
				fp->stats.rx_crc_errors++;
			else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
				 (rmc & FZA_RING_RX_FSB_ERR) != 0)
				fp->stats.rx_frame_errors++;
			return 1;
		case FZA_RING_RX_RRR_SADDR:
		case FZA_RING_RX_RRR_DADDR:
		case FZA_RING_RX_RRR_ABORT:
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		case FZA_RING_RX_RRR_LENGTH:
			fp->stats.rx_frame_errors++;
			return 1;
		default:
			return 1;
		}
	}

	/* Packet received successfully; validate the length.  Minimum
	 * lengths depend on the frame class/format carried in FC.
	 */
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
			min_len = 37;
		else
			min_len = 17;
		break;
	case FDDI_FC_K_FORMAT_LLC:
		min_len = 20;
		break;
	default:
		min_len = 17;
		break;
	}
	max_len = 4495;
	if (len < min_len || len > max_len) {
		fp->stats.rx_errors++;
		fp->stats.rx_length_errors++;
		return 1;
	}

	return 0;
}
674 
/* Process the host receive ring: validate each completed frame, divert
 * SMT frames to the SMT receive ring, pass good LLC frames up the
 * stack, replace consumed buffers with freshly-allocated ones (or
 * recycle the old buffer if allocation fails), and return the
 * descriptors to the firmware.  Runs in interrupt context.
 */
static void fza_rx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct sk_buff *skb, *newskb;
	struct fza_fddihdr *frame;
	dma_addr_t dma, newdma;
	u32 own, rmc, buf;
	int i, len;
	u8 fc;

	while (1) {
		i = fp->ring_hst_rx_index;
		own = readl_o(&fp->ring_hst_rx[i].buf0_own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;		/* No more completed frames. */

		rmc = readl_u(&fp->ring_hst_rx[i].rmc);
		skb = fp->rx_skbuff[i];
		dma = fp->rx_dma[i];

		/* The RMC doesn't count the preamble and the starting
		 * delimiter.  We fix it up here for a total of 3 octets.
		 */
		dma_rmb();
		len = (rmc & FZA_RING_PBC_MASK) + 3;
		frame = (struct fza_fddihdr *)skb->data;

		/* We need to get at real FC. */
		dma_sync_single_for_cpu(fp->bdev,
					dma +
					((u8 *)&frame->hdr.fc - (u8 *)frame),
					sizeof(frame->hdr.fc),
					DMA_FROM_DEVICE);
		fc = frame->hdr.fc;

		if (fza_rx_err(fp, rmc, fc))
			goto err_rx;

		/* We have to 512-byte-align RX buffers... */
		newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
		if (newskb) {
			fza_skb_align(newskb, 512);
			newdma = dma_map_single(fp->bdev, newskb->data,
						FZA_RX_BUFFER_SIZE,
						DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, newdma)) {
				dev_kfree_skb_irq(newskb);
				newskb = NULL;
			}
		}
		if (newskb) {
			int pkt_len = len - 7;	/* Omit P, SD and FCS. */
			int is_multi;
			int rx_stat;

			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
					 DMA_FROM_DEVICE);

			/* Queue SMT frames to the SMT receive ring. */
			if ((fc & (FDDI_FC_K_CLASS_MASK |
				   FDDI_FC_K_FORMAT_MASK)) ==
			     (FDDI_FC_K_CLASS_ASYNC |
			      FDDI_FC_K_FORMAT_MANAGEMENT) &&
			    (rmc & FZA_RING_RX_DA_MASK) !=
			     FZA_RING_RX_DA_PROM) {
				if (fza_do_recv_smt((struct fza_buffer_tx *)
						    skb->data, len, rmc,
						    dev)) {
					/* SMT ring full; tell the firmware. */
					writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
						 &fp->regs->control_a);
				}
			}

			is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);

			skb_reserve(skb, 3);	/* Skip over P and SD. */
			skb_put(skb, pkt_len);	/* And cut off FCS. */
			skb->protocol = fddi_type_trans(skb, dev);

			rx_stat = netif_rx(skb);
			if (rx_stat != NET_RX_DROP) {
				fp->stats.rx_packets++;
				fp->stats.rx_bytes += pkt_len;
				if (is_multi)
					fp->stats.multicast++;
			} else {
				fp->stats.rx_dropped++;
			}

			skb = newskb;
			dma = newdma;
			fp->rx_skbuff[i] = skb;
			fp->rx_dma[i] = dma;
		} else {
			/* Allocation failed: drop the frame and recycle the
			 * old buffer for this slot.
			 */
			fp->stats.rx_dropped++;
			pr_notice_ratelimited(
				"%s: memory squeeze, dropping packet\n",
				fp->name);
		}

err_rx:
		/* Return the descriptor (with whichever buffer is current)
		 * to the firmware; addresses are in 512-byte units.
		 */
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		buf = (dma + 0x1000) >> 9;
		writel_o(buf, &fp->ring_hst_rx[i].buffer1);
		buf = dma >> 9 | FZA_RING_OWN_FZA;
		writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
		fp->ring_hst_rx_index =
			(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
	}
}
785 
/* Forward firmware-generated SMT frames from the SMT transmit ring to
 * the RMC transmit ring (mirroring a copy to any packet taps first),
 * then return the descriptors to the firmware.  Runs in interrupt
 * context.
 */
static void fza_tx_smt(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_tx_ptr;
	int i, len;
	u32 own;

	while (1) {
		i = fp->ring_smt_tx_index;
		own = readl_o(&fp->ring_smt_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;		/* No more pending SMT frames. */

		smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
		len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;

		if (!netif_queue_stopped(dev)) {
			if (dev_nit_active(dev)) {
				struct fza_buffer_tx *skb_data_ptr;
				struct sk_buff *skb;

				/* Length must be a multiple of 4 as only word
				 * reads are permitted!
				 */
				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
				if (!skb)
					goto err_no_skb;	/* Drop. */

				skb_data_ptr = (struct fza_buffer_tx *)
					       skb->data;

				fza_reads(smt_tx_ptr, skb_data_ptr,
					  (len + 3) & ~3);
				skb->dev = dev;
				skb_reserve(skb, 3);	/* Skip over PRH. */
				skb_put(skb, len - 3);
				skb_reset_network_header(skb);

				dev_queue_xmit_nit(skb, dev);

				dev_kfree_skb_irq(skb);

err_no_skb:
				;
			}

			/* Queue the frame to the RMC transmit ring. */
			fza_do_xmit((union fza_buffer_txp)
				    { .mmio_ptr = smt_tx_ptr },
				    len, dev, 1);
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
	}
}
843 
/* Process the unsolicited-event ring; only receive-overflow events are
 * acted upon (counted), all others are acknowledged and discarded.
 */
static void fza_uns(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	while (1) {
		i = fp->ring_uns_index;
		own = readl_o(&fp->ring_uns[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;		/* No more pending events. */

		if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
			fp->stats.rx_errors++;
			fp->stats.rx_over_errors++;
		}

		/* Return the descriptor to the firmware. */
		writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
		fp->ring_uns_index =
			(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
	}
}
866 
/* Handle a transmit-ring flush request from the firmware: return all
 * SMT TX descriptors, mark all RMC-owned TX descriptors for discard
 * (DTP), and acknowledge completion of the flush.
 */
static void fza_tx_flush(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	/* Clean up the SMT TX ring. */
	i = fp->ring_smt_tx_index;
	do {
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;

	} while (i != fp->ring_smt_tx_index);

	/* Clean up the RMC TX ring. */
	i = fp->ring_rmc_tx_index;
	do {
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
			u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);

			/* Ask the RMC to discard this frame. */
			writel_u(rmc | FZA_RING_TX_DTP,
				 &fp->ring_rmc_tx[i].rmc);
		}
		fp->ring_rmc_tx_index =
			(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;

	} while (i != fp->ring_rmc_tx_index);

	/* Done. */
	writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}
900 
/* Top-level interrupt handler: reads and acknowledges the pending
 * events, then dispatches them in a fixed order (command completion,
 * TX done, host RX, SMT TX, flush request, link change, unsolicited
 * events, state change).  The order matters — see the per-event
 * comments below.
 */
static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fza_private *fp = netdev_priv(dev);
	uint int_event;

	/* Get interrupt events. */
	int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
	if (int_event == 0)
		return IRQ_NONE;	/* Not ours (shared IRQ). */

	/* Clear the events. */
	writew_u(int_event, &fp->regs->int_event);

	/* Now handle the events.  The order matters. */

	/* Command finished interrupt. */
	if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
		fp->irq_count_cmd_done++;

		/* Flag set under the lock, pairing with the waiters that
		 * clear it under the same lock before sleeping.
		 */
		spin_lock(&fp->lock);
		fp->cmd_done_flag = 1;
		wake_up(&fp->cmd_done_wait);
		spin_unlock(&fp->lock);
	}

	/* Transmit finished interrupt. */
	if ((int_event & FZA_EVENT_TX_DONE) != 0) {
		fp->irq_count_tx_done++;
		fza_tx(dev);
	}

	/* Host receive interrupt. */
	if ((int_event & FZA_EVENT_RX_POLL) != 0) {
		fp->irq_count_rx_poll++;
		fza_rx(dev);
	}

	/* SMT transmit interrupt. */
	if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
		fp->irq_count_smt_tx_poll++;
		fza_tx_smt(dev);
	}

	/* Transmit ring flush request. */
	if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
		fp->irq_count_flush_tx++;
		fza_tx_flush(dev);
	}

	/* Link status change interrupt. */
	if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
		uint status;

		fp->irq_count_link_st_chg++;
		status = readw_u(&fp->regs->status);
		if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
			netif_carrier_on(dev);
			pr_info("%s: link available\n", fp->name);
		} else {
			netif_carrier_off(dev);
			pr_info("%s: link unavailable\n", fp->name);
		}
	}

	/* Unsolicited event interrupt. */
	if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
		fp->irq_count_uns_poll++;
		fza_uns(dev);
	}

	/* State change interrupt. */
	if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
		uint status, state;

		fp->irq_count_state_chg++;

		status = readw_u(&fp->regs->status);
		state = FZA_STATUS_GET_STATE(status);
		pr_debug("%s: state change: %x\n", fp->name, state);
		switch (state) {
		case FZA_STATE_RESET:
			break;

		case FZA_STATE_UNINITIALIZED:
			/* Reset completed: stop any pending reset timer,
			 * rewind all ring indices and, if this was a
			 * transition down from a higher state, start
			 * reinitialization.
			 */
			netif_carrier_off(dev);
			timer_delete_sync(&fp->reset_timer);
			fp->ring_cmd_index = 0;
			fp->ring_uns_index = 0;
			fp->ring_rmc_tx_index = 0;
			fp->ring_rmc_txd_index = 0;
			fp->ring_hst_rx_index = 0;
			fp->ring_smt_tx_index = 0;
			fp->ring_smt_rx_index = 0;
			if (fp->state > state) {
				pr_info("%s: OK\n", fp->name);
				fza_cmd_send(dev, FZA_RING_CMD_INIT);
			}
			break;

		case FZA_STATE_INITIALIZED:
			if (fp->state > state) {
				fza_set_rx_mode(dev);
				fza_cmd_send(dev, FZA_RING_CMD_PARAM);
			}
			break;

		case FZA_STATE_RUNNING:
		case FZA_STATE_MAINTENANCE:
			/* Operational: hand RX buffers to the firmware and
			 * open the transmit queue.
			 */
			fp->state = state;
			fza_rx_init(fp);
			fp->queue_active = 1;
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
			break;

		case FZA_STATE_HALTED:
			/* Fault: stop transmission, reset the board and arm
			 * the watchdog in case the reset itself hangs.
			 */
			fp->queue_active = 0;
			netif_stop_queue(dev);
			pr_debug("%s: queue stopped\n", fp->name);
			timer_delete_sync(&fp->reset_timer);
			pr_warn("%s: halted, reason: %x\n", fp->name,
				FZA_STATUS_GET_HALT(status));
			fza_regs_dump(fp);
			pr_info("%s: resetting the board...\n", fp->name);
			fza_do_reset(fp);
			fp->timer_state = 0;
			fp->reset_timer.expires = jiffies + 45 * HZ;
			add_timer(&fp->reset_timer);
			break;

		default:
			pr_warn("%s: undefined state: %x\n", fp->name, state);
			break;
		}

		spin_lock(&fp->lock);
		fp->state_chg_flag = 1;
		wake_up(&fp->state_chg_wait);
		spin_unlock(&fp->lock);
	}

	return IRQ_HANDLED;
}
1045 
/* Reset watchdog: if the 45 s reset started from the HALTED state never
 * completed, force the issue by asserting the board reset for one
 * second (timer_state 1) and then releasing it and re-enabling
 * interrupts, rearming the 45 s timeout again.  The timer keeps
 * rearming itself until a state-change interrupt deletes it.
 */
static void fza_reset_timer(struct timer_list *t)
{
	struct fza_private *fp = timer_container_of(fp, t, reset_timer);

	if (!fp->timer_state) {
		pr_err("%s: RESET timed out!\n", fp->name);
		pr_info("%s: trying harder...\n", fp->name);

		/* Assert the board reset. */
		writew_o(FZA_RESET_INIT, &fp->regs->reset);
		readw_o(&fp->regs->reset);		/* Synchronize. */

		fp->timer_state = 1;
		fp->reset_timer.expires = jiffies + HZ;
	} else {
		/* Clear the board reset. */
		writew_u(FZA_RESET_CLR, &fp->regs->reset);

		/* Enable all interrupt events we handle. */
		writew_o(fp->int_mask, &fp->regs->int_mask);
		readw_o(&fp->regs->int_mask);		/* Synchronize. */

		fp->timer_state = 0;
		fp->reset_timer.expires = jiffies + 45 * HZ;
	}
	add_timer(&fp->reset_timer);
}
1073 
/* The MAC address comes from the adapter firmware (see fza_probe);
 * changing it at run time is not supported by this driver.
 */
static int fza_set_mac_address(struct net_device *dev, void *addr)
{
	return -EOPNOTSUPP;
}
1078 
/* Queue one frame for transmission on the RMC transmit ring.  A 3-byte
 * packet request header (PRH) is built from the frame's FC field and
 * prepended in the skb headroom before handing the buffer to
 * fza_do_xmit().  The skb is always consumed here.
 */
static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned int old_mask, new_mask;
	int ret;
	u8 fc;

	skb_push(skb, 3);			/* Make room for PRH. */

	/* Decode FC to set PRH. */
	fc = skb->data[3];
	skb->data[0] = 0;
	skb->data[1] = 0;
	skb->data[2] = FZA_PRH2_NORMAL;
	if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
			/* Token. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
		} else {
			/* SMT or MAC. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
		}
		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
		break;
	case FDDI_FC_K_FORMAT_LLC:
	case FDDI_FC_K_FORMAT_FUTURE:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
		break;
	case FDDI_FC_K_FORMAT_IMPLEMENTOR:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
		break;
	}

	/* SMT transmit interrupts may sneak frames into the RMC
	 * transmit ring.  We disable them while queueing a frame
	 * to maintain consistency.
	 */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);			/* Synchronize. */
	fp->int_mask = new_mask;
	ret = fza_do_xmit((union fza_buffer_txp)
			  { .data_ptr = (struct fza_buffer_tx *)skb->data },
			  skb->len, dev, 0);
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	if (ret) {
		/* Probably an SMT packet filled the remaining space,
		 * so just stop the queue, but don't report it as an error.
		 */
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
		fp->stats.tx_dropped++;
	}

	dev_kfree_skb(skb);

	/* NOTE(review): a nonzero ret here is fza_do_xmit()'s own code
	 * rather than a netdev_tx_t constant; the skb has already been
	 * freed and accounted as dropped by this point.
	 */
	return ret;
}
1147 
/* Bring the interface up: allocate and DMA-map the host receive ring
 * buffers (512-byte-aligned as the hardware requires), initialize the
 * board via the INIT command, then program the operating parameters
 * with the PARAM command.  Returns 0 on success or a negative errno.
 */
static int fza_open(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma;
	int ret, i;
	u32 stat;
	long t;

	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		/* We have to 512-byte-align RX buffers... */
		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
		if (skb) {
			fza_skb_align(skb, 512);
			dma = dma_map_single(fp->bdev, skb->data,
					     FZA_RX_BUFFER_SIZE,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, dma)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (!skb) {
			/* Unwind: unmap and free everything allocated so far. */
			for (--i; i >= 0; i--) {
				dma_unmap_single(fp->bdev, fp->rx_dma[i],
						 FZA_RX_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(fp->rx_skbuff[i]);
				fp->rx_dma[i] = 0;
				fp->rx_skbuff[i] = NULL;
			}
			return -ENOMEM;
		}
		fp->rx_skbuff[i] = skb;
		fp->rx_dma[i] = dma;
	}

	/* NOTE(review): the RX buffers allocated above are not released on
	 * the error returns below; they are only reclaimed by fza_close()
	 * after a successful open.  Looks like a leak on open failure --
	 * confirm (freeing here would need the board quiesced first).
	 */
	ret = fza_init_send(dev, NULL);
	if (ret != 0)
		return ret;

	/* Purger and Beacon multicasts need to be supplied before PARAM. */
	fza_set_rx_mode(dev);

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		return -ENOBUFS;

	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: PARAM command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	return 0;
}
1219 
/* Take the interface down: stop the transmit queue, issue the SHUT
 * command and wait for the board to reach the UNINITIALIZED state
 * (DEC says SHUT needs up to 10 s; we allow 15 s), then unmap and
 * free all host receive ring buffers.
 */
static int fza_close(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned long flags;
	uint state;
	long t;
	int i;

	netif_stop_queue(dev);
	pr_debug("%s: queue stopped\n", fp->name);

	timer_delete_sync(&fp->reset_timer);
	spin_lock_irqsave(&fp->lock, flags);
	fp->state = FZA_STATE_UNINITIALIZED;
	fp->state_chg_flag = 0;
	/* Shut the interface down. */
	writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
	readw_o(&fp->regs->control_a);			/* Synchronize. */
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says SHUT needs up to 10 seconds to complete. */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       15 * HZ);
	state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
	if (fp->state_chg_flag == 0) {
		pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
		return -EIO;
	}
	pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
		 (15 * HZ - t) * 1000 / HZ);

	/* The board is quiescent now; reclaim the RX ring buffers. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++)
		if (fp->rx_skbuff[i]) {
			dma_unmap_single(fp->bdev, fp->rx_dma[i],
					 FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(fp->rx_skbuff[i]);
			fp->rx_dma[i] = 0;
			fp->rx_skbuff[i] = NULL;
		}

	return 0;
}
1266 
1267 static struct net_device_stats *fza_get_stats(struct net_device *dev)
1268 {
1269 	struct fza_private *fp = netdev_priv(dev);
1270 
1271 	return &fp->stats;
1272 }
1273 
/* Probe one TURBOchannel FDDIcontroller 700/700-C board: reserve and map
 * its MMIO window, sanitize and reset the hardware, fetch the INIT block
 * (MAC address, firmware revisions, on-board ring locations and default
 * SMT parameters), then shut the interface again and register the net
 * device.  Returns 0 on success or a negative errno on failure.
 */
static int fza_probe(struct device *bdev)
{
	static const struct net_device_ops netdev_ops = {
		.ndo_open = fza_open,
		.ndo_stop = fza_close,
		.ndo_start_xmit = fza_start_xmit,
		.ndo_set_rx_mode = fza_set_rx_mode,
		.ndo_set_mac_address = fza_set_mac_address,
		.ndo_get_stats = fza_get_stats,
	};
	static int version_printed;
	char rom_rev[4], fw_rev[4], rmc_rev[4];
	struct tc_dev *tdev = to_tc_dev(bdev);
	struct fza_cmd_init __iomem *init;
	resource_size_t start, len;
	struct net_device *dev;
	struct fza_private *fp;
	uint smt_ver, pmd_type;
	void __iomem *mmio;
	uint hw_addr[2];
	int ret, i;

	/* Print the driver banner once only, on the first probe. */
	if (!version_printed) {
		pr_info("%s", version);
		version_printed = 1;
	}

	dev = alloc_fddidev(sizeof(*fp));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, bdev);

	fp = netdev_priv(dev);
	dev_set_drvdata(bdev, dev);

	fp->bdev = bdev;
	fp->name = dev_name(bdev);

	/* Request the I/O MEM resource. */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(bdev))) {
		pr_err("%s: cannot reserve MMIO region\n", fp->name);
		ret = -EBUSY;
		goto err_out_kfree;
	}

	/* MMIO mapping setup. */
	mmio = ioremap(start, len);
	if (!mmio) {
		pr_err("%s: cannot map MMIO\n", fp->name);
		ret = -ENOMEM;
		goto err_out_resource;
	}

	/* Initialize the new device structure. */
	switch (loopback) {
	case FZA_LOOP_NORMAL:
	case FZA_LOOP_INTERN:
	case FZA_LOOP_EXTERN:
		break;
	default:
		loopback = FZA_LOOP_NORMAL;	/* Reject bogus module param. */
	}

	fp->mmio = mmio;
	dev->irq = tdev->interrupt;

	pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
		fp->name, (long long)tdev->resource.start, dev->irq);
	pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);

	/* Fixed-location register and ring areas within the MMIO window. */
	fp->regs = mmio + FZA_REG_BASE;
	fp->ring_cmd = mmio + FZA_RING_CMD;
	fp->ring_uns = mmio + FZA_RING_UNS;

	init_waitqueue_head(&fp->state_chg_wait);
	init_waitqueue_head(&fp->cmd_done_wait);
	spin_lock_init(&fp->lock);
	fp->int_mask = FZA_MASK_NORMAL;

	timer_setup(&fp->reset_timer, fza_reset_timer, 0);

	/* Sanitize the board. */
	fza_regs_dump(fp);
	fza_do_shutdown(fp);

	ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
	if (ret != 0) {
		pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
		goto err_out_map;
	}

	/* Enable the driver mode. */
	writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);

	/* For some reason transmit done interrupts can trigger during
	 * reset.  This avoids a division error in the handler.
	 */
	fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;

	ret = fza_reset(fp);
	if (ret != 0)
		goto err_out_irq;

	ret = fza_init_send(dev, &init);
	if (ret != 0)
		goto err_out_irq;

	/* The MAC address is supplied by the adapter firmware. */
	fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
	dev_addr_set(dev, (u8 *)&hw_addr);

	/* Fetch the revision strings and strip their trailing spaces. */
	fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
	fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
	fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
	for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
		rom_rev[i] = 0;
	for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
		fw_rev[i] = 0;
	for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
		rmc_rev[i] = 0;

	/* Ring locations and sizes as reported by the INIT command. */
	fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
	fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
	fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
	fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
	fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
	fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
	fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
	fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);

	fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));

	/* Default SMT operating parameters reported by the firmware. */
	fp->t_max = readl_u(&init->def_t_max);
	fp->t_req = readl_u(&init->def_t_req);
	fp->tvx = readl_u(&init->def_tvx);
	fp->lem_threshold = readl_u(&init->lem_threshold);
	fza_reads(&init->def_station_id, &fp->station_id,
		  sizeof(fp->station_id));
	fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
	fp->ring_purger = readl_u(&init->ring_purger);

	smt_ver = readl_u(&init->smt_ver);
	pmd_type = readl_u(&init->pmd_type);

	pr_debug("%s: INIT parameters:\n", fp->name);
	pr_debug("        tx_mode: %u\n", readl_u(&init->tx_mode));
	pr_debug("    hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
	pr_debug("        rmc_rev: %.4s\n", rmc_rev);
	pr_debug("        rom_rev: %.4s\n", rom_rev);
	pr_debug("         fw_rev: %.4s\n", fw_rev);
	pr_debug("       mop_type: %u\n", readl_u(&init->mop_type));
	pr_debug("         hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
	pr_debug("         rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
	pr_debug("    rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
	pr_debug("         smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
	pr_debug("    smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
	pr_debug("         smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
	pr_debug("    smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
	/* TC systems are always LE, so don't bother swapping. */
	pr_debug("        hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
	pr_debug("      def_t_req: %u\n", readl_u(&init->def_t_req));
	pr_debug("        def_tvx: %u\n", readl_u(&init->def_tvx));
	pr_debug("      def_t_max: %u\n", readl_u(&init->def_t_max));
	pr_debug("  lem_threshold: %u\n", readl_u(&init->lem_threshold));
	/* Don't bother swapping, see above. */
	pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
	pr_debug("   pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
	pr_debug("        smt_ver: %u\n", readl_u(&init->smt_ver));
	pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
	pr_debug("    ring_purger: %u\n", readl_u(&init->ring_purger));
	pr_debug("    smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
	pr_debug("    smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
	pr_debug("       pmd_type: %u\n", readl_u(&init->pmd_type));

	pr_info("%s: model %s, address %pMF\n",
		fp->name,
		pmd_type == FZA_PMD_TYPE_TW ?
			"700-C (DEFZA-CA), ThinWire PMD selected" :
			pmd_type == FZA_PMD_TYPE_STP ?
				"700-C (DEFZA-CA), STP PMD selected" :
				"700 (DEFZA-AA), MMF PMD",
		dev->dev_addr);
	pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
		"SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);

	/* Now that we fetched initial parameters just shut the interface
	 * until opened.
	 */
	ret = fza_close(dev);
	if (ret != 0)
		goto err_out_irq;

	/* The FZA-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;

	ret = register_netdev(dev);
	if (ret != 0)
		goto err_out_irq;

	pr_info("%s: registered as %s\n", fp->name, dev->name);
	fp->name = (const char *)dev->name;

	/* Hold a reference to the bus device; dropped in fza_remove. */
	get_device(bdev);
	return 0;

err_out_irq:
	timer_delete_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

err_out_map:
	iounmap(mmio);

err_out_resource:
	release_mem_region(start, len);

err_out_kfree:
	pr_err("%s: initialization failure, aborting!\n", fp->name);
	free_netdev(dev);
	return ret;
}
1512 
/* Detach the driver from a board: unregister the net device, quiesce
 * the hardware, and release the IRQ, MMIO mapping and MEM resource
 * acquired in fza_probe.
 */
static int fza_remove(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	struct fza_private *fp = netdev_priv(dev);
	struct tc_dev *tdev = to_tc_dev(bdev);
	resource_size_t start, len;

	/* Drop the reference taken by fza_probe. */
	put_device(bdev);

	unregister_netdev(dev);

	timer_delete_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

	iounmap(fp->mmio);

	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);

	free_netdev(dev);

	return 0;
}
1538 
/* TURBOchannel option ROM vendor/module strings this driver binds to. */
static struct tc_device_id const fza_tc_table[] = {
	{ "DEC     ", "PMAF-AA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);
1544 
/* TURBOchannel bus driver glue: binds fza_probe/fza_remove to boards
 * matching fza_tc_table.
 */
static struct tc_driver fza_driver = {
	.id_table	= fza_tc_table,
	.driver		= {
		.name	= "defza",
		.bus	= &tc_bus_type,
		.probe	= fza_probe,
		.remove	= fza_remove,
	},
};
1554 
/* Module load: register the driver with the TURBOchannel bus. */
static int fza_init(void)
{
	return tc_register_driver(&fza_driver);
}

/* Module unload: unregister the driver from the TURBOchannel bus. */
static void fza_exit(void)
{
	tc_unregister_driver(&fza_driver);
}

module_init(fza_init);
module_exit(fza_exit);
1567