xref: /linux/drivers/net/wan/wanxl.c (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * wanXL serial card driver for Linux
4  * host part
5  *
6  * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
7  *
8  * Status:
9  *   - Only DTE (external clock) support with NRZ and NRZI encodings
10  *   - wanXL100 will require minor driver modifications, no access to hw
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/types.h>
20 #include <linux/fcntl.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/netdevice.h>
27 #include <linux/hdlc.h>
28 #include <linux/pci.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/delay.h>
31 #include <asm/io.h>
32 
33 #include "wanxl.h"
34 
static const char *version = "wanXL serial card driver version: 0.48";

#define PLX_CTL_RESET   0x40000000 /* adapter reset */

/* compile-time debug switches; change #undef to #define to enable */
#undef DEBUG_PKT
#undef DEBUG_PCI

/* MAILBOX #1 - PUTS COMMANDS */
#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
#else
#define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
#endif

/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
52 
/* Per-port state; one instance per physical serial port on the card. */
struct port {
	struct net_device *dev;
	struct card *card;	/* owning adapter */
	spinlock_t lock;	/* for wanxl_xmit */
	int node;		/* physical port #0 - 3 */
	unsigned int clock_type;
	int tx_in, tx_out;	/* TX ring: next slot to reclaim / to fill */
	struct sk_buff *tx_skbs[TX_BUFFERS];
};
62 
/* Layout of the DMA-coherent status area shared with the card's firmware. */
struct card_status {
	desc_t rx_descs[RX_QUEUE_LENGTH];	/* RX ring common to all ports */
	port_status_t port_status[4];
};
67 
/* Per-adapter state allocated in wanxl_pci_init_one(). */
struct card {
	int n_ports;		/* 1, 2 or 4 ports */
	u8 irq;			/* 0 until request_irq() succeeds */

	u8 __iomem *plx;	/* PLX PCI9060 virtual base address */
	struct pci_dev *pdev;	/* for pci_name(pdev) */
	int rx_in;		/* next RX ring slot the host will service */
	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
	struct card_status *status;	/* shared between host and card */
	dma_addr_t status_address;	/* bus address of ->status */
	struct port ports[];	/* 1 - 4 port structures follow */
};
80 
81 static inline struct port *dev_to_port(struct net_device *dev)
82 {
83 	return (struct port *)dev_to_hdlc(dev)->priv;
84 }
85 
86 static inline port_status_t *get_status(struct port *port)
87 {
88 	return &port->card->status->port_status[port->node];
89 }
90 
91 #ifdef DEBUG_PCI
/* Debug wrapper for DMA mapping: perform the mapping, then complain
 * loudly if any part of the buffer was mapped above the 4 GB bus
 * address boundary.
 */
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
					      size_t size, int direction)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);

	/* end of the buffer must stay below 4 GB of bus address space */
	if (addr + size > 0x100000000LL)
		pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
			pci_name(pdev), (unsigned long long)addr);
	return addr;
}
102 
103 #undef pci_map_single
104 #define pci_map_single pci_map_single_debug
105 #endif
106 
/* Cable and/or personality module change interrupt service.
 * Decodes the cable-status word supplied by the card, logs the result
 * and updates the netif carrier state from DCD.
 */
static inline void wanxl_cable_intr(struct port *port)
{
	u32 value = get_status(port)->cable;
	int valid = 1;
	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

	/* bits 0-2: type of the cable plugged into the connector */
	switch (value & 0x7) {
	case STATUS_CABLE_V35:
		cable = "V.35";
		break;
	case STATUS_CABLE_X21:
		cable = "X.21";
		break;
	case STATUS_CABLE_V24:
		cable = "V.24";
		break;
	case STATUS_CABLE_EIA530:
		cable = "EIA530";
		break;
	case STATUS_CABLE_NONE:
		cable = "no";
		break;
	default:
		cable = "invalid";
	}

	/* same 3-bit encoding, shifted: the fitted personality module */
	switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
	case STATUS_CABLE_V35:
		pm = "V.35";
		break;
	case STATUS_CABLE_X21:
		pm = "X.21";
		break;
	case STATUS_CABLE_V24:
		pm = "V.24";
		break;
	case STATUS_CABLE_EIA530:
		pm = "EIA530";
		break;
	case STATUS_CABLE_NONE:
		pm = "no personality";
		valid = 0;
		break;
	default:
		pm = "invalid personality";
		valid = 0;
	}

	if (valid) {
		/* report DSR/DCD only when cable and PM types agree */
		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
				", DSR off";
			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
				", carrier off";
		}
		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
	}
	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
		    pm, dte, cable, dsr, dcd);

	/* link carrier state follows DCD */
	if (value & STATUS_CABLE_DCD)
		netif_carrier_on(port->dev);
	else
		netif_carrier_off(port->dev);
}
173 
/* Transmit complete interrupt service.
 * Walks the TX ring from tx_in, reclaiming descriptors the card has
 * finished with, until it reaches one still queued (FULL) or unused
 * (EMPTY); then re-enables the netif queue.
 */
static inline void wanxl_tx_intr(struct port *port)
{
	struct net_device *dev = port->dev;

	while (1) {
		desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
		struct sk_buff *skb = port->tx_skbs[port->tx_in];

		switch (desc->stat) {
		case PACKET_FULL:
		case PACKET_EMPTY:
			/* nothing more to reclaim - let xmit run again */
			netif_wake_queue(dev);
			return;

		case PACKET_UNDERRUN:
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
			break;

		default:
			/* any other status: frame was sent successfully */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
		}
		desc->stat = PACKET_EMPTY; /* Free descriptor */
		dma_unmap_single(&port->card->pdev->dev, desc->address,
				 skb->len, DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
	}
}
205 
206 /* Receive complete interrupt service */
207 static inline void wanxl_rx_intr(struct card *card)
208 {
209 	desc_t *desc;
210 
211 	while (desc = &card->status->rx_descs[card->rx_in],
212 	       desc->stat != PACKET_EMPTY) {
213 		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) {
214 			pr_crit("%s: received packet for nonexistent port\n",
215 				pci_name(card->pdev));
216 		} else {
217 			struct sk_buff *skb = card->rx_skbs[card->rx_in];
218 			struct port *port = &card->ports[desc->stat &
219 						    PACKET_PORT_MASK];
220 			struct net_device *dev = port->dev;
221 
222 			if (!skb) {
223 				dev->stats.rx_dropped++;
224 			} else {
225 				dma_unmap_single(&card->pdev->dev,
226 						 desc->address, BUFFER_LENGTH,
227 						 DMA_FROM_DEVICE);
228 				skb_put(skb, desc->length);
229 
230 #ifdef DEBUG_PKT
231 				printk(KERN_DEBUG "%s RX(%i):", dev->name,
232 				       skb->len);
233 				debug_frame(skb);
234 #endif
235 				dev->stats.rx_packets++;
236 				dev->stats.rx_bytes += skb->len;
237 				skb->protocol = hdlc_type_trans(skb, dev);
238 				netif_rx(skb);
239 				skb = NULL;
240 			}
241 
242 			if (!skb) {
243 				skb = dev_alloc_skb(BUFFER_LENGTH);
244 				desc->address = skb ?
245 					dma_map_single(&card->pdev->dev,
246 						       skb->data,
247 						       BUFFER_LENGTH,
248 						       DMA_FROM_DEVICE) : 0;
249 				card->rx_skbs[card->rx_in] = skb;
250 			}
251 		}
252 		desc->stat = PACKET_EMPTY; /* Free descriptor */
253 		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
254 	}
255 }
256 
/* Shared PCI interrupt handler: doorbell bits from the card identify
 * per-port TX-done and cable-change events plus a single RX event.
 */
static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
	struct card *card = dev_id;
	int i;
	u32 stat;
	int handled = 0;

	/* keep polling until the doorbell register reads back empty;
	 * writing the bits back acknowledges them
	 */
	while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
		handled = 1;
		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);

		for (i = 0; i < card->n_ports; i++) {
			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
				wanxl_tx_intr(&card->ports[i]);
			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
				wanxl_cable_intr(&card->ports[i]);
		}
		if (stat & (1 << DOORBELL_FROM_CARD_RX))
			wanxl_rx_intr(card);
	}

	return IRQ_RETVAL(handled);
}
280 
281 static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
282 {
283 	struct port *port = dev_to_port(dev);
284 	desc_t *desc;
285 
286 	spin_lock(&port->lock);
287 
288 	desc = &get_status(port)->tx_descs[port->tx_out];
289 	if (desc->stat != PACKET_EMPTY) {
290 		/* should never happen - previous xmit should stop queue */
291 #ifdef DEBUG_PKT
292                 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
293 #endif
294 		netif_stop_queue(dev);
295 		spin_unlock(&port->lock);
296 		return NETDEV_TX_BUSY;       /* request packet to be queued */
297 	}
298 
299 #ifdef DEBUG_PKT
300 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
301 	debug_frame(skb);
302 #endif
303 
304 	port->tx_skbs[port->tx_out] = skb;
305 	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
306 				       skb->len, DMA_TO_DEVICE);
307 	desc->length = skb->len;
308 	desc->stat = PACKET_FULL;
309 	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
310 	       port->card->plx + PLX_DOORBELL_TO_CARD);
311 
312 	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
313 
314 	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
315 		netif_stop_queue(dev);
316 #ifdef DEBUG_PKT
317 		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
318 #endif
319 	}
320 
321 	spin_unlock(&port->lock);
322 	return NETDEV_TX_OK;
323 }
324 
325 static int wanxl_attach(struct net_device *dev, unsigned short encoding,
326 			unsigned short parity)
327 {
328 	struct port *port = dev_to_port(dev);
329 
330 	if (encoding != ENCODING_NRZ &&
331 	    encoding != ENCODING_NRZI)
332 		return -EINVAL;
333 
334 	if (parity != PARITY_NONE &&
335 	    parity != PARITY_CRC32_PR1_CCITT &&
336 	    parity != PARITY_CRC16_PR1_CCITT &&
337 	    parity != PARITY_CRC32_PR0_CCITT &&
338 	    parity != PARITY_CRC16_PR0_CCITT)
339 		return -EINVAL;
340 
341 	get_status(port)->encoding = encoding;
342 	get_status(port)->parity = parity;
343 	return 0;
344 }
345 
/* SIOCWANDEV handler: report or set the synchronous serial interface
 * settings; any other request is delegated to generic hdlc_ioctl().
 */
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings line;
	struct port *port = dev_to_port(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		/* only clock_type is meaningful; rate/loopback fixed at 0 */
		memset(&line, 0, sizeof(line));
		line.clock_type = get_status(port)->clocking;
		line.clock_rate = 0;
		line.loopback = 0;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (dev->flags & IFF_UP)
			return -EBUSY;	/* settings change only while down */

		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
				   size))
			return -EFAULT;

		/* only external clock and TX-clock-from-RX are supported */
		if (line.clock_type != CLOCK_EXT &&
		    line.clock_type != CLOCK_TXFROMRX)
			return -EINVAL; /* No such clock setting */

		if (line.loopback != 0)
			return -EINVAL;

		get_status(port)->clocking = line.clock_type;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
395 
/* Bring the port up: reset the TX ring, ring the OPEN doorbell and wait
 * up to 1 s for the firmware to flag the port as open.
 */
static int wanxl_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
	unsigned long timeout;
	int i;

	if (get_status(port)->open) {
		netdev_err(dev, "port already open\n");
		return -EIO;
	}

	i = hdlc_open(dev);
	if (i)
		return i;

	/* start with an empty TX ring */
	port->tx_in = port->tx_out = 0;
	for (i = 0; i < TX_BUFFERS; i++)
		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);

	/* poll the shared status word for the firmware's acknowledgement */
	timeout = jiffies + HZ;
	do {
		if (get_status(port)->open) {
			netif_start_queue(dev);
			return 0;
		}
	} while (time_after(timeout, jiffies));

	netdev_err(dev, "unable to open port\n");
	/* ask the card to close the port, should it be still alive */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
	return -EFAULT;
}
431 
/* Shut the port down: ask the card to close it, wait up to 1 s for the
 * acknowledgement, then reclaim any frames still queued on the TX ring.
 */
static int wanxl_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long timeout;
	int i;

	hdlc_close(dev);
	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
	       port->card->plx + PLX_DOORBELL_TO_CARD);

	/* wait for the firmware to clear the open flag */
	timeout = jiffies + HZ;
	do {
		if (!get_status(port)->open)
			break;
	} while (time_after(timeout, jiffies));

	if (get_status(port)->open)
		netdev_err(dev, "unable to close port\n");

	netif_stop_queue(dev);

	/* free skbs the card never got around to transmitting */
	for (i = 0; i < TX_BUFFERS; i++) {
		desc_t *desc = &get_status(port)->tx_descs[i];

		if (desc->stat != PACKET_EMPTY) {
			desc->stat = PACKET_EMPTY;
			dma_unmap_single(&port->card->pdev->dev,
					 desc->address, port->tx_skbs[i]->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(port->tx_skbs[i]);
		}
	}
	return 0;
}
467 
468 static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
469 {
470 	struct port *port = dev_to_port(dev);
471 
472 	dev->stats.rx_over_errors = get_status(port)->rx_overruns;
473 	dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
474 	dev->stats.rx_errors = dev->stats.rx_over_errors +
475 		dev->stats.rx_frame_errors;
476 	return &dev->stats;
477 }
478 
479 static int wanxl_puts_command(struct card *card, u32 cmd)
480 {
481 	unsigned long timeout = jiffies + 5 * HZ;
482 
483 	writel(cmd, card->plx + PLX_MAILBOX_1);
484 	do {
485 		if (readl(card->plx + PLX_MAILBOX_1) == 0)
486 			return 0;
487 
488 		schedule();
489 	} while (time_after(timeout, jiffies));
490 
491 	return -1;
492 }
493 
/* Hard-reset the adapter by pulsing the PLX9060 software reset bit,
 * then restoring the previous control register contents.
 * NOTE(review): writing 0x80 to mailbox #0 presumably re-arms the PUTS
 * "in progress" state that the probe loop later polls - confirm against
 * the PLX/PUTS documentation.
 */
static void wanxl_reset(struct card *card)
{
	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

	writel(0x80, card->plx + PLX_MAILBOX_0);
	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
	udelay(1);
	writel(old_value, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
505 
506 static void wanxl_pci_remove_one(struct pci_dev *pdev)
507 {
508 	struct card *card = pci_get_drvdata(pdev);
509 	int i;
510 
511 	for (i = 0; i < card->n_ports; i++) {
512 		unregister_hdlc_device(card->ports[i].dev);
513 		free_netdev(card->ports[i].dev);
514 	}
515 
516 	/* unregister and free all host resources */
517 	if (card->irq)
518 		free_irq(card->irq, card);
519 
520 	wanxl_reset(card);
521 
522 	for (i = 0; i < RX_QUEUE_LENGTH; i++)
523 		if (card->rx_skbs[i]) {
524 			dma_unmap_single(&card->pdev->dev,
525 					 card->status->rx_descs[i].address,
526 					 BUFFER_LENGTH, DMA_FROM_DEVICE);
527 			dev_kfree_skb(card->rx_skbs[i]);
528 		}
529 
530 	if (card->plx)
531 		iounmap(card->plx);
532 
533 	if (card->status)
534 		dma_free_coherent(&pdev->dev, sizeof(struct card_status),
535 				  card->status, card->status_address);
536 
537 	pci_release_regions(pdev);
538 	pci_disable_device(pdev);
539 	kfree(card);
540 }
541 
542 #include "wanxlfw.inc"
543 
/* Net device callbacks; TX enters through the generic HDLC layer,
 * which dispatches to hdlc->xmit (wanxl_xmit).
 */
static const struct net_device_ops wanxl_ops = {
	.ndo_open       = wanxl_open,
	.ndo_stop       = wanxl_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = wanxl_ioctl,
	.ndo_get_stats  = wanxl_get_stats,
};
551 
552 static int wanxl_pci_init_one(struct pci_dev *pdev,
553 			      const struct pci_device_id *ent)
554 {
555 	struct card *card;
556 	u32 ramsize, stat;
557 	unsigned long timeout;
558 	u32 plx_phy;		/* PLX PCI base address */
559 	u32 mem_phy;		/* memory PCI base addr */
560 	u8 __iomem *mem;	/* memory virtual base addr */
561 	int i, ports;
562 
563 #ifndef MODULE
564 	pr_info_once("%s\n", version);
565 #endif
566 
567 	i = pci_enable_device(pdev);
568 	if (i)
569 		return i;
570 
571 	/* QUICC can only access first 256 MB of host RAM directly,
572 	 * but PLX9060 DMA does 32-bits for actual packet data transfers
573 	 */
574 
575 	/* FIXME when PCI/DMA subsystems are fixed.
576 	 * We set both dma_mask and consistent_dma_mask to 28 bits
577 	 * and pray pci_alloc_consistent() will use this info. It should
578 	 * work on most platforms
579 	 */
580 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
581 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
582 		pr_err("No usable DMA configuration\n");
583 		pci_disable_device(pdev);
584 		return -EIO;
585 	}
586 
587 	i = pci_request_regions(pdev, "wanXL");
588 	if (i) {
589 		pci_disable_device(pdev);
590 		return i;
591 	}
592 
593 	switch (pdev->device) {
594 	case PCI_DEVICE_ID_SBE_WANXL100:
595 		ports = 1;
596 		break;
597 	case PCI_DEVICE_ID_SBE_WANXL200:
598 		ports = 2;
599 		break;
600 	default:
601 		ports = 4;
602 	}
603 
604 	card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
605 	if (!card) {
606 		pci_release_regions(pdev);
607 		pci_disable_device(pdev);
608 		return -ENOBUFS;
609 	}
610 
611 	pci_set_drvdata(pdev, card);
612 	card->pdev = pdev;
613 
614 	card->status = dma_alloc_coherent(&pdev->dev,
615 					  sizeof(struct card_status),
616 					  &card->status_address, GFP_KERNEL);
617 	if (!card->status) {
618 		wanxl_pci_remove_one(pdev);
619 		return -ENOBUFS;
620 	}
621 
622 #ifdef DEBUG_PCI
623 	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
624 	       " at 0x%LX\n", pci_name(pdev),
625 	       (unsigned long long)card->status_address);
626 #endif
627 
628 	/* FIXME when PCI/DMA subsystems are fixed.
629 	 * We set both dma_mask and consistent_dma_mask back to 32 bits
630 	 * to indicate the card can do 32-bit DMA addressing
631 	 */
632 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
633 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
634 		pr_err("No usable DMA configuration\n");
635 		wanxl_pci_remove_one(pdev);
636 		return -EIO;
637 	}
638 
639 	/* set up PLX mapping */
640 	plx_phy = pci_resource_start(pdev, 0);
641 
642 	card->plx = ioremap(plx_phy, 0x70);
643 	if (!card->plx) {
644 		pr_err("ioremap() failed\n");
645 		wanxl_pci_remove_one(pdev);
646 		return -EFAULT;
647 	}
648 
649 #if RESET_WHILE_LOADING
650 	wanxl_reset(card);
651 #endif
652 
653 	timeout = jiffies + 20 * HZ;
654 	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
655 		if (time_before(timeout, jiffies)) {
656 			pr_warn("%s: timeout waiting for PUTS to complete\n",
657 				pci_name(pdev));
658 			wanxl_pci_remove_one(pdev);
659 			return -ENODEV;
660 		}
661 
662 		switch (stat & 0xC0) {
663 		case 0x00:	/* hmm - PUTS completed with non-zero code? */
664 		case 0x80:	/* PUTS still testing the hardware */
665 			break;
666 
667 		default:
668 			pr_warn("%s: PUTS test 0x%X failed\n",
669 				pci_name(pdev), stat & 0x30);
670 			wanxl_pci_remove_one(pdev);
671 			return -ENODEV;
672 		}
673 
674 		schedule();
675 	}
676 
677 	/* get on-board memory size (PUTS detects no more than 4 MB) */
678 	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
679 
680 	/* set up on-board RAM mapping */
681 	mem_phy = pci_resource_start(pdev, 2);
682 
683 	/* sanity check the board's reported memory size */
684 	if (ramsize < BUFFERS_ADDR +
685 	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
686 		pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
687 			pci_name(pdev), ramsize,
688 			BUFFERS_ADDR +
689 			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
690 		wanxl_pci_remove_one(pdev);
691 		return -ENODEV;
692 	}
693 
694 	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
695 		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
696 		wanxl_pci_remove_one(pdev);
697 		return -ENODEV;
698 	}
699 
700 	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
701 		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
702 
703 		card->rx_skbs[i] = skb;
704 		if (skb)
705 			card->status->rx_descs[i].address =
706 				dma_map_single(&card->pdev->dev, skb->data,
707 					       BUFFER_LENGTH, DMA_FROM_DEVICE);
708 	}
709 
710 	mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
711 	if (!mem) {
712 		pr_err("ioremap() failed\n");
713 		wanxl_pci_remove_one(pdev);
714 		return -EFAULT;
715 	}
716 
717 	for (i = 0; i < sizeof(firmware); i += 4)
718 		writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);
719 
720 	for (i = 0; i < ports; i++)
721 		writel(card->status_address +
722 		       (void *)&card->status->port_status[i] -
723 		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
724 	writel(card->status_address, mem + PDM_OFFSET + 20);
725 	writel(PDM_OFFSET, mem);
726 	iounmap(mem);
727 
728 	writel(0, card->plx + PLX_MAILBOX_5);
729 
730 	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
731 		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
732 		wanxl_pci_remove_one(pdev);
733 		return -ENODEV;
734 	}
735 
736 	timeout = jiffies + 5 * HZ;
737 	do {
738 		stat = readl(card->plx + PLX_MAILBOX_5);
739 		if (stat)
740 			break;
741 		schedule();
742 	} while (time_after(timeout, jiffies));
743 
744 	if (!stat) {
745 		pr_warn("%s: timeout while initializing card firmware\n",
746 			pci_name(pdev));
747 		wanxl_pci_remove_one(pdev);
748 		return -ENODEV;
749 	}
750 
751 #if DETECT_RAM
752 	ramsize = stat;
753 #endif
754 
755 	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
756 		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
757 
758 	/* Allocate IRQ */
759 	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
760 		pr_warn("%s: could not allocate IRQ%i\n",
761 			pci_name(pdev), pdev->irq);
762 		wanxl_pci_remove_one(pdev);
763 		return -EBUSY;
764 	}
765 	card->irq = pdev->irq;
766 
767 	for (i = 0; i < ports; i++) {
768 		hdlc_device *hdlc;
769 		struct port *port = &card->ports[i];
770 		struct net_device *dev = alloc_hdlcdev(port);
771 
772 		if (!dev) {
773 			pr_err("%s: unable to allocate memory\n",
774 			       pci_name(pdev));
775 			wanxl_pci_remove_one(pdev);
776 			return -ENOMEM;
777 		}
778 
779 		port->dev = dev;
780 		hdlc = dev_to_hdlc(dev);
781 		spin_lock_init(&port->lock);
782 		dev->tx_queue_len = 50;
783 		dev->netdev_ops = &wanxl_ops;
784 		hdlc->attach = wanxl_attach;
785 		hdlc->xmit = wanxl_xmit;
786 		port->card = card;
787 		port->node = i;
788 		get_status(port)->clocking = CLOCK_EXT;
789 		if (register_hdlc_device(dev)) {
790 			pr_err("%s: unable to register hdlc device\n",
791 			       pci_name(pdev));
792 			free_netdev(dev);
793 			wanxl_pci_remove_one(pdev);
794 			return -ENOBUFS;
795 		}
796 		card->n_ports++;
797 	}
798 
799 	pr_info("%s: port", pci_name(pdev));
800 	for (i = 0; i < ports; i++)
801 		pr_cont("%s #%i: %s",
802 			i ? "," : "", i, card->ports[i].dev->name);
803 	pr_cont("\n");
804 
805 	for (i = 0; i < ports; i++)
806 		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
807 
808 	return 0;
809 }
810 
/* Supported boards: 1-, 2- and 4-port wanXL variants, any subsystem ID */
static const struct pci_device_id wanxl_pci_tbl[] = {
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};

static struct pci_driver wanxl_pci_driver = {
	.name		= "wanXL",
	.id_table	= wanxl_pci_tbl,
	.probe		= wanxl_pci_init_one,
	.remove		= wanxl_pci_remove_one,
};
827 
/* Module entry: print the version banner (module build only, the
 * built-in case prints it from probe) and register the PCI driver.
 */
static int __init wanxl_init_module(void)
{
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&wanxl_pci_driver);
}
835 
/* Module exit: unregister the driver; per-card teardown happens in
 * wanxl_pci_remove_one() for each bound device.
 */
static void __exit wanxl_cleanup_module(void)
{
	pci_unregister_driver(&wanxl_pci_driver);
}
840 
841 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
842 MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
843 MODULE_LICENSE("GPL v2");
844 MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
845 
846 module_init(wanxl_init_module);
847 module_exit(wanxl_cleanup_module);
848