/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
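
/* descriptive note: each CLK_LOW/CLK_HIGH pair above is one serial clock
 * to the EEPROM, with the bit sampled on the rising edge; holding SI_HIGH
 * through a pair shifts in a 1.  the six plain pairs followed by the two
 * SI_HIGH pairs thus clock out the READ opcode 0000 0011b, msb first.
 */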

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
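
/* note on he_writel_internal above: the dummy read of CON_DAT forces the
 * posted data write to complete before the command word is written to
 * CON_CTL, and the busy-wait then spins until the adapter clears
 * CON_CTL_BUSY, i.e. until the indirect write into connection memory
 * has actually finished.
 */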

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
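
/* illustrative example: with (dev)->vcibits == 10,
 * he_mkcid(dev, 1, 32) == ((1 << 10) | 32) & 0x1fff == 0x420
 */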

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
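
/* with byte enables 2..0 disabled, the write above touches only the most
 * significant byte (bits 31:24), as the NOTE from page 2-20 requires */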

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
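
/* __find_vcc scans the atm layer's vcc_hash bucket for this vci and matches
 * on device, vpi/vci and an open receive side.  callers are expected to
 * hold vcc_sklist_lock (he_service_rbrq below takes the read lock before
 * looking up). */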

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
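
/* worked example (illustrative): rate = 100 cps.  100 << 9 = 51200; six
 * right shifts leave 800 (<= 0x3ff), so exp = 6 and man = 800 & 0x1ff = 288.
 * decoding with the atm forum rule, 2^exp * (512 + man) / 512 =
 * 64 * 800 / 512 = 100 cps again. */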

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
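
	/* e.g. on a 155 card (50 MHz clock, ATM_OC3_PCR = 353207 cps) the
	 * first reload value is roughly 50000000 / 353207 ~= 141 cycles;
	 * each later entry uses rate reduced by another delta = link_rate/32 */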

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
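
	/* row 0 thus spans link_rate down to (17/32) * link_rate in steps of
	 * link_rate/32; rows 1-14 each halve the row above and row 15 is
	 * quartered, so the grid reaches down to roughly link_rate / 2^16 */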

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty ugly, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

	/* small buffer pool */
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps_base\n");
		goto out_destroy_rbps_pool;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		goto out_free_rbps_base;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			goto out_free_rbps_virt;

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbps_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		goto out_free_rbpl_base;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			goto out_free_rbpl_virt;

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl_virt;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
	i = CONFIG_RBPL_SIZE;
out_free_rbpl_virt:
	while (i--)
		pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
				he_dev->rbpl_base[i].phys);
	kfree(he_dev->rbpl_virt);

out_free_rbpl_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);

	i = CONFIG_RBPS_SIZE;
out_free_rbps_virt:
	while (i--)
		pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
				he_dev->rbps_base[i].phys);
	kfree(he_dev->rbps_virt);

out_free_rbps_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
			sizeof(struct he_rbp), he_dev->rbps_base,
			he_dev->rbps_phys);
out_destroy_rbps_pool:
	pci_pool_destroy(he_dev->rbps_pool);
	return -ENOMEM;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
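/* presumably 1 + 16 + 1536/8 = 209 pci clocks, i.e. the worst case from
 * the table above (a 622 read burst on a 64-bit bus) */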
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
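
	/* for the HE155 geometry above this gives 512 * 20 / 4 = 2560 buffers
	 * in each of the rx0 and rx1 pools (exactly the cap), and
	 * 1018 * 20 / 4 = 5090 tx buffers, just under the 5120 cap */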

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbps_base) {
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
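
/* the aal5 cpcs trailer occupies the last 8 bytes of the reassembled pdu:
 * cpcs-uu, cpi, a 16-bit length and a 32-bit crc; bytes len-6 and len-5
 * above are therefore the big-endian length field */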

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
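
/* i.e. the checksum rides in the low 16 bits of the trailing crc field,
 * the last two bytes of the pdu (see the note above) */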
1709 
1710 static int
1711 he_service_rbrq(struct he_dev *he_dev, int group)
1712 {
1713 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1714 				((unsigned long)he_dev->rbrq_base |
1715 					he_dev->hsp->group[group].rbrq_tail);
1716 	struct he_rbp *rbp = NULL;
1717 	unsigned cid, lastcid = -1;
1718 	unsigned buf_len = 0;
1719 	struct sk_buff *skb;
1720 	struct atm_vcc *vcc = NULL;
1721 	struct he_vcc *he_vcc;
1722 	struct he_iovec *iov;
1723 	int pdus_assembled = 0;
1724 	int updated = 0;
1725 
1726 	read_lock(&vcc_sklist_lock);
1727 	while (he_dev->rbrq_head != rbrq_tail) {
1728 		++updated;
1729 
1730 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1731 			he_dev->rbrq_head, group,
1732 			RBRQ_ADDR(he_dev->rbrq_head),
1733 			RBRQ_BUFLEN(he_dev->rbrq_head),
1734 			RBRQ_CID(he_dev->rbrq_head),
1735 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1736 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1737 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1738 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1739 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1740 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1741 
1742 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1743 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1744 		else
1745 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1746 
1747 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1748 		cid = RBRQ_CID(he_dev->rbrq_head);
1749 
1750 		if (cid != lastcid)
1751 			vcc = __find_vcc(he_dev, cid);
1752 		lastcid = cid;
1753 
1754 		if (vcc == NULL) {
1755 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1756 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1757 				rbp->status &= ~RBP_LOANED;
1758 
1759 			goto next_rbrq_entry;
1760 		}
1761 
1762 		he_vcc = HE_VCC(vcc);
1763 		if (he_vcc == NULL) {
1764 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1765 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1766 				rbp->status &= ~RBP_LOANED;
1767 			goto next_rbrq_entry;
1768 		}
1769 
1770 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1771 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1772 			atomic_inc(&vcc->stats->rx_drop);
1773 			goto return_host_buffers;
1774 		}
1775 
1776 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1777 		he_vcc->iov_tail->iov_len = buf_len;
1778 		he_vcc->pdu_len += buf_len;
1779 		++he_vcc->iov_tail;
1780 
1781 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1782 			lastcid = -1;
1783 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1784 			wake_up(&he_vcc->rx_waitq);
1785 			goto return_host_buffers;
1786 		}
1787 
1788 #ifdef notdef
1789 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1790 			hprintk("iovec full!  cid 0x%x\n", cid);
1791 			goto return_host_buffers;
1792 		}
1793 #endif
1794 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1795 			goto next_rbrq_entry;
1796 
1797 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1798 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1799 			HPRINTK("%s%s (%d.%d)\n",
1800 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1801 							? "CRC_ERR " : "",
1802 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1803 							? "LEN_ERR" : "",
1804 							vcc->vpi, vcc->vci);
1805 			atomic_inc(&vcc->stats->rx_err);
1806 			goto return_host_buffers;
1807 		}
1808 
1809 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1810 							GFP_ATOMIC);
1811 		if (!skb) {
1812 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1813 			goto return_host_buffers;
1814 		}
1815 
1816 		if (rx_skb_reserve > 0)
1817 			skb_reserve(skb, rx_skb_reserve);
1818 
1819 		__net_timestamp(skb);
1820 
1821 		for (iov = he_vcc->iov_head;
1822 				iov < he_vcc->iov_tail; ++iov) {
1823 			if (iov->iov_base & RBP_SMALLBUF)
1824 				memcpy(skb_put(skb, iov->iov_len),
1825 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1826 			else
1827 				memcpy(skb_put(skb, iov->iov_len),
1828 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1829 		}
1830 
1831 		switch (vcc->qos.aal) {
1832 			case ATM_AAL0:
1833 				/* 2.10.1.5 raw cell receive */
1834 				skb->len = ATM_AAL0_SDU;
1835 				skb_set_tail_pointer(skb, skb->len);
1836 				break;
1837 			case ATM_AAL5:
1838 				/* 2.10.1.2 aal5 receive */
1839 
1840 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1841 				skb_set_tail_pointer(skb, skb->len);
1842 #ifdef USE_CHECKSUM_HW
1843 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1844 					skb->ip_summed = CHECKSUM_COMPLETE;
1845 					skb->csum = TCP_CKSUM(skb->data,
1846 							he_vcc->pdu_len);
1847 				}
1848 #endif
1849 				break;
1850 		}
1851 
1852 #ifdef should_never_happen
1853 		if (skb->len > vcc->qos.rxtp.max_sdu)
1854 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1855 #endif
1856 
1857 #ifdef notdef
1858 		ATM_SKB(skb)->vcc = vcc;
1859 #endif
1860 		spin_unlock(&he_dev->global_lock);
1861 		vcc->push(vcc, skb);
1862 		spin_lock(&he_dev->global_lock);
1863 
1864 		atomic_inc(&vcc->stats->rx);
1865 
1866 return_host_buffers:
1867 		++pdus_assembled;
1868 
1869 		for (iov = he_vcc->iov_head;
1870 				iov < he_vcc->iov_tail; ++iov) {
1871 			if (iov->iov_base & RBP_SMALLBUF)
1872 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1873 			else
1874 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1875 
1876 			rbp->status &= ~RBP_LOANED;
1877 		}
1878 
1879 		he_vcc->iov_tail = he_vcc->iov_head;
1880 		he_vcc->pdu_len = 0;
1881 
1882 next_rbrq_entry:
1883 		he_dev->rbrq_head = (struct he_rbrq *)
1884 				((unsigned long) he_dev->rbrq_base |
1885 					RBRQ_MASK(++he_dev->rbrq_head));
1886 
1887 	}
1888 	read_unlock(&vcc_sklist_lock);
1889 
1890 	if (updated) {
1891 		if (updated > he_dev->rbrq_peak)
1892 			he_dev->rbrq_peak = updated;
1893 
1894 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1895 						G0_RBRQ_H + (group * 16));
1896 	}
1897 
1898 	return pdus_assembled;
1899 }
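
#ifdef notdef
/*
 * Sketch of the queue walk used above (and by the tbrq/tpdrq code):
 * each queue occupies a power-of-two sized, size-aligned block, so the
 * successor of an entry pointer is formed by masking the incremented
 * pointer down to its offset within the ring and OR-ing that back onto
 * the base.  EXAMPLE_MASK is a made-up stand-in for the real
 * RBRQ_MASK/TBRQ_MASK/TPDRQ_MASK macros in he.h.
 */
#define EXAMPLE_MASK(x)	(((unsigned long)(x)) & \
			 (CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq) - 1))

static struct he_rbrq *
example_next_entry(struct he_rbrq *base, struct he_rbrq *head)
{
	return (struct he_rbrq *)((unsigned long)base | EXAMPLE_MASK(head + 1));
}
#endif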
1900 
1901 static void
1902 he_service_tbrq(struct he_dev *he_dev, int group)
1903 {
1904 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1905 				((unsigned long)he_dev->tbrq_base |
1906 					he_dev->hsp->group[group].tbrq_tail);
1907 	struct he_tpd *tpd;
1908 	int slot, updated = 0;
1909 	struct he_tpd *__tpd;
1910 
1911 	/* 2.1.6 transmit buffer return queue */
1912 
1913 	while (he_dev->tbrq_head != tbrq_tail) {
1914 		++updated;
1915 
1916 		HPRINTK("tbrq%d 0x%x%s%s\n",
1917 			group,
1918 			TBRQ_TPD(he_dev->tbrq_head),
1919 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1920 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1921 		tpd = NULL;
1922 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1923 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1924 				tpd = __tpd;
1925 				list_del(&__tpd->entry);
1926 				break;
1927 			}
1928 		}
1929 
1930 		if (tpd == NULL) {
1931 			hprintk("unable to locate tpd for dma buffer %x\n",
1932 						TBRQ_TPD(he_dev->tbrq_head));
1933 			goto next_tbrq_entry;
1934 		}
1935 
1936 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1937 			if (tpd->vcc) {
1938 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1939 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1940 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1941 			}
1942 			goto next_tbrq_entry;
1943 		}
1944 
1945 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1946 			if (tpd->iovec[slot].addr)
1947 				pci_unmap_single(he_dev->pci_dev,
1948 					tpd->iovec[slot].addr,
1949 					tpd->iovec[slot].len & TPD_LEN_MASK,
1950 							PCI_DMA_TODEVICE);
1951 			if (tpd->iovec[slot].len & TPD_LST)
1952 				break;
1953 
1954 		}
1955 
1956 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1957 			if (tpd->vcc && tpd->vcc->pop)
1958 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1959 			else
1960 				dev_kfree_skb_any(tpd->skb);
1961 		}
1962 
1963 next_tbrq_entry:
1964 		if (tpd)
1965 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1966 		he_dev->tbrq_head = (struct he_tbrq *)
1967 				((unsigned long) he_dev->tbrq_base |
1968 					TBRQ_MASK(++he_dev->tbrq_head));
1969 	}
1970 
1971 	if (updated) {
1972 		if (updated > he_dev->tbrq_peak)
1973 			he_dev->tbrq_peak = updated;
1974 
1975 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1976 						G0_TBRQ_H + (group * 16));
1977 	}
1978 }
1979 
1981 static void
1982 he_service_rbpl(struct he_dev *he_dev, int group)
1983 {
1984 	struct he_rbp *newtail;
1985 	struct he_rbp *rbpl_head;
1986 	int moved = 0;
1987 
1988 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1989 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1990 
1991 	for (;;) {
1992 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1993 						RBPL_MASK(he_dev->rbpl_tail+1));
1994 
1995 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1996 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
1997 			break;
1998 
1999 		newtail->status |= RBP_LOANED;
2000 		he_dev->rbpl_tail = newtail;
2001 		++moved;
2002 	}
2003 
2004 	if (moved)
2005 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2006 }
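
/*
 * Buffer ownership note: he_service_rbpl() and he_service_rbps() set
 * RBP_LOANED and advance the pool tail to hand a buffer to the adapter;
 * he_service_rbrq() clears RBP_LOANED once the buffer has come back
 * through the receive buffer return queue.  A buffer is owned by the
 * adapter exactly while RBP_LOANED is set, which is why the loop above
 * will not advance past an entry that is still loaned out.
 */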
2007 
2008 static void
2009 he_service_rbps(struct he_dev *he_dev, int group)
2010 {
2011 	struct he_rbp *newtail;
2012 	struct he_rbp *rbps_head;
2013 	int moved = 0;
2014 
2015 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2016 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2017 
2018 	for (;;) {
2019 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2020 						RBPS_MASK(he_dev->rbps_tail+1));
2021 
2022 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2023 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2024 			break;
2025 
2026 		newtail->status |= RBP_LOANED;
2027 		he_dev->rbps_tail = newtail;
2028 		++moved;
2029 	}
2030 
2031 	if (moved)
2032 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2033 }
2034 
2035 static void
2036 he_tasklet(unsigned long data)
2037 {
2038 	unsigned long flags;
2039 	struct he_dev *he_dev = (struct he_dev *) data;
2040 	int group, type;
2041 	int updated = 0;
2042 
2043 	HPRINTK("tasklet (0x%lx)\n", data);
2044 	spin_lock_irqsave(&he_dev->global_lock, flags);
2045 
2046 	while (he_dev->irq_head != he_dev->irq_tail) {
2047 		++updated;
2048 
2049 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2050 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2051 
2052 		switch (type) {
2053 			case ITYPE_RBRQ_THRESH:
2054 				HPRINTK("rbrq%d threshold\n", group);
2055 				/* fall through */
2056 			case ITYPE_RBRQ_TIMER:
2057 				if (he_service_rbrq(he_dev, group)) {
2058 					he_service_rbpl(he_dev, group);
2059 					he_service_rbps(he_dev, group);
2060 				}
2061 				break;
2062 			case ITYPE_TBRQ_THRESH:
2063 				HPRINTK("tbrq%d threshold\n", group);
2064 				/* fall through */
2065 			case ITYPE_TPD_COMPLETE:
2066 				he_service_tbrq(he_dev, group);
2067 				break;
2068 			case ITYPE_RBPL_THRESH:
2069 				he_service_rbpl(he_dev, group);
2070 				break;
2071 			case ITYPE_RBPS_THRESH:
2072 				he_service_rbps(he_dev, group);
2073 				break;
2074 			case ITYPE_PHY:
2075 				HPRINTK("phy interrupt\n");
2076 #ifdef CONFIG_ATM_HE_USE_SUNI
2077 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2078 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2079 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2080 				spin_lock_irqsave(&he_dev->global_lock, flags);
2081 #endif
2082 				break;
2083 			case ITYPE_OTHER:
2084 				switch (type|group) {
2085 					case ITYPE_PARITY:
2086 						hprintk("parity error\n");
2087 						break;
2088 					case ITYPE_ABORT:
2089 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2090 						break;
2091 				}
2092 				break;
2093 			case ITYPE_TYPE(ITYPE_INVALID):
2094 				/* see 8.1.1 -- check all queues */
2095 
2096 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2097 
2098 				he_service_rbrq(he_dev, 0);
2099 				he_service_rbpl(he_dev, 0);
2100 				he_service_rbps(he_dev, 0);
2101 				he_service_tbrq(he_dev, 0);
2102 				break;
2103 			default:
2104 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2105 		}
2106 
2107 		he_dev->irq_head->isw = ITYPE_INVALID;
2108 
2109 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2110 	}
2111 
2112 	if (updated) {
2113 		if (updated > he_dev->irq_peak)
2114 			he_dev->irq_peak = updated;
2115 
2116 		he_writel(he_dev,
2117 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2118 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2119 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2120 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2121 	}
2122 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2123 }
2124 
2125 static irqreturn_t
2126 he_irq_handler(int irq, void *dev_id)
2127 {
2128 	unsigned long flags;
2129 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2130 	int handled = 0;
2131 
2132 	if (he_dev == NULL)
2133 		return IRQ_NONE;
2134 
2135 	spin_lock_irqsave(&he_dev->global_lock, flags);
2136 
2137 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2138 						(*he_dev->irq_tailoffset << 2));
2139 
2140 	if (he_dev->irq_tail == he_dev->irq_head) {
2141 		HPRINTK("tailoffset not updated?\n");
2142 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2143 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2144 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2145 	}
2146 
2147 #ifdef DEBUG
2148 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2149 		hprintk("spurious (or shared) interrupt?\n");
2150 #endif
2151 
2152 	if (he_dev->irq_head != he_dev->irq_tail) {
2153 		handled = 1;
2154 		tasklet_schedule(&he_dev->tasklet);
2155 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2156 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2157 	}
2158 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2159 	return IRQ_RETVAL(handled);
2161 }
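
/*
 * Interrupt flow, in brief: the adapter appends status words to the
 * in-memory interrupt queue and advances *irq_tailoffset; the hard irq
 * handler above only snapshots the new tail, acks the interrupt
 * (INT_CLEAR_A) and schedules he_tasklet(), which walks the entries
 * from irq_head to irq_tail, dispatches on the ITYPE_* code, marks each
 * consumed slot ITYPE_INVALID, and reports how far it got by writing
 * IRQ_TAIL() into the IRQ0_HEAD register.
 */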
2162 
2163 static __inline__ void
2164 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2165 {
2166 	struct he_tpdrq *new_tail;
2167 
2168 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2169 					tpd, cid, he_dev->tpdrq_tail);
2170 
2171 	/* new_tail = he_dev->tpdrq_tail; */
2172 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2173 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2174 
2175 	/*
2176 	 * check to see if we are about to set the tail == head
2177 	 * if true, update the head pointer from the adapter
2178 	 * to see if this is really the case (reading the queue
2179 	 * head for every enqueue would be unnecessarily slow)
2180 	 */
2181 
2182 	if (new_tail == he_dev->tpdrq_head) {
2183 		he_dev->tpdrq_head = (struct he_tpdrq *)
2184 			(((unsigned long)he_dev->tpdrq_base) |
2185 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2186 
2187 		if (new_tail == he_dev->tpdrq_head) {
2188 			int slot;
2189 
2190 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2191 			/*
2192 			 * FIXME
2193 			 * push tpd onto a transmit backlog queue
2194 			 * after service_tbrq, service the backlog
2195 			 * for now, we just drop the pdu
2196 			 */
2197 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2198 				if (tpd->iovec[slot].addr)
2199 					pci_unmap_single(he_dev->pci_dev,
2200 						tpd->iovec[slot].addr,
2201 						tpd->iovec[slot].len & TPD_LEN_MASK,
2202 								PCI_DMA_TODEVICE);
2203 			}
2204 			if (tpd->skb) {
2205 				if (tpd->vcc->pop)
2206 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2207 				else
2208 					dev_kfree_skb_any(tpd->skb);
2209 				atomic_inc(&tpd->vcc->stats->tx_err);
2210 			}
2211 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2212 			return;
2213 		}
2214 	}
2215 
2216 	/* 2.1.5 transmit packet descriptor ready queue */
2217 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2218 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2219 	he_dev->tpdrq_tail->cid = cid;
2220 	wmb();
2221 
2222 	he_dev->tpdrq_tail = new_tail;
2223 
2224 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2225 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2226 }
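
#ifdef notdef
/*
 * The tail == head test above is a lazy head-refresh idiom for a
 * producer ring whose consumer index lives on the device: keep a cached
 * copy of the head and re-read it across the bus only when the tail
 * appears to have caught up.  A made-up sketch with power-of-two sized
 * rings and free-running indices:
 */
static int example_ring_full(unsigned *cached_head, unsigned tail,
			     unsigned size, unsigned (*read_hw_head)(void))
{
	if (((tail + 1) & (size - 1)) != (*cached_head & (size - 1)))
		return 0;		/* fast path: no device read */
	*cached_head = read_hw_head();	/* refresh the cached head */
	return ((tail + 1) & (size - 1)) == (*cached_head & (size - 1));
}
#endif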
2227 
2228 static int
2229 he_open(struct atm_vcc *vcc)
2230 {
2231 	unsigned long flags;
2232 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2233 	struct he_vcc *he_vcc;
2234 	int err = 0;
2235 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2236 	short vpi = vcc->vpi;
2237 	int vci = vcc->vci;
2238 
2239 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2240 		return 0;
2241 
2242 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2243 
2244 	set_bit(ATM_VF_ADDR, &vcc->flags);
2245 
2246 	cid = he_mkcid(he_dev, vpi, vci);
2247 
2248 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2249 	if (he_vcc == NULL) {
2250 		hprintk("unable to allocate he_vcc during open\n");
2251 		return -ENOMEM;
2252 	}
2253 
2254 	he_vcc->iov_tail = he_vcc->iov_head;
2255 	he_vcc->pdu_len = 0;
2256 	he_vcc->rc_index = -1;
2257 
2258 	init_waitqueue_head(&he_vcc->rx_waitq);
2259 	init_waitqueue_head(&he_vcc->tx_waitq);
2260 
2261 	vcc->dev_data = he_vcc;
2262 
2263 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2264 		int pcr_goal;
2265 
2266 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2267 		if (pcr_goal == 0)
2268 			pcr_goal = he_dev->atm_dev->link_rate;
2269 		if (pcr_goal < 0)	/* means round down, technically */
2270 			pcr_goal = -pcr_goal;
2271 
2272 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2273 
2274 		switch (vcc->qos.aal) {
2275 			case ATM_AAL5:
2276 				tsr0_aal = TSR0_AAL5;
2277 				tsr4 = TSR4_AAL5;
2278 				break;
2279 			case ATM_AAL0:
2280 				tsr0_aal = TSR0_AAL0_SDU;
2281 				tsr4 = TSR4_AAL0_SDU;
2282 				break;
2283 			default:
2284 				err = -EINVAL;
2285 				goto open_failed;
2286 		}
2287 
2288 		spin_lock_irqsave(&he_dev->global_lock, flags);
2289 		tsr0 = he_readl_tsr0(he_dev, cid);
2290 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2291 
2292 		if (TSR0_CONN_STATE(tsr0) != 0) {
2293 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2294 			err = -EBUSY;
2295 			goto open_failed;
2296 		}
2297 
2298 		switch (vcc->qos.txtp.traffic_class) {
2299 			case ATM_UBR:
2300 				/* 2.3.3.1 open connection ubr */
2301 
2302 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2303 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2304 				break;
2305 
2306 			case ATM_CBR:
2307 				/* 2.3.3.2 open connection cbr */
2308 
2309 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2310 				if ((he_dev->total_bw + pcr_goal)
2311 					> (he_dev->atm_dev->link_rate * 9 / 10))
2312 				{
2313 					err = -EBUSY;
2314 					goto open_failed;
2315 				}
2316 
2317 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2318 
2319 				/* find an unused cs_stper register */
2320 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2321 					if (he_dev->cs_stper[reg].inuse == 0 ||
2322 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2323 						break;
2324 
2325 				if (reg == HE_NUM_CS_STPER) {
2326 					err = -EBUSY;
2327 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2328 					goto open_failed;
2329 				}
2330 
2331 				he_dev->total_bw += pcr_goal;
2332 
2333 				he_vcc->rc_index = reg;
2334 				++he_dev->cs_stper[reg].inuse;
2335 				he_dev->cs_stper[reg].pcr = pcr_goal;
2336 
2337 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2338 				period = clock / pcr_goal;
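				/*
				 * e.g. with made-up numbers: a 155 Mb/s
				 * card uses the 50000000 scheduler clock,
				 * so pcr_goal = 100000 cells/sec gives
				 * period = 50000000 / 100000 = 500 clocks
				 * between cells.
				 */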
2339 
2340 				HPRINTK("rc_index = %d period = %d\n",
2341 								reg, period);
2342 
2343 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2344 							CS_STPER0 + reg);
2345 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2346 
2347 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2348 							TSR0_RC_INDEX(reg);
2349 
2350 				break;
2351 			default:
2352 				err = -EINVAL;
2353 				goto open_failed;
2354 		}
2355 
2356 		spin_lock_irqsave(&he_dev->global_lock, flags);
2357 
2358 		he_writel_tsr0(he_dev, tsr0, cid);
2359 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2360 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2361 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2362 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2363 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2364 
2365 		he_writel_tsr3(he_dev, 0x0, cid);
2366 		he_writel_tsr5(he_dev, 0x0, cid);
2367 		he_writel_tsr6(he_dev, 0x0, cid);
2368 		he_writel_tsr7(he_dev, 0x0, cid);
2369 		he_writel_tsr8(he_dev, 0x0, cid);
2370 		he_writel_tsr10(he_dev, 0x0, cid);
2371 		he_writel_tsr11(he_dev, 0x0, cid);
2372 		he_writel_tsr12(he_dev, 0x0, cid);
2373 		he_writel_tsr13(he_dev, 0x0, cid);
2374 		he_writel_tsr14(he_dev, 0x0, cid);
2375 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2376 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2377 	}
2378 
2379 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2380 		unsigned aal;
2381 
2382 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2383 						&HE_VCC(vcc)->rx_waitq);
2384 
2385 		switch (vcc->qos.aal) {
2386 			case ATM_AAL5:
2387 				aal = RSR0_AAL5;
2388 				break;
2389 			case ATM_AAL0:
2390 				aal = RSR0_RAWCELL;
2391 				break;
2392 			default:
2393 				err = -EINVAL;
2394 				goto open_failed;
2395 		}
2396 
2397 		spin_lock_irqsave(&he_dev->global_lock, flags);
2398 
2399 		rsr0 = he_readl_rsr0(he_dev, cid);
2400 		if (rsr0 & RSR0_OPEN_CONN) {
2401 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2402 
2403 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2404 			err = -EBUSY;
2405 			goto open_failed;
2406 		}
2407 
2408 		rsr1 = RSR1_GROUP(0);
2409 		rsr4 = RSR4_GROUP(0);
2410 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2411 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2412 
2413 #ifdef USE_CHECKSUM_HW
2414 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2415 			rsr0 |= RSR0_TCP_CKSUM;
2416 #endif
2417 
2418 		he_writel_rsr4(he_dev, rsr4, cid);
2419 		he_writel_rsr1(he_dev, rsr1, cid);
2420 		/* 5.1.11 last parameter initialized should be
2421 			  the open/closed indication in rsr0 */
2422 		he_writel_rsr0(he_dev,
2423 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2424 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2425 
2426 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2427 	}
2428 
2429 open_failed:
2430 
2431 	if (err) {
2432 		kfree(he_vcc);
2433 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2434 	}
2435 	else
2436 		set_bit(ATM_VF_READY, &vcc->flags);
2437 
2438 	return err;
2439 }
2440 
2441 static void
2442 he_close(struct atm_vcc *vcc)
2443 {
2444 	unsigned long flags;
2445 	DECLARE_WAITQUEUE(wait, current);
2446 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2447 	struct he_tpd *tpd;
2448 	unsigned cid;
2449 	struct he_vcc *he_vcc = HE_VCC(vcc);
2450 #define MAX_RETRY 30
2451 	int retry = 0, sleep = 1, tx_inuse;
2452 
2453 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2454 
2455 	clear_bit(ATM_VF_READY, &vcc->flags);
2456 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2457 
2458 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2459 		int timeout;
2460 
2461 		HPRINTK("close rx cid 0x%x\n", cid);
2462 
2463 		/* 2.7.2.2 close receive operation */
2464 
2465 		/* wait for previous close (if any) to finish */
2466 
2467 		spin_lock_irqsave(&he_dev->global_lock, flags);
2468 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2469 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2470 			udelay(250);
2471 		}
2472 
2473 		set_current_state(TASK_UNINTERRUPTIBLE);
2474 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2475 
2476 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2477 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2478 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2479 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2480 
2481 		timeout = schedule_timeout(30*HZ);
2482 
2483 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2484 		set_current_state(TASK_RUNNING);
2485 
2486 		if (timeout == 0)
2487 			hprintk("close rx timeout cid 0x%x\n", cid);
2488 
2489 		HPRINTK("close rx cid 0x%x complete\n", cid);
2490 
2491 	}
2492 
2493 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2494 		volatile unsigned tsr4, tsr0;
2495 		int timeout;
2496 
2497 		HPRINTK("close tx cid 0x%x\n", cid);
2498 
2499 		/* 2.1.2
2500 		 *
2501 		 * ... the host must first stop queueing packets to the TPDRQ
2502 		 * on the connection to be closed, then wait for all outstanding
2503 		 * packets to be transmitted and their buffers returned to the
2504 		 * TBRQ. When the last packet on the connection arrives in the
2505 		 * TBRQ, the host issues the close command to the adapter.
2506 		 */
2507 
2508 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2509 		       (retry < MAX_RETRY)) {
2510 			msleep(sleep);
2511 			if (sleep < 250)
2512 				sleep = sleep * 2;
2513 
2514 			++retry;
2515 		}
2516 
2517 		if (tx_inuse > 1)
2518 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2519 
2520 		/* 2.3.1.1 generic close operations with flush */
2521 
2522 		spin_lock_irqsave(&he_dev->global_lock, flags);
2523 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2524 					/* also clears TSR4_SESSION_ENDED */
2525 
2526 		switch (vcc->qos.txtp.traffic_class) {
2527 			case ATM_UBR:
2528 				he_writel_tsr1(he_dev,
2529 					TSR1_MCR(rate_to_atmf(200000))
2530 					| TSR1_PCR(0), cid);
2531 				break;
2532 			case ATM_CBR:
2533 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2534 				break;
2535 		}
2536 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2537 
2538 		tpd = __alloc_tpd(he_dev);
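		/*
		 * queue an empty tpd with TPD_EOS | TPD_INT set: when it
		 * comes back on the tbrq it marks the last packet on the
		 * connection, and he_service_tbrq() wakes tx_waitq.
		 */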
2539 		if (tpd == NULL) {
2540 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2541 			goto close_tx_incomplete;
2542 		}
2543 		tpd->status |= TPD_EOS | TPD_INT;
2544 		tpd->skb = NULL;
2545 		tpd->vcc = vcc;
2546 		wmb();
2547 
2548 		set_current_state(TASK_UNINTERRUPTIBLE);
2549 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2550 		__enqueue_tpd(he_dev, tpd, cid);
2551 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2552 
2553 		timeout = schedule_timeout(30*HZ);
2554 
2555 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2556 		set_current_state(TASK_RUNNING);
2557 
2558 		spin_lock_irqsave(&he_dev->global_lock, flags);
2559 
2560 		if (timeout == 0) {
2561 			hprintk("close tx timeout cid 0x%x\n", cid);
2562 			goto close_tx_incomplete;
2563 		}
2564 
2565 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2566 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2567 			udelay(250);
2568 		}
2569 
2570 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2571 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2572 			udelay(250);
2573 		}
2574 
2575 close_tx_incomplete:
2576 
2577 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2578 			int reg = he_vcc->rc_index;
2579 
2580 			HPRINTK("cs_stper reg = %d\n", reg);
2581 
2582 			if (he_dev->cs_stper[reg].inuse == 0)
2583 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2584 			else
2585 				--he_dev->cs_stper[reg].inuse;
2586 
2587 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2588 		}
2589 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2590 
2591 		HPRINTK("close tx cid 0x%x complete\n", cid);
2592 	}
2593 
2594 	kfree(he_vcc);
2595 
2596 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2597 }
2598 
2599 static int
2600 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2601 {
2602 	unsigned long flags;
2603 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2604 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2605 	struct he_tpd *tpd;
2606 #ifdef USE_SCATTERGATHER
2607 	int i, slot = 0;
2608 #endif
2609 
2610 #define HE_TPD_BUFSIZE 0xffff
2611 
2612 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2613 
2614 	if ((skb->len > HE_TPD_BUFSIZE) ||
2615 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2616 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2617 		if (vcc->pop)
2618 			vcc->pop(vcc, skb);
2619 		else
2620 			dev_kfree_skb_any(skb);
2621 		atomic_inc(&vcc->stats->tx_err);
2622 		return -EINVAL;
2623 	}
2624 
2625 #ifndef USE_SCATTERGATHER
2626 	if (skb_shinfo(skb)->nr_frags) {
2627 		hprintk("no scatter/gather support\n");
2628 		if (vcc->pop)
2629 			vcc->pop(vcc, skb);
2630 		else
2631 			dev_kfree_skb_any(skb);
2632 		atomic_inc(&vcc->stats->tx_err);
2633 		return -EINVAL;
2634 	}
2635 #endif
2636 	spin_lock_irqsave(&he_dev->global_lock, flags);
2637 
2638 	tpd = __alloc_tpd(he_dev);
2639 	if (tpd == NULL) {
2640 		if (vcc->pop)
2641 			vcc->pop(vcc, skb);
2642 		else
2643 			dev_kfree_skb_any(skb);
2644 		atomic_inc(&vcc->stats->tx_err);
2645 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2646 		return -ENOMEM;
2647 	}
2648 
2649 	if (vcc->qos.aal == ATM_AAL5)
2650 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2651 	else {
2652 		char *pti_clp = (void *) (skb->data + 3);
2653 		int clp, pti;
2654 
2655 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2656 		clp = (*pti_clp & ATM_HDR_CLP);
2657 		tpd->status |= TPD_CELLTYPE(pti);
2658 		if (clp)
2659 			tpd->status |= TPD_CLP;
2660 
2661 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2662 	}
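	/*
	 * e.g. (made-up cell): if the header byte at skb->data[3] is 0x0a,
	 * then pti = (0x0a & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT = 5 and
	 * clp = 0x0a & ATM_HDR_CLP = 0, so the cell is sent as
	 * TPD_CELLTYPE(5) with TPD_CLP clear.
	 */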
2663 
2664 #ifdef USE_SCATTERGATHER
2665 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2666 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2667 	tpd->iovec[slot].len = skb->len - skb->data_len;
2668 	++slot;
2669 
2670 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2671 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2672 
2673 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2674 			tpd->vcc = vcc;
2675 			tpd->skb = NULL;	/* not the last fragment
2676 						   so don't ->push() yet */
2677 			wmb();
2678 
2679 			__enqueue_tpd(he_dev, tpd, cid);
2680 			tpd = __alloc_tpd(he_dev);
2681 			if (tpd == NULL) {
2682 				if (vcc->pop)
2683 					vcc->pop(vcc, skb);
2684 				else
2685 					dev_kfree_skb_any(skb);
2686 				atomic_inc(&vcc->stats->tx_err);
2687 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2688 				return -ENOMEM;
2689 			}
2690 			tpd->status |= TPD_USERCELL;
2691 			slot = 0;
2692 		}
2693 
2694 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2695 			(void *) page_address(frag->page) + frag->page_offset,
2696 				frag->size, PCI_DMA_TODEVICE);
2697 		tpd->iovec[slot].len = frag->size;
2698 		++slot;
2699 
2700 	}
2701 
2702 	tpd->iovec[slot - 1].len |= TPD_LST;
2703 #else
2704 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2705 	tpd->length0 = skb->len | TPD_LST;
2706 #endif
2707 	tpd->status |= TPD_INT;
2708 
2709 	tpd->vcc = vcc;
2710 	tpd->skb = skb;
2711 	wmb();
2712 	ATM_SKB(skb)->vcc = vcc;
2713 
2714 	__enqueue_tpd(he_dev, tpd, cid);
2715 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2716 
2717 	atomic_inc(&vcc->stats->tx);
2718 
2719 	return 0;
2720 }
2721 
2722 static int
2723 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2724 {
2725 	unsigned long flags;
2726 	struct he_dev *he_dev = HE_DEV(atm_dev);
2727 	struct he_ioctl_reg reg;
2728 	int err = 0;
2729 
2730 	switch (cmd) {
2731 		case HE_GET_REG:
2732 			if (!capable(CAP_NET_ADMIN))
2733 				return -EPERM;
2734 
2735 			if (copy_from_user(&reg, arg,
2736 					   sizeof(struct he_ioctl_reg)))
2737 				return -EFAULT;
2738 
2739 			spin_lock_irqsave(&he_dev->global_lock, flags);
2740 			switch (reg.type) {
2741 				case HE_REGTYPE_PCI:
2742 					if (reg.addr >= HE_REGMAP_SIZE) {
2743 						err = -EINVAL;
2744 						break;
2745 					}
2746 
2747 					reg.val = he_readl(he_dev, reg.addr);
2748 					break;
2749 				case HE_REGTYPE_RCM:
2750 					reg.val =
2751 						he_readl_rcm(he_dev, reg.addr);
2752 					break;
2753 				case HE_REGTYPE_TCM:
2754 					reg.val =
2755 						he_readl_tcm(he_dev, reg.addr);
2756 					break;
2757 				case HE_REGTYPE_MBOX:
2758 					reg.val =
2759 						he_readl_mbox(he_dev, reg.addr);
2760 					break;
2761 				default:
2762 					err = -EINVAL;
2763 					break;
2764 			}
2765 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2766 			if (err == 0)
2767 				if (copy_to_user(arg, &reg,
2768 							sizeof(struct he_ioctl_reg)))
2769 					return -EFAULT;
2770 			break;
2771 		default:
2772 #ifdef CONFIG_ATM_HE_USE_SUNI
2773 			if (atm_dev->phy && atm_dev->phy->ioctl)
2774 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2775 #else /* CONFIG_ATM_HE_USE_SUNI */
2776 			err = -EINVAL;
2777 #endif /* CONFIG_ATM_HE_USE_SUNI */
2778 			break;
2779 	}
2780 
2781 	return err;
2782 }
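
/*
 * Userland usage sketch for HE_GET_REG (made-up descriptor and
 * interface number; error handling elided):
 *
 *	struct he_ioctl_reg reg = { .addr = 0x0, .type = HE_REGTYPE_PCI };
 *	struct atmif_sioc sioc = { .number = itf, .length = sizeof(reg),
 *				   .arg = &reg };
 *
 *	ioctl(atm_fd, HE_GET_REG, &sioc);
 *	printf("reg 0x%x = 0x%x\n", reg.addr, reg.val);
 */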
2783 
2784 static void
2785 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2786 {
2787 	unsigned long flags;
2788 	struct he_dev *he_dev = HE_DEV(atm_dev);
2789 
2790 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2791 
2792 	spin_lock_irqsave(&he_dev->global_lock, flags);
2793 	he_writel(he_dev, val, FRAMER + (addr*4));
2794 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2795 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2796 }
2797 
2799 static unsigned char
2800 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2801 {
2802 	unsigned long flags;
2803 	struct he_dev *he_dev = HE_DEV(atm_dev);
2804 	unsigned reg;
2805 
2806 	spin_lock_irqsave(&he_dev->global_lock, flags);
2807 	reg = he_readl(he_dev, FRAMER + (addr*4));
2808 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2809 
2810 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2811 	return reg;
2812 }
2813 
2814 static int
2815 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2816 {
2817 	unsigned long flags;
2818 	struct he_dev *he_dev = HE_DEV(dev);
2819 	int left, i;
2820 #ifdef notdef
2821 	struct he_rbrq *rbrq_tail;
2822 	struct he_tpdrq *tpdrq_head;
2823 	int rbpl_head, rbpl_tail, inuse;
2824 #endif
2825 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2826 
2828 	left = *pos;
2829 	if (!left--)
2830 		return sprintf(page, "ATM he driver\n");
2831 
2832 	if (!left--)
2833 		return sprintf(page, "%s%s\n\n",
2834 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2835 
2836 	if (!left--)
2837 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2838 
2839 	spin_lock_irqsave(&he_dev->global_lock, flags);
2840 	mcc += he_readl(he_dev, MCC);
2841 	oec += he_readl(he_dev, OEC);
2842 	dcc += he_readl(he_dev, DCC);
2843 	cec += he_readl(he_dev, CEC);
2844 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2845 
2846 	if (!left--)
2847 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2848 							mcc, oec, dcc, cec);
2849 
2850 	if (!left--)
2851 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2852 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2853 
2854 	if (!left--)
2855 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2856 						CONFIG_TPDRQ_SIZE);
2857 
2858 	if (!left--)
2859 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2860 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2861 
2862 	if (!left--)
2863 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2864 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2865 
2867 #ifdef notdef
2868 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2869 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2870 
2871 	inuse = rbpl_head - rbpl_tail;
2872 	if (inuse < 0)
2873 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2874 	inuse /= sizeof(struct he_rbp);
2875 
2876 	if (!left--)
2877 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2878 						CONFIG_RBPL_SIZE, inuse);
2879 #endif
2880 
2881 	if (!left--)
2882 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2883 
2884 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2885 		if (!left--)
2886 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2887 						he_dev->cs_stper[i].pcr,
2888 						he_dev->cs_stper[i].inuse);
2889 
2890 	if (!left--)
2891 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2892 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2893 
2894 	return 0;
2895 }
2896 
2897 /* eeprom routines  -- see 4.7 */
2898 
2899 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2900 {
2901 	u32 val = 0, tmp_read = 0;
2902 	int i, j = 0;
2903 	u8 byte_read = 0;
2904 
2905 	val = readl(he_dev->membase + HOST_CNTL);
2906 	val &= 0xFFFFE0FF;
2907 
2908 	/* Turn on write enable */
2909 	val |= 0x800;
2910 	he_writel(he_dev, val, HOST_CNTL);
2911 
2912 	/* Send READ instruction */
2913 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2914 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2915 		udelay(EEPROM_DELAY);
2916 	}
2917 
2918 	/* Next, we need to send the byte address to read from */
2919 	for (i = 7; i >= 0; i--) {
2920 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2921 		udelay(EEPROM_DELAY);
2922 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2923 		udelay(EEPROM_DELAY);
2924 	}
2925 
2926 	j = 0;
2927 
2928 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2929 	he_writel(he_dev, val, HOST_CNTL);
2930 
2931 	/* Now, we can read data from the EEPROM by clocking it in */
2932 	for (i = 7; i >= 0; i--) {
2933 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2934 		udelay(EEPROM_DELAY);
2935 		tmp_read = he_readl(he_dev, HOST_CNTL);
2936 		byte_read |= (unsigned char)
2937 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2938 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2939 		udelay(EEPROM_DELAY);
2940 	}
2941 
2942 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2943 	udelay(EEPROM_DELAY);
2944 
2945 	return byte_read;
2946 }
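
/*
 * In brief: the eeprom is bit-banged through HOST_CNTL.  The READ
 * opcode is shifted out via readtab[], each of the eight address bits
 * is then presented on bit 9 for a two-phase clock from clocktab[],
 * and the eight data bits are clocked back in msb first by sampling
 * ID_DOUT.  The final write of ID_CS ends the cycle.
 */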
2947 
2948 MODULE_LICENSE("GPL");
2949 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2950 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2951 module_param(disable64, bool, 0);
2952 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2953 module_param(nvpibits, short, 0);
2954 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2955 module_param(nvcibits, short, 0);
2956 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2957 module_param(rx_skb_reserve, short, 0);
2958 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2959 module_param(irq_coalesce, bool, 0);
2960 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2961 module_param(sdh, bool, 0);
2962 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2963 
2964 static struct pci_device_id he_pci_tbl[] = {
2965 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
2966 	  0, 0, 0 },
2967 	{ 0, }
2968 };
2969 
2970 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2971 
2972 static struct pci_driver he_driver = {
2973 	.name =		"he",
2974 	.probe =	he_init_one,
2975 	.remove =	__devexit_p(he_remove_one),
2976 	.id_table =	he_pci_tbl,
2977 };
2978 
2979 static int __init he_init(void)
2980 {
2981 	return pci_register_driver(&he_driver);
2982 }
2983 
2984 static void __exit he_cleanup(void)
2985 {
2986 	pci_unregister_driver(&he_driver);
2987 }
2988 
2989 module_init(he_init);
2990 module_exit(he_cleanup);
2991