xref: /linux/drivers/atm/he.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /*
2 
3   he.c
4 
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7 
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12 
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17 
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 
22 */
23 
24 /*
25 
26   he.c
27 
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30 
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36 
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40 
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 
44   AUTHORS:
45 	chas williams <chas@cmf.nrl.navy.mil>
46 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 
48   NOTES:
49 	4096 supported 'connections'
50 	group 0 is used for all traffic
51 	interrupt queue 0 is used for all interrupts
52 	aal0 support (based on work from ulrich.u.muller@nokia.com)
53 
54  */
55 
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <linux/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW			/* still confused about this */
82 /* #undef HE_DEBUG */
83 
84 #include "he.h"
85 #include "suni.h"
86 #include <linux/atm_he.h>
87 
/* hprintk: always-compiled error logging, tagged with the adapter number.
 * NOTE: both macros expand the identifier 'he_dev', so a local or
 * parameter of that name must be in scope at every call site.
 */
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

/* HPRINTK: debug logging; compiles to a no-op unless HE_DEBUG is defined */
#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
95 
96 /* declarations */
97 
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 
112 /* globals */
113 
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
120 static bool sdh;
121 
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	/* GPIO states replayed in order to bit-bang the serial EEPROM's
	 * READ opcode (0000 0011b) on the SI line: chip select is
	 * toggled first, then one data bit is presented per CLK
	 * high/low pair (bit value shown in the trailing comments).
	 */
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
142 
/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	/* Alternating clock levels used to bit-bang one byte on the
	 * EEPROM's serial clock line; each entry is one GPIO state
	 * written in sequence (8 high/low pairs plus a final low).
	 */
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
163 
/* Operations vtable registered with the ATM core via atm_dev_register() */
static const struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
175 
176 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
178 
179 /* section 2.12 connection memory access */
180 
/*
 * Write one word into the adapter's connection memory (TCM, RCM or
 * mailbox, selected by @flags): stage the value in CON_DAT, flush it,
 * issue the write command through CON_CTL, then spin until the
 * controller clears the BUSY bit.  The statement order here is the
 * hardware protocol (section 2.12) and must not be rearranged.
 */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
190 
191 #define he_writel_rcm(dev, val, reg) 				\
192 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 
194 #define he_writel_tcm(dev, val, reg) 				\
195 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 
197 #define he_writel_mbox(dev, val, reg) 				\
198 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 
/*
 * Read one word from connection memory (TCM/RCM/mailbox per @flags):
 * issue the read command via CON_CTL, busy-wait until the controller
 * is done, then fetch the result from CON_DAT (section 2.12).
 */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}
207 
208 #define he_readl_rcm(dev, reg) \
209 			he_readl_internal(dev, reg, CON_CTL_RCM)
210 
211 #define he_readl_tcm(dev, reg) \
212 			he_readl_internal(dev, reg, CON_CTL_TCM)
213 
214 #define he_readl_mbox(dev, reg) \
215 			he_readl_internal(dev, reg, CON_CTL_MBOX)
216 
217 
218 /* figure 2.2 connection id */
219 
220 #define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
221 
222 /* 2.5.1 per connection transmit state registers */
223 
224 #define he_writel_tsr0(dev, val, cid) \
225 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 
229 #define he_writel_tsr1(dev, val, cid) \
230 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 
232 #define he_writel_tsr2(dev, val, cid) \
233 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 
235 #define he_writel_tsr3(dev, val, cid) \
236 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 
238 #define he_writel_tsr4(dev, val, cid) \
239 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 
241 	/* from page 2-20
242 	 *
243 	 * NOTE While the transmit connection is active, bits 23 through 0
244 	 *      of this register must not be written by the host.  Byte
245 	 *      enables should be used during normal operation when writing
246 	 *      the most significant byte.
247 	 */
248 
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 							CON_CTL_TCM \
252 							| CON_BYTE_DISABLE_2 \
253 							| CON_BYTE_DISABLE_1 \
254 							| CON_BYTE_DISABLE_0)
255 
256 #define he_readl_tsr4(dev, cid) \
257 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 
259 #define he_writel_tsr5(dev, val, cid) \
260 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 
262 #define he_writel_tsr6(dev, val, cid) \
263 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 
265 #define he_writel_tsr7(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 
268 
269 #define he_writel_tsr8(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 
272 #define he_writel_tsr9(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 
275 #define he_writel_tsr10(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 
278 #define he_writel_tsr11(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 
281 
282 #define he_writel_tsr12(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 
285 #define he_writel_tsr13(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 
288 
289 #define he_writel_tsr14(dev, val, cid) \
290 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 							CON_CTL_TCM \
295 							| CON_BYTE_DISABLE_2 \
296 							| CON_BYTE_DISABLE_1 \
297 							| CON_BYTE_DISABLE_0)
298 
299 /* 2.7.1 per connection receive state registers */
300 
301 #define he_writel_rsr0(dev, val, cid) \
302 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 
306 #define he_writel_rsr1(dev, val, cid) \
307 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 
309 #define he_writel_rsr2(dev, val, cid) \
310 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 
312 #define he_writel_rsr3(dev, val, cid) \
313 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 
315 #define he_writel_rsr4(dev, val, cid) \
316 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 
318 #define he_writel_rsr5(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 
321 #define he_writel_rsr6(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 
324 #define he_writel_rsr7(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 
327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 {
330 	struct hlist_head *head;
331 	struct atm_vcc *vcc;
332 	struct sock *s;
333 	short vpi;
334 	int vci;
335 
336 	vpi = cid >> he_dev->vcibits;
337 	vci = cid & ((1 << he_dev->vcibits) - 1);
338 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 
340 	sk_for_each(s, head) {
341 		vcc = atm_sk(s);
342 		if (vcc->dev == he_dev->atm_dev &&
343 		    vcc->vci == vci && vcc->vpi == vpi &&
344 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 				return vcc;
346 		}
347 	}
348 	return NULL;
349 }
350 
351 static int he_init_one(struct pci_dev *pci_dev,
352 		       const struct pci_device_id *pci_ent)
353 {
354 	struct atm_dev *atm_dev = NULL;
355 	struct he_dev *he_dev = NULL;
356 	int err = 0;
357 
358 	printk(KERN_INFO "ATM he driver\n");
359 
360 	if (pci_enable_device(pci_dev))
361 		return -EIO;
362 	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 		printk(KERN_WARNING "he: no suitable dma available\n");
364 		err = -EIO;
365 		goto init_one_failure;
366 	}
367 
368 	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 	if (!atm_dev) {
370 		err = -ENODEV;
371 		goto init_one_failure;
372 	}
373 	pci_set_drvdata(pci_dev, atm_dev);
374 
375 	he_dev = kzalloc_obj(struct he_dev, GFP_KERNEL);
376 	if (!he_dev) {
377 		err = -ENOMEM;
378 		goto init_one_failure;
379 	}
380 	he_dev->pci_dev = pci_dev;
381 	he_dev->atm_dev = atm_dev;
382 	he_dev->atm_dev->dev_data = he_dev;
383 	atm_dev->dev_data = he_dev;
384 	he_dev->number = atm_dev->number;
385 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
386 	spin_lock_init(&he_dev->global_lock);
387 
388 	if (he_start(atm_dev)) {
389 		he_stop(he_dev);
390 		err = -ENODEV;
391 		goto init_one_failure;
392 	}
393 	he_dev->next = NULL;
394 	if (he_devs)
395 		he_dev->next = he_devs;
396 	he_devs = he_dev;
397 	return 0;
398 
399 init_one_failure:
400 	if (atm_dev)
401 		atm_dev_deregister(atm_dev);
402 	kfree(he_dev);
403 	pci_disable_device(pci_dev);
404 	return err;
405 }
406 
/*
 * PCI remove callback: tear down one adapter, undoing he_init_one() —
 * stop the hardware, unregister the ATM device, free the driver state
 * and disable the PCI device.
 *
 * NOTE(review): the device is never unlinked from the global he_devs
 * list (the in-line comment below is the original author's reminder),
 * so he_devs retains a pointer to freed memory after removal.
 */
static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_disable_device(pci_dev);
}
423 
424 
/*
 * Convert a cell rate (cells per second) to the ATM Forum
 * floating-point rate format: bit 14 flags a nonzero rate, bits 13..9
 * hold a 5-bit exponent and bits 8..0 a 9-bit mantissa.  Zero encodes
 * as zero.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exponent;
	unsigned mantissa;

	if (!rate)
		return 0;

	/* scale by 2^9 so the mantissa carries nine fractional bits,
	   then normalize until the value fits in ten bits */
	mantissa = rate << 9;
	for (exponent = 0; mantissa > 0x3ff; ++exponent)
		mantissa >>= 1;

	return NONZERO | (exponent << 9) | (mantissa & 0x1ff);
}
443 
/*
 * Initialize receive large-buffer free pool 0.  Builds a chain of
 * lbuf descriptors in RCM local-buffer memory (base register
 * RCMLBM_BA): pool 0 uses even descriptor indices (pool 1 takes the
 * odd ones).  Each descriptor stores the buffer's local-memory
 * address in 32-byte units and the index of the next descriptor.
 * Finally programs the pool's head (RLBF0_H), tail (RLBF0_T) and
 * count (RLBF0_C) registers.
 */
static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;	/* next even (pool 0) index */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		/* move to the next memory row once this one is full */
		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;	/* skip pool 1's interleaved descriptor */
	}

	/* tail = last index actually linked (loop over-advanced by 2) */
	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
473 
474 static void he_init_rx_lbfp1(struct he_dev *he_dev)
475 {
476 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
477 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
478 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
479 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
480 
481 	lbufd_index = 1;
482 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
483 
484 	he_writel(he_dev, lbufd_index, RLBF1_H);
485 
486 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
487 		lbufd_index += 2;
488 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
489 
490 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
491 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
492 
493 		if (++lbuf_count == lbufs_per_row) {
494 			lbuf_count = 0;
495 			row_offset += he_dev->bytes_per_row;
496 		}
497 		lbm_offset += 4;
498 	}
499 
500 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
501 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
502 }
503 
/*
 * Initialize the transmit large-buffer free pool.  Transmit
 * descriptors follow the two receive pools in RCM local-buffer memory
 * (indices start after r0 + r1 buffers) and are packed contiguously
 * (index step 1, two words per descriptor), unlike the interleaved
 * receive pools.  Programs the pool head (TLBF_H) and tail (TLBF_T).
 */
static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		/* move to the next memory row once this one is full */
		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;	/* tx descriptors are contiguous */
	}

	/* tail = last index actually linked (loop over-advanced by 1) */
	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
532 
/*
 * Allocate and program the transmit packet descriptor ready queue
 * (TPDRQ): a coherent DMA ring of CONFIG_TPDRQ_SIZE entries shared
 * with the adapter.  Head and tail start at the ring base; the
 * hardware is given the ring's bus address, an initial tail of 0 and
 * the ring size.
 *
 * Returns 0 on success or -ENOMEM if the ring cannot be allocated.
 */
static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						&he_dev->tpdrq_phys,
						GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
553 
/*
 * Initialize the cell scheduler ("cs") block (section 5.1.7): clear
 * the scheduler timers, load the first row of rate-grid timer reload
 * values from the link rate, then program the ER/rate-control
 * constants from the manual's tables — one set for 622 Mb/s boards,
 * one for 155 Mb/s.  All values below are taken verbatim from tables
 * 5.1-5.9 of the ForeRunnerHE reference manual; do not "clean up"
 * the magic numbers.
 */
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	/* scheduler clock depends on board speed */
	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
	 	 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	/* clear the host rate-grid registers */
	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
648 
649 static int he_init_cs_block_rcm(struct he_dev *he_dev)
650 {
651 	unsigned (*rategrid)[16][16];
652 	unsigned rate, delta;
653 	int i, j, reg;
654 
655 	unsigned rate_atmf, exp, man;
656 	unsigned long long rate_cps;
657 	int mult, buf, buf_limit = 4;
658 
659 	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
660 	if (!rategrid)
661 		return -ENOMEM;
662 
663 	/* initialize rate grid group table */
664 
665 	for (reg = 0x0; reg < 0xff; ++reg)
666 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
667 
668 	/* initialize rate controller groups */
669 
670 	for (reg = 0x100; reg < 0x1ff; ++reg)
671 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672 
673 	/* initialize tNrm lookup table */
674 
675 	/* the manual makes reference to a routine in a sample driver
676 	   for proper configuration; fortunately, we only need this
677 	   in order to support abr connection */
678 
679 	/* initialize rate to group table */
680 
681 	rate = he_dev->atm_dev->link_rate;
682 	delta = rate / 32;
683 
684 	/*
685 	 * 2.4 transmit internal functions
686 	 *
687 	 * we construct a copy of the rate grid used by the scheduler
688 	 * in order to construct the rate to group table below
689 	 */
690 
691 	for (j = 0; j < 16; j++) {
692 		(*rategrid)[0][j] = rate;
693 		rate -= delta;
694 	}
695 
696 	for (i = 1; i < 16; i++)
697 		for (j = 0; j < 16; j++)
698 			if (i > 14)
699 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
700 			else
701 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
702 
703 	/*
704 	 * 2.4 transmit internal function
705 	 *
706 	 * this table maps the upper 5 bits of exponent and mantissa
707 	 * of the atm forum representation of the rate into an index
708 	 * on rate grid
709 	 */
710 
711 	rate_atmf = 0;
712 	while (rate_atmf < 0x400) {
713 		man = (rate_atmf & 0x1f) << 4;
714 		exp = rate_atmf >> 5;
715 
716 		/*
717 			instead of '/ 512', use '>> 9' to prevent a call
718 			to divdu3 on x86 platforms
719 		*/
720 		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
721 
722 		if (rate_cps < 10)
723 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
724 
725 		for (i = 255; i > 0; i--)
726 			if ((*rategrid)[i/16][i%16] >= rate_cps)
727 				break;	 /* pick nearest rate instead? */
728 
729 		/*
730 		 * each table entry is 16 bits: (rate grid index (8 bits)
731 		 * and a buffer limit (8 bits)
732 		 * there are two table entries in each 32-bit register
733 		 */
734 
735 #ifdef notdef
736 		buf = rate_cps * he_dev->tx_numbuffs /
737 				(he_dev->atm_dev->link_rate * 2);
738 #else
739 		/* this is pretty, but avoids _divdu3 and is mostly correct */
740 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741 		if (rate_cps > (272ULL * mult))
742 			buf = 4;
743 		else if (rate_cps > (204ULL * mult))
744 			buf = 3;
745 		else if (rate_cps > (136ULL * mult))
746 			buf = 2;
747 		else if (rate_cps > (68ULL * mult))
748 			buf = 1;
749 		else
750 			buf = 0;
751 #endif
752 		if (buf > buf_limit)
753 			buf = buf_limit;
754 		reg = (reg << 16) | ((i << 8) | buf);
755 
756 #define RTGTBL_OFFSET 0x400
757 
758 		if (rate_atmf & 0x1)
759 			he_writel_rcm(he_dev, reg,
760 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
761 
762 		++rate_atmf;
763 	}
764 
765 	kfree(rategrid);
766 	return 0;
767 }
768 
/*
 * Set up the receive/transmit queues for one buffer group (per the
 * NOTES at the top of the file, only group 0 is used):
 *
 *   - disables the small-buffer pool (RBPS registers zeroed),
 *   - allocates the large-buffer bookkeeping (allocation bitmap and
 *     index -> virtual-address table), a DMA pool of data buffers,
 *     and the large-buffer pool ring (RBPL), pre-filling the ring
 *     with CONFIG_RBPL_SIZE buffers,
 *   - allocates and programs the receive buffer return queue (RBRQ),
 *     optionally with interrupt coalescing,
 *   - allocates and programs the transmit buffer return queue (TBRQ).
 *
 * Returns 0 on success or -ENOMEM; on failure everything acquired so
 * far is released through the goto-unwind labels at the bottom
 * (each label frees what was acquired after the one below it).
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* small buffer pool: disabled (zero queue size, threshold 1) */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE,
					 GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* descriptor ring shared with the adapter */
	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* pre-fill the ring: one pool buffer per descriptor */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		/* hardware sees the payload, not the he_buff header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	/* program the large-buffer pool registers for this group */
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	/* return every buffer handed out so far to the DMA pool */
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	bitmap_free(he_dev->rbpl_table);

	return -ENOMEM;
}
904 
/*
 * Allocate and program interrupt queue 0 (the only one used — see the
 * NOTES at the top of the file) and install the interrupt handler.
 * The ring is CONFIG_IRQ_SIZE entries plus one extra slot that holds
 * the hardware-maintained tail offset (section 2.9.3.5).  Queues 1-3
 * are explicitly disabled and all groups are mapped to queue 0.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 * NOTE(review): on request_irq failure the DMA ring is left allocated
 * here — presumably reclaimed by he_stop() on the caller's unwind
 * path; confirm against he_start()/he_stop().
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					      &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	/* the extra slot past the ring holds the device-written tail */
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale entries are never processed */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* queues 1-3 are unused: base/head/control/data all zeroed */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	/* route every connection group to interrupt queue 0 */
	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
967 
968 static int he_start(struct atm_dev *dev)
969 {
970 	struct he_dev *he_dev;
971 	struct pci_dev *pci_dev;
972 	unsigned long membase;
973 
974 	u16 command;
975 	u32 gen_cntl_0, host_cntl, lb_swap;
976 	u8 cache_size, timer;
977 
978 	unsigned err;
979 	unsigned int status, reg;
980 	int i, group;
981 
982 	he_dev = HE_DEV(dev);
983 	pci_dev = he_dev->pci_dev;
984 
985 	membase = pci_resource_start(pci_dev, 0);
986 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
987 
988 	/*
989 	 * pci bus controller initialization
990 	 */
991 
992 	/* 4.3 pci bus controller-specific initialization */
993 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
994 		hprintk("can't read GEN_CNTL_0\n");
995 		return -EINVAL;
996 	}
997 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
998 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
999 		hprintk("can't write GEN_CNTL_0.\n");
1000 		return -EINVAL;
1001 	}
1002 
1003 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1004 		hprintk("can't read PCI_COMMAND.\n");
1005 		return -EINVAL;
1006 	}
1007 
1008 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1009 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1010 		hprintk("can't enable memory.\n");
1011 		return -EINVAL;
1012 	}
1013 
1014 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1015 		hprintk("can't read cache line size?\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (cache_size < 16) {
1020 		cache_size = 16;
1021 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1022 			hprintk("can't set cache line size to %d\n", cache_size);
1023 	}
1024 
1025 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1026 		hprintk("can't read latency timer?\n");
1027 		return -EINVAL;
1028 	}
1029 
1030 	/* from table 3.9
1031 	 *
1032 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1033 	 *
1034 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1035 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1036 	 *
1037 	 */
1038 #define LAT_TIMER 209
1039 	if (timer < LAT_TIMER) {
1040 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1041 		timer = LAT_TIMER;
1042 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1043 			hprintk("can't set latency timer to %d\n", timer);
1044 	}
1045 
1046 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1047 		hprintk("can't set up page mapping\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	/* 4.4 card reset */
1052 	he_writel(he_dev, 0x0, RESET_CNTL);
1053 	he_writel(he_dev, 0xff, RESET_CNTL);
1054 
1055 	msleep(16);	/* 16 ms */
1056 	status = he_readl(he_dev, RESET_CNTL);
1057 	if ((status & BOARD_RST_STATUS) == 0) {
1058 		hprintk("reset failed\n");
1059 		return -EINVAL;
1060 	}
1061 
1062 	/* 4.5 set bus width */
1063 	host_cntl = he_readl(he_dev, HOST_CNTL);
1064 	if (host_cntl & PCI_BUS_SIZE64)
1065 		gen_cntl_0 |= ENBL_64;
1066 	else
1067 		gen_cntl_0 &= ~ENBL_64;
1068 
1069 	if (disable64 == 1) {
1070 		hprintk("disabling 64-bit pci bus transfers\n");
1071 		gen_cntl_0 &= ~ENBL_64;
1072 	}
1073 
1074 	if (gen_cntl_0 & ENBL_64)
1075 		hprintk("64-bit transfers enabled\n");
1076 
1077 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1078 
1079 	/* 4.7 read prom contents */
1080 	for (i = 0; i < PROD_ID_LEN; ++i)
1081 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1082 
1083 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1084 
1085 	for (i = 0; i < 6; ++i)
1086 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1087 
1088 	hprintk("%s%s, %pM\n", he_dev->prod_id,
1089 		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1090 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1091 						ATM_OC12_PCR : ATM_OC3_PCR;
1092 
1093 	/* 4.6 set host endianess */
1094 	lb_swap = he_readl(he_dev, LB_SWAP);
1095 	if (he_is622(he_dev))
1096 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1097 	else
1098 		lb_swap |= XFER_SIZE;		/* 8 cells */
1099 #ifdef __BIG_ENDIAN
1100 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1101 #else
1102 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1103 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1104 #endif /* __BIG_ENDIAN */
1105 	he_writel(he_dev, lb_swap, LB_SWAP);
1106 
1107 	/* 4.8 sdram controller initialization */
1108 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1109 
1110 	/* 4.9 initialize rnum value */
1111 	lb_swap |= SWAP_RNUM_MAX(0xf);
1112 	he_writel(he_dev, lb_swap, LB_SWAP);
1113 
1114 	/* 4.10 initialize the interrupt queues */
1115 	if ((err = he_init_irq(he_dev)) != 0)
1116 		return err;
1117 
1118 	/* 4.11 enable pci bus controller state machines */
1119 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1120 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1121 	he_writel(he_dev, host_cntl, HOST_CNTL);
1122 
1123 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1124 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1125 
1126 	/*
1127 	 * atm network controller initialization
1128 	 */
1129 
1130 	/* 5.1.1 generic configuration state */
1131 
1132 	/*
1133 	 *		local (cell) buffer memory map
1134 	 *
1135 	 *             HE155                          HE622
1136 	 *
1137 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1138 	 *         |            |            |                   |   |
1139 	 *         |  utility   |            |        rx0        |   |
1140 	 *        5|____________|         255|___________________| u |
1141 	 *        6|            |         256|                   | t |
1142 	 *         |            |            |                   | i |
1143 	 *         |    rx0     |     row    |        tx         | l |
1144 	 *         |            |            |                   | i |
1145 	 *         |            |         767|___________________| t |
1146 	 *      517|____________|         768|                   | y |
1147 	 * row  518|            |            |        rx1        |   |
1148 	 *         |            |        1023|___________________|___|
1149 	 *         |            |
1150 	 *         |    tx      |
1151 	 *         |            |
1152 	 *         |            |
1153 	 *     1535|____________|
1154 	 *     1536|            |
1155 	 *         |    rx1     |
1156 	 *     2047|____________|
1157 	 *
1158 	 */
1159 
1160 	/* total 4096 connections */
1161 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1162 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1163 
1164 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1165 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1166 		return -ENODEV;
1167 	}
1168 
1169 	if (nvpibits != -1) {
1170 		he_dev->vpibits = nvpibits;
1171 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1172 	}
1173 
1174 	if (nvcibits != -1) {
1175 		he_dev->vcibits = nvcibits;
1176 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1177 	}
1178 
1179 
1180 	if (he_is622(he_dev)) {
1181 		he_dev->cells_per_row = 40;
1182 		he_dev->bytes_per_row = 2048;
1183 		he_dev->r0_numrows = 256;
1184 		he_dev->tx_numrows = 512;
1185 		he_dev->r1_numrows = 256;
1186 		he_dev->r0_startrow = 0;
1187 		he_dev->tx_startrow = 256;
1188 		he_dev->r1_startrow = 768;
1189 	} else {
1190 		he_dev->cells_per_row = 20;
1191 		he_dev->bytes_per_row = 1024;
1192 		he_dev->r0_numrows = 512;
1193 		he_dev->tx_numrows = 1018;
1194 		he_dev->r1_numrows = 512;
1195 		he_dev->r0_startrow = 6;
1196 		he_dev->tx_startrow = 518;
1197 		he_dev->r1_startrow = 1536;
1198 	}
1199 
1200 	he_dev->cells_per_lbuf = 4;
1201 	he_dev->buffer_limit = 4;
1202 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1203 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1204 	if (he_dev->r0_numbuffs > 2560)
1205 		he_dev->r0_numbuffs = 2560;
1206 
1207 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1208 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1209 	if (he_dev->r1_numbuffs > 2560)
1210 		he_dev->r1_numbuffs = 2560;
1211 
1212 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1213 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1214 	if (he_dev->tx_numbuffs > 5120)
1215 		he_dev->tx_numbuffs = 5120;
1216 
1217 	/* 5.1.2 configure hardware dependent registers */
1218 
1219 	he_writel(he_dev,
1220 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1221 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1222 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1223 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1224 								LBARB);
1225 
1226 	he_writel(he_dev, BANK_ON |
1227 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1228 								SDRAMCON);
1229 
1230 	he_writel(he_dev,
1231 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1232 						RM_RW_WAIT(1), RCMCONFIG);
1233 	he_writel(he_dev,
1234 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1235 						TM_RW_WAIT(1), TCMCONFIG);
1236 
1237 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1238 
1239 	he_writel(he_dev,
1240 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1241 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1242 		RX_VALVP(he_dev->vpibits) |
1243 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1244 
1245 	he_writel(he_dev, DRF_THRESH(0x20) |
1246 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1247 		TX_VCI_MASK(he_dev->vcibits) |
1248 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1249 
1250 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1251 
1252 	he_writel(he_dev, PHY_INT_ENB |
1253 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1254 								RH_CONFIG);
1255 
1256 	/* 5.1.3 initialize connection memory */
1257 
1258 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1259 		he_writel_tcm(he_dev, 0, i);
1260 
1261 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1262 		he_writel_rcm(he_dev, 0, i);
1263 
1264 	/*
1265 	 *	transmit connection memory map
1266 	 *
1267 	 *                  tx memory
1268 	 *          0x0 ___________________
1269 	 *             |                   |
1270 	 *             |                   |
1271 	 *             |       TSRa        |
1272 	 *             |                   |
1273 	 *             |                   |
1274 	 *       0x8000|___________________|
1275 	 *             |                   |
1276 	 *             |       TSRb        |
1277 	 *       0xc000|___________________|
1278 	 *             |                   |
1279 	 *             |       TSRc        |
1280 	 *       0xe000|___________________|
1281 	 *             |       TSRd        |
1282 	 *       0xf000|___________________|
1283 	 *             |       tmABR       |
1284 	 *      0x10000|___________________|
1285 	 *             |                   |
1286 	 *             |       tmTPD       |
1287 	 *             |___________________|
1288 	 *             |                   |
1289 	 *                      ....
1290 	 *      0x1ffff|___________________|
1291 	 *
1292 	 *
1293 	 */
1294 
1295 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1296 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1297 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1298 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1299 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1300 
1301 
1302 	/*
1303 	 *	receive connection memory map
1304 	 *
1305 	 *          0x0 ___________________
1306 	 *             |                   |
1307 	 *             |                   |
1308 	 *             |       RSRa        |
1309 	 *             |                   |
1310 	 *             |                   |
1311 	 *       0x8000|___________________|
1312 	 *             |                   |
1313 	 *             |             rx0/1 |
1314 	 *             |       LBM         |   link lists of local
1315 	 *             |             tx    |   buffer memory
1316 	 *             |                   |
1317 	 *       0xd000|___________________|
1318 	 *             |                   |
1319 	 *             |      rmABR        |
1320 	 *       0xe000|___________________|
1321 	 *             |                   |
1322 	 *             |       RSRb        |
1323 	 *             |___________________|
1324 	 *             |                   |
1325 	 *                      ....
1326 	 *       0xffff|___________________|
1327 	 */
1328 
1329 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1330 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1331 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1332 
1333 	/* 5.1.4 initialize local buffer free pools linked lists */
1334 
1335 	he_init_rx_lbfp0(he_dev);
1336 	he_init_rx_lbfp1(he_dev);
1337 
1338 	he_writel(he_dev, 0x0, RLBC_H);
1339 	he_writel(he_dev, 0x0, RLBC_T);
1340 	he_writel(he_dev, 0x0, RLBC_H2);
1341 
1342 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1343 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1344 
1345 	he_init_tx_lbfp(he_dev);
1346 
1347 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1348 
1349 	/* 5.1.5 initialize intermediate receive queues */
1350 
1351 	if (he_is622(he_dev)) {
1352 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1353 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1354 
1355 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1356 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1357 
1358 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1359 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1360 
1361 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1362 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1363 
1364 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1365 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1366 
1367 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1368 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1369 
1370 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1371 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1372 
1373 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1374 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1375 	} else {
1376 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1377 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1378 
1379 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1380 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1381 
1382 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1383 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1384 
1385 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1386 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1387 
1388 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1389 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1390 
1391 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1392 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1393 
1394 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1395 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1396 
1397 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1398 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1399 	}
1400 
1401 	/* 5.1.6 application tunable parameters */
1402 
1403 	he_writel(he_dev, 0x0, MCC);
1404 	he_writel(he_dev, 0x0, OEC);
1405 	he_writel(he_dev, 0x0, DCC);
1406 	he_writel(he_dev, 0x0, CEC);
1407 
1408 	/* 5.1.7 cs block initialization */
1409 
1410 	he_init_cs_block(he_dev);
1411 
1412 	/* 5.1.8 cs block connection memory initialization */
1413 
1414 	if (he_init_cs_block_rcm(he_dev) < 0)
1415 		return -ENOMEM;
1416 
1417 	/* 5.1.10 initialize host structures */
1418 
1419 	he_init_tpdrq(he_dev);
1420 
1421 	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1422 					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1423 	if (he_dev->tpd_pool == NULL) {
1424 		hprintk("unable to create tpd dma_pool\n");
1425 		return -ENOMEM;
1426 	}
1427 
1428 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1429 
1430 	if (he_init_group(he_dev, 0) != 0)
1431 		return -ENOMEM;
1432 
1433 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1434 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1435 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1436 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1437 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1438 						G0_RBPS_BS + (group * 32));
1439 
1440 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1441 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1442 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1443 						G0_RBPL_QI + (group * 32));
1444 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1445 
1446 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1447 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1448 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1449 						G0_RBRQ_Q + (group * 16));
1450 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1451 
1452 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1453 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1454 		he_writel(he_dev, TBRQ_THRESH(0x1),
1455 						G0_TBRQ_THRESH + (group * 16));
1456 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1457 	}
1458 
1459 	/* host status page */
1460 
1461 	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
1462 					 sizeof(struct he_hsp),
1463 					 &he_dev->hsp_phys, GFP_KERNEL);
1464 	if (he_dev->hsp == NULL) {
1465 		hprintk("failed to allocate host status page\n");
1466 		return -ENOMEM;
1467 	}
1468 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1469 
1470 	/* initialize framer */
1471 
1472 #ifdef CONFIG_ATM_HE_USE_SUNI
1473 	if (he_isMM(he_dev))
1474 		suni_init(he_dev->atm_dev);
1475 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1476 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1477 #endif /* CONFIG_ATM_HE_USE_SUNI */
1478 
1479 	if (sdh) {
1480 		/* this really should be in suni.c but for now... */
1481 		int val;
1482 
1483 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1484 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1485 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1486 		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1487 	}
1488 
1489 	/* 5.1.12 enable transmit and receive */
1490 
1491 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1492 	reg |= TX_ENABLE|ER_ENABLE;
1493 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1494 
1495 	reg = he_readl(he_dev, RC_CONFIG);
1496 	reg |= RX_ENABLE;
1497 	he_writel(he_dev, reg, RC_CONFIG);
1498 
1499 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1500 		he_dev->cs_stper[i].inuse = 0;
1501 		he_dev->cs_stper[i].pcr = -1;
1502 	}
1503 	he_dev->total_bw = 0;
1504 
1505 
1506 	/* atm linux initialization */
1507 
1508 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1509 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1510 
1511 	he_dev->irq_peak = 0;
1512 	he_dev->rbrq_peak = 0;
1513 	he_dev->rbpl_peak = 0;
1514 	he_dev->tbrq_peak = 0;
1515 
1516 	HPRINTK("hell bent for leather!\n");
1517 
1518 	return 0;
1519 }
1520 
/*
 * he_stop - quiesce the adapter and release all resources
 *
 * Safe to call on a partially initialized device: every teardown step
 * is guarded by a check of the corresponding resource pointer.  The
 * order is deliberate: interrupts are masked and the tasklet disabled
 * before the transmit/receive engines are stopped, the PHY is stopped
 * before the irq is freed, and the register mapping is released last.
 */
static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		/* mask interrupt generation at the PCI bus controller */
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	/* free the interrupt event queue (allocated with one spare entry) */
	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		/* drain any buffers still owned by the receive pool before
		 * the ring itself is freed */
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	/* kfree/bitmap_free/dma_pool_destroy accept NULL, so no guards */
	kfree(he_dev->rbpl_virt);
	bitmap_free(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev,
				  CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	/* drop bus mastering and memory decode */
	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1603 
1604 static struct he_tpd *
1605 __alloc_tpd(struct he_dev *he_dev)
1606 {
1607 	struct he_tpd *tpd;
1608 	dma_addr_t mapping;
1609 
1610 	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1611 	if (tpd == NULL)
1612 		return NULL;
1613 
1614 	tpd->status = TPD_ADDR(mapping);
1615 	tpd->reserved = 0;
1616 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1617 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1618 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1619 
1620 	return tpd;
1621 }
1622 
/* AAL5 trailer length field: big-endian 16-bit value located 6 bytes
 * from the end of the CPCS-PDU */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

/* note: every use of the macro arguments is fully parenthesized; the
 * old [(len-1)] form mis-expanded for arguments containing operators
 * of lower precedence than '-' */
#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
1636 
/*
 * he_service_rbrq - drain the receive buffer return queue for @group
 *
 * Walks the rbrq from the driver's head to the tail published by the
 * adapter in the host status page.  Returned host buffers are moved
 * onto the owning vcc's buffer list; when an END_PDU entry arrives the
 * accumulated buffers are copied into a freshly charged skb and pushed
 * up the stack, after which the buffers go back to the rbpl dma pool.
 * The hardware queue head register is rewritten only if at least one
 * entry was consumed.
 *
 * Caller must hold he_dev->global_lock; it is dropped and reacquired
 * around vcc->push().  Returns the number of PDUs assembled (used by
 * the tasklet to decide whether to replenish the buffer pool).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* recover the he_buff from the pool index encoded in the
		 * entry's buffer address */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		/* cache the vcc lookup across consecutive entries with the
		 * same connection id */
		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			/* orphaned buffer: recycle it unless the entry
			 * reported a host buffer error */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* buffer length is reported in 32-bit words; take ownership
		 * by moving the buffer onto the vcc's assembly list */
		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* linearize the buffer chain into the skb */
		list_for_each_entry(heb, &he_vcc->buffers, entry)
			skb_put_data(skb, &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* true PDU length comes from the AAL5 trailer */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* drop the device lock while handing the skb to the stack */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* PDU complete (or dropped): recycle all buffers on the
		 * vcc's list and reset the assembly state */
		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we have consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1796 
1797 static void
1798 he_service_tbrq(struct he_dev *he_dev, int group)
1799 {
1800 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1801 				((unsigned long)he_dev->tbrq_base |
1802 					he_dev->hsp->group[group].tbrq_tail);
1803 	struct he_tpd *tpd;
1804 	int slot, updated = 0;
1805 	struct he_tpd *__tpd;
1806 
1807 	/* 2.1.6 transmit buffer return queue */
1808 
1809 	while (he_dev->tbrq_head != tbrq_tail) {
1810 		++updated;
1811 
1812 		HPRINTK("tbrq%d 0x%x%s%s\n",
1813 			group,
1814 			TBRQ_TPD(he_dev->tbrq_head),
1815 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1816 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1817 		tpd = NULL;
1818 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1819 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1820 				tpd = __tpd;
1821 				list_del(&__tpd->entry);
1822 				break;
1823 			}
1824 		}
1825 
1826 		if (tpd == NULL) {
1827 			hprintk("unable to locate tpd for dma buffer %x\n",
1828 						TBRQ_TPD(he_dev->tbrq_head));
1829 			goto next_tbrq_entry;
1830 		}
1831 
1832 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1833 			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1834 				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1835 			if (tpd->vcc)
1836 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1837 
1838 			goto next_tbrq_entry;
1839 		}
1840 
1841 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1842 			if (tpd->iovec[slot].addr)
1843 				dma_unmap_single(&he_dev->pci_dev->dev,
1844 					tpd->iovec[slot].addr,
1845 					tpd->iovec[slot].len & TPD_LEN_MASK,
1846 							DMA_TO_DEVICE);
1847 			if (tpd->iovec[slot].len & TPD_LST)
1848 				break;
1849 
1850 		}
1851 
1852 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1853 			if (tpd->vcc && tpd->vcc->pop)
1854 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1855 			else
1856 				dev_kfree_skb_any(tpd->skb);
1857 		}
1858 
1859 next_tbrq_entry:
1860 		if (tpd)
1861 			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1862 		he_dev->tbrq_head = (struct he_tbrq *)
1863 				((unsigned long) he_dev->tbrq_base |
1864 					TBRQ_MASK(he_dev->tbrq_head + 1));
1865 	}
1866 
1867 	if (updated) {
1868 		if (updated > he_dev->tbrq_peak)
1869 			he_dev->tbrq_peak = updated;
1870 
1871 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1872 						G0_TBRQ_H + (group * 16));
1873 	}
1874 }
1875 
/*
 * he_service_rbpl - replenish the large receive buffer pool ring
 *
 * Queues freshly allocated he_buff entries onto the rbpl ring until
 * the ring would be full (tail catching up with the head read from the
 * adapter -- see table 3.42), the index table has no free slot, or the
 * dma pool is exhausted.  The tail register is written back only when
 * at least one buffer was queued.
 *
 * @group is unused here: only group 0 carries traffic (see NOTES in
 * the file header), so the G0_* registers are addressed directly.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		/* find a free index slot, first from the rotating hint and
		 * then wrapping back to the start of the table */
		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		/* hand the buffer to the hardware: index for later lookup,
		 * dma address of the payload area */
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1922 
/*
 * he_tasklet - deferred interrupt processing
 *
 * Drains the interrupt event queue filled in by he_irq_handler() and
 * dispatches each event to the matching service routine.  Runs with
 * global_lock held; the lock is dropped around the PHY interrupt
 * callback.  When any events were handled, the consumed tail is
 * written back to the adapter and INT_FIFO is read to flush posted
 * writes (8.1.2 controller errata).
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_RBRQ_TIMER:
				/* replenish the buffer pool only if PDUs
				 * were actually assembled */
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* the PHY callback may sleep or take other
				 * locks, so drop ours around it */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so a stale isw is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2009 
/*
 * he_irq_handler() -- top-half interrupt handler.
 *
 * Synchronizes the driver's irq_tail with the tail offset the adapter
 * writes into host memory, and if the interrupt queue is non-empty,
 * schedules the tasklet to drain it and acknowledges the interrupt.
 * Returns IRQ_HANDLED only when work was actually found (the line may
 * be shared).
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* fold the adapter-written tail offset (in words) into a pointer
	   inside the irq queue ring */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		/* host-memory copy was stale; re-read the tail straight from
		   the IRQ0_BASE register instead */
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		/* defer queue processing to he_tasklet */
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2047 
/*
 * __enqueue_tpd() -- queue a transmit packet descriptor on the TPDRQ
 * (2.1.5 transmit packet descriptor ready queue).
 *
 * Caller must hold he_dev->global_lock.  If the ring is full (the new
 * tail would collide with the adapter's head even after re-reading
 * TPDRQ_B_H), the tpd is dropped: its DMA mappings are unmapped, the
 * skb is popped/freed with tx_err incremented, and the tpd is returned
 * to the dma pool.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();	/* descriptor must be visible before the tail pointer is advanced */

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2112 
/*
 * he_open() -- atmdev_ops open hook.
 *
 * Allocates the per-connection he_vcc state and programs the adapter's
 * transmit (TSR, 2.3.3) and/or receive (RSR) connection state registers
 * according to the requested QoS.  AAL0 and AAL5 are supported; tx
 * traffic classes are UBR and CBR (CBR capped at 90% of link rate per
 * the 8.2.3 scheduler-wrap errata).  Returns 0 on success or a negative
 * errno; on failure the he_vcc is freed and ATM_VF_ADDR is cleared.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	/* GFP_ATOMIC: open can be called from non-sleepable context */
	he_vcc = kmalloc_obj(struct he_vcc, GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* no CBR rate-controller slot claimed yet */

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* the connection must be idle before it can be (re)opened */
		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				/* connections with the same pcr share one slot */
				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		/* clear the remaining transmit state registers */
		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
		 				&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
2325 
/*
 * he_close() -- atmdev_ops close hook.
 *
 * Shuts down the receive side (2.7.2.2) and/or transmit side (2.1.2,
 * 2.3.1.1) of the connection.  The rx close is handed to the adapter
 * via the RXCON_CLOSE mailbox and we sleep (up to 30s) on rx_waitq for
 * its completion.  The tx close first drains outstanding packets
 * (bounded exponential backoff on sk_wmem_alloc), flushes the
 * connection, enqueues an end-of-stream tpd and sleeps on tx_waitq,
 * then busy-waits for TSR4_SESSION_ENDED and an idle TSR0 state before
 * releasing any CBR rate-controller slot.  Frees he_vcc at the end.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* must be on the waitqueue before the close is issued so the
		   completion wakeup cannot be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* wait (with capped exponential backoff) for in-flight tx
		   buffers to drain; sk_wmem_alloc > 1 means skbs outstanding */
		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* enqueue an end-of-stream tpd; its completion wakes tx_waitq */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller slot claimed in he_open */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2483 
/*
 * he_send() -- atmdev_ops send hook.
 *
 * Maps the skb for DMA, builds one or more transmit packet descriptors
 * and enqueues them on the TPDRQ.  With USE_SCATTERGATHER each skb
 * fragment gets an iovec slot, chaining extra tpds when TPD_MAXIOV
 * slots are exhausted; only the final tpd carries the skb so completion
 * frees it exactly once.  For AAL0, the PTI/CLP bits are lifted from
 * the raw cell header into the tpd status.  Returns 0 on success or a
 * negative errno (the skb is consumed either way).
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* AAL0 pdus must be exactly one cell; anything larger than a tpd
	   buffer cannot be described */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* AAL0: extract PTI and CLP from byte 3 of the cell header,
		   then strip the header so only the payload is sent */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
				skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
		tpd->iovec[slot].len = skb_frag_size(frag);
		++slot;

	}

	/* mark the final iovec so the adapter knows the pdu ends here */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;		/* interrupt on completion */

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();	/* tpd fields must be visible before enqueue */
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2605 
2606 static int
2607 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2608 {
2609 	unsigned long flags;
2610 	struct he_dev *he_dev = HE_DEV(atm_dev);
2611 	struct he_ioctl_reg reg;
2612 	int err = 0;
2613 
2614 	switch (cmd) {
2615 		case HE_GET_REG:
2616 			if (!capable(CAP_NET_ADMIN))
2617 				return -EPERM;
2618 
2619 			if (copy_from_user(&reg, arg,
2620 					   sizeof(struct he_ioctl_reg)))
2621 				return -EFAULT;
2622 
2623 			spin_lock_irqsave(&he_dev->global_lock, flags);
2624 			switch (reg.type) {
2625 				case HE_REGTYPE_PCI:
2626 					if (reg.addr >= HE_REGMAP_SIZE) {
2627 						err = -EINVAL;
2628 						break;
2629 					}
2630 
2631 					reg.val = he_readl(he_dev, reg.addr);
2632 					break;
2633 				case HE_REGTYPE_RCM:
2634 					reg.val =
2635 						he_readl_rcm(he_dev, reg.addr);
2636 					break;
2637 				case HE_REGTYPE_TCM:
2638 					reg.val =
2639 						he_readl_tcm(he_dev, reg.addr);
2640 					break;
2641 				case HE_REGTYPE_MBOX:
2642 					reg.val =
2643 						he_readl_mbox(he_dev, reg.addr);
2644 					break;
2645 				default:
2646 					err = -EINVAL;
2647 					break;
2648 			}
2649 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2650 			if (err == 0)
2651 				if (copy_to_user(arg, &reg,
2652 							sizeof(struct he_ioctl_reg)))
2653 					return -EFAULT;
2654 			break;
2655 		default:
2656 #ifdef CONFIG_ATM_HE_USE_SUNI
2657 			if (atm_dev->phy && atm_dev->phy->ioctl)
2658 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2659 #else /* CONFIG_ATM_HE_USE_SUNI */
2660 			err = -EINVAL;
2661 #endif /* CONFIG_ATM_HE_USE_SUNI */
2662 			break;
2663 	}
2664 
2665 	return err;
2666 }
2667 
2668 static void
2669 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2670 {
2671 	unsigned long flags;
2672 	struct he_dev *he_dev = HE_DEV(atm_dev);
2673 
2674 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2675 
2676 	spin_lock_irqsave(&he_dev->global_lock, flags);
2677 	he_writel(he_dev, val, FRAMER + (addr*4));
2678 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2679 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2680 }
2681 
2682 
2683 static unsigned char
2684 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2685 {
2686 	unsigned long flags;
2687 	struct he_dev *he_dev = HE_DEV(atm_dev);
2688 	unsigned reg;
2689 
2690 	spin_lock_irqsave(&he_dev->global_lock, flags);
2691 	reg = he_readl(he_dev, FRAMER + (addr*4));
2692 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2693 
2694 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2695 	return reg;
2696 }
2697 
/*
 * he_proc_read() -- /proc read handler; emits one line of statistics
 * per call, selected by *pos, returning 0 when there is nothing more.
 *
 * NOTE(review): mcc/oec/dcc/cec are function-static, so they accumulate
 * across reads and are shared by every he device in the system --
 * presumably intended as running totals, but verify for multi-adapter
 * setups.  Also note `left` is an int assigned from loff_t *pos.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	/* the hardware counters clear on read; fold them into the
	   running totals */
	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	/* dead code: `inuse` is not declared here, so this would not
	   compile if the #ifdef were ever enabled */
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
2780 
2781 /* eeprom routines  -- see 4.7 */
2782 
/*
 * read_prom_byte() -- bit-bang one byte out of the serial EEPROM
 * (see 4.7).  Clocks a READ opcode and the 8 address bits (MSB first)
 * through HOST_CNTL with the ID write-enable bit set, then clocks the
 * 8 data bits back in, MSB first.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	/* preserve HOST_CNTL, clearing only the EEPROM control bits */
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		/* each address bit is presented on both clock phases */
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* deselect the EEPROM */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2831 
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* module parameters -- defaults are documented in the PARM_DESC text */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

/* PCI IDs this driver binds to */
static const struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

module_pci_driver(he_driver);
2863