xref: /linux/drivers/atm/he.c (revision 08f3e0873ac203449465c2b8473d684e2f9f41d1)
1 /*
2 
3   he.c
4 
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7 
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12 
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17 
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 
22 */
23 
24 /*
25 
26   he.c
27 
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30 
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36 
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40 
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 
44   AUTHORS:
45 	chas williams <chas@cmf.nrl.navy.mil>
46 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 
48   NOTES:
49 	4096 supported 'connections'
50 	group 0 is used for all traffic
51 	interrupt queue 0 is used for all interrupts
52 	aal0 support (based on work from ulrich.u.muller@nokia.com)
53 
54  */
55 
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <linux/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW			/* still confused about this */
82 /* #undef HE_DEBUG */
83 
84 #include "he.h"
85 #include "suni.h"
86 #include <linux/atm_he.h>
87 
88 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89 
90 #ifdef HE_DEBUG
91 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #else /* !HE_DEBUG */
93 #define HPRINTK(fmt,args...)	do { } while (0)
94 #endif /* HE_DEBUG */
95 
96 /* declarations */
97 
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 
112 /* globals */
113 
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
120 static bool sdh;
121 
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab[] = {
124 	CS_HIGH | CLK_HIGH,
125 	CS_LOW | CLK_LOW,
126 	CLK_HIGH,               /* 0 */
127 	CLK_LOW,
128 	CLK_HIGH,               /* 0 */
129 	CLK_LOW,
130 	CLK_HIGH,               /* 0 */
131 	CLK_LOW,
132 	CLK_HIGH,               /* 0 */
133 	CLK_LOW,
134 	CLK_HIGH,               /* 0 */
135 	CLK_LOW,
136 	CLK_HIGH,               /* 0 */
137 	CLK_LOW | SI_HIGH,
138 	CLK_HIGH | SI_HIGH,     /* 1 */
139 	CLK_LOW | SI_HIGH,
140 	CLK_HIGH | SI_HIGH      /* 1 */
141 };
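/* each CLK_LOW/CLK_HIGH pair above clocks one opcode bit out on SI:
 * six zeros followed by two ones, i.e. the 0000 0011b READ command,
 * framed by the chip select toggle in the first two entries.
 */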
142 
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab[] = {
145 	CLK_LOW,
146 	CLK_HIGH,
147 	CLK_LOW,
148 	CLK_HIGH,
149 	CLK_LOW,
150 	CLK_HIGH,
151 	CLK_LOW,
152 	CLK_HIGH,
153 	CLK_LOW,
154 	CLK_HIGH,
155 	CLK_LOW,
156 	CLK_HIGH,
157 	CLK_LOW,
158 	CLK_HIGH,
159 	CLK_LOW,
160 	CLK_HIGH,
161 	CLK_LOW
162 };
163 
164 static const struct atmdev_ops he_ops =
165 {
166 	.open =		he_open,
167 	.close =	he_close,
168 	.ioctl =	he_ioctl,
169 	.send =		he_send,
170 	.phy_put =	he_phy_put,
171 	.phy_get =	he_phy_get,
172 	.proc_read =	he_proc_read,
173 	.owner =	THIS_MODULE
174 };
175 
176 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
178 
179 /* section 2.12 connection memory access */
180 
181 static __inline__ void
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 								unsigned flags)
184 {
185 	he_writel(he_dev, val, CON_DAT);
186 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
187 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
		;
189 }
190 
191 #define he_writel_rcm(dev, val, reg) 				\
192 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 
194 #define he_writel_tcm(dev, val, reg) 				\
195 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 
197 #define he_writel_mbox(dev, val, reg) 				\
198 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 
200 static unsigned
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 {
203 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
		;
205 	return he_readl(he_dev, CON_DAT);
206 }
207 
208 #define he_readl_rcm(dev, reg) \
209 			he_readl_internal(dev, reg, CON_CTL_RCM)
210 
211 #define he_readl_tcm(dev, reg) \
212 			he_readl_internal(dev, reg, CON_CTL_TCM)
213 
214 #define he_readl_mbox(dev, reg) \
215 			he_readl_internal(dev, reg, CON_CTL_MBOX)
216 
217 
218 /* figure 2.2 connection id */
219 
220 #define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
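/* illustrative example (ours, assuming vcibits = 10): vpi 1 / vci 32
 * maps to cid = ((1 << 10) | 32) & 0x1fff = 0x420; vpi bits that land
 * above the 13-bit mask are silently dropped.
 */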
221 
222 /* 2.5.1 per connection transmit state registers */
223 
224 #define he_writel_tsr0(dev, val, cid) \
225 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 
229 #define he_writel_tsr1(dev, val, cid) \
230 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 
232 #define he_writel_tsr2(dev, val, cid) \
233 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 
235 #define he_writel_tsr3(dev, val, cid) \
236 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 
238 #define he_writel_tsr4(dev, val, cid) \
239 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 
241 	/* from page 2-20
242 	 *
243 	 * NOTE While the transmit connection is active, bits 23 through 0
244 	 *      of this register must not be written by the host.  Byte
245 	 *      enables should be used during normal operation when writing
246 	 *      the most significant byte.
247 	 */
248 
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 							CON_CTL_TCM \
252 							| CON_BYTE_DISABLE_2 \
253 							| CON_BYTE_DISABLE_1 \
254 							| CON_BYTE_DISABLE_0)
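/* with byte lanes 0-2 disabled, only bits 31:24 of tsr4 are written,
 * which is how the "byte enables ... most significant byte" note
 * above is honored while the connection is active.
 */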
255 
256 #define he_readl_tsr4(dev, cid) \
257 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 
259 #define he_writel_tsr5(dev, val, cid) \
260 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 
262 #define he_writel_tsr6(dev, val, cid) \
263 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 
265 #define he_writel_tsr7(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 
268 
269 #define he_writel_tsr8(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 
272 #define he_writel_tsr9(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 
275 #define he_writel_tsr10(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 
278 #define he_writel_tsr11(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 
281 
282 #define he_writel_tsr12(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 
285 #define he_writel_tsr13(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 
288 
289 #define he_writel_tsr14(dev, val, cid) \
290 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 							CON_CTL_TCM \
295 							| CON_BYTE_DISABLE_2 \
296 							| CON_BYTE_DISABLE_1 \
297 							| CON_BYTE_DISABLE_0)
298 
299 /* 2.7.1 per connection receive state registers */
300 
301 #define he_writel_rsr0(dev, val, cid) \
302 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 
306 #define he_writel_rsr1(dev, val, cid) \
307 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 
309 #define he_writel_rsr2(dev, val, cid) \
310 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 
312 #define he_writel_rsr3(dev, val, cid) \
313 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 
315 #define he_writel_rsr4(dev, val, cid) \
316 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 
318 #define he_writel_rsr5(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 
321 #define he_writel_rsr6(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 
324 #define he_writel_rsr7(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 
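/* find the open vcc matching this cid in the generic atm vcc hash;
 * the cid is split back into vpi/vci with the same vcibits used by
 * he_mkcid above.  callers must hold vcc_sklist_lock (he_service_rbrq
 * takes the read lock before calling this).
 */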
327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 {
330 	struct hlist_head *head;
331 	struct atm_vcc *vcc;
332 	struct sock *s;
333 	short vpi;
334 	int vci;
335 
336 	vpi = cid >> he_dev->vcibits;
337 	vci = cid & ((1 << he_dev->vcibits) - 1);
338 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 
340 	sk_for_each(s, head) {
341 		vcc = atm_sk(s);
342 		if (vcc->dev == he_dev->atm_dev &&
343 		    vcc->vci == vci && vcc->vpi == vpi &&
344 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 				return vcc;
346 		}
347 	}
348 	return NULL;
349 }
350 
351 static int he_init_one(struct pci_dev *pci_dev,
352 		       const struct pci_device_id *pci_ent)
353 {
354 	struct atm_dev *atm_dev = NULL;
355 	struct he_dev *he_dev = NULL;
356 	int err = 0;
357 
358 	printk(KERN_INFO "ATM he driver\n");
359 
360 	if (pci_enable_device(pci_dev))
361 		return -EIO;
362 	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 		printk(KERN_WARNING "he: no suitable dma available\n");
364 		err = -EIO;
365 		goto init_one_failure;
366 	}
367 
368 	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 	if (!atm_dev) {
370 		err = -ENODEV;
371 		goto init_one_failure;
372 	}
373 	pci_set_drvdata(pci_dev, atm_dev);
374 
375 	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
377 	if (!he_dev) {
378 		err = -ENOMEM;
379 		goto init_one_failure;
380 	}
381 	he_dev->pci_dev = pci_dev;
382 	he_dev->atm_dev = atm_dev;
383 	atm_dev->dev_data = he_dev;
385 	he_dev->number = atm_dev->number;
386 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 	spin_lock_init(&he_dev->global_lock);
388 
389 	if (he_start(atm_dev)) {
390 		he_stop(he_dev);
391 		err = -ENODEV;
392 		goto init_one_failure;
393 	}
394 	he_dev->next = NULL;
395 	if (he_devs)
396 		he_dev->next = he_devs;
397 	he_devs = he_dev;
398 	return 0;
399 
400 init_one_failure:
401 	if (atm_dev)
402 		atm_dev_deregister(atm_dev);
403 	kfree(he_dev);
404 	pci_disable_device(pci_dev);
405 	return err;
406 }
407 
408 static void he_remove_one(struct pci_dev *pci_dev)
409 {
410 	struct atm_dev *atm_dev;
411 	struct he_dev *he_dev;
412 
413 	atm_dev = pci_get_drvdata(pci_dev);
414 	he_dev = HE_DEV(atm_dev);
415 
416 	/* need to remove from he_devs */
417 
418 	he_stop(he_dev);
419 	atm_dev_deregister(atm_dev);
420 	kfree(he_dev);
421 
422 	pci_disable_device(pci_dev);
423 }
424 
425 
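/* worked example (ours, for illustration): 353207 cps (the oc-3 pcr)
 * pre-scales to 353207 << 9 = 180841984, which normalizes after 18
 * right shifts to 689, giving exp = 18 and mantissa 689 & 0x1ff = 177
 * (the implicit leading one is dropped).  decoding it again,
 * 2^18 * (512 + 177) / 512 = 352768 cps, shows the truncation
 * inherent in the 9-bit mantissa.
 */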
426 static unsigned
427 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
428 {
429 #define NONZERO (1 << 14)
430 
431 	unsigned exp = 0;
432 
433 	if (rate == 0)
434 		return 0;
435 
436 	rate <<= 9;
437 	while (rate > 0x3ff) {
438 		++exp;
439 		rate >>= 1;
440 	}
441 
442 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
443 }
444 
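/* 5.1.4: the three routines below build the local buffer free pool
 * linked lists in rcm local buffer memory.  each descriptor is two
 * words: the buffer address and the index of the next descriptor.
 * rx pools 0 and 1 interleave their descriptors (even/odd
 * lbufd_index, hence the lbm_offset stride of 4), while the tx pool
 * descriptors that follow them are contiguous (stride of 2).
 */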
445 static void he_init_rx_lbfp0(struct he_dev *he_dev)
446 {
447 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451 
452 	lbufd_index = 0;
453 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
454 
455 	he_writel(he_dev, lbufd_index, RLBF0_H);
456 
457 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
458 		lbufd_index += 2;
459 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
460 
461 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
463 
464 		if (++lbuf_count == lbufs_per_row) {
465 			lbuf_count = 0;
466 			row_offset += he_dev->bytes_per_row;
467 		}
468 		lbm_offset += 4;
469 	}
470 
471 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
473 }
474 
475 static void he_init_rx_lbfp1(struct he_dev *he_dev)
476 {
477 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
481 
482 	lbufd_index = 1;
483 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484 
485 	he_writel(he_dev, lbufd_index, RLBF1_H);
486 
487 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488 		lbufd_index += 2;
489 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490 
491 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493 
494 		if (++lbuf_count == lbufs_per_row) {
495 			lbuf_count = 0;
496 			row_offset += he_dev->bytes_per_row;
497 		}
498 		lbm_offset += 4;
499 	}
500 
501 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
503 }
504 
505 static void he_init_tx_lbfp(struct he_dev *he_dev)
506 {
507 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511 
512 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514 
515 	he_writel(he_dev, lbufd_index, TLBF_H);
516 
517 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518 		lbufd_index += 1;
519 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520 
521 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523 
524 		if (++lbuf_count == lbufs_per_row) {
525 			lbuf_count = 0;
526 			row_offset += he_dev->bytes_per_row;
527 		}
528 		lbm_offset += 2;
529 	}
530 
531 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
532 }
533 
534 static int he_init_tpdrq(struct he_dev *he_dev)
535 {
536 	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
537 						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 						&he_dev->tpdrq_phys,
539 						GFP_KERNEL);
540 	if (he_dev->tpdrq_base == NULL) {
541 		hprintk("failed to alloc tpdrq\n");
542 		return -ENOMEM;
543 	}
544 
545 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
546 	he_dev->tpdrq_head = he_dev->tpdrq_base;
547 
548 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
549 	he_writel(he_dev, 0, TPDRQ_T);
550 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
551 
552 	return 0;
553 }
554 
555 static void he_init_cs_block(struct he_dev *he_dev)
556 {
557 	unsigned clock, rate, delta;
558 	int reg;
559 
560 	/* 5.1.7 cs block initialization */
561 
562 	for (reg = 0; reg < 0x20; ++reg)
563 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
564 
565 	/* rate grid timer reload values */
566 
567 	clock = he_is622(he_dev) ? 66667000 : 50000000;
568 	rate = he_dev->atm_dev->link_rate;
569 	delta = rate / 16 / 2;
570 
571 	for (reg = 0; reg < 0x10; ++reg) {
572 		/* 2.4 internal transmit function
573 		 *
574 	 	 * we initialize the first row in the rate grid.
575 		 * values are period (in clock cycles) of timer
576 		 */
577 		unsigned period = clock / rate;
578 
579 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
580 		rate -= delta;
581 	}
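	/* e.g. (ours): an oc-3 card has clock = 50000000 and starts at
	   rate = 353207 cps, so the first reload value written above is
	   50000000 / 353207 = 141 clock cycles */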
582 
583 	if (he_is622(he_dev)) {
584 		/* table 5.2 (4 cells per lbuf) */
585 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
586 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
587 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
588 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
589 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
590 
591 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
592 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
593 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
594 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
595 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
596 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
597 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
598 
599 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
600 
601 		/* table 5.8 */
602 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
603 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
604 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
605 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
606 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
607 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
608 
609 		/* table 5.9 */
610 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
611 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
612 	} else {
613 		/* table 5.1 (4 cells per lbuf) */
614 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
615 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
616 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
617 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
618 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
619 
620 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
621 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
622 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
623 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
624 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
625 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
626 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
627 
628 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
629 
630 		/* table 5.8 */
631 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
632 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
633 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
634 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
635 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
636 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
637 
638 		/* table 5.9 */
639 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
640 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
641 	}
642 
643 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
644 
645 	for (reg = 0; reg < 0x8; ++reg)
646 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
647 
648 }
649 
650 static int he_init_cs_block_rcm(struct he_dev *he_dev)
651 {
652 	unsigned (*rategrid)[16][16];
653 	unsigned rate, delta;
654 	int i, j, reg;
655 
656 	unsigned rate_atmf, exp, man;
657 	unsigned long long rate_cps;
658 	int mult, buf, buf_limit = 4;
659 
660 	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
661 	if (!rategrid)
662 		return -ENOMEM;
663 
664 	/* initialize rate grid group table */
665 
666 	for (reg = 0x0; reg < 0xff; ++reg)
667 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
668 
669 	/* initialize rate controller groups */
670 
671 	for (reg = 0x100; reg < 0x1ff; ++reg)
672 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
673 
674 	/* initialize tNrm lookup table */
675 
676 	/* the manual makes reference to a routine in a sample driver
677 	   for proper configuration; fortunately, we only need this
678 	   in order to support abr connection */
679 
680 	/* initialize rate to group table */
681 
682 	rate = he_dev->atm_dev->link_rate;
683 	delta = rate / 32;
684 
685 	/*
686 	 * 2.4 transmit internal functions
687 	 *
688 	 * we construct a copy of the rate grid used by the scheduler
689 	 * in order to construct the rate to group table below
690 	 */
691 
692 	for (j = 0; j < 16; j++) {
693 		(*rategrid)[0][j] = rate;
694 		rate -= delta;
695 	}
696 
697 	for (i = 1; i < 16; i++)
698 		for (j = 0; j < 16; j++)
699 			if (i > 14)
700 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
701 			else
702 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
703 
704 	/*
705 	 * 2.4 transmit internal function
706 	 *
707 	 * this table maps the upper 5 bits of exponent and mantissa
708 	 * of the atm forum representation of the rate into an index
709 	 * on rate grid
710 	 */
711 
712 	rate_atmf = 0;
713 	while (rate_atmf < 0x400) {
714 		man = (rate_atmf & 0x1f) << 4;
715 		exp = rate_atmf >> 5;
716 
717 		/*
718 			instead of '/ 512', use '>> 9' to prevent a call
719 			to divdu3 on x86 platforms
720 		*/
721 		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
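		/* e.g. (our decode): rate_atmf = 0x200 gives exp = 16 and
		   man = 0, so rate_cps = 1 << 16 = 65536 cps */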
722 
723 		if (rate_cps < 10)
724 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
725 
726 		for (i = 255; i > 0; i--)
727 			if ((*rategrid)[i/16][i%16] >= rate_cps)
728 				break;	 /* pick nearest rate instead? */
729 
730 		/*
731 		 * each table entry is 16 bits: (rate grid index (8 bits)
732 		 * and a buffer limit (8 bits)
733 		 * there are two table entries in each 32-bit register
734 		 */
735 
736 #ifdef notdef
737 		buf = rate_cps * he_dev->tx_numbuffs /
738 				(he_dev->atm_dev->link_rate * 2);
739 #else
740 		/* this isn't pretty, but it avoids _divdu3 and is mostly correct */
741 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
742 		if (rate_cps > (272ULL * mult))
743 			buf = 4;
744 		else if (rate_cps > (204ULL * mult))
745 			buf = 3;
746 		else if (rate_cps > (136ULL * mult))
747 			buf = 2;
748 		else if (rate_cps > (68ULL * mult))
749 			buf = 1;
750 		else
751 			buf = 0;
752 #endif
753 		if (buf > buf_limit)
754 			buf = buf_limit;
755 		reg = (reg << 16) | ((i << 8) | buf);
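		/* the even-indexed entry accumulates in the upper halfword;
		   the packed pair is flushed to rcm on each odd rate_atmf */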
756 
757 #define RTGTBL_OFFSET 0x400
758 
759 		if (rate_atmf & 0x1)
760 			he_writel_rcm(he_dev, reg,
761 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
762 
763 		++rate_atmf;
764 	}
765 
766 	kfree(rategrid);
767 	return 0;
768 }
769 
770 static int he_init_group(struct he_dev *he_dev, int group)
771 {
772 	struct he_buff *heb, *next;
773 	dma_addr_t mapping;
774 	int i;
775 
776 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
777 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
778 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
779 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
780 		  G0_RBPS_BS + (group * 32));
781 
782 	/* bitmap table */
783 	he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
784 					   sizeof(*he_dev->rbpl_table),
785 					   GFP_KERNEL);
786 	if (!he_dev->rbpl_table) {
787 		hprintk("unable to allocate rbpl bitmap table\n");
788 		return -ENOMEM;
789 	}
790 	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
791 
792 	/* rbpl_virt 64-bit pointers */
793 	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
794 					  sizeof(*he_dev->rbpl_virt),
795 					  GFP_KERNEL);
796 	if (!he_dev->rbpl_virt) {
797 		hprintk("unable to allocate rbpl virt table\n");
798 		goto out_free_rbpl_table;
799 	}
800 
801 	/* large buffer pool */
802 	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
803 					    CONFIG_RBPL_BUFSIZE, 64, 0);
804 	if (he_dev->rbpl_pool == NULL) {
805 		hprintk("unable to create rbpl pool\n");
806 		goto out_free_rbpl_virt;
807 	}
808 
809 	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
810 					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
811 					       &he_dev->rbpl_phys, GFP_KERNEL);
812 	if (he_dev->rbpl_base == NULL) {
813 		hprintk("failed to alloc rbpl_base\n");
814 		goto out_destroy_rbpl_pool;
815 	}
816 
817 	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
818 
819 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
820 
821 		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
822 		if (!heb)
823 			goto out_free_rbpl;
824 		heb->mapping = mapping;
825 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
826 
827 		set_bit(i, he_dev->rbpl_table);
828 		he_dev->rbpl_virt[i] = heb;
829 		he_dev->rbpl_hint = i + 1;
830 		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
831 		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
832 	}
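	/* rbpl_hint now points one past the last slot handed out; the rx
	 * buffer replenish path (not shown here) uses it as the starting
	 * point when scanning rbpl_table for a free slot.
	 */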
833 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
834 
835 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
836 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
837 						G0_RBPL_T + (group * 32));
838 	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
839 						G0_RBPL_BS + (group * 32));
840 	he_writel(he_dev,
841 			RBP_THRESH(CONFIG_RBPL_THRESH) |
842 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
843 			RBP_INT_ENB,
844 						G0_RBPL_QI + (group * 32));
845 
846 	/* rx buffer ready queue */
847 
848 	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
849 					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
850 					       &he_dev->rbrq_phys, GFP_KERNEL);
851 	if (he_dev->rbrq_base == NULL) {
852 		hprintk("failed to allocate rbrq\n");
853 		goto out_free_rbpl;
854 	}
855 
856 	he_dev->rbrq_head = he_dev->rbrq_base;
857 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
858 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
859 	he_writel(he_dev,
860 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
861 						G0_RBRQ_Q + (group * 16));
862 	if (irq_coalesce) {
863 		hprintk("coalescing interrupts\n");
864 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
865 						G0_RBRQ_I + (group * 16));
866 	} else
867 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
868 						G0_RBRQ_I + (group * 16));
869 
870 	/* tx buffer ready queue */
871 
872 	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
873 					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
874 					       &he_dev->tbrq_phys, GFP_KERNEL);
875 	if (he_dev->tbrq_base == NULL) {
876 		hprintk("failed to allocate tbrq\n");
877 		goto out_free_rbpq_base;
878 	}
879 
880 	he_dev->tbrq_head = he_dev->tbrq_base;
881 
882 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
883 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
884 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
885 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
886 
887 	return 0;
888 
889 out_free_rbpq_base:
890 	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
891 			  sizeof(struct he_rbrq), he_dev->rbrq_base,
892 			  he_dev->rbrq_phys);
893 out_free_rbpl:
894 	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
895 		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
896 
897 	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
898 			  sizeof(struct he_rbp), he_dev->rbpl_base,
899 			  he_dev->rbpl_phys);
900 out_destroy_rbpl_pool:
901 	dma_pool_destroy(he_dev->rbpl_pool);
902 out_free_rbpl_virt:
903 	kfree(he_dev->rbpl_virt);
904 out_free_rbpl_table:
905 	kfree(he_dev->rbpl_table);
906 
907 	return -ENOMEM;
908 }
909 
910 static int he_init_irq(struct he_dev *he_dev)
911 {
912 	int i;
913 
914 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
915 		    end of the interrupt queue */
916 
917 	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
918 					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
919 					      &he_dev->irq_phys, GFP_KERNEL);
920 	if (he_dev->irq_base == NULL) {
921 		hprintk("failed to allocate irq\n");
922 		return -ENOMEM;
923 	}
924 	he_dev->irq_tailoffset = (unsigned *)
925 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
926 	*he_dev->irq_tailoffset = 0;
927 	he_dev->irq_head = he_dev->irq_base;
928 	he_dev->irq_tail = he_dev->irq_base;
929 
930 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
931 		he_dev->irq_base[i].isw = ITYPE_INVALID;
932 
933 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
934 	he_writel(he_dev,
935 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
936 								IRQ0_HEAD);
937 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
938 	he_writel(he_dev, 0x0, IRQ0_DATA);
939 
940 	he_writel(he_dev, 0x0, IRQ1_BASE);
941 	he_writel(he_dev, 0x0, IRQ1_HEAD);
942 	he_writel(he_dev, 0x0, IRQ1_CNTL);
943 	he_writel(he_dev, 0x0, IRQ1_DATA);
944 
945 	he_writel(he_dev, 0x0, IRQ2_BASE);
946 	he_writel(he_dev, 0x0, IRQ2_HEAD);
947 	he_writel(he_dev, 0x0, IRQ2_CNTL);
948 	he_writel(he_dev, 0x0, IRQ2_DATA);
949 
950 	he_writel(he_dev, 0x0, IRQ3_BASE);
951 	he_writel(he_dev, 0x0, IRQ3_HEAD);
952 	he_writel(he_dev, 0x0, IRQ3_CNTL);
953 	he_writel(he_dev, 0x0, IRQ3_DATA);
954 
955 	/* 2.9.3.2 interrupt queue mapping registers */
956 
957 	he_writel(he_dev, 0x0, GRP_10_MAP);
958 	he_writel(he_dev, 0x0, GRP_32_MAP);
959 	he_writel(he_dev, 0x0, GRP_54_MAP);
960 	he_writel(he_dev, 0x0, GRP_76_MAP);
961 
962 	if (request_irq(he_dev->pci_dev->irq,
963 			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
964 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
965 		return -EINVAL;
966 	}
967 
968 	he_dev->irq = he_dev->pci_dev->irq;
969 
970 	return 0;
971 }
972 
973 static int he_start(struct atm_dev *dev)
974 {
975 	struct he_dev *he_dev;
976 	struct pci_dev *pci_dev;
977 	unsigned long membase;
978 
979 	u16 command;
980 	u32 gen_cntl_0, host_cntl, lb_swap;
981 	u8 cache_size, timer;
982 
983 	int err;
984 	unsigned int status, reg;
985 	int i, group;
986 
987 	he_dev = HE_DEV(dev);
988 	pci_dev = he_dev->pci_dev;
989 
990 	membase = pci_resource_start(pci_dev, 0);
991 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
992 
993 	/*
994 	 * pci bus controller initialization
995 	 */
996 
997 	/* 4.3 pci bus controller-specific initialization */
998 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
999 		hprintk("can't read GEN_CNTL_0\n");
1000 		return -EINVAL;
1001 	}
1002 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1003 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1004 		hprintk("can't write GEN_CNTL_0.\n");
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1009 		hprintk("can't read PCI_COMMAND.\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1014 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1015 		hprintk("can't enable memory.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1020 		hprintk("can't read cache line size?\n");
1021 		return -EINVAL;
1022 	}
1023 
1024 	if (cache_size < 16) {
1025 		cache_size = 16;
1026 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1027 			hprintk("can't set cache line size to %d\n", cache_size);
1028 	}
1029 
1030 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1031 		hprintk("can't read latency timer?\n");
1032 		return -EINVAL;
1033 	}
1034 
1035 	/* from table 3.9
1036 	 *
1037 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1038 	 *
1039 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1040 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1041 	 *
1042 	 */
1043 #define LAT_TIMER 209
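	/* i.e. 1 + 16 + 192 = 209 pci clocks, per the formula above */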
1044 	if (timer < LAT_TIMER) {
1045 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1046 		timer = LAT_TIMER;
1047 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1048 			hprintk("can't set latency timer to %d\n", timer);
1049 	}
1050 
1051 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1052 		hprintk("can't set up page mapping\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	/* 4.4 card reset */
1057 	he_writel(he_dev, 0x0, RESET_CNTL);
1058 	he_writel(he_dev, 0xff, RESET_CNTL);
1059 
1060 	msleep(16);	/* 16 ms */
1061 	status = he_readl(he_dev, RESET_CNTL);
1062 	if ((status & BOARD_RST_STATUS) == 0) {
1063 		hprintk("reset failed\n");
1064 		return -EINVAL;
1065 	}
1066 
1067 	/* 4.5 set bus width */
1068 	host_cntl = he_readl(he_dev, HOST_CNTL);
1069 	if (host_cntl & PCI_BUS_SIZE64)
1070 		gen_cntl_0 |= ENBL_64;
1071 	else
1072 		gen_cntl_0 &= ~ENBL_64;
1073 
1074 	if (disable64 == 1) {
1075 		hprintk("disabling 64-bit pci bus transfers\n");
1076 		gen_cntl_0 &= ~ENBL_64;
1077 	}
1078 
1079 	if (gen_cntl_0 & ENBL_64)
1080 		hprintk("64-bit transfers enabled\n");
1081 
1082 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1083 
1084 	/* 4.7 read prom contents */
1085 	for (i = 0; i < PROD_ID_LEN; ++i)
1086 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1087 
1088 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1089 
1090 	for (i = 0; i < 6; ++i)
1091 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1092 
1093 	hprintk("%s%s, %pM\n", he_dev->prod_id,
1094 		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1095 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1096 						ATM_OC12_PCR : ATM_OC3_PCR;
1097 
1098 	/* 4.6 set host endianness */
1099 	lb_swap = he_readl(he_dev, LB_SWAP);
1100 	if (he_is622(he_dev))
1101 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1102 	else
1103 		lb_swap |= XFER_SIZE;		/* 8 cells */
1104 #ifdef __BIG_ENDIAN
1105 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1106 #else
1107 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1108 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1109 #endif /* __BIG_ENDIAN */
1110 	he_writel(he_dev, lb_swap, LB_SWAP);
1111 
1112 	/* 4.8 sdram controller initialization */
1113 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1114 
1115 	/* 4.9 initialize rnum value */
1116 	lb_swap |= SWAP_RNUM_MAX(0xf);
1117 	he_writel(he_dev, lb_swap, LB_SWAP);
1118 
1119 	/* 4.10 initialize the interrupt queues */
1120 	if ((err = he_init_irq(he_dev)) != 0)
1121 		return err;
1122 
1123 	/* 4.11 enable pci bus controller state machines */
1124 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1125 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1126 	he_writel(he_dev, host_cntl, HOST_CNTL);
1127 
1128 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1129 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1130 
1131 	/*
1132 	 * atm network controller initialization
1133 	 */
1134 
1135 	/* 5.1.1 generic configuration state */
1136 
1137 	/*
1138 	 *		local (cell) buffer memory map
1139 	 *
1140 	 *             HE155                          HE622
1141 	 *
1142 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1143 	 *         |            |            |                   |   |
1144 	 *         |  utility   |            |        rx0        |   |
1145 	 *        5|____________|         255|___________________| u |
1146 	 *        6|            |         256|                   | t |
1147 	 *         |            |            |                   | i |
1148 	 *         |    rx0     |     row    |        tx         | l |
1149 	 *         |            |            |                   | i |
1150 	 *         |            |         767|___________________| t |
1151 	 *      517|____________|         768|                   | y |
1152 	 * row  518|            |            |        rx1        |   |
1153 	 *         |            |        1023|___________________|___|
1154 	 *         |            |
1155 	 *         |    tx      |
1156 	 *         |            |
1157 	 *         |            |
1158 	 *     1535|____________|
1159 	 *     1536|            |
1160 	 *         |    rx1     |
1161 	 *     2047|____________|
1162 	 *
1163 	 */
1164 
1165 	/* total 4096 connections */
1166 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1167 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1168 
1169 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1170 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1171 		return -ENODEV;
1172 	}
1173 
1174 	if (nvpibits != -1) {
1175 		he_dev->vpibits = nvpibits;
1176 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1177 	}
1178 
1179 	if (nvcibits != -1) {
1180 		he_dev->vcibits = nvcibits;
1181 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1182 	}
1183 
1184 
1185 	if (he_is622(he_dev)) {
1186 		he_dev->cells_per_row = 40;
1187 		he_dev->bytes_per_row = 2048;
1188 		he_dev->r0_numrows = 256;
1189 		he_dev->tx_numrows = 512;
1190 		he_dev->r1_numrows = 256;
1191 		he_dev->r0_startrow = 0;
1192 		he_dev->tx_startrow = 256;
1193 		he_dev->r1_startrow = 768;
1194 	} else {
1195 		he_dev->cells_per_row = 20;
1196 		he_dev->bytes_per_row = 1024;
1197 		he_dev->r0_numrows = 512;
1198 		he_dev->tx_numrows = 1018;
1199 		he_dev->r1_numrows = 512;
1200 		he_dev->r0_startrow = 6;
1201 		he_dev->tx_startrow = 518;
1202 		he_dev->r1_startrow = 1536;
1203 	}
1204 
1205 	he_dev->cells_per_lbuf = 4;
1206 	he_dev->buffer_limit = 4;
1207 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1208 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1209 	if (he_dev->r0_numbuffs > 2560)
1210 		he_dev->r0_numbuffs = 2560;
1211 
1212 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1213 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1214 	if (he_dev->r1_numbuffs > 2560)
1215 		he_dev->r1_numbuffs = 2560;
1216 
1217 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1218 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1219 	if (he_dev->tx_numbuffs > 5120)
1220 		he_dev->tx_numbuffs = 5120;
1221 
1222 	/* 5.1.2 configure hardware dependent registers */
1223 
1224 	he_writel(he_dev,
1225 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1226 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1227 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1228 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1229 								LBARB);
1230 
1231 	he_writel(he_dev, BANK_ON |
1232 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1233 								SDRAMCON);
1234 
1235 	he_writel(he_dev,
1236 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1237 						RM_RW_WAIT(1), RCMCONFIG);
1238 	he_writel(he_dev,
1239 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1240 						TM_RW_WAIT(1), TCMCONFIG);
1241 
1242 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1243 
1244 	he_writel(he_dev,
1245 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1246 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1247 		RX_VALVP(he_dev->vpibits) |
1248 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1249 
1250 	he_writel(he_dev, DRF_THRESH(0x20) |
1251 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1252 		TX_VCI_MASK(he_dev->vcibits) |
1253 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1254 
1255 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1256 
1257 	he_writel(he_dev, PHY_INT_ENB |
1258 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1259 								RH_CONFIG);
1260 
1261 	/* 5.1.3 initialize connection memory */
1262 
1263 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1264 		he_writel_tcm(he_dev, 0, i);
1265 
1266 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1267 		he_writel_rcm(he_dev, 0, i);
1268 
1269 	/*
1270 	 *	transmit connection memory map
1271 	 *
1272 	 *                  tx memory
1273 	 *          0x0 ___________________
1274 	 *             |                   |
1275 	 *             |                   |
1276 	 *             |       TSRa        |
1277 	 *             |                   |
1278 	 *             |                   |
1279 	 *       0x8000|___________________|
1280 	 *             |                   |
1281 	 *             |       TSRb        |
1282 	 *       0xc000|___________________|
1283 	 *             |                   |
1284 	 *             |       TSRc        |
1285 	 *       0xe000|___________________|
1286 	 *             |       TSRd        |
1287 	 *       0xf000|___________________|
1288 	 *             |       tmABR       |
1289 	 *      0x10000|___________________|
1290 	 *             |                   |
1291 	 *             |       tmTPD       |
1292 	 *             |___________________|
1293 	 *             |                   |
1294 	 *                      ....
1295 	 *      0x1ffff|___________________|
1296 	 *
1297 	 *
1298 	 */
1299 
1300 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1301 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1302 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1303 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1304 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1305 
1306 
1307 	/*
1308 	 *	receive connection memory map
1309 	 *
1310 	 *          0x0 ___________________
1311 	 *             |                   |
1312 	 *             |                   |
1313 	 *             |       RSRa        |
1314 	 *             |                   |
1315 	 *             |                   |
1316 	 *       0x8000|___________________|
1317 	 *             |                   |
1318 	 *             |             rx0/1 |
1319 	 *             |       LBM         |   link lists of local
1320 	 *             |             tx    |   buffer memory
1321 	 *             |                   |
1322 	 *       0xd000|___________________|
1323 	 *             |                   |
1324 	 *             |      rmABR        |
1325 	 *       0xe000|___________________|
1326 	 *             |                   |
1327 	 *             |       RSRb        |
1328 	 *             |___________________|
1329 	 *             |                   |
1330 	 *                      ....
1331 	 *       0xffff|___________________|
1332 	 */
1333 
1334 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1335 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1336 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1337 
1338 	/* 5.1.4 initialize local buffer free pools linked lists */
1339 
1340 	he_init_rx_lbfp0(he_dev);
1341 	he_init_rx_lbfp1(he_dev);
1342 
1343 	he_writel(he_dev, 0x0, RLBC_H);
1344 	he_writel(he_dev, 0x0, RLBC_T);
1345 	he_writel(he_dev, 0x0, RLBC_H2);
1346 
1347 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1348 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1349 
1350 	he_init_tx_lbfp(he_dev);
1351 
1352 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1353 
1354 	/* 5.1.5 initialize intermediate receive queues */
1355 
1356 	if (he_is622(he_dev)) {
1357 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1358 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1359 
1360 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1361 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1362 
1363 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1364 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1365 
1366 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1367 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1368 
1369 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1370 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1371 
1372 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1373 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1374 
1375 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1376 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1377 
1378 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1379 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1380 	} else {
1381 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1382 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1383 
1384 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1385 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1386 
1387 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1388 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1389 
1390 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1391 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1392 
1393 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1394 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1395 
1396 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1397 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1398 
1399 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1400 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1401 
1402 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1403 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1404 	}
1405 
1406 	/* 5.1.6 application tunable parameters */
1407 
1408 	he_writel(he_dev, 0x0, MCC);
1409 	he_writel(he_dev, 0x0, OEC);
1410 	he_writel(he_dev, 0x0, DCC);
1411 	he_writel(he_dev, 0x0, CEC);
1412 
1413 	/* 5.1.7 cs block initialization */
1414 
1415 	he_init_cs_block(he_dev);
1416 
1417 	/* 5.1.8 cs block connection memory initialization */
1418 
1419 	if (he_init_cs_block_rcm(he_dev) < 0)
1420 		return -ENOMEM;
1421 
1422 	/* 5.1.10 initialize host structures */
1423 
1424 	if (he_init_tpdrq(he_dev))
		return -ENOMEM;
1425 
1426 	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1427 					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1428 	if (he_dev->tpd_pool == NULL) {
1429 		hprintk("unable to create tpd dma_pool\n");
1430 		return -ENOMEM;
1431 	}
1432 
1433 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1434 
1435 	if (he_init_group(he_dev, 0) != 0)
1436 		return -ENOMEM;
1437 
1438 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1439 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1440 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1441 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1442 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1443 						G0_RBPS_BS + (group * 32));
1444 
1445 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1446 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1447 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1448 						G0_RBPL_QI + (group * 32));
1449 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1450 
1451 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1452 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1453 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1454 						G0_RBRQ_Q + (group * 16));
1455 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1456 
1457 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1458 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1459 		he_writel(he_dev, TBRQ_THRESH(0x1),
1460 						G0_TBRQ_THRESH + (group * 16));
1461 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1462 	}
1463 
1464 	/* host status page */
1465 
1466 	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
1467 					 sizeof(struct he_hsp),
1468 					 &he_dev->hsp_phys, GFP_KERNEL);
1469 	if (he_dev->hsp == NULL) {
1470 		hprintk("failed to allocate host status page\n");
1471 		return -ENOMEM;
1472 	}
1473 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1474 
1475 	/* initialize framer */
1476 
1477 #ifdef CONFIG_ATM_HE_USE_SUNI
1478 	if (he_isMM(he_dev))
1479 		suni_init(he_dev->atm_dev);
1480 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1481 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1482 #endif /* CONFIG_ATM_HE_USE_SUNI */
1483 
1484 	if (sdh) {
1485 		/* this really should be in suni.c but for now... */
1486 		int val;
1487 
1488 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1489 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1490 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1491 		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1492 	}
1493 
1494 	/* 5.1.12 enable transmit and receive */
1495 
1496 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1497 	reg |= TX_ENABLE|ER_ENABLE;
1498 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1499 
1500 	reg = he_readl(he_dev, RC_CONFIG);
1501 	reg |= RX_ENABLE;
1502 	he_writel(he_dev, reg, RC_CONFIG);
1503 
1504 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1505 		he_dev->cs_stper[i].inuse = 0;
1506 		he_dev->cs_stper[i].pcr = -1;
1507 	}
1508 	he_dev->total_bw = 0;
1509 
1510 
1511 	/* atm linux initialization */
1512 
1513 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1514 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1515 
1516 	he_dev->irq_peak = 0;
1517 	he_dev->rbrq_peak = 0;
1518 	he_dev->rbpl_peak = 0;
1519 	he_dev->tbrq_peak = 0;
1520 
1521 	HPRINTK("hell bent for leather!\n");
1522 
1523 	return 0;
1524 }
1525 
1526 static void
1527 he_stop(struct he_dev *he_dev)
1528 {
1529 	struct he_buff *heb, *next;
1530 	struct pci_dev *pci_dev;
1531 	u32 gen_cntl_0, reg;
1532 	u16 command;
1533 
1534 	pci_dev = he_dev->pci_dev;
1535 
1536 	/* disable interrupts */
1537 
1538 	if (he_dev->membase) {
1539 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1540 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1541 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1542 
1543 		tasklet_disable(&he_dev->tasklet);
1544 
1545 		/* disable recv and transmit */
1546 
1547 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1548 		reg &= ~(TX_ENABLE|ER_ENABLE);
1549 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1550 
1551 		reg = he_readl(he_dev, RC_CONFIG);
1552 		reg &= ~(RX_ENABLE);
1553 		he_writel(he_dev, reg, RC_CONFIG);
1554 	}
1555 
1556 #ifdef CONFIG_ATM_HE_USE_SUNI
1557 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1558 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1559 #endif /* CONFIG_ATM_HE_USE_SUNI */
1560 
1561 	if (he_dev->irq)
1562 		free_irq(he_dev->irq, he_dev);
1563 
1564 	if (he_dev->irq_base)
1565 		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1566 				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1567 
1568 	if (he_dev->hsp)
1569 		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1570 				  he_dev->hsp, he_dev->hsp_phys);
1571 
1572 	if (he_dev->rbpl_base) {
1573 		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1574 			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1575 
1576 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1577 				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1578 	}
1579 
1580 	kfree(he_dev->rbpl_virt);
1581 	kfree(he_dev->rbpl_table);
1582 	dma_pool_destroy(he_dev->rbpl_pool);
1583 
1584 	if (he_dev->rbrq_base)
1585 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1586 				  he_dev->rbrq_base, he_dev->rbrq_phys);
1587 
1588 	if (he_dev->tbrq_base)
1589 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1590 				  he_dev->tbrq_base, he_dev->tbrq_phys);
1591 
1592 	if (he_dev->tpdrq_base)
1593 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1594 				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
1595 
1596 	dma_pool_destroy(he_dev->tpd_pool);
1597 
1598 	if (he_dev->pci_dev) {
1599 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1600 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1601 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1602 	}
1603 
1604 	if (he_dev->membase)
1605 		iounmap(he_dev->membase);
1606 }
1607 
1608 static struct he_tpd *
1609 __alloc_tpd(struct he_dev *he_dev)
1610 {
1611 	struct he_tpd *tpd;
1612 	dma_addr_t mapping;
1613 
1614 	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1615 	if (tpd == NULL)
1616 		return NULL;
1617 
1618 	tpd->status = TPD_ADDR(mapping);
1619 	tpd->reserved = 0;
1620 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1621 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1622 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1623 
1624 	return tpd;
1625 }
1626 
1627 #define AAL5_LEN(buf,len) 						\
1628 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1629 				(((unsigned char *)(buf))[(len)-5]))
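/* the aal5 cpcs-pdu trailer occupies the last 8 bytes of the padded
 * pdu (uu, cpi, 16-bit length, 32-bit crc), so bytes len-6 and len-5
 * hold the big-endian length field that AAL5_LEN extracts.
 */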
1630 
1631 /* 2.10.1.2 receive
1632  *
1633  * aal5 packets can optionally return the tcp checksum in the lower
1634  * 16 bits of the crc (RSR0_TCP_CKSUM)
1635  */
1636 
1637 #define TCP_CKSUM(buf,len) 						\
1638 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1639 				(((unsigned char *)(buf))[(len-1)]))
1640 
1641 static int
1642 he_service_rbrq(struct he_dev *he_dev, int group)
1643 {
1644 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1645 				((unsigned long)he_dev->rbrq_base |
1646 					he_dev->hsp->group[group].rbrq_tail);
1647 	unsigned cid, lastcid = -1;
1648 	struct sk_buff *skb;
1649 	struct atm_vcc *vcc = NULL;
1650 	struct he_vcc *he_vcc;
1651 	struct he_buff *heb, *next;
1652 	int i;
1653 	int pdus_assembled = 0;
1654 	int updated = 0;
1655 
1656 	read_lock(&vcc_sklist_lock);
1657 	while (he_dev->rbrq_head != rbrq_tail) {
1658 		++updated;
1659 
1660 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1661 			he_dev->rbrq_head, group,
1662 			RBRQ_ADDR(he_dev->rbrq_head),
1663 			RBRQ_BUFLEN(he_dev->rbrq_head),
1664 			RBRQ_CID(he_dev->rbrq_head),
1665 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1666 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1667 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1668 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1669 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1670 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1671 
1672 		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1673 		heb = he_dev->rbpl_virt[i];
1674 
1675 		cid = RBRQ_CID(he_dev->rbrq_head);
1676 		if (cid != lastcid)
1677 			vcc = __find_vcc(he_dev, cid);
1678 		lastcid = cid;
1679 
1680 		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1681 			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1682 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1683 				clear_bit(i, he_dev->rbpl_table);
1684 				list_del(&heb->entry);
1685 				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1686 			}
1687 
1688 			goto next_rbrq_entry;
1689 		}
1690 
1691 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1692 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1693 			atomic_inc(&vcc->stats->rx_drop);
1694 			goto return_host_buffers;
1695 		}
1696 
1697 		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1698 		clear_bit(i, he_dev->rbpl_table);
1699 		list_move_tail(&heb->entry, &he_vcc->buffers);
1700 		he_vcc->pdu_len += heb->len;
1701 
1702 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1703 			lastcid = -1;
1704 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1705 			wake_up(&he_vcc->rx_waitq);
1706 			goto return_host_buffers;
1707 		}
1708 
1709 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1710 			goto next_rbrq_entry;
1711 
1712 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1713 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1714 			HPRINTK("%s%s (%d.%d)\n",
1715 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1716 							? "CRC_ERR " : "",
1717 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1718 							? "LEN_ERR" : "",
1719 							vcc->vpi, vcc->vci);
1720 			atomic_inc(&vcc->stats->rx_err);
1721 			goto return_host_buffers;
1722 		}
1723 
1724 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1725 							GFP_ATOMIC);
1726 		if (!skb) {
1727 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1728 			goto return_host_buffers;
1729 		}
1730 
1731 		if (rx_skb_reserve > 0)
1732 			skb_reserve(skb, rx_skb_reserve);
1733 
1734 		__net_timestamp(skb);
1735 
1736 		list_for_each_entry(heb, &he_vcc->buffers, entry)
1737 			skb_put_data(skb, &heb->data, heb->len);
1738 
1739 		switch (vcc->qos.aal) {
1740 			case ATM_AAL0:
1741 				/* 2.10.1.5 raw cell receive */
1742 				skb->len = ATM_AAL0_SDU;
1743 				skb_set_tail_pointer(skb, skb->len);
1744 				break;
1745 			case ATM_AAL5:
1746 				/* 2.10.1.2 aal5 receive */
1747 
1748 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1749 				skb_set_tail_pointer(skb, skb->len);
1750 #ifdef USE_CHECKSUM_HW
1751 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1752 					skb->ip_summed = CHECKSUM_COMPLETE;
1753 					skb->csum = TCP_CKSUM(skb->data,
1754 							he_vcc->pdu_len);
1755 				}
1756 #endif
1757 				break;
1758 		}
1759 
1760 #ifdef should_never_happen
1761 		if (skb->len > vcc->qos.rxtp.max_sdu)
1762 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1763 #endif
1764 
1765 #ifdef notdef
1766 		ATM_SKB(skb)->vcc = vcc;
1767 #endif
1768 		spin_unlock(&he_dev->global_lock);
1769 		vcc->push(vcc, skb);
1770 		spin_lock(&he_dev->global_lock);
1771 
1772 		atomic_inc(&vcc->stats->rx);
1773 
1774 return_host_buffers:
1775 		++pdus_assembled;
1776 
1777 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1778 			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1779 		INIT_LIST_HEAD(&he_vcc->buffers);
1780 		he_vcc->pdu_len = 0;
1781 
1782 next_rbrq_entry:
1783 		he_dev->rbrq_head = (struct he_rbrq *)
1784 				((unsigned long) he_dev->rbrq_base |
1785 					RBRQ_MASK(he_dev->rbrq_head + 1));
1786 
1787 	}
1788 	read_unlock(&vcc_sklist_lock);
1789 
1790 	if (updated) {
1791 		if (updated > he_dev->rbrq_peak)
1792 			he_dev->rbrq_peak = updated;
1793 
1794 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1795 						G0_RBRQ_H + (group * 16));
1796 	}
1797 
1798 	return pdus_assembled;
1799 }
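
/*
 * Note on the queue arithmetic above: the RBRQ is a power-of-two sized
 * ring allocated on a boundary of its own size, so advancing the head
 * only needs the low offset bits wrapped while the base bits are kept.
 * A minimal sketch of the same computation in plain index form (ring,
 * cur and nentries are illustrative names, not driver symbols):
 *
 *	next = (cur + 1) % nentries;
 *	head = &ring[next];	// what base | RBRQ_MASK(head + 1) achieves
 *
 * The OR/mask form avoids a divide and a multiply on every entry.
 */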
1800 
1801 static void
1802 he_service_tbrq(struct he_dev *he_dev, int group)
1803 {
1804 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1805 				((unsigned long)he_dev->tbrq_base |
1806 					he_dev->hsp->group[group].tbrq_tail);
1807 	struct he_tpd *tpd;
1808 	int slot, updated = 0;
1809 	struct he_tpd *__tpd;
1810 
1811 	/* 2.1.6 transmit buffer return queue */
1812 
1813 	while (he_dev->tbrq_head != tbrq_tail) {
1814 		++updated;
1815 
1816 		HPRINTK("tbrq%d 0x%x%s%s\n",
1817 			group,
1818 			TBRQ_TPD(he_dev->tbrq_head),
1819 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1820 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1821 		tpd = NULL;
1822 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1823 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1824 				tpd = __tpd;
1825 				list_del(&__tpd->entry);
1826 				break;
1827 			}
1828 		}
1829 
1830 		if (tpd == NULL) {
1831 			hprintk("unable to locate tpd for dma buffer %x\n",
1832 						TBRQ_TPD(he_dev->tbrq_head));
1833 			goto next_tbrq_entry;
1834 		}
1835 
1836 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1837 			if (tpd->vcc) {
1838 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1839 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1840 			}
1841 
1842 			goto next_tbrq_entry;
1843 		}
1844 
1845 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1846 			if (tpd->iovec[slot].addr)
1847 				dma_unmap_single(&he_dev->pci_dev->dev,
1848 					tpd->iovec[slot].addr,
1849 					tpd->iovec[slot].len & TPD_LEN_MASK,
1850 							DMA_TO_DEVICE);
1851 			if (tpd->iovec[slot].len & TPD_LST)
1852 				break;
1854 		}
1855 
1856 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1857 			if (tpd->vcc && tpd->vcc->pop)
1858 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1859 			else
1860 				dev_kfree_skb_any(tpd->skb);
1861 		}
1862 
1863 next_tbrq_entry:
1864 		if (tpd)
1865 			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1866 		he_dev->tbrq_head = (struct he_tbrq *)
1867 				((unsigned long) he_dev->tbrq_base |
1868 					TBRQ_MASK(he_dev->tbrq_head + 1));
1869 	}
1870 
1871 	if (updated) {
1872 		if (updated > he_dev->tbrq_peak)
1873 			he_dev->tbrq_peak = updated;
1874 
1875 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1876 						G0_TBRQ_H + (group * 16));
1877 	}
1878 }
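
/*
 * Completion matching above is a linear walk of outstanding_tpds: the
 * adapter returns only the dma address of the finished TPD, so the
 * driver recovers its host-side struct by comparing TPD_ADDR() of each
 * outstanding entry, roughly (t/outstanding/returned_addr are
 * illustrative names):
 *
 *	list_for_each_entry(t, &outstanding, entry)
 *		if (TPD_ADDR(t->status) == returned_addr)
 *			return t;	// found; caller list_del()s it
 *
 * The list normally stays short because TPDs are reclaimed on every
 * transmit-complete interrupt.
 */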
1879 
1880 static void
1881 he_service_rbpl(struct he_dev *he_dev, int group)
1882 {
1883 	struct he_rbp *new_tail;
1884 	struct he_rbp *rbpl_head;
1885 	struct he_buff *heb;
1886 	dma_addr_t mapping;
1887 	int i;
1888 	int moved = 0;
1889 
1890 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1891 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1892 
1893 	for (;;) {
1894 		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1895 						RBPL_MASK(he_dev->rbpl_tail+1));
1896 
1897 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1898 		if (new_tail == rbpl_head)
1899 			break;
1900 
1901 		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1902 		if (i >= RBPL_TABLE_SIZE) {
1903 			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1904 			if (i >= RBPL_TABLE_SIZE)
1905 				break;
1906 		}
1907 		he_dev->rbpl_hint = i + 1;
1908 
1909 		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1910 		if (!heb)
1911 			break;
1912 		heb->mapping = mapping;
1913 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1914 		he_dev->rbpl_virt[i] = heb;
1915 		set_bit(i, he_dev->rbpl_table);
1916 		new_tail->idx = i << RBP_IDX_OFFSET;
1917 		new_tail->phys = mapping + offsetof(struct he_buff, data);
1918 
1919 		he_dev->rbpl_tail = new_tail;
1920 		++moved;
1921 	}
1922 
1923 	if (moved)
1924 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1925 }
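
/*
 * Buffer replenish bookkeeping: rbpl_table is a bitmap of buffer slot
 * indices and rbpl_hint turns the scan into an approximate round-robin
 * so freshly freed low-numbered slots are not always reused first.
 * The allocation idiom, compressed and in isolation (table/size/hint
 * stand for rbpl_table/RBPL_TABLE_SIZE/rbpl_hint):
 *
 *	i = find_next_zero_bit(table, size, hint);
 *	if (i >= size)
 *		i = find_first_zero_bit(table, size);	// wrap once
 *	if (i >= size)
 *		goto full;				// no free slot
 *	set_bit(i, table);
 *	hint = i + 1;
 */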
1926 
1927 static void
1928 he_tasklet(unsigned long data)
1929 {
1930 	unsigned long flags;
1931 	struct he_dev *he_dev = (struct he_dev *) data;
1932 	int group, type;
1933 	int updated = 0;
1934 
1935 	HPRINTK("tasklet (0x%lx)\n", data);
1936 	spin_lock_irqsave(&he_dev->global_lock, flags);
1937 
1938 	while (he_dev->irq_head != he_dev->irq_tail) {
1939 		++updated;
1940 
1941 		type = ITYPE_TYPE(he_dev->irq_head->isw);
1942 		group = ITYPE_GROUP(he_dev->irq_head->isw);
1943 
1944 		switch (type) {
1945 			case ITYPE_RBRQ_THRESH:
1946 				HPRINTK("rbrq%d threshold\n", group);
1947 				fallthrough;
1948 			case ITYPE_RBRQ_TIMER:
1949 				if (he_service_rbrq(he_dev, group))
1950 					he_service_rbpl(he_dev, group);
1951 				break;
1952 			case ITYPE_TBRQ_THRESH:
1953 				HPRINTK("tbrq%d threshold\n", group);
1954 				fallthrough;
1955 			case ITYPE_TPD_COMPLETE:
1956 				he_service_tbrq(he_dev, group);
1957 				break;
1958 			case ITYPE_RBPL_THRESH:
1959 				he_service_rbpl(he_dev, group);
1960 				break;
1961 			case ITYPE_RBPS_THRESH:
1962 				/* shouldn't happen unless small buffers enabled */
1963 				break;
1964 			case ITYPE_PHY:
1965 				HPRINTK("phy interrupt\n");
1966 #ifdef CONFIG_ATM_HE_USE_SUNI
1967 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1968 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1969 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1970 				spin_lock_irqsave(&he_dev->global_lock, flags);
1971 #endif
1972 				break;
1973 			case ITYPE_OTHER:
1974 				switch (type|group) {
1975 					case ITYPE_PARITY:
1976 						hprintk("parity error\n");
1977 						break;
1978 					case ITYPE_ABORT:
1979 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1980 						break;
1981 				}
1982 				break;
1983 			case ITYPE_TYPE(ITYPE_INVALID):
1984 				/* see 8.1.1 -- check all queues */
1985 
1986 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1987 
1988 				he_service_rbrq(he_dev, 0);
1989 				he_service_rbpl(he_dev, 0);
1990 				he_service_tbrq(he_dev, 0);
1991 				break;
1992 			default:
1993 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1994 		}
1995 
1996 		he_dev->irq_head->isw = ITYPE_INVALID;
1997 
1998 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1999 	}
2000 
2001 	if (updated) {
2002 		if (updated > he_dev->irq_peak)
2003 			he_dev->irq_peak = updated;
2004 
2005 		he_writel(he_dev,
2006 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2007 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2008 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2009 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2010 	}
2011 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2012 }
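
/*
 * Interrupt dispatch summary: the hardware posts one interrupt status
 * word (isw) per event into the group 0 queue; ITYPE_TYPE()/ITYPE_GROUP()
 * split it into an event class and a group number.  ITYPE_PARITY and
 * ITYPE_ABORT encode both fields, hence the switch on (type | group)
 * under ITYPE_OTHER.  Consuming an entry means overwriting its isw with
 * ITYPE_INVALID, so a stale word is caught as "isw not updated" on a
 * later pass and all queues are serviced defensively (section 8.1.1).
 */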
2013 
2014 static irqreturn_t
2015 he_irq_handler(int irq, void *dev_id)
2016 {
2017 	unsigned long flags;
2018 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2019 	int handled = 0;
2020 
2021 	if (he_dev == NULL)
2022 		return IRQ_NONE;
2023 
2024 	spin_lock_irqsave(&he_dev->global_lock, flags);
2025 
2026 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2027 						(*he_dev->irq_tailoffset << 2));
2028 
2029 	if (he_dev->irq_tail == he_dev->irq_head) {
2030 		HPRINTK("tailoffset not updated?\n");
2031 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2032 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2033 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2034 	}
2035 
2036 #ifdef DEBUG
2037 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2038 		hprintk("spurious (or shared) interrupt?\n");
2039 #endif
2040 
2041 	if (he_dev->irq_head != he_dev->irq_tail) {
2042 		handled = 1;
2043 		tasklet_schedule(&he_dev->tasklet);
2044 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2045 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2046 	}
2047 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2048 	return IRQ_RETVAL(handled);
2050 }
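
/*
 * Top half/bottom half split: the hard irq handler only snapshots the
 * queue tail, acks the interrupt FIFO and schedules he_tasklet(); all
 * queue servicing runs in the tasklet.  The (void) he_readl(..., INT_FIFO)
 * reads force the posted PCI writes out before the handler returns,
 * per the 8.1.2 controller errata noted above.
 */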
2051 
2052 static __inline__ void
2053 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2054 {
2055 	struct he_tpdrq *new_tail;
2056 
2057 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2058 					tpd, cid, he_dev->tpdrq_tail);
2059 
2060 	/* new_tail = he_dev->tpdrq_tail; */
2061 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2062 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2063 
2064 	/*
2065 	 * check to see if we are about to set the tail == head
2066 	 * if true, update the head pointer from the adapter
2067 	 * to see if this is really the case (reading the queue
2068 	 * head for every enqueue would be unnecessarily slow)
2069 	 */
2070 
2071 	if (new_tail == he_dev->tpdrq_head) {
2072 		he_dev->tpdrq_head = (struct he_tpdrq *)
2073 			(((unsigned long)he_dev->tpdrq_base) |
2074 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2075 
2076 		if (new_tail == he_dev->tpdrq_head) {
2077 			int slot;
2078 
2079 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2080 			/*
2081 			 * FIXME
2082 			 * push tpd onto a transmit backlog queue
2083 			 * after service_tbrq, service the backlog
2084 			 * for now, we just drop the pdu
2085 			 */
2086 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2087 				if (tpd->iovec[slot].addr)
2088 					dma_unmap_single(&he_dev->pci_dev->dev,
2089 						tpd->iovec[slot].addr,
2090 						tpd->iovec[slot].len & TPD_LEN_MASK,
2091 								DMA_TO_DEVICE);
2092 			}
2093 			if (tpd->skb) {
2094 				if (tpd->vcc->pop)
2095 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2096 				else
2097 					dev_kfree_skb_any(tpd->skb);
2098 				atomic_inc(&tpd->vcc->stats->tx_err);
2099 			}
2100 			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2101 			return;
2102 		}
2103 	}
2104 
2105 	/* 2.1.5 transmit packet descriptor ready queue */
2106 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2107 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2108 	he_dev->tpdrq_tail->cid = cid;
2109 	wmb();
2110 
2111 	he_dev->tpdrq_tail = new_tail;
2112 
2113 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2114 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2115 }
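
/*
 * The full-check above is the classic cached-head producer idiom: the
 * head pointer is re-read from the adapter only when the cached copy
 * suggests the ring is full, trading a rare extra PCI read for one on
 * every enqueue.  In outline (cached_head/hw_head/advance are
 * descriptive names, not driver symbols):
 *
 *	new_tail = advance(tail);
 *	if (new_tail == cached_head) {
 *		cached_head = hw_head();	// refresh from TPDRQ_B_H
 *		if (new_tail == cached_head)
 *			drop();			// truly full (see FIXME)
 *	}
 */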
2116 
2117 static int
2118 he_open(struct atm_vcc *vcc)
2119 {
2120 	unsigned long flags;
2121 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2122 	struct he_vcc *he_vcc;
2123 	int err = 0;
2124 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2125 	short vpi = vcc->vpi;
2126 	int vci = vcc->vci;
2127 
2128 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2129 		return 0;
2130 
2131 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2132 
2133 	set_bit(ATM_VF_ADDR, &vcc->flags);
2134 
2135 	cid = he_mkcid(he_dev, vpi, vci);
2136 
2137 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2138 	if (he_vcc == NULL) {
2139 		hprintk("unable to allocate he_vcc during open\n");
2140 		return -ENOMEM;
2141 	}
2142 
2143 	INIT_LIST_HEAD(&he_vcc->buffers);
2144 	he_vcc->pdu_len = 0;
2145 	he_vcc->rc_index = -1;
2146 
2147 	init_waitqueue_head(&he_vcc->rx_waitq);
2148 	init_waitqueue_head(&he_vcc->tx_waitq);
2149 
2150 	vcc->dev_data = he_vcc;
2151 
2152 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2153 		int pcr_goal;
2154 
2155 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2156 		if (pcr_goal == 0)
2157 			pcr_goal = he_dev->atm_dev->link_rate;
2158 		if (pcr_goal < 0)	/* means round down, technically */
2159 			pcr_goal = -pcr_goal;
2160 
2161 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2162 
2163 		switch (vcc->qos.aal) {
2164 			case ATM_AAL5:
2165 				tsr0_aal = TSR0_AAL5;
2166 				tsr4 = TSR4_AAL5;
2167 				break;
2168 			case ATM_AAL0:
2169 				tsr0_aal = TSR0_AAL0_SDU;
2170 				tsr4 = TSR4_AAL0_SDU;
2171 				break;
2172 			default:
2173 				err = -EINVAL;
2174 				goto open_failed;
2175 		}
2176 
2177 		spin_lock_irqsave(&he_dev->global_lock, flags);
2178 		tsr0 = he_readl_tsr0(he_dev, cid);
2179 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2180 
2181 		if (TSR0_CONN_STATE(tsr0) != 0) {
2182 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2183 			err = -EBUSY;
2184 			goto open_failed;
2185 		}
2186 
2187 		switch (vcc->qos.txtp.traffic_class) {
2188 			case ATM_UBR:
2189 				/* 2.3.3.1 open connection ubr */
2190 
2191 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2192 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2193 				break;
2194 
2195 			case ATM_CBR:
2196 				/* 2.3.3.2 open connection cbr */
2197 
2198 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2199 				if ((he_dev->total_bw + pcr_goal)
2200 					> (he_dev->atm_dev->link_rate * 9 / 10))
2201 				{
2202 					err = -EBUSY;
2203 					goto open_failed;
2204 				}
2205 
2206 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2207 
2208 				/* find an unused cs_stper register */
2209 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2210 					if (he_dev->cs_stper[reg].inuse == 0 ||
2211 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2212 							break;
2213 
2214 				if (reg == HE_NUM_CS_STPER) {
2215 					err = -EBUSY;
2216 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217 					goto open_failed;
2218 				}
2219 
2220 				he_dev->total_bw += pcr_goal;
2221 
2222 				he_vcc->rc_index = reg;
2223 				++he_dev->cs_stper[reg].inuse;
2224 				he_dev->cs_stper[reg].pcr = pcr_goal;
2225 
2226 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2227 				period = clock / pcr_goal;
2228 
2229 				HPRINTK("rc_index = %d period = %d\n",
2230 								reg, period);
2231 
2232 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2233 							CS_STPER0 + reg);
2234 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2235 
2236 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2237 							TSR0_RC_INDEX(reg);
2238 
2239 				break;
2240 			default:
2241 				err = -EINVAL;
2242 				goto open_failed;
2243 		}
2244 
2245 		spin_lock_irqsave(&he_dev->global_lock, flags);
2246 
2247 		he_writel_tsr0(he_dev, tsr0, cid);
2248 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2249 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2250 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2251 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2252 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2253 
2254 		he_writel_tsr3(he_dev, 0x0, cid);
2255 		he_writel_tsr5(he_dev, 0x0, cid);
2256 		he_writel_tsr6(he_dev, 0x0, cid);
2257 		he_writel_tsr7(he_dev, 0x0, cid);
2258 		he_writel_tsr8(he_dev, 0x0, cid);
2259 		he_writel_tsr10(he_dev, 0x0, cid);
2260 		he_writel_tsr11(he_dev, 0x0, cid);
2261 		he_writel_tsr12(he_dev, 0x0, cid);
2262 		he_writel_tsr13(he_dev, 0x0, cid);
2263 		he_writel_tsr14(he_dev, 0x0, cid);
2264 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2265 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2266 	}
2267 
2268 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2269 		unsigned aal;
2270 
2271 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2272 					&HE_VCC(vcc)->rx_waitq);
2273 
2274 		switch (vcc->qos.aal) {
2275 			case ATM_AAL5:
2276 				aal = RSR0_AAL5;
2277 				break;
2278 			case ATM_AAL0:
2279 				aal = RSR0_RAWCELL;
2280 				break;
2281 			default:
2282 				err = -EINVAL;
2283 				goto open_failed;
2284 		}
2285 
2286 		spin_lock_irqsave(&he_dev->global_lock, flags);
2287 
2288 		rsr0 = he_readl_rsr0(he_dev, cid);
2289 		if (rsr0 & RSR0_OPEN_CONN) {
2290 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2291 
2292 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2293 			err = -EBUSY;
2294 			goto open_failed;
2295 		}
2296 
2297 		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2298 		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2299 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2300 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2301 
2302 #ifdef USE_CHECKSUM_HW
2303 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2304 			rsr0 |= RSR0_TCP_CKSUM;
2305 #endif
2306 
2307 		he_writel_rsr4(he_dev, rsr4, cid);
2308 		he_writel_rsr1(he_dev, rsr1, cid);
2309 		/* 5.1.11 last parameter initialized should be
2310 			  the open/closed indication in rsr0 */
2311 		he_writel_rsr0(he_dev,
2312 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2313 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2314 
2315 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2316 	}
2317 
2318 open_failed:
2319 
2320 	if (err) {
2321 		kfree(he_vcc);
2322 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2323 	} else
2325 		set_bit(ATM_VF_READY, &vcc->flags);
2326 
2327 	return err;
2328 }
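
/*
 * CBR arithmetic, worked through with illustrative numbers: on a
 * 155 Mb/s card the scheduler clock is taken as 50 MHz, so a connection
 * asking for the full OC-3 cell rate, pcr_goal = 353207 cells/s, gets
 *
 *	period = 50000000 / 353207 = 141 clocks between cells
 *
 * and CS_STPER0 + reg is programmed with rate_to_atmf(period / 2).
 * The cs_stper rate-controller registers are shared: connections with
 * an identical pcr reuse a register, which is why open fails with
 * -EBUSY once all HE_NUM_CS_STPER distinct CBR rates are in use.
 */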
2329 
2330 static void
2331 he_close(struct atm_vcc *vcc)
2332 {
2333 	unsigned long flags;
2334 	DECLARE_WAITQUEUE(wait, current);
2335 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2336 	struct he_tpd *tpd;
2337 	unsigned cid;
2338 	struct he_vcc *he_vcc = HE_VCC(vcc);
2339 #define MAX_RETRY 30
2340 	int retry = 0, sleep = 1, tx_inuse;
2341 
2342 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2343 
2344 	clear_bit(ATM_VF_READY, &vcc->flags);
2345 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2346 
2347 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2348 		int timeout;
2349 
2350 		HPRINTK("close rx cid 0x%x\n", cid);
2351 
2352 		/* 2.7.2.2 close receive operation */
2353 
2354 		/* wait for previous close (if any) to finish */
2355 
2356 		spin_lock_irqsave(&he_dev->global_lock, flags);
2357 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2358 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2359 			udelay(250);
2360 		}
2361 
2362 		set_current_state(TASK_UNINTERRUPTIBLE);
2363 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2364 
2365 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2366 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2367 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2368 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2369 
2370 		timeout = schedule_timeout(30*HZ);
2371 
2372 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2373 		set_current_state(TASK_RUNNING);
2374 
2375 		if (timeout == 0)
2376 			hprintk("close rx timeout cid 0x%x\n", cid);
2377 
2378 		HPRINTK("close rx cid 0x%x complete\n", cid);
2379 
2380 	}
2381 
2382 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2383 		volatile unsigned tsr4, tsr0;
2384 		int timeout;
2385 
2386 		HPRINTK("close tx cid 0x%x\n", cid);
2387 
2388 		/* 2.1.2
2389 		 *
2390 		 * ... the host must first stop queueing packets to the TPDRQ
2391 		 * on the connection to be closed, then wait for all outstanding
2392 		 * packets to be transmitted and their buffers returned to the
2393 		 * TBRQ. When the last packet on the connection arrives in the
2394 		 * TBRQ, the host issues the close command to the adapter.
2395 		 */
2396 
2397 		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2398 		       (retry < MAX_RETRY)) {
2399 			msleep(sleep);
2400 			if (sleep < 250)
2401 				sleep = sleep * 2;
2402 
2403 			++retry;
2404 		}
2405 
2406 		if (tx_inuse > 1)
2407 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2408 
2409 		/* 2.3.1.1 generic close operations with flush */
2410 
2411 		spin_lock_irqsave(&he_dev->global_lock, flags);
2412 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2413 					/* also clears TSR4_SESSION_ENDED */
2414 
2415 		switch (vcc->qos.txtp.traffic_class) {
2416 			case ATM_UBR:
2417 				he_writel_tsr1(he_dev,
2418 					TSR1_MCR(rate_to_atmf(200000))
2419 					| TSR1_PCR(0), cid);
2420 				break;
2421 			case ATM_CBR:
2422 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2423 				break;
2424 		}
2425 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2426 
2427 		tpd = __alloc_tpd(he_dev);
2428 		if (tpd == NULL) {
2429 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2430 			goto close_tx_incomplete;
2431 		}
2432 		tpd->status |= TPD_EOS | TPD_INT;
2433 		tpd->skb = NULL;
2434 		tpd->vcc = vcc;
2435 		wmb();
2436 
2437 		set_current_state(TASK_UNINTERRUPTIBLE);
2438 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2439 		__enqueue_tpd(he_dev, tpd, cid);
2440 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2441 
2442 		timeout = schedule_timeout(30*HZ);
2443 
2444 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2445 		set_current_state(TASK_RUNNING);
2446 
2447 		spin_lock_irqsave(&he_dev->global_lock, flags);
2448 
2449 		if (timeout == 0) {
2450 			hprintk("close tx timeout cid 0x%x\n", cid);
2451 			goto close_tx_incomplete;
2452 		}
2453 
2454 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2455 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2456 			udelay(250);
2457 		}
2458 
2459 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2460 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2461 			udelay(250);
2462 		}
2463 
2464 close_tx_incomplete:
2465 
2466 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2467 			int reg = he_vcc->rc_index;
2468 
2469 			HPRINTK("cs_stper reg = %d\n", reg);
2470 
2471 			if (he_dev->cs_stper[reg].inuse == 0)
2472 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2473 			else
2474 				--he_dev->cs_stper[reg].inuse;
2475 
2476 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2477 		}
2478 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2479 
2480 		HPRINTK("close tx cid 0x%x complete\n", cid);
2481 	}
2482 
2483 	kfree(he_vcc);
2484 
2485 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2486 }
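
/*
 * Close handshake recap: the rx side issues RSR0_CLOSE_CONN plus an
 * RXCON_CLOSE mailbox command and sleeps on rx_waitq until
 * he_service_rbrq() sees RBRQ_CON_CLOSED for the cid; the tx side
 * drains sk_wmem_alloc, flushes the connection, then queues a dummy
 * TPD_EOS descriptor and sleeps on tx_waitq until he_service_tbrq()
 * sees TBRQ_EOS.  Both waits are bounded at 30*HZ, with the polling
 * loops on TSR4_SESSION_ENDED/TSR0_CONN_STATE as a final backstop.
 */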
2487 
2488 static int
2489 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2490 {
2491 	unsigned long flags;
2492 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2493 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2494 	struct he_tpd *tpd;
2495 #ifdef USE_SCATTERGATHER
2496 	int i, slot = 0;
2497 #endif
2498 
2499 #define HE_TPD_BUFSIZE 0xffff
2500 
2501 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2502 
2503 	if ((skb->len > HE_TPD_BUFSIZE) ||
2504 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2505 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2506 		if (vcc->pop)
2507 			vcc->pop(vcc, skb);
2508 		else
2509 			dev_kfree_skb_any(skb);
2510 		atomic_inc(&vcc->stats->tx_err);
2511 		return -EINVAL;
2512 	}
2513 
2514 #ifndef USE_SCATTERGATHER
2515 	if (skb_shinfo(skb)->nr_frags) {
2516 		hprintk("no scatter/gather support\n");
2517 		if (vcc->pop)
2518 			vcc->pop(vcc, skb);
2519 		else
2520 			dev_kfree_skb_any(skb);
2521 		atomic_inc(&vcc->stats->tx_err);
2522 		return -EINVAL;
2523 	}
2524 #endif
2525 	spin_lock_irqsave(&he_dev->global_lock, flags);
2526 
2527 	tpd = __alloc_tpd(he_dev);
2528 	if (tpd == NULL) {
2529 		if (vcc->pop)
2530 			vcc->pop(vcc, skb);
2531 		else
2532 			dev_kfree_skb_any(skb);
2533 		atomic_inc(&vcc->stats->tx_err);
2534 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2535 		return -ENOMEM;
2536 	}
2537 
2538 	if (vcc->qos.aal == ATM_AAL5)
2539 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2540 	else {
2541 		char *pti_clp = (void *) (skb->data + 3);
2542 		int clp, pti;
2543 
2544 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2545 		clp = (*pti_clp & ATM_HDR_CLP);
2546 		tpd->status |= TPD_CELLTYPE(pti);
2547 		if (clp)
2548 			tpd->status |= TPD_CLP;
2549 
2550 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2551 	}
2552 
2553 #ifdef USE_SCATTERGATHER
2554 	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2555 				skb_headlen(skb), DMA_TO_DEVICE);
2556 	tpd->iovec[slot].len = skb_headlen(skb);
2557 	++slot;
2558 
2559 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2560 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2561 
2562 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2563 			tpd->vcc = vcc;
2564 			tpd->skb = NULL;	/* not the last fragment
2565 						   so dont ->push() yet */
2566 			wmb();
2567 
2568 			__enqueue_tpd(he_dev, tpd, cid);
2569 			tpd = __alloc_tpd(he_dev);
2570 			if (tpd == NULL) {
2571 				if (vcc->pop)
2572 					vcc->pop(vcc, skb);
2573 				else
2574 					dev_kfree_skb_any(skb);
2575 				atomic_inc(&vcc->stats->tx_err);
2576 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2577 				return -ENOMEM;
2578 			}
2579 			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2580 			slot = 0;
2581 		}
2582 
2583 		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
2584 				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
2585 		tpd->iovec[slot].len = skb_frag_size(frag);
2586 		++slot;
2588 	}
2589 
2590 	tpd->iovec[slot - 1].len |= TPD_LST;
2591 #else
2592 	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2593 	tpd->length0 = skb->len | TPD_LST;
2594 #endif
2595 	tpd->status |= TPD_INT;
2596 
2597 	tpd->vcc = vcc;
2598 	tpd->skb = skb;
2599 	wmb();
2600 	ATM_SKB(skb)->vcc = vcc;
2601 
2602 	__enqueue_tpd(he_dev, tpd, cid);
2603 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2604 
2605 	atomic_inc(&vcc->stats->tx);
2606 
2607 	return 0;
2608 }
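
/*
 * AAL0 framing note for the raw-cell path above: the 52-byte sdu
 * (ATM_AAL0_SDU, a cell minus its HEC) arrives laid out as
 *
 *	bytes 0..3	cell header (GFC/VPI/VCI/PTI/CLP)
 *	bytes 4..51	48-byte payload (ATM_CELL_PAYLOAD)
 *
 * PTI and CLP live in header byte 3 -- hence skb->data + 3 -- and are
 * copied into the TPD status, after which skb_pull() strips all four
 * header bytes so only the payload is DMA-mapped.
 */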
2609 
2610 static int
2611 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2612 {
2613 	unsigned long flags;
2614 	struct he_dev *he_dev = HE_DEV(atm_dev);
2615 	struct he_ioctl_reg reg;
2616 	int err = 0;
2617 
2618 	switch (cmd) {
2619 		case HE_GET_REG:
2620 			if (!capable(CAP_NET_ADMIN))
2621 				return -EPERM;
2622 
2623 			if (copy_from_user(&reg, arg,
2624 					   sizeof(struct he_ioctl_reg)))
2625 				return -EFAULT;
2626 
2627 			spin_lock_irqsave(&he_dev->global_lock, flags);
2628 			switch (reg.type) {
2629 				case HE_REGTYPE_PCI:
2630 					if (reg.addr >= HE_REGMAP_SIZE) {
2631 						err = -EINVAL;
2632 						break;
2633 					}
2634 
2635 					reg.val = he_readl(he_dev, reg.addr);
2636 					break;
2637 				case HE_REGTYPE_RCM:
2638 					reg.val =
2639 						he_readl_rcm(he_dev, reg.addr);
2640 					break;
2641 				case HE_REGTYPE_TCM:
2642 					reg.val =
2643 						he_readl_tcm(he_dev, reg.addr);
2644 					break;
2645 				case HE_REGTYPE_MBOX:
2646 					reg.val =
2647 						he_readl_mbox(he_dev, reg.addr);
2648 					break;
2649 				default:
2650 					err = -EINVAL;
2651 					break;
2652 			}
2653 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2654 			if (err == 0)
2655 				if (copy_to_user(arg, &reg,
2656 							sizeof(struct he_ioctl_reg)))
2657 					return -EFAULT;
2658 			break;
2659 		default:
2660 #ifdef CONFIG_ATM_HE_USE_SUNI
2661 			if (atm_dev->phy && atm_dev->phy->ioctl)
2662 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2663 #else /* CONFIG_ATM_HE_USE_SUNI */
2664 			err = -EINVAL;
2665 #endif /* CONFIG_ATM_HE_USE_SUNI */
2666 			break;
2667 	}
2668 
2669 	return err;
2670 }
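
/*
 * Userspace reaches HE_GET_REG through the generic ATM socket ioctl
 * plumbing.  A hypothetical sketch, assuming the standard atmif_sioc
 * wrapping (fd is any ATM socket, itf the interface number):
 *
 *	struct he_ioctl_reg reg = { .addr = 0, .type = HE_REGTYPE_PCI };
 *	struct atmif_sioc sioc = { .number = itf, .arg = &reg,
 *				   .length = sizeof(reg) };
 *	if (ioctl(fd, HE_GET_REG, &sioc) == 0)
 *		printf("0x%x = 0x%x\n", reg.addr, reg.val);
 *
 * The ATM core copies sioc.arg through as the 'arg' parameter above.
 */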
2671 
2672 static void
2673 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2674 {
2675 	unsigned long flags;
2676 	struct he_dev *he_dev = HE_DEV(atm_dev);
2677 
2678 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2679 
2680 	spin_lock_irqsave(&he_dev->global_lock, flags);
2681 	he_writel(he_dev, val, FRAMER + (addr*4));
2682 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2683 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2684 }
2685 
2687 static unsigned char
2688 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2689 {
2690 	unsigned long flags;
2691 	struct he_dev *he_dev = HE_DEV(atm_dev);
2692 	unsigned reg;
2693 
2694 	spin_lock_irqsave(&he_dev->global_lock, flags);
2695 	reg = he_readl(he_dev, FRAMER + (addr*4));
2696 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2697 
2698 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2699 	return reg;
2700 }
2701 
2702 static int
2703 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2704 {
2705 	unsigned long flags;
2706 	struct he_dev *he_dev = HE_DEV(dev);
2707 	int left, i;
2708 #ifdef notdef
2709 	struct he_rbrq *rbrq_tail;
2710 	struct he_tpdrq *tpdrq_head;
2711 	int rbpl_head, rbpl_tail, inuse;
2712 #endif
2713 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2714 
2716 	left = *pos;
2717 	if (!left--)
2718 		return sprintf(page, "ATM he driver\n");
2719 
2720 	if (!left--)
2721 		return sprintf(page, "%s%s\n\n",
2722 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2723 
2724 	if (!left--)
2725 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2726 
2727 	spin_lock_irqsave(&he_dev->global_lock, flags);
2728 	mcc += he_readl(he_dev, MCC);
2729 	oec += he_readl(he_dev, OEC);
2730 	dcc += he_readl(he_dev, DCC);
2731 	cec += he_readl(he_dev, CEC);
2732 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2733 
2734 	if (!left--)
2735 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2736 							mcc, oec, dcc, cec);
2737 
2738 	if (!left--)
2739 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2740 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2741 
2742 	if (!left--)
2743 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2744 						CONFIG_TPDRQ_SIZE);
2745 
2746 	if (!left--)
2747 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2748 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2749 
2750 	if (!left--)
2751 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2752 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2753 
2755 #ifdef notdef
2756 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2757 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2758 
2759 	inuse = rbpl_head - rbpl_tail;
2760 	if (inuse < 0)
2761 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2762 	inuse /= sizeof(struct he_rbp);
2763 
2764 	if (!left--)
2765 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2766 						CONFIG_RBPL_SIZE, inuse);
2767 #endif
2768 
2769 	if (!left--)
2770 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2771 
2772 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2773 		if (!left--)
2774 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2775 						he_dev->cs_stper[i].pcr,
2776 						he_dev->cs_stper[i].inuse);
2777 
2778 	if (!left--)
2779 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2780 			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2781 
2782 	return 0;
2783 }
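
/*
 * proc output protocol: this is called with an advancing *pos and each
 * "if (!left--)" test claims the next line number; a 0 return signals
 * end of output.  Note the static mcc/oec/dcc/cec accumulators are
 * shared across all adapters and grow on every read, so those columns
 * are running aggregates rather than per-device snapshots.
 */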
2784 
2785 /* eeprom routines  -- see 4.7 */
2786 
2787 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2788 {
2789 	u32 val = 0, tmp_read = 0;
2790 	int i, j = 0;
2791 	u8 byte_read = 0;
2792 
2793 	val = readl(he_dev->membase + HOST_CNTL);
2794 	val &= 0xFFFFE0FF;
2795 
2796 	/* Turn on write enable */
2797 	val |= 0x800;
2798 	he_writel(he_dev, val, HOST_CNTL);
2799 
2800 	/* Send READ instruction */
2801 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2802 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2803 		udelay(EEPROM_DELAY);
2804 	}
2805 
2806 	/* Next, we need to send the byte address to read from */
2807 	for (i = 7; i >= 0; i--) {
2808 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2809 		udelay(EEPROM_DELAY);
2810 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811 		udelay(EEPROM_DELAY);
2812 	}
2813 
2814 	j = 0;
2815 
2816 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2817 	he_writel(he_dev, val, HOST_CNTL);
2818 
2819 	/* Now, we can read data from the EEPROM by clocking it in */
2820 	for (i = 7; i >= 0; i--) {
2821 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2822 		udelay(EEPROM_DELAY);
2823 		tmp_read = he_readl(he_dev, HOST_CNTL);
2824 		byte_read |= (unsigned char)
2825 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2826 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2827 		udelay(EEPROM_DELAY);
2828 	}
2829 
2830 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2831 	udelay(EEPROM_DELAY);
2832 
2833 	return byte_read;
2834 }
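
/*
 * EEPROM access is bit-banged through HOST_CNTL (section 4.7): with the
 * write-enable bit set, the READ opcode is driven from readtab[], the
 * 8-bit address is clocked out MSB first -- two HOST_CNTL writes per
 * bit, one per clock edge, via clocktab[] -- and then, with write-enable
 * cleared, eight more clock cycles shift the data byte in on ID_DOUT.
 */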
2835 
2836 MODULE_LICENSE("GPL");
2837 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2838 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2839 module_param(disable64, bool, 0);
2840 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2841 module_param(nvpibits, short, 0);
2842 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2843 module_param(nvcibits, short, 0);
2844 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2845 module_param(rx_skb_reserve, short, 0);
2846 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2847 module_param(irq_coalesce, bool, 0);
2848 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2849 module_param(sdh, bool, 0);
2850 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2851 
2852 static const struct pci_device_id he_pci_tbl[] = {
2853 	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2854 	{ 0, }
2855 };
2856 
2857 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2858 
2859 static struct pci_driver he_driver = {
2860 	.name =		"he",
2861 	.probe =	he_init_one,
2862 	.remove =	he_remove_one,
2863 	.id_table =	he_pci_tbl,
2864 };
2865 
2866 module_pci_driver(he_driver);
2867