xref: /linux/drivers/atm/he.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 /*
2 
3   he.c
4 
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7 
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12 
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17 
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 
22 */
23 
24 /*
25 
26   he.c
27 
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30 
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36 
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40 
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 
44   AUTHORS:
45 	chas williams <chas@cmf.nrl.navy.mil>
46 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 
48   NOTES:
49 	4096 supported 'connections'
50 	group 0 is used for all traffic
51 	interrupt queue 0 is used for all interrupts
52 	aal0 support (based on work from ulrich.u.muller@nokia.com)
53 
54  */
55 
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <linux/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW			/* still confused about this */
82 /* #undef HE_DEBUG */
83 
84 #include "he.h"
85 #include "suni.h"
86 #include <linux/atm_he.h>
87 
88 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89 
90 #ifdef HE_DEBUG
91 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #else /* !HE_DEBUG */
93 #define HPRINTK(fmt,args...)	do { } while (0)
94 #endif /* HE_DEBUG */
95 
96 /* declarations */
97 
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 
112 /* globals */
113 
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
120 static bool sdh;
121 
/* Read from EEPROM = 0000 0011b */
/* Bit-bang sequence for the serial EEPROM read opcode: entries are driven
 * to the device one at a time, and each CLK_LOW/CLK_HIGH pair clocks one
 * opcode bit out on SI (the trailing comments mark the bit value). */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
142 
/* Clock to read from/write to the EEPROM */
/* Plain clock train (no data bits): alternating low/high edges used to
 * shift data in/out of the serial EEPROM after an opcode has been sent. */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
163 
/* Operations handed to the ATM core via atm_dev_register() in
 * he_init_one(); the core dispatches VCC open/close, send, ioctl,
 * PHY register access and /proc reads through this table. */
static const struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
175 
176 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
178 
179 /* section 2.12 connection memory access */
180 
181 static __inline__ void
he_writel_internal(struct he_dev * he_dev,unsigned val,unsigned addr,unsigned flags)182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 								unsigned flags)
184 {
185 	he_writel(he_dev, val, CON_DAT);
186 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
187 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189 }
190 
191 #define he_writel_rcm(dev, val, reg) 				\
192 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 
194 #define he_writel_tcm(dev, val, reg) 				\
195 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 
197 #define he_writel_mbox(dev, val, reg) 				\
198 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 
200 static unsigned
he_readl_internal(struct he_dev * he_dev,unsigned addr,unsigned flags)201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 {
203 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 	return he_readl(he_dev, CON_DAT);
206 }
207 
208 #define he_readl_rcm(dev, reg) \
209 			he_readl_internal(dev, reg, CON_CTL_RCM)
210 
211 #define he_readl_tcm(dev, reg) \
212 			he_readl_internal(dev, reg, CON_CTL_TCM)
213 
214 #define he_readl_mbox(dev, reg) \
215 			he_readl_internal(dev, reg, CON_CTL_MBOX)
216 
217 
218 /* figure 2.2 connection id */
219 
220 #define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
221 
222 /* 2.5.1 per connection transmit state registers */
223 
224 #define he_writel_tsr0(dev, val, cid) \
225 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 
229 #define he_writel_tsr1(dev, val, cid) \
230 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 
232 #define he_writel_tsr2(dev, val, cid) \
233 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 
235 #define he_writel_tsr3(dev, val, cid) \
236 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 
238 #define he_writel_tsr4(dev, val, cid) \
239 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 
241 	/* from page 2-20
242 	 *
243 	 * NOTE While the transmit connection is active, bits 23 through 0
244 	 *      of this register must not be written by the host.  Byte
245 	 *      enables should be used during normal operation when writing
246 	 *      the most significant byte.
247 	 */
248 
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 							CON_CTL_TCM \
252 							| CON_BYTE_DISABLE_2 \
253 							| CON_BYTE_DISABLE_1 \
254 							| CON_BYTE_DISABLE_0)
255 
256 #define he_readl_tsr4(dev, cid) \
257 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 
259 #define he_writel_tsr5(dev, val, cid) \
260 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 
262 #define he_writel_tsr6(dev, val, cid) \
263 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 
265 #define he_writel_tsr7(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 
268 
269 #define he_writel_tsr8(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 
272 #define he_writel_tsr9(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 
275 #define he_writel_tsr10(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 
278 #define he_writel_tsr11(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 
281 
282 #define he_writel_tsr12(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 
285 #define he_writel_tsr13(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 
288 
289 #define he_writel_tsr14(dev, val, cid) \
290 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 							CON_CTL_TCM \
295 							| CON_BYTE_DISABLE_2 \
296 							| CON_BYTE_DISABLE_1 \
297 							| CON_BYTE_DISABLE_0)
298 
299 /* 2.7.1 per connection receive state registers */
300 
301 #define he_writel_rsr0(dev, val, cid) \
302 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 
306 #define he_writel_rsr1(dev, val, cid) \
307 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 
309 #define he_writel_rsr2(dev, val, cid) \
310 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 
312 #define he_writel_rsr3(dev, val, cid) \
313 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 
315 #define he_writel_rsr4(dev, val, cid) \
316 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 
318 #define he_writel_rsr5(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 
321 #define he_writel_rsr6(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 
324 #define he_writel_rsr7(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 
327 static __inline__ struct atm_vcc*
__find_vcc(struct he_dev * he_dev,unsigned cid)328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 {
330 	struct hlist_head *head;
331 	struct atm_vcc *vcc;
332 	struct sock *s;
333 	short vpi;
334 	int vci;
335 
336 	vpi = cid >> he_dev->vcibits;
337 	vci = cid & ((1 << he_dev->vcibits) - 1);
338 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 
340 	sk_for_each(s, head) {
341 		vcc = atm_sk(s);
342 		if (vcc->dev == he_dev->atm_dev &&
343 		    vcc->vci == vci && vcc->vpi == vpi &&
344 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 				return vcc;
346 		}
347 	}
348 	return NULL;
349 }
350 
he_init_one(struct pci_dev * pci_dev,const struct pci_device_id * pci_ent)351 static int he_init_one(struct pci_dev *pci_dev,
352 		       const struct pci_device_id *pci_ent)
353 {
354 	struct atm_dev *atm_dev = NULL;
355 	struct he_dev *he_dev = NULL;
356 	int err = 0;
357 
358 	printk(KERN_INFO "ATM he driver\n");
359 
360 	if (pci_enable_device(pci_dev))
361 		return -EIO;
362 	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 		printk(KERN_WARNING "he: no suitable dma available\n");
364 		err = -EIO;
365 		goto init_one_failure;
366 	}
367 
368 	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 	if (!atm_dev) {
370 		err = -ENODEV;
371 		goto init_one_failure;
372 	}
373 	pci_set_drvdata(pci_dev, atm_dev);
374 
375 	he_dev = kzalloc_obj(struct he_dev);
376 	if (!he_dev) {
377 		err = -ENOMEM;
378 		goto init_one_failure;
379 	}
380 	he_dev->pci_dev = pci_dev;
381 	he_dev->atm_dev = atm_dev;
382 	he_dev->atm_dev->dev_data = he_dev;
383 	atm_dev->dev_data = he_dev;
384 	he_dev->number = atm_dev->number;
385 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
386 	spin_lock_init(&he_dev->global_lock);
387 
388 	if (he_start(atm_dev)) {
389 		he_stop(he_dev);
390 		err = -ENODEV;
391 		goto init_one_failure;
392 	}
393 	he_dev->next = NULL;
394 	if (he_devs)
395 		he_dev->next = he_devs;
396 	he_devs = he_dev;
397 	return 0;
398 
399 init_one_failure:
400 	if (atm_dev)
401 		atm_dev_deregister(atm_dev);
402 	kfree(he_dev);
403 	pci_disable_device(pci_dev);
404 	return err;
405 }
406 
he_remove_one(struct pci_dev * pci_dev)407 static void he_remove_one(struct pci_dev *pci_dev)
408 {
409 	struct atm_dev *atm_dev;
410 	struct he_dev *he_dev;
411 
412 	atm_dev = pci_get_drvdata(pci_dev);
413 	he_dev = HE_DEV(atm_dev);
414 
415 	/* need to remove from he_devs */
416 
417 	he_stop(he_dev);
418 	atm_dev_deregister(atm_dev);
419 	kfree(he_dev);
420 
421 	pci_disable_device(pci_dev);
422 }
423 
424 
/*
 * Convert a cell rate (cells per second) to the ATM Forum floating-point
 * encoding: bit 14 is the non-zero flag, bits 13..9 the exponent, and
 * bits 8..0 the mantissa.  A rate of 0 encodes as 0.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp;

	if (!rate)
		return 0;

	/* normalize: shift the rate up, then count how many halvings it
	 * takes to fit the 10-bit mantissa-plus-hidden-bit field */
	rate <<= 9;
	for (exp = 0; rate > 0x3ff; ++exp)
		rate >>= 1;

	return NONZERO | (exp << 9) | (rate & 0x1ff);
}
443 
he_init_rx_lbfp0(struct he_dev * he_dev)444 static void he_init_rx_lbfp0(struct he_dev *he_dev)
445 {
446 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
447 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
448 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
449 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
450 
451 	lbufd_index = 0;
452 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
453 
454 	he_writel(he_dev, lbufd_index, RLBF0_H);
455 
456 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
457 		lbufd_index += 2;
458 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
459 
460 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
461 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
462 
463 		if (++lbuf_count == lbufs_per_row) {
464 			lbuf_count = 0;
465 			row_offset += he_dev->bytes_per_row;
466 		}
467 		lbm_offset += 4;
468 	}
469 
470 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
471 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
472 }
473 
he_init_rx_lbfp1(struct he_dev * he_dev)474 static void he_init_rx_lbfp1(struct he_dev *he_dev)
475 {
476 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
477 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
478 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
479 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
480 
481 	lbufd_index = 1;
482 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
483 
484 	he_writel(he_dev, lbufd_index, RLBF1_H);
485 
486 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
487 		lbufd_index += 2;
488 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
489 
490 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
491 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
492 
493 		if (++lbuf_count == lbufs_per_row) {
494 			lbuf_count = 0;
495 			row_offset += he_dev->bytes_per_row;
496 		}
497 		lbm_offset += 4;
498 	}
499 
500 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
501 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
502 }
503 
he_init_tx_lbfp(struct he_dev * he_dev)504 static void he_init_tx_lbfp(struct he_dev *he_dev)
505 {
506 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
507 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
508 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
509 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
510 
511 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
512 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
513 
514 	he_writel(he_dev, lbufd_index, TLBF_H);
515 
516 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
517 		lbufd_index += 1;
518 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
519 
520 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
521 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
522 
523 		if (++lbuf_count == lbufs_per_row) {
524 			lbuf_count = 0;
525 			row_offset += he_dev->bytes_per_row;
526 		}
527 		lbm_offset += 2;
528 	}
529 
530 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
531 }
532 
he_init_tpdrq(struct he_dev * he_dev)533 static int he_init_tpdrq(struct he_dev *he_dev)
534 {
535 	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
536 						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
537 						&he_dev->tpdrq_phys,
538 						GFP_KERNEL);
539 	if (he_dev->tpdrq_base == NULL) {
540 		hprintk("failed to alloc tpdrq\n");
541 		return -ENOMEM;
542 	}
543 
544 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
545 	he_dev->tpdrq_head = he_dev->tpdrq_base;
546 
547 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548 	he_writel(he_dev, 0, TPDRQ_T);
549 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
550 
551 	return 0;
552 }
553 
/*
 * 5.1.7 congestion/cell-scheduler (CS) block initialization.
 *
 * Clears the CS timers, programs the first row of the rate grid with
 * timer reload periods derived from the link rate, then loads the
 * constant tables from the ForeRunnerHE manual — one set for 622 Mbit
 * cards, one for 155 Mbit.  The magic values below are straight from
 * tables 5.1-5.9 of the manual and are not meant to be derived.
 */
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	/* 622 cards run the CS clock at ~66.667 MHz, 155 cards at 50 MHz */
	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
	 	 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	/* clear the host rate-group registers */
	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
648 
/*
 * Initialize the CS-block tables that live in RCM memory: clear the rate
 * grid group table and rate controller groups, then build the rate-to-group
 * lookup table that maps the upper bits of an ATM Forum rate encoding to a
 * rate grid index plus a buffer limit.  Returns 0 or -ENOMEM.
 */
static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	/* host-side shadow of the 16x16 hardware rate grid */
	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	/* NOTE(review): the '< 0xff' / '< 0x1ff' bounds skip the last entry
	 * of each 0x100-entry region; presumably intentional or harmless,
	 * but it looks like an off-by-one — verify against the manual. */
	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	/* row 0 steps down linearly from the link rate */
	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	/* each subsequent row halves the previous one (quarters for the
	 * last row) */
	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		/* decode the 10-bit ATM Forum value: 5-bit exponent,
		 * 5-bit mantissa (restored hidden bit via +512 below) */
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		/* scan the grid (flattened, highest rate first) for the
		 * slowest entry that still meets rate_cps */
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272ULL * mult))
			buf = 4;
		else if (rate_cps > (204ULL * mult))
			buf = 3;
		else if (rate_cps > (136ULL * mult))
			buf = 2;
		else if (rate_cps > (68ULL * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		/* pack two consecutive 16-bit entries into one word: the
		 * even rate_atmf entry ends up in the upper halfword, the
		 * odd one in the lower, and the word is flushed below on
		 * odd iterations */
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
768 
/*
 * Initialize one receive/transmit buffer group: disable the small buffer
 * pool, allocate the large-buffer (rbpl) tracking structures, DMA pool and
 * descriptor ring, populate the ring with buffers, then allocate and
 * program the receive (rbrq) and transmit (tbrq) buffer-ready queues.
 *
 * Uses the classic goto-unwind pattern; on any failure everything
 * allocated so far is released and -ENOMEM is returned.
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* small buffer pool is unused — zero its registers and give it a
	 * minimal threshold/size */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	/* tracks which rbpl_virt slots currently hold an outstanding buffer */
	he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}

	/* rbpl_virt 64-bit pointers */
	/* index -> struct he_buff * mapping for buffers handed to the card */
	he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* descriptor ring the hardware consumes free buffers from */
	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* fill the ring: one DMA-pool buffer per descriptor slot */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		/* hardware DMAs into the data area, past the he_buff header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	/* module parameter: batch rx interrupts (time or count triggered)
	 * vs. one interrupt per received buffer */
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	/* return every buffer already handed to the ring to the pool */
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	bitmap_free(he_dev->rbpl_table);

	return -ENOMEM;
}
903 
/*
 * Allocate and program interrupt queue 0 (queues 1-3 are unused and
 * zeroed), map all groups to queue 0, and register the shared interrupt
 * handler.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on request_irq() failure irq_base is not freed here —
 * presumably he_stop() on the caller's error path releases it; verify.
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	/* +1 entry: the extra slot holds the tail offset word */
	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					      &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale entries are never processed */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused: zero them out */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	/* route all groups to interrupt queue 0 */
	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
966 
/*
 * he_start - bring the adapter up, following the init sequence in the
 * "Programmer's Reference Manual for ForeRunnerHE" (section numbers in
 * the comments below refer to that manual).
 *
 * Performs PCI bus-controller setup, card reset, PROM read, local-buffer
 * and connection-memory initialization, host-side queue allocation, framer
 * start, and finally enables transmit/receive.
 *
 * Returns 0 on success or a negative errno.  NOTE(review): on failure this
 * returns without unwinding earlier allocations -- presumably the caller
 * invokes he_stop() for cleanup; confirm against the probe path.
 */
static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	/* NOTE(review): err holds a (negative) errno from he_init_irq();
	 * declared unsigned but converts back to the same negative int on
	 * return */
	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	/* enable memory-space access, bus mastering and MWI */
	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	/* a too-small cache line size is only warned about, not fatal */
	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	/* module parameter can force 32-bit transfers */
	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	/* MAC address (ESI) lives in the serial PROM */
	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %pM\n", he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	/* module parameters may override the vpi/vci split, but the sum
	 * must stay HE_MAXCIDBITS */
	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	/* local buffer geometry per the memory map diagram above */
	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	/* buffer counts are derived from the row geometry, then clamped
	 * to the hardware maxima (2560 rx, 5120 tx) */
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	/* only group 0 carries traffic; the remaining groups' queues are
	 * zeroed below so the hardware never uses them */
	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
					 sizeof(struct he_hsp),
					 &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	/* mark all rate-control timers free; pcr == -1 means unprogrammed */
	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
1519 
/*
 * he_stop - quiesce the adapter and release every resource he_start()
 * acquired.
 *
 * Safe to call on a partially-initialized device: each teardown step is
 * guarded by a NULL/zero check on the corresponding resource, so this
 * doubles as the error-path cleanup for he_start().
 */
static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		/* return any receive buffers still owned by the hardware
		 * back to the pool before tearing the pool down */
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	/* kfree/bitmap_free/dma_pool_destroy all tolerate NULL */
	kfree(he_dev->rbpl_virt);
	bitmap_free(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev,
				  CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	/* drop memory-space access and bus mastering */
	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1602 
1603 static struct he_tpd *
__alloc_tpd(struct he_dev * he_dev)1604 __alloc_tpd(struct he_dev *he_dev)
1605 {
1606 	struct he_tpd *tpd;
1607 	dma_addr_t mapping;
1608 
1609 	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1610 	if (tpd == NULL)
1611 		return NULL;
1612 
1613 	tpd->status = TPD_ADDR(mapping);
1614 	tpd->reserved = 0;
1615 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1616 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1617 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1618 
1619 	return tpd;
1620 }
1621 
/* AAL5 trailer: the 16-bit PDU length lives 6 and 5 bytes from the end
 * of the reassembled buffer.  NOTE: buf and len are each expanded twice;
 * do not pass expressions with side effects. */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

/* last two bytes of the buffer, big-endian.  Fixed: the final index was
 * written [(len-1)], leaving the macro argument unparenthesized before
 * the subtraction -- wrong for low-precedence argument expressions
 * (e.g. TCP_CKSUM(b, x|1)) and inconsistent with [(len)-2] above. */
#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
1635 
/*
 * he_service_rbrq - drain the receive buffer return queue for @group.
 *
 * Walks the RBRQ from the driver's head to the hardware-reported tail
 * (from the host status page), reassembling per-VCC buffer chains into
 * sk_buffs and pushing completed PDUs up the ATM stack.  Returns the
 * number of PDUs assembled so the caller can decide whether to refill
 * the free-buffer pool.
 *
 * Called from the tasklet with he_dev->global_lock held; additionally
 * takes vcc_sklist_lock (read) for the VCC lookup.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* map the hardware buffer address back to its he_buff via
		 * the index encoded in the address */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		/* cache the VCC lookup across consecutive entries with the
		 * same connection id */
		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			/* no owner: free the buffer unless the hardware
			 * reported a host-buffer error (no buffer attached) */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* buffer length is reported in 32-bit words */
		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		/* keep accumulating buffers until the PDU is complete */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* copy the buffer chain into the contiguous skb */
		list_for_each_entry(heb, &he_vcc->buffers, entry)
			skb_put_data(skb, &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* drop the device lock around the upcall -- vcc->push may
		 * sleep or re-enter the driver */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance with wraparound; the queue is a power-of-two ring */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the hardware how far we've consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1795 
1796 static void
he_service_tbrq(struct he_dev * he_dev,int group)1797 he_service_tbrq(struct he_dev *he_dev, int group)
1798 {
1799 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1800 				((unsigned long)he_dev->tbrq_base |
1801 					he_dev->hsp->group[group].tbrq_tail);
1802 	struct he_tpd *tpd;
1803 	int slot, updated = 0;
1804 	struct he_tpd *__tpd;
1805 
1806 	/* 2.1.6 transmit buffer return queue */
1807 
1808 	while (he_dev->tbrq_head != tbrq_tail) {
1809 		++updated;
1810 
1811 		HPRINTK("tbrq%d 0x%x%s%s\n",
1812 			group,
1813 			TBRQ_TPD(he_dev->tbrq_head),
1814 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1815 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1816 		tpd = NULL;
1817 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1818 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1819 				tpd = __tpd;
1820 				list_del(&__tpd->entry);
1821 				break;
1822 			}
1823 		}
1824 
1825 		if (tpd == NULL) {
1826 			hprintk("unable to locate tpd for dma buffer %x\n",
1827 						TBRQ_TPD(he_dev->tbrq_head));
1828 			goto next_tbrq_entry;
1829 		}
1830 
1831 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1832 			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1833 				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1834 			if (tpd->vcc)
1835 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1836 
1837 			goto next_tbrq_entry;
1838 		}
1839 
1840 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1841 			if (tpd->iovec[slot].addr)
1842 				dma_unmap_single(&he_dev->pci_dev->dev,
1843 					tpd->iovec[slot].addr,
1844 					tpd->iovec[slot].len & TPD_LEN_MASK,
1845 							DMA_TO_DEVICE);
1846 			if (tpd->iovec[slot].len & TPD_LST)
1847 				break;
1848 
1849 		}
1850 
1851 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1852 			if (tpd->vcc && tpd->vcc->pop)
1853 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1854 			else
1855 				dev_kfree_skb_any(tpd->skb);
1856 		}
1857 
1858 next_tbrq_entry:
1859 		if (tpd)
1860 			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1861 		he_dev->tbrq_head = (struct he_tbrq *)
1862 				((unsigned long) he_dev->tbrq_base |
1863 					TBRQ_MASK(he_dev->tbrq_head + 1));
1864 	}
1865 
1866 	if (updated) {
1867 		if (updated > he_dev->tbrq_peak)
1868 			he_dev->tbrq_peak = updated;
1869 
1870 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1871 						G0_TBRQ_H + (group * 16));
1872 	}
1873 }
1874 
/*
 * he_service_rbpl - refill the large receive-buffer pool for @group.
 *
 * Allocates he_buff entries from the rbpl dma_pool and appends them to
 * the hardware free-buffer ring until the ring is full, the index table
 * has no free slot, or the pool runs dry.  Each buffer's table index is
 * encoded into the ring entry so he_service_rbrq() can map the returned
 * DMA address back to its he_buff.
 *
 * Called from the tasklet with he_dev->global_lock held.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		/* find a free index slot, resuming from the last hint and
		 * wrapping once if the tail of the table is exhausted */
		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		/* hand the hardware the DMA address of the data area, with
		 * our table index encoded in the entry */
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1921 
/*
 * he_tasklet - bottom half: dispatch queued interrupt events.
 *
 * Walks the interrupt event ring from irq_head to irq_tail (advanced by
 * he_irq_handler), servicing the receive/transmit queues indicated by
 * each event's type and group.  Consumed entries are marked
 * ITYPE_INVALID so a stale event can be detected (see the
 * ITYPE_TYPE(ITYPE_INVALID) case).  Runs with global_lock held,
 * released only around the PHY upcall.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_RBRQ_TIMER:
				/* refill the buffer pool only if PDUs were
				 * actually consumed */
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				fallthrough;
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* drop the lock for the PHY upcall, which may
				 * call back into the driver */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark consumed so a re-read of this slot is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* acknowledge consumption up to the current tail */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2008 
/*
 * Hard interrupt handler.  Latches the adapter's interrupt queue tail
 * into he_dev->irq_tail, acknowledges the interrupt, and defers the
 * actual queue processing to he_tasklet.  Returns IRQ_HANDLED only
 * when the queue actually advanced (the line may be shared).
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* the adapter mirrors the queue tail offset into host memory
	   (*irq_tailoffset); fold it into a pointer within irq_base */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* host-memory mirror did not move; fall back to reading
		   the tail directly from the adapter register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2046 
/*
 * Queue one transmit packet descriptor on the TPDRQ (2.1.5).
 *
 * If the ring appears full, the head pointer is re-read from the
 * adapter once; if it is still full the pdu is dropped: its DMA
 * mappings are unmapped, the skb is popped (or freed), tx_err is
 * bumped and the tpd is returned to the pool.
 *
 * Callers in this file (he_send, he_close) hold he_dev->global_lock
 * around this function.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					dma_unmap_single(&he_dev->pci_dev->dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								DMA_TO_DEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();	/* descriptor must be visible before the tail write below */

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2111 
/*
 * atmdev_ops ->open: allocate per-vcc state and program the adapter's
 * transmit (TSR) and/or receive (RSR) connection state for this
 * vpi/vci.  Returns 0 on success or a negative errno; on failure the
 * per-vcc state is freed and ATM_VF_ADDR cleared.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	/* connection id: index into the adapter's per-connection state */
	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc_obj(struct he_vcc, GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* no CBR rate-controller slot yet */

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	/* ---- transmit side ---- */
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* refuse to reuse a connection that is not fully closed */
		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register (or one
				   already programmed with this exact pcr) */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				/* period in core-clock ticks per cell */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		/* program the full transmit state record set for this cid */
		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	/* ---- receive side ---- */
	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
		 				&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
2324 
/*
 * atmdev_ops ->close: shut down the receive and/or transmit side of a
 * connection and free the per-vcc state.  May sleep: it waits (up to
 * 30s each) on rx_waitq/tx_waitq for the adapter to acknowledge the
 * close, and backs off with msleep() while transmit buffers drain.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* enqueue ourselves on rx_waitq BEFORE issuing the close,
		   so the completion cannot be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* exponential backoff (1ms..250ms) while tx buffers drain */
		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* queue an end-of-session tpd so the adapter signals us
		   (TPD_INT) when the connection has fully drained */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-wait for the adapter to report the session ended
		   and the connection state machine back to idle */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller slot and its bandwidth */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2482 
/*
 * atmdev_ops ->send: DMA-map the skb (head plus page fragments when
 * scatter/gather is compiled in), build one or more transmit packet
 * descriptors and queue them on the TPDRQ.  Returns 0 on success or a
 * negative errno; in every failure path the skb is consumed
 * (popped/freed) and tx_err incremented.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* reject oversized pdus; AAL0 must be exactly one cell (SDU) */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* raw cell: extract pti/clp from byte 3 of the cell header
		   and strip the header before handing the payload off */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
				skb_headlen(skb), DMA_TO_DEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
		tpd->iovec[slot].len = skb_frag_size(frag);
		++slot;

	}

	/* mark the final iovec so the adapter knows the pdu ends here */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;	/* interrupt on completion */

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2604 
2605 static int
he_ioctl(struct atm_dev * atm_dev,unsigned int cmd,void __user * arg)2606 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2607 {
2608 	unsigned long flags;
2609 	struct he_dev *he_dev = HE_DEV(atm_dev);
2610 	struct he_ioctl_reg reg;
2611 	int err = 0;
2612 
2613 	switch (cmd) {
2614 		case HE_GET_REG:
2615 			if (!capable(CAP_NET_ADMIN))
2616 				return -EPERM;
2617 
2618 			if (copy_from_user(&reg, arg,
2619 					   sizeof(struct he_ioctl_reg)))
2620 				return -EFAULT;
2621 
2622 			spin_lock_irqsave(&he_dev->global_lock, flags);
2623 			switch (reg.type) {
2624 				case HE_REGTYPE_PCI:
2625 					if (reg.addr >= HE_REGMAP_SIZE) {
2626 						err = -EINVAL;
2627 						break;
2628 					}
2629 
2630 					reg.val = he_readl(he_dev, reg.addr);
2631 					break;
2632 				case HE_REGTYPE_RCM:
2633 					reg.val =
2634 						he_readl_rcm(he_dev, reg.addr);
2635 					break;
2636 				case HE_REGTYPE_TCM:
2637 					reg.val =
2638 						he_readl_tcm(he_dev, reg.addr);
2639 					break;
2640 				case HE_REGTYPE_MBOX:
2641 					reg.val =
2642 						he_readl_mbox(he_dev, reg.addr);
2643 					break;
2644 				default:
2645 					err = -EINVAL;
2646 					break;
2647 			}
2648 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2649 			if (err == 0)
2650 				if (copy_to_user(arg, &reg,
2651 							sizeof(struct he_ioctl_reg)))
2652 					return -EFAULT;
2653 			break;
2654 		default:
2655 #ifdef CONFIG_ATM_HE_USE_SUNI
2656 			if (atm_dev->phy && atm_dev->phy->ioctl)
2657 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2658 #else /* CONFIG_ATM_HE_USE_SUNI */
2659 			err = -EINVAL;
2660 #endif /* CONFIG_ATM_HE_USE_SUNI */
2661 			break;
2662 	}
2663 
2664 	return err;
2665 }
2666 
2667 static void
he_phy_put(struct atm_dev * atm_dev,unsigned char val,unsigned long addr)2668 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2669 {
2670 	unsigned long flags;
2671 	struct he_dev *he_dev = HE_DEV(atm_dev);
2672 
2673 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2674 
2675 	spin_lock_irqsave(&he_dev->global_lock, flags);
2676 	he_writel(he_dev, val, FRAMER + (addr*4));
2677 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2678 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2679 }
2680 
2681 
2682 static unsigned char
he_phy_get(struct atm_dev * atm_dev,unsigned long addr)2683 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2684 {
2685 	unsigned long flags;
2686 	struct he_dev *he_dev = HE_DEV(atm_dev);
2687 	unsigned reg;
2688 
2689 	spin_lock_irqsave(&he_dev->global_lock, flags);
2690 	reg = he_readl(he_dev, FRAMER + (addr*4));
2691 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2692 
2693 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2694 	return reg;
2695 }
2696 
/*
 * /proc read callback: emits one line of driver/adapter statistics per
 * call, selected by *pos; returns the number of bytes written into
 * page, or 0 when there are no more lines.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* NOTE(review): these accumulators are function-static, so they
	   are shared by all he adapters and keep growing across reads
	   of /proc -- confirm this is intended */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	/* accumulate the (clear-on-read) error counters from the adapter */
	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	/* one output line per CBR rate-controller slot */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
2779 
2780 /* eeprom routines  -- see 4.7 */
2781 
/*
 * Read one byte from the serial EEPROM by bit-banging the clock and
 * data lines through HOST_CNTL (see 4.7): send the READ opcode from
 * readtab, clock out the 8-bit address MSB first, then clock in the
 * 8 data bits.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		/* two writes per bit: one per clock edge from clocktab */
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		/* sample the data-out pin and merge it in, MSB first */
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* deselect the chip */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2830 
/* module metadata and tunable parameters (all read-only at runtime) */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

/* PCI IDs this driver binds to: the FORE ForeRunnerHE adapter */
static const struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

/* registers he_driver on load, unregisters on unload */
module_pci_driver(he_driver);
2862