Lines matching refs: he_dev

88 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
91 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
106 static void he_stop(struct he_dev *dev);
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
114 static struct he_dev *he_devs;
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr, unsigned flags) in he_writel_internal() argument
185 he_writel(he_dev, val, CON_DAT); in he_writel_internal()
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */ in he_writel_internal()
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL); in he_writel_internal()
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); in he_writel_internal()
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags) in he_readl_internal() argument
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL); in he_readl_internal()
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); in he_readl_internal()
205 return he_readl(he_dev, CON_DAT); in he_readl_internal()
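The two helpers above capture the card's indirect register protocol: the data word goes through CON_DAT, the command word (target address plus a read or write flag) goes through CON_CTL, and the caller spins until CON_CTL_BUSY clears. A minimal standalone sketch of that pattern, with illustrative offsets, flag bits and mmio accessors standing in for the driver's real he_readl/he_writel definitions:

#include <stdint.h>

#define CON_DAT         0x804                  /* assumed data-window offset */
#define CON_CTL         0x808                  /* assumed control offset */
#define CON_CTL_BUSY    (1u << 31)             /* assumed flag layout */
#define CON_CTL_WRITE   (1u << 30)
#define CON_CTL_READ    (1u << 29)
#define CON_CTL_ADDR(a) ((a) & 0xffff)

extern uint32_t mmio_read32(void *base, unsigned off);            /* assumed */
extern void mmio_write32(void *base, uint32_t val, unsigned off); /* assumed */

static void indirect_write(void *base, uint32_t val, unsigned addr, uint32_t flags)
{
	mmio_write32(base, val, CON_DAT);
	(void) mmio_read32(base, CON_DAT);     /* flush the posted write */
	mmio_write32(base, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (mmio_read32(base, CON_CTL) & CON_CTL_BUSY)
		;                              /* spin until the card latches it */
}

static uint32_t indirect_read(void *base, unsigned addr, uint32_t flags)
{
	mmio_write32(base, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (mmio_read32(base, CON_CTL) & CON_CTL_BUSY)
		;
	return mmio_read32(base, CON_DAT);
}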
328 __find_vcc(struct he_dev *he_dev, unsigned cid) in __find_vcc() argument
336 vpi = cid >> he_dev->vcibits; in __find_vcc()
337 vci = cid & ((1 << he_dev->vcibits) - 1); in __find_vcc()
342 if (vcc->dev == he_dev->atm_dev && in __find_vcc()
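The cid handling in __find_vcc() above, together with the he_mkcid() calls further down the listing, implies a simple packing: the low vcibits of the cid carry the VCI and the bits above them carry the VPI. Standalone helpers sketching that mapping (names are illustrative):

static unsigned mkcid(unsigned vpi, unsigned vci, unsigned vcibits)
{
	return (vpi << vcibits) | vci;
}

static void split_cid(unsigned cid, unsigned vcibits,
		      unsigned *vpi, unsigned *vci)
{
	*vpi = cid >> vcibits;
	*vci = cid & ((1u << vcibits) - 1);
}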
355 struct he_dev *he_dev = NULL; in he_init_one() local
375 he_dev = kzalloc(sizeof(struct he_dev), in he_init_one()
377 if (!he_dev) { in he_init_one()
381 he_dev->pci_dev = pci_dev; in he_init_one()
382 he_dev->atm_dev = atm_dev; in he_init_one()
383 he_dev->atm_dev->dev_data = he_dev; in he_init_one()
384 atm_dev->dev_data = he_dev; in he_init_one()
385 he_dev->number = atm_dev->number; in he_init_one()
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); in he_init_one()
387 spin_lock_init(&he_dev->global_lock); in he_init_one()
390 he_stop(he_dev); in he_init_one()
394 he_dev->next = NULL; in he_init_one()
396 he_dev->next = he_devs; in he_init_one()
397 he_devs = he_dev; in he_init_one()
403 kfree(he_dev); in he_init_one()
411 struct he_dev *he_dev; in he_remove_one() local
414 he_dev = HE_DEV(atm_dev); in he_remove_one()
418 he_stop(he_dev); in he_remove_one()
420 kfree(he_dev); in he_remove_one()
445 static void he_init_rx_lbfp0(struct he_dev *he_dev) in he_init_rx_lbfp0() argument
448 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_rx_lbfp0()
449 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_rx_lbfp0()
450 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row; in he_init_rx_lbfp0()
453 lbm_offset = he_readl(he_dev, RCMLBM_BA); in he_init_rx_lbfp0()
455 he_writel(he_dev, lbufd_index, RLBF0_H); in he_init_rx_lbfp0()
457 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) { in he_init_rx_lbfp0()
461 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_rx_lbfp0()
462 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_rx_lbfp0()
466 row_offset += he_dev->bytes_per_row; in he_init_rx_lbfp0()
471 he_writel(he_dev, lbufd_index - 2, RLBF0_T); in he_init_rx_lbfp0()
472 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C); in he_init_rx_lbfp0()
475 static void he_init_rx_lbfp1(struct he_dev *he_dev) in he_init_rx_lbfp1() argument
478 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_rx_lbfp1()
479 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_rx_lbfp1()
480 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row; in he_init_rx_lbfp1()
483 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); in he_init_rx_lbfp1()
485 he_writel(he_dev, lbufd_index, RLBF1_H); in he_init_rx_lbfp1()
487 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) { in he_init_rx_lbfp1()
491 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_rx_lbfp1()
492 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_rx_lbfp1()
496 row_offset += he_dev->bytes_per_row; in he_init_rx_lbfp1()
501 he_writel(he_dev, lbufd_index - 2, RLBF1_T); in he_init_rx_lbfp1()
502 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C); in he_init_rx_lbfp1()
505 static void he_init_tx_lbfp(struct he_dev *he_dev) in he_init_tx_lbfp() argument
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_tx_lbfp()
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_tx_lbfp()
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row; in he_init_tx_lbfp()
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs; in he_init_tx_lbfp()
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); in he_init_tx_lbfp()
515 he_writel(he_dev, lbufd_index, TLBF_H); in he_init_tx_lbfp()
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) { in he_init_tx_lbfp()
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_tx_lbfp()
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_tx_lbfp()
526 row_offset += he_dev->bytes_per_row; in he_init_tx_lbfp()
531 he_writel(he_dev, lbufd_index - 1, TLBF_T); in he_init_tx_lbfp()
534 static int he_init_tpdrq(struct he_dev *he_dev) in he_init_tpdrq() argument
536 he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_init_tpdrq()
538 &he_dev->tpdrq_phys, in he_init_tpdrq()
540 if (he_dev->tpdrq_base == NULL) { in he_init_tpdrq()
545 he_dev->tpdrq_tail = he_dev->tpdrq_base; in he_init_tpdrq()
546 he_dev->tpdrq_head = he_dev->tpdrq_base; in he_init_tpdrq()
548 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H); in he_init_tpdrq()
549 he_writel(he_dev, 0, TPDRQ_T); in he_init_tpdrq()
550 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S); in he_init_tpdrq()
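he_init_tpdrq() shows the coherent-ring setup pattern that the RBRQ, TBRQ and IRQ queues later repeat: allocate a DMA-coherent array, start with head == tail (empty), and program the card with the bus address, the tail index and the size minus one. A hedged sketch of that step with generic names; the register offsets and descriptor layout below are assumptions, only the TPDRQ_B_H/TPDRQ_T/TPDRQ_S roles come from the listing:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/types.h>

struct ring_entry { u32 word0, word1; };       /* stand-in descriptor */

static int init_dma_ring(struct device *dev, void __iomem *regs,
			 struct ring_entry **ring, dma_addr_t *phys,
			 unsigned int entries)
{
	*ring = dma_alloc_coherent(dev, entries * sizeof(**ring), phys, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;

	/* an empty ring starts with head == tail; the driver keeps both
	 * pointers in he_dev and mirrors only indices into the card */
	writel((u32)*phys, regs + 0x00);       /* ring base   (cf. TPDRQ_B_H) */
	writel(0, regs + 0x04);                /* tail index  (cf. TPDRQ_T)   */
	writel(entries - 1, regs + 0x08);      /* size - 1    (cf. TPDRQ_S)   */
	return 0;
}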
555 static void he_init_cs_block(struct he_dev *he_dev) in he_init_cs_block() argument
563 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg); in he_init_cs_block()
567 clock = he_is622(he_dev) ? 66667000 : 50000000; in he_init_cs_block()
568 rate = he_dev->atm_dev->link_rate; in he_init_cs_block()
579 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg); in he_init_cs_block()
583 if (he_is622(he_dev)) { in he_init_cs_block()
585 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0); in he_init_cs_block()
586 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1); in he_init_cs_block()
587 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2); in he_init_cs_block()
588 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3); in he_init_cs_block()
589 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4); in he_init_cs_block()
592 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0); in he_init_cs_block()
593 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1); in he_init_cs_block()
594 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2); in he_init_cs_block()
595 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); in he_init_cs_block()
596 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1); in he_init_cs_block()
597 he_writel_mbox(he_dev, 0x14585, CS_RTFWR); in he_init_cs_block()
599 he_writel_mbox(he_dev, 0x4680, CS_RTATR); in he_init_cs_block()
602 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET); in he_init_cs_block()
603 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX); in he_init_cs_block()
604 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN); in he_init_cs_block()
605 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC); in he_init_cs_block()
606 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC); in he_init_cs_block()
607 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL); in he_init_cs_block()
610 he_writel_mbox(he_dev, 0x5, CS_OTPPER); in he_init_cs_block()
611 he_writel_mbox(he_dev, 0x14, CS_OTWPER); in he_init_cs_block()
614 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0); in he_init_cs_block()
615 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1); in he_init_cs_block()
616 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2); in he_init_cs_block()
617 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3); in he_init_cs_block()
618 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4); in he_init_cs_block()
621 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0); in he_init_cs_block()
622 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1); in he_init_cs_block()
623 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2); in he_init_cs_block()
624 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); in he_init_cs_block()
625 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1); in he_init_cs_block()
626 he_writel_mbox(he_dev, 0xf424, CS_RTFWR); in he_init_cs_block()
628 he_writel_mbox(he_dev, 0x4680, CS_RTATR); in he_init_cs_block()
631 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET); in he_init_cs_block()
632 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX); in he_init_cs_block()
633 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN); in he_init_cs_block()
634 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC); in he_init_cs_block()
635 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC); in he_init_cs_block()
636 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL); in he_init_cs_block()
639 he_writel_mbox(he_dev, 0x6, CS_OTPPER); in he_init_cs_block()
640 he_writel_mbox(he_dev, 0x1e, CS_OTWPER); in he_init_cs_block()
643 he_writel_mbox(he_dev, 0x8, CS_OTTLIM); in he_init_cs_block()
646 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg); in he_init_cs_block()
650 static int he_init_cs_block_rcm(struct he_dev *he_dev) in he_init_cs_block_rcm() argument
667 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); in he_init_cs_block_rcm()
672 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); in he_init_cs_block_rcm()
682 rate = he_dev->atm_dev->link_rate; in he_init_cs_block_rcm()
737 buf = rate_cps * he_dev->tx_numbuffs / in he_init_cs_block_rcm()
738 (he_dev->atm_dev->link_rate * 2); in he_init_cs_block_rcm()
741 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; in he_init_cs_block_rcm()
760 he_writel_rcm(he_dev, reg, in he_init_cs_block_rcm()
770 static int he_init_group(struct he_dev *he_dev, int group) in he_init_group() argument
776 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); in he_init_group()
777 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); in he_init_group()
778 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); in he_init_group()
779 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_init_group()
783 he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL); in he_init_group()
784 if (!he_dev->rbpl_table) { in he_init_group()
790 he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE, in he_init_group()
791 sizeof(*he_dev->rbpl_virt), in he_init_group()
793 if (!he_dev->rbpl_virt) { in he_init_group()
799 he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev, in he_init_group()
801 if (he_dev->rbpl_pool == NULL) { in he_init_group()
806 he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_init_group()
808 &he_dev->rbpl_phys, GFP_KERNEL); in he_init_group()
809 if (he_dev->rbpl_base == NULL) { in he_init_group()
814 INIT_LIST_HEAD(&he_dev->rbpl_outstanding); in he_init_group()
818 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping); in he_init_group()
822 list_add(&heb->entry, &he_dev->rbpl_outstanding); in he_init_group()
824 set_bit(i, he_dev->rbpl_table); in he_init_group()
825 he_dev->rbpl_virt[i] = heb; in he_init_group()
826 he_dev->rbpl_hint = i + 1; in he_init_group()
827 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET; in he_init_group()
828 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data); in he_init_group()
830 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; in he_init_group()
832 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); in he_init_group()
833 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), in he_init_group()
835 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4, in he_init_group()
837 he_writel(he_dev, in he_init_group()
845 he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_init_group()
847 &he_dev->rbrq_phys, GFP_KERNEL); in he_init_group()
848 if (he_dev->rbrq_base == NULL) { in he_init_group()
853 he_dev->rbrq_head = he_dev->rbrq_base; in he_init_group()
854 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16)); in he_init_group()
855 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16)); in he_init_group()
856 he_writel(he_dev, in he_init_group()
861 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7), in he_init_group()
864 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1), in he_init_group()
869 he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_init_group()
871 &he_dev->tbrq_phys, GFP_KERNEL); in he_init_group()
872 if (he_dev->tbrq_base == NULL) { in he_init_group()
877 he_dev->tbrq_head = he_dev->tbrq_base; in he_init_group()
879 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16)); in he_init_group()
880 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16)); in he_init_group()
881 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16)); in he_init_group()
882 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16)); in he_init_group()
887 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * in he_init_group()
888 sizeof(struct he_rbrq), he_dev->rbrq_base, in he_init_group()
889 he_dev->rbrq_phys); in he_init_group()
891 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) in he_init_group()
892 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping); in he_init_group()
894 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE * in he_init_group()
895 sizeof(struct he_rbp), he_dev->rbpl_base, in he_init_group()
896 he_dev->rbpl_phys); in he_init_group()
898 dma_pool_destroy(he_dev->rbpl_pool); in he_init_group()
900 kfree(he_dev->rbpl_virt); in he_init_group()
902 bitmap_free(he_dev->rbpl_table); in he_init_group()
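he_init_group() and he_service_rbpl() (further down) share one bookkeeping idea: a bitmap records which receive-buffer slots are outstanding, and a parallel pointer table maps the index the hardware later hands back (RBRQ_ADDR >> RBP_IDX_OFFSET) to the host buffer, with rbpl_hint remembering where to resume the search. A sketch of that slot allocator under assumed sizes and a simplified container:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define TABLE_SIZE 4096                        /* assumed, like RBPL_TABLE_SIZE */

struct buf_table {
	unsigned long *map;                    /* bitmap of in-flight slots */
	void         **virt;                   /* slot index -> host buffer */
	unsigned int   hint;                   /* next slot to try */
};

/* Claim a free slot for buf and return the index the card's descriptor
 * will carry back; -ENOBUFS if every slot is already outstanding. */
static int buf_table_add(struct buf_table *t, void *buf)
{
	unsigned int i = find_next_zero_bit(t->map, TABLE_SIZE, t->hint);

	if (i >= TABLE_SIZE)
		i = find_first_zero_bit(t->map, TABLE_SIZE);
	if (i >= TABLE_SIZE)
		return -ENOBUFS;

	set_bit(i, t->map);
	t->virt[i] = buf;
	t->hint = i + 1;
	return i;
}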
907 static int he_init_irq(struct he_dev *he_dev) in he_init_irq() argument
914 he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_init_irq()
916 &he_dev->irq_phys, GFP_KERNEL); in he_init_irq()
917 if (he_dev->irq_base == NULL) { in he_init_irq()
921 he_dev->irq_tailoffset = (unsigned *) in he_init_irq()
922 &he_dev->irq_base[CONFIG_IRQ_SIZE]; in he_init_irq()
923 *he_dev->irq_tailoffset = 0; in he_init_irq()
924 he_dev->irq_head = he_dev->irq_base; in he_init_irq()
925 he_dev->irq_tail = he_dev->irq_base; in he_init_irq()
928 he_dev->irq_base[i].isw = ITYPE_INVALID; in he_init_irq()
930 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE); in he_init_irq()
931 he_writel(he_dev, in he_init_irq()
934 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL); in he_init_irq()
935 he_writel(he_dev, 0x0, IRQ0_DATA); in he_init_irq()
937 he_writel(he_dev, 0x0, IRQ1_BASE); in he_init_irq()
938 he_writel(he_dev, 0x0, IRQ1_HEAD); in he_init_irq()
939 he_writel(he_dev, 0x0, IRQ1_CNTL); in he_init_irq()
940 he_writel(he_dev, 0x0, IRQ1_DATA); in he_init_irq()
942 he_writel(he_dev, 0x0, IRQ2_BASE); in he_init_irq()
943 he_writel(he_dev, 0x0, IRQ2_HEAD); in he_init_irq()
944 he_writel(he_dev, 0x0, IRQ2_CNTL); in he_init_irq()
945 he_writel(he_dev, 0x0, IRQ2_DATA); in he_init_irq()
947 he_writel(he_dev, 0x0, IRQ3_BASE); in he_init_irq()
948 he_writel(he_dev, 0x0, IRQ3_HEAD); in he_init_irq()
949 he_writel(he_dev, 0x0, IRQ3_CNTL); in he_init_irq()
950 he_writel(he_dev, 0x0, IRQ3_DATA); in he_init_irq()
954 he_writel(he_dev, 0x0, GRP_10_MAP); in he_init_irq()
955 he_writel(he_dev, 0x0, GRP_32_MAP); in he_init_irq()
956 he_writel(he_dev, 0x0, GRP_54_MAP); in he_init_irq()
957 he_writel(he_dev, 0x0, GRP_76_MAP); in he_init_irq()
959 if (request_irq(he_dev->pci_dev->irq, in he_init_irq()
960 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) { in he_init_irq()
961 hprintk("irq %d already in use\n", he_dev->pci_dev->irq); in he_init_irq()
965 he_dev->irq = he_dev->pci_dev->irq; in he_init_irq()
972 struct he_dev *he_dev; in he_start() local
984 he_dev = HE_DEV(dev); in he_start()
985 pci_dev = he_dev->pci_dev; in he_start()
1048 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) { in he_start()
1054 he_writel(he_dev, 0x0, RESET_CNTL); in he_start()
1055 he_writel(he_dev, 0xff, RESET_CNTL); in he_start()
1058 status = he_readl(he_dev, RESET_CNTL); in he_start()
1065 host_cntl = he_readl(he_dev, HOST_CNTL); in he_start()
1083 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i); in he_start()
1085 he_dev->media = read_prom_byte(he_dev, MEDIA); in he_start()
1088 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i); in he_start()
1090 hprintk("%s%s, %pM\n", he_dev->prod_id, in he_start()
1091 he_dev->media & 0x40 ? "SM" : "MM", dev->esi); in he_start()
1092 he_dev->atm_dev->link_rate = he_is622(he_dev) ? in he_start()
1096 lb_swap = he_readl(he_dev, LB_SWAP); in he_start()
1097 if (he_is622(he_dev)) in he_start()
1107 he_writel(he_dev, lb_swap, LB_SWAP); in he_start()
1110 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL); in he_start()
1114 he_writel(he_dev, lb_swap, LB_SWAP); in he_start()
1117 if ((err = he_init_irq(he_dev)) != 0) in he_start()
1123 he_writel(he_dev, host_cntl, HOST_CNTL); in he_start()
1163 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS; in he_start()
1164 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS; in he_start()
1172 he_dev->vpibits = nvpibits; in he_start()
1173 he_dev->vcibits = HE_MAXCIDBITS - nvpibits; in he_start()
1177 he_dev->vcibits = nvcibits; in he_start()
1178 he_dev->vpibits = HE_MAXCIDBITS - nvcibits; in he_start()
1182 if (he_is622(he_dev)) { in he_start()
1183 he_dev->cells_per_row = 40; in he_start()
1184 he_dev->bytes_per_row = 2048; in he_start()
1185 he_dev->r0_numrows = 256; in he_start()
1186 he_dev->tx_numrows = 512; in he_start()
1187 he_dev->r1_numrows = 256; in he_start()
1188 he_dev->r0_startrow = 0; in he_start()
1189 he_dev->tx_startrow = 256; in he_start()
1190 he_dev->r1_startrow = 768; in he_start()
1192 he_dev->cells_per_row = 20; in he_start()
1193 he_dev->bytes_per_row = 1024; in he_start()
1194 he_dev->r0_numrows = 512; in he_start()
1195 he_dev->tx_numrows = 1018; in he_start()
1196 he_dev->r1_numrows = 512; in he_start()
1197 he_dev->r0_startrow = 6; in he_start()
1198 he_dev->tx_startrow = 518; in he_start()
1199 he_dev->r1_startrow = 1536; in he_start()
1202 he_dev->cells_per_lbuf = 4; in he_start()
1203 he_dev->buffer_limit = 4; in he_start()
1204 he_dev->r0_numbuffs = he_dev->r0_numrows * in he_start()
1205 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1206 if (he_dev->r0_numbuffs > 2560) in he_start()
1207 he_dev->r0_numbuffs = 2560; in he_start()
1209 he_dev->r1_numbuffs = he_dev->r1_numrows * in he_start()
1210 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1211 if (he_dev->r1_numbuffs > 2560) in he_start()
1212 he_dev->r1_numbuffs = 2560; in he_start()
1214 he_dev->tx_numbuffs = he_dev->tx_numrows * in he_start()
1215 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1216 if (he_dev->tx_numbuffs > 5120) in he_start()
1217 he_dev->tx_numbuffs = 5120; in he_start()
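Plugging the geometry above into the numbuffs formulas shows where the 2560 and 5120 caps bite; the figures below follow directly from the listed constants:

/*
 * r0_numbuffs = r0_numrows * cells_per_row / cells_per_lbuf
 *   OC-12: 256 * 40 / 4  = 2560  (exactly at the 2560 cap)
 *   OC-3:  512 * 20 / 4  = 2560  (exactly at the 2560 cap)
 * tx_numbuffs = tx_numrows * cells_per_row / cells_per_lbuf
 *   OC-12: 512 * 40 / 4  = 5120  (exactly at the 5120 cap)
 *   OC-3:  1018 * 20 / 4 = 5090  (under the cap)
 */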
1221 he_writel(he_dev, in he_start()
1224 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) | in he_start()
1225 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)), in he_start()
1228 he_writel(he_dev, BANK_ON | in he_start()
1229 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)), in he_start()
1232 he_writel(he_dev, in he_start()
1233 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) | in he_start()
1235 he_writel(he_dev, in he_start()
1236 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) | in he_start()
1239 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG); in he_start()
1241 he_writel(he_dev, in he_start()
1242 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) | in he_start()
1243 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) | in he_start()
1244 RX_VALVP(he_dev->vpibits) | in he_start()
1245 RX_VALVC(he_dev->vcibits), RC_CONFIG); in he_start()
1247 he_writel(he_dev, DRF_THRESH(0x20) | in he_start()
1248 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) | in he_start()
1249 TX_VCI_MASK(he_dev->vcibits) | in he_start()
1250 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG); in he_start()
1252 he_writel(he_dev, 0x0, TXAAL5_PROTO); in he_start()
1254 he_writel(he_dev, PHY_INT_ENB | in he_start()
1255 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)), in he_start()
1261 he_writel_tcm(he_dev, 0, i); in he_start()
1264 he_writel_rcm(he_dev, 0, i); in he_start()
1297 he_writel(he_dev, CONFIG_TSRB, TSRB_BA); in he_start()
1298 he_writel(he_dev, CONFIG_TSRC, TSRC_BA); in he_start()
1299 he_writel(he_dev, CONFIG_TSRD, TSRD_BA); in he_start()
1300 he_writel(he_dev, CONFIG_TMABR, TMABR_BA); in he_start()
1301 he_writel(he_dev, CONFIG_TPDBA, TPD_BA); in he_start()
1331 he_writel(he_dev, 0x08000, RCMLBM_BA); in he_start()
1332 he_writel(he_dev, 0x0e000, RCMRSRB_BA); in he_start()
1333 he_writel(he_dev, 0x0d800, RCMABR_BA); in he_start()
1337 he_init_rx_lbfp0(he_dev); in he_start()
1338 he_init_rx_lbfp1(he_dev); in he_start()
1340 he_writel(he_dev, 0x0, RLBC_H); in he_start()
1341 he_writel(he_dev, 0x0, RLBC_T); in he_start()
1342 he_writel(he_dev, 0x0, RLBC_H2); in he_start()
1344 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */ in he_start()
1345 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */ in he_start()
1347 he_init_tx_lbfp(he_dev); in he_start()
1349 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA); in he_start()
1353 if (he_is622(he_dev)) { in he_start()
1354 he_writel(he_dev, 0x000f, G0_INMQ_S); in he_start()
1355 he_writel(he_dev, 0x200f, G0_INMQ_L); in he_start()
1357 he_writel(he_dev, 0x001f, G1_INMQ_S); in he_start()
1358 he_writel(he_dev, 0x201f, G1_INMQ_L); in he_start()
1360 he_writel(he_dev, 0x002f, G2_INMQ_S); in he_start()
1361 he_writel(he_dev, 0x202f, G2_INMQ_L); in he_start()
1363 he_writel(he_dev, 0x003f, G3_INMQ_S); in he_start()
1364 he_writel(he_dev, 0x203f, G3_INMQ_L); in he_start()
1366 he_writel(he_dev, 0x004f, G4_INMQ_S); in he_start()
1367 he_writel(he_dev, 0x204f, G4_INMQ_L); in he_start()
1369 he_writel(he_dev, 0x005f, G5_INMQ_S); in he_start()
1370 he_writel(he_dev, 0x205f, G5_INMQ_L); in he_start()
1372 he_writel(he_dev, 0x006f, G6_INMQ_S); in he_start()
1373 he_writel(he_dev, 0x206f, G6_INMQ_L); in he_start()
1375 he_writel(he_dev, 0x007f, G7_INMQ_S); in he_start()
1376 he_writel(he_dev, 0x207f, G7_INMQ_L); in he_start()
1378 he_writel(he_dev, 0x0000, G0_INMQ_S); in he_start()
1379 he_writel(he_dev, 0x0008, G0_INMQ_L); in he_start()
1381 he_writel(he_dev, 0x0001, G1_INMQ_S); in he_start()
1382 he_writel(he_dev, 0x0009, G1_INMQ_L); in he_start()
1384 he_writel(he_dev, 0x0002, G2_INMQ_S); in he_start()
1385 he_writel(he_dev, 0x000a, G2_INMQ_L); in he_start()
1387 he_writel(he_dev, 0x0003, G3_INMQ_S); in he_start()
1388 he_writel(he_dev, 0x000b, G3_INMQ_L); in he_start()
1390 he_writel(he_dev, 0x0004, G4_INMQ_S); in he_start()
1391 he_writel(he_dev, 0x000c, G4_INMQ_L); in he_start()
1393 he_writel(he_dev, 0x0005, G5_INMQ_S); in he_start()
1394 he_writel(he_dev, 0x000d, G5_INMQ_L); in he_start()
1396 he_writel(he_dev, 0x0006, G6_INMQ_S); in he_start()
1397 he_writel(he_dev, 0x000e, G6_INMQ_L); in he_start()
1399 he_writel(he_dev, 0x0007, G7_INMQ_S); in he_start()
1400 he_writel(he_dev, 0x000f, G7_INMQ_L); in he_start()
1405 he_writel(he_dev, 0x0, MCC); in he_start()
1406 he_writel(he_dev, 0x0, OEC); in he_start()
1407 he_writel(he_dev, 0x0, DCC); in he_start()
1408 he_writel(he_dev, 0x0, CEC); in he_start()
1412 he_init_cs_block(he_dev); in he_start()
1416 if (he_init_cs_block_rcm(he_dev) < 0) in he_start()
1421 he_init_tpdrq(he_dev); in he_start()
1423 he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev, in he_start()
1425 if (he_dev->tpd_pool == NULL) { in he_start()
1430 INIT_LIST_HEAD(&he_dev->outstanding_tpds); in he_start()
1432 if (he_init_group(he_dev, 0) != 0) in he_start()
1436 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); in he_start()
1437 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); in he_start()
1438 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); in he_start()
1439 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_start()
1442 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32)); in he_start()
1443 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32)); in he_start()
1444 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_start()
1446 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32)); in he_start()
1448 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16)); in he_start()
1449 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16)); in he_start()
1450 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0), in he_start()
1452 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16)); in he_start()
1454 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16)); in he_start()
1455 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16)); in he_start()
1456 he_writel(he_dev, TBRQ_THRESH(0x1), in he_start()
1458 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16)); in he_start()
1463 he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev, in he_start()
1465 &he_dev->hsp_phys, GFP_KERNEL); in he_start()
1466 if (he_dev->hsp == NULL) { in he_start()
1470 he_writel(he_dev, he_dev->hsp_phys, HSP_BA); in he_start()
1475 if (he_isMM(he_dev)) in he_start()
1476 suni_init(he_dev->atm_dev); in he_start()
1477 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start) in he_start()
1478 he_dev->atm_dev->phy->start(he_dev->atm_dev); in he_start()
1485 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM); in he_start()
1487 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM); in he_start()
1488 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP); in he_start()
1493 reg = he_readl_mbox(he_dev, CS_ERCTL0); in he_start()
1495 he_writel_mbox(he_dev, reg, CS_ERCTL0); in he_start()
1497 reg = he_readl(he_dev, RC_CONFIG); in he_start()
1499 he_writel(he_dev, reg, RC_CONFIG); in he_start()
1502 he_dev->cs_stper[i].inuse = 0; in he_start()
1503 he_dev->cs_stper[i].pcr = -1; in he_start()
1505 he_dev->total_bw = 0; in he_start()
1510 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits; in he_start()
1511 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits; in he_start()
1513 he_dev->irq_peak = 0; in he_start()
1514 he_dev->rbrq_peak = 0; in he_start()
1515 he_dev->rbpl_peak = 0; in he_start()
1516 he_dev->tbrq_peak = 0; in he_start()
1524 he_stop(struct he_dev *he_dev) in he_stop() argument
1531 pci_dev = he_dev->pci_dev; in he_stop()
1535 if (he_dev->membase) { in he_stop()
1540 tasklet_disable(&he_dev->tasklet); in he_stop()
1544 reg = he_readl_mbox(he_dev, CS_ERCTL0); in he_stop()
1546 he_writel_mbox(he_dev, reg, CS_ERCTL0); in he_stop()
1548 reg = he_readl(he_dev, RC_CONFIG); in he_stop()
1550 he_writel(he_dev, reg, RC_CONFIG); in he_stop()
1554 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop) in he_stop()
1555 he_dev->atm_dev->phy->stop(he_dev->atm_dev); in he_stop()
1558 if (he_dev->irq) in he_stop()
1559 free_irq(he_dev->irq, he_dev); in he_stop()
1561 if (he_dev->irq_base) in he_stop()
1562 dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1) in he_stop()
1563 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys); in he_stop()
1565 if (he_dev->hsp) in he_stop()
1566 dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp), in he_stop()
1567 he_dev->hsp, he_dev->hsp_phys); in he_stop()
1569 if (he_dev->rbpl_base) { in he_stop()
1570 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) in he_stop()
1571 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping); in he_stop()
1573 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE in he_stop()
1574 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); in he_stop()
1577 kfree(he_dev->rbpl_virt); in he_stop()
1578 bitmap_free(he_dev->rbpl_table); in he_stop()
1579 dma_pool_destroy(he_dev->rbpl_pool); in he_stop()
1581 if (he_dev->rbrq_base) in he_stop()
1582 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), in he_stop()
1583 he_dev->rbrq_base, he_dev->rbrq_phys); in he_stop()
1585 if (he_dev->tbrq_base) in he_stop()
1586 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), in he_stop()
1587 he_dev->tbrq_base, he_dev->tbrq_phys); in he_stop()
1589 if (he_dev->tpdrq_base) in he_stop()
1590 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), in he_stop()
1591 he_dev->tpdrq_base, he_dev->tpdrq_phys); in he_stop()
1593 dma_pool_destroy(he_dev->tpd_pool); in he_stop()
1595 if (he_dev->pci_dev) { in he_stop()
1596 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); in he_stop()
1598 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command); in he_stop()
1601 if (he_dev->membase) in he_stop()
1602 iounmap(he_dev->membase); in he_stop()
1606 __alloc_tpd(struct he_dev *he_dev) in __alloc_tpd() argument
1611 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping); in __alloc_tpd()
1639 he_service_rbrq(struct he_dev *he_dev, int group) in he_service_rbrq() argument
1642 ((unsigned long)he_dev->rbrq_base | in he_service_rbrq()
1643 he_dev->hsp->group[group].rbrq_tail); in he_service_rbrq()
1654 while (he_dev->rbrq_head != rbrq_tail) { in he_service_rbrq()
1658 he_dev->rbrq_head, group, in he_service_rbrq()
1659 RBRQ_ADDR(he_dev->rbrq_head), in he_service_rbrq()
1660 RBRQ_BUFLEN(he_dev->rbrq_head), in he_service_rbrq()
1661 RBRQ_CID(he_dev->rbrq_head), in he_service_rbrq()
1662 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "", in he_service_rbrq()
1663 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "", in he_service_rbrq()
1664 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "", in he_service_rbrq()
1665 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "", in he_service_rbrq()
1666 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", in he_service_rbrq()
1667 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : ""); in he_service_rbrq()
1669 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET; in he_service_rbrq()
1670 heb = he_dev->rbpl_virt[i]; in he_service_rbrq()
1672 cid = RBRQ_CID(he_dev->rbrq_head); in he_service_rbrq()
1674 vcc = __find_vcc(he_dev, cid); in he_service_rbrq()
1679 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) { in he_service_rbrq()
1680 clear_bit(i, he_dev->rbpl_table); in he_service_rbrq()
1682 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping); in he_service_rbrq()
1688 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { in he_service_rbrq()
1694 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; in he_service_rbrq()
1695 clear_bit(i, he_dev->rbpl_table); in he_service_rbrq()
1699 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { in he_service_rbrq()
1706 if (!RBRQ_END_PDU(he_dev->rbrq_head)) in he_service_rbrq()
1709 if (RBRQ_LEN_ERR(he_dev->rbrq_head) in he_service_rbrq()
1710 || RBRQ_CRC_ERR(he_dev->rbrq_head)) { in he_service_rbrq()
1712 RBRQ_CRC_ERR(he_dev->rbrq_head) in he_service_rbrq()
1714 RBRQ_LEN_ERR(he_dev->rbrq_head) in he_service_rbrq()
1765 spin_unlock(&he_dev->global_lock); in he_service_rbrq()
1767 spin_lock(&he_dev->global_lock); in he_service_rbrq()
1775 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping); in he_service_rbrq()
1780 he_dev->rbrq_head = (struct he_rbrq *) in he_service_rbrq()
1781 ((unsigned long) he_dev->rbrq_base | in he_service_rbrq()
1782 RBRQ_MASK(he_dev->rbrq_head + 1)); in he_service_rbrq()
1788 if (updated > he_dev->rbrq_peak) in he_service_rbrq()
1789 he_dev->rbrq_peak = updated; in he_service_rbrq()
1791 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head), in he_service_rbrq()
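he_service_rbrq() is the standard producer/consumer ring walk: the adapter publishes a tail index in host status memory (hsp->group[n].rbrq_tail) and the driver advances its head entry by entry, wrapping with a power-of-two mask, until the two meet, then writes the new head back to the card. A generic sketch of that loop; the entry type, size and processing are placeholders:

#define RING_ENTRIES 64                        /* assumed, power of two */
#define RING_MASK(i) ((i) & (RING_ENTRIES - 1))

struct ring_desc { unsigned int status; };

static int service_ring(struct ring_desc *ring, unsigned int *head,
			unsigned int tail)
{
	int updated = 0;

	while (*head != tail) {
		struct ring_desc *e = &ring[*head];

		/* ... decode e, look up the buffer/vcc, pass data up ... */
		(void) e;

		*head = RING_MASK(*head + 1);  /* wrap around the ring */
		updated++;
	}
	return updated;                        /* caller tracks the peak */
}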
1799 he_service_tbrq(struct he_dev *he_dev, int group) in he_service_tbrq() argument
1802 ((unsigned long)he_dev->tbrq_base | in he_service_tbrq()
1803 he_dev->hsp->group[group].tbrq_tail); in he_service_tbrq()
1810 while (he_dev->tbrq_head != tbrq_tail) { in he_service_tbrq()
1815 TBRQ_TPD(he_dev->tbrq_head), in he_service_tbrq()
1816 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "", in he_service_tbrq()
1817 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : ""); in he_service_tbrq()
1819 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) { in he_service_tbrq()
1820 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) { in he_service_tbrq()
1829 TBRQ_TPD(he_dev->tbrq_head)); in he_service_tbrq()
1833 if (TBRQ_EOS(he_dev->tbrq_head)) { in he_service_tbrq()
1835 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); in he_service_tbrq()
1844 dma_unmap_single(&he_dev->pci_dev->dev, in he_service_tbrq()
1862 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in he_service_tbrq()
1863 he_dev->tbrq_head = (struct he_tbrq *) in he_service_tbrq()
1864 ((unsigned long) he_dev->tbrq_base | in he_service_tbrq()
1865 TBRQ_MASK(he_dev->tbrq_head + 1)); in he_service_tbrq()
1869 if (updated > he_dev->tbrq_peak) in he_service_tbrq()
1870 he_dev->tbrq_peak = updated; in he_service_tbrq()
1872 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head), in he_service_tbrq()
1878 he_service_rbpl(struct he_dev *he_dev, int group) in he_service_rbpl() argument
1887 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | in he_service_rbpl()
1888 RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); in he_service_rbpl()
1891 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | in he_service_rbpl()
1892 RBPL_MASK(he_dev->rbpl_tail+1)); in he_service_rbpl()
1898 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint); in he_service_rbpl()
1900 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE); in he_service_rbpl()
1904 he_dev->rbpl_hint = i + 1; in he_service_rbpl()
1906 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping); in he_service_rbpl()
1910 list_add(&heb->entry, &he_dev->rbpl_outstanding); in he_service_rbpl()
1911 he_dev->rbpl_virt[i] = heb; in he_service_rbpl()
1912 set_bit(i, he_dev->rbpl_table); in he_service_rbpl()
1916 he_dev->rbpl_tail = new_tail; in he_service_rbpl()
1921 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); in he_service_rbpl()
1928 struct he_dev *he_dev = (struct he_dev *) data; in he_tasklet() local
1933 spin_lock_irqsave(&he_dev->global_lock, flags); in he_tasklet()
1935 while (he_dev->irq_head != he_dev->irq_tail) { in he_tasklet()
1938 type = ITYPE_TYPE(he_dev->irq_head->isw); in he_tasklet()
1939 group = ITYPE_GROUP(he_dev->irq_head->isw); in he_tasklet()
1946 if (he_service_rbrq(he_dev, group)) in he_tasklet()
1947 he_service_rbpl(he_dev, group); in he_tasklet()
1953 he_service_tbrq(he_dev, group); in he_tasklet()
1956 he_service_rbpl(he_dev, group); in he_tasklet()
1964 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_tasklet()
1965 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt) in he_tasklet()
1966 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev); in he_tasklet()
1967 spin_lock_irqsave(&he_dev->global_lock, flags); in he_tasklet()
1976 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR)); in he_tasklet()
1983 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw); in he_tasklet()
1985 he_service_rbrq(he_dev, 0); in he_tasklet()
1986 he_service_rbpl(he_dev, 0); in he_tasklet()
1987 he_service_tbrq(he_dev, 0); in he_tasklet()
1990 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw); in he_tasklet()
1993 he_dev->irq_head->isw = ITYPE_INVALID; in he_tasklet()
1995 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK); in he_tasklet()
1999 if (updated > he_dev->irq_peak) in he_tasklet()
2000 he_dev->irq_peak = updated; in he_tasklet()
2002 he_writel(he_dev, in he_tasklet()
2005 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD); in he_tasklet()
2006 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */ in he_tasklet()
2008 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_tasklet()
2015 struct he_dev *he_dev = (struct he_dev * )dev_id; in he_irq_handler() local
2018 if (he_dev == NULL) in he_irq_handler()
2021 spin_lock_irqsave(&he_dev->global_lock, flags); in he_irq_handler()
2023 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) | in he_irq_handler()
2024 (*he_dev->irq_tailoffset << 2)); in he_irq_handler()
2026 if (he_dev->irq_tail == he_dev->irq_head) { in he_irq_handler()
2028 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base | in he_irq_handler()
2029 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2)); in he_irq_handler()
2030 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */ in he_irq_handler()
2034 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */) in he_irq_handler()
2038 if (he_dev->irq_head != he_dev->irq_tail) { in he_irq_handler()
2040 tasklet_schedule(&he_dev->tasklet); in he_irq_handler()
2041 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */ in he_irq_handler()
2042 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */ in he_irq_handler()
2044 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_irq_handler()
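The two routines above split interrupt handling into a thin top half and a tasklet bottom half: he_irq_handler() snapshots the tail offset the card maintains in coherent memory, schedules the tasklet registered in he_init_one(), and acks the INT FIFO; he_tasklet() then drains irq_head up to irq_tail under global_lock. The sketch below is reconstructed only from the fragments listed here (it assumes the driver's he.h declarations) and omits the 8.1.2 errata fallback visible in the listing:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include "he.h"                         /* struct he_dev, he_writel(), INT_* */

static irqreturn_t irq_top_half(int irq, void *dev_id)
{
	struct he_dev *he_dev = dev_id;
	unsigned long flags;
	int handled = 0;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* the adapter advances a tail-offset word just past the irq ring */
	he_dev->irq_tail = (struct he_irq *)((unsigned long)he_dev->irq_base |
					     (*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);       /* defer the real work */
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* ack the interrupt */
		(void) he_readl(he_dev, INT_FIFO);        /* flush posted writes */
	}

	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}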
2050 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) in __enqueue_tpd() argument
2055 tpd, cid, he_dev->tpdrq_tail); in __enqueue_tpd()
2058 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base | in __enqueue_tpd()
2059 TPDRQ_MASK(he_dev->tpdrq_tail+1)); in __enqueue_tpd()
2068 if (new_tail == he_dev->tpdrq_head) { in __enqueue_tpd()
2069 he_dev->tpdrq_head = (struct he_tpdrq *) in __enqueue_tpd()
2070 (((unsigned long)he_dev->tpdrq_base) | in __enqueue_tpd()
2071 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H))); in __enqueue_tpd()
2073 if (new_tail == he_dev->tpdrq_head) { in __enqueue_tpd()
2085 dma_unmap_single(&he_dev->pci_dev->dev, in __enqueue_tpd()
2097 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in __enqueue_tpd()
2103 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds); in __enqueue_tpd()
2104 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status); in __enqueue_tpd()
2105 he_dev->tpdrq_tail->cid = cid; in __enqueue_tpd()
2108 he_dev->tpdrq_tail = new_tail; in __enqueue_tpd()
2110 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T); in __enqueue_tpd()
2111 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */ in __enqueue_tpd()
2118 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_open() local
2132 cid = he_mkcid(he_dev, vpi, vci); in he_open()
2154 pcr_goal = he_dev->atm_dev->link_rate; in he_open()
2174 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2175 tsr0 = he_readl_tsr0(he_dev, cid); in he_open()
2176 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2196 if ((he_dev->total_bw + pcr_goal) in he_open()
2197 > (he_dev->atm_dev->link_rate * 9 / 10)) in he_open()
2203 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */ in he_open()
2207 if (he_dev->cs_stper[reg].inuse == 0 || in he_open()
2208 he_dev->cs_stper[reg].pcr == pcr_goal) in he_open()
2213 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2217 he_dev->total_bw += pcr_goal; in he_open()
2220 ++he_dev->cs_stper[reg].inuse; in he_open()
2221 he_dev->cs_stper[reg].pcr = pcr_goal; in he_open()
2223 clock = he_is622(he_dev) ? 66667000 : 50000000; in he_open()
2229 he_writel_mbox(he_dev, rate_to_atmf(period/2), in he_open()
2231 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2242 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2244 he_writel_tsr0(he_dev, tsr0, cid); in he_open()
2245 he_writel_tsr4(he_dev, tsr4 | 1, cid); in he_open()
2246 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) | in he_open()
2248 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid); in he_open()
2249 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid); in he_open()
2251 he_writel_tsr3(he_dev, 0x0, cid); in he_open()
2252 he_writel_tsr5(he_dev, 0x0, cid); in he_open()
2253 he_writel_tsr6(he_dev, 0x0, cid); in he_open()
2254 he_writel_tsr7(he_dev, 0x0, cid); in he_open()
2255 he_writel_tsr8(he_dev, 0x0, cid); in he_open()
2256 he_writel_tsr10(he_dev, 0x0, cid); in he_open()
2257 he_writel_tsr11(he_dev, 0x0, cid); in he_open()
2258 he_writel_tsr12(he_dev, 0x0, cid); in he_open()
2259 he_writel_tsr13(he_dev, 0x0, cid); in he_open()
2260 he_writel_tsr14(he_dev, 0x0, cid); in he_open()
2261 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */ in he_open()
2262 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2283 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2285 rsr0 = he_readl_rsr0(he_dev, cid); in he_open()
2287 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2304 he_writel_rsr4(he_dev, rsr4, cid); in he_open()
2305 he_writel_rsr1(he_dev, rsr1, cid); in he_open()
2308 he_writel_rsr0(he_dev, in he_open()
2310 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ in he_open()
2312 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2332 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_close() local
2342 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); in he_close()
2353 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2354 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) { in he_close()
2362 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid); in he_close()
2363 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ in he_close()
2364 he_writel_mbox(he_dev, cid, RXCON_CLOSE); in he_close()
2365 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2408 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2409 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid); in he_close()
2414 he_writel_tsr1(he_dev, in he_close()
2419 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid); in he_close()
2422 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */ in he_close()
2424 tpd = __alloc_tpd(he_dev); in he_close()
2436 __enqueue_tpd(he_dev, tpd, cid); in he_close()
2437 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2444 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2451 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) { in he_close()
2456 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) { in he_close()
2468 if (he_dev->cs_stper[reg].inuse == 0) in he_close()
2471 --he_dev->cs_stper[reg].inuse; in he_close()
2473 he_dev->total_bw -= he_dev->cs_stper[reg].pcr; in he_close()
2475 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2489 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_send() local
2490 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); in he_send()
2522 spin_lock_irqsave(&he_dev->global_lock, flags); in he_send()
2524 tpd = __alloc_tpd(he_dev); in he_send()
2531 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2551 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data, in he_send()
2565 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2566 tpd = __alloc_tpd(he_dev); in he_send()
2573 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2580 tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev, in he_send()
2589 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE); in he_send()
2599 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2600 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2611 struct he_dev *he_dev = HE_DEV(atm_dev); in he_ioctl() local
2624 spin_lock_irqsave(&he_dev->global_lock, flags); in he_ioctl()
2632 reg.val = he_readl(he_dev, reg.addr); in he_ioctl()
2636 he_readl_rcm(he_dev, reg.addr); in he_ioctl()
2640 he_readl_tcm(he_dev, reg.addr); in he_ioctl()
2644 he_readl_mbox(he_dev, reg.addr); in he_ioctl()
2650 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_ioctl()
2673 struct he_dev *he_dev = HE_DEV(atm_dev); in he_phy_put() local
2677 spin_lock_irqsave(&he_dev->global_lock, flags); in he_phy_put()
2678 he_writel(he_dev, val, FRAMER + (addr*4)); in he_phy_put()
2679 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */ in he_phy_put()
2680 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_phy_put()
2688 struct he_dev *he_dev = HE_DEV(atm_dev); in he_phy_get() local
2691 spin_lock_irqsave(&he_dev->global_lock, flags); in he_phy_get()
2692 reg = he_readl(he_dev, FRAMER + (addr*4)); in he_phy_get()
2693 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_phy_get()
2703 struct he_dev *he_dev = HE_DEV(dev); in he_proc_read() local
2719 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM"); in he_proc_read()
2724 spin_lock_irqsave(&he_dev->global_lock, flags); in he_proc_read()
2725 mcc += he_readl(he_dev, MCC); in he_proc_read()
2726 oec += he_readl(he_dev, OEC); in he_proc_read()
2727 dcc += he_readl(he_dev, DCC); in he_proc_read()
2728 cec += he_readl(he_dev, CEC); in he_proc_read()
2729 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_proc_read()
2737 CONFIG_IRQ_SIZE, he_dev->irq_peak); in he_proc_read()
2745 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak); in he_proc_read()
2749 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak); in he_proc_read()
2753 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S)); in he_proc_read()
2754 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T)); in he_proc_read()
2772 he_dev->cs_stper[i].pcr, in he_proc_read()
2773 he_dev->cs_stper[i].inuse); in he_proc_read()
2777 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9); in he_proc_read()
2784 static u8 read_prom_byte(struct he_dev *he_dev, int addr) in read_prom_byte() argument
2790 val = readl(he_dev->membase + HOST_CNTL); in read_prom_byte()
2795 he_writel(he_dev, val, HOST_CNTL); in read_prom_byte()
2799 he_writel(he_dev, val | readtab[i], HOST_CNTL); in read_prom_byte()
2805 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); in read_prom_byte()
2807 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); in read_prom_byte()
2814 he_writel(he_dev, val, HOST_CNTL); in read_prom_byte()
2818 he_writel(he_dev, val | clocktab[j++], HOST_CNTL); in read_prom_byte()
2820 tmp_read = he_readl(he_dev, HOST_CNTL); in read_prom_byte()
2823 he_writel(he_dev, val | clocktab[j++], HOST_CNTL); in read_prom_byte()
2827 he_writel(he_dev, val | ID_CS, HOST_CNTL); in read_prom_byte()
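read_prom_byte() at the end of the listing bit-bangs the board's serial PROM through HOST_CNTL: address bits are driven out on bit 9 around clock pulses taken from a clock table, and the reply is sampled one bit per clock from the same register. The sketch below shows that general bit-bang shape only; the line masks, timing and accessors are assumptions, not the driver's actual readtab/clocktab waveform:

#include <stdint.h>

#define PROM_CLK   (1u << 2)           /* assumed clock line */
#define PROM_DOUT  (1u << 9)           /* data out to the PROM (bit 9, as listed) */
#define PROM_DIN   (1u << 16)          /* assumed data line back from the PROM */

extern uint32_t ctl_read(void *dev);                  /* assumed accessors */
extern void ctl_write(void *dev, uint32_t val);

static uint8_t prom_read_byte(void *dev, uint32_t base, unsigned addr)
{
	uint8_t byte = 0;
	int i;

	/* shift the address out, MSB first, one clock pulse per bit */
	for (i = 7; i >= 0; i--) {
		uint32_t bit = ((addr >> i) & 1) ? PROM_DOUT : 0;

		ctl_write(dev, base | bit);            /* clock low, data valid */
		ctl_write(dev, base | bit | PROM_CLK); /* clock high, PROM samples */
	}

	/* clock the reply back in, one bit per rising edge */
	for (i = 7; i >= 0; i--) {
		ctl_write(dev, base | PROM_CLK);
		if (ctl_read(dev) & PROM_DIN)
			byte |= (uint8_t)(1u << i);
		ctl_write(dev, base);
	}
	return byte;
}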