Lines Matching +full:reg +full:- +full:spacing

29           Completed the ABR logic of the driver, and added the ABR work-
32 Added the flow control logic to the driver to allow rate-limiting of VCs.
72 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
100 que->next = NULL; in ia_init_rtn_q()
101 que->tail = NULL; in ia_init_rtn_q()
106 data->next = NULL; in ia_enque_head_rtn_q()
107 if (que->next == NULL) in ia_enque_head_rtn_q()
108 que->next = que->tail = data; in ia_enque_head_rtn_q()
110 data->next = que->next; in ia_enque_head_rtn_q()
111 que->next = data; in ia_enque_head_rtn_q()
119 return -ENOMEM; in ia_enque_rtn_q()
120 entry->data = data; in ia_enque_rtn_q()
121 entry->next = NULL; in ia_enque_rtn_q()
122 if (que->next == NULL) in ia_enque_rtn_q()
123 que->next = que->tail = entry; in ia_enque_rtn_q()
125 que->tail->next = entry; in ia_enque_rtn_q()
126 que->tail = que->tail->next; in ia_enque_rtn_q()
133 if (que->next == NULL) in ia_deque_rtn_q()
135 tmpdata = que->next; in ia_deque_rtn_q()
136 if ( que->next == que->tail) in ia_deque_rtn_q()
137 que->next = que->tail = NULL; in ia_deque_rtn_q()
139 que->next = que->next->next; in ia_deque_rtn_q()
149 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff; in ia_hack_tcq()
150 while (dev->host_tcq_wr != tcq_wr) { in ia_hack_tcq()
151 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr); in ia_hack_tcq()
153 else if (!dev->desc_tbl[desc1 -1].timestamp) { in ia_hack_tcq()
154 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);) in ia_hack_tcq()
155 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0; in ia_hack_tcq()
157 else if (dev->desc_tbl[desc1 -1].timestamp) { in ia_hack_tcq()
158 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { in ia_hack_tcq()
162 iavcc_r->vc_desc_cnt--; in ia_hack_tcq()
163 dev->desc_tbl[desc1 -1].timestamp = 0; in ia_hack_tcq()
165 dev->desc_tbl[desc1 -1].txskb, desc1);) in ia_hack_tcq()
166 if (iavcc_r->pcr < dev->rate_limit) { in ia_hack_tcq()
167 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE; in ia_hack_tcq()
168 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0) in ia_hack_tcq()
171 dev->desc_tbl[desc1 -1].iavcc = NULL; in ia_hack_tcq()
172 dev->desc_tbl[desc1 -1].txskb = NULL; in ia_hack_tcq()
174 dev->host_tcq_wr += 2; in ia_hack_tcq()
175 if (dev->host_tcq_wr > dev->ffL.tcq_ed) in ia_hack_tcq()
176 dev->host_tcq_wr = dev->ffL.tcq_st; in ia_hack_tcq()
188 if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) { in get_desc()
191 while (i < dev->num_tx_desc) { in get_desc()
192 if (!dev->desc_tbl[i].timestamp) { in get_desc()
196 ltimeout = dev->desc_tbl[i].iavcc->ltimeout; in get_desc()
197 delta = jiffies - dev->desc_tbl[i].timestamp; in get_desc()
199 …IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].time… in get_desc()
200 if (dev->ffL.tcq_rd == dev->ffL.tcq_st) in get_desc()
201 dev->ffL.tcq_rd = dev->ffL.tcq_ed; in get_desc()
203 dev->ffL.tcq_rd -= 2; in get_desc()
204 *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1; in get_desc()
205 if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc)) in get_desc()
208 iavcc_r->vc_desc_cnt--; in get_desc()
209 dev->desc_tbl[i].timestamp = 0; in get_desc()
210 dev->desc_tbl[i].iavcc = NULL; in get_desc()
211 dev->desc_tbl[i].txskb = NULL; in get_desc()
216 if (dev->ffL.tcq_rd == dev->host_tcq_wr) in get_desc()
220 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); in get_desc()
222 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) { in get_desc()
223 dev->ffL.tcq_rd += 2; in get_desc()
224 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) in get_desc()
225 dev->ffL.tcq_rd = dev->ffL.tcq_st; in get_desc()
226 if (dev->ffL.tcq_rd == dev->host_tcq_wr) in get_desc()
228 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); in get_desc()
232 dev->desc_tbl[desc_num -1].timestamp = jiffies; in get_desc()
241 struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR; in clear_lockup()
242 struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR; in clear_lockup()
245 if (vcc->qos.txtp.traffic_class == ATM_ABR) { in clear_lockup()
246 vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status); in clear_lockup()
247 vcstatus->cnt++; in clear_lockup()
249 if( vcstatus->cnt == 0x05 ) { in clear_lockup()
250 abr_vc += vcc->vci; in clear_lockup()
251 eabr_vc += vcc->vci; in clear_lockup()
252 if( eabr_vc->last_desc ) { in clear_lockup()
253 if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) { in clear_lockup()
256 if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE)) in clear_lockup()
260 tempCellSlot = abr_vc->last_cell_slot; in clear_lockup()
261 tempFract = abr_vc->fraction; in clear_lockup()
262 if((tempCellSlot == dev->testTable[vcc->vci]->lastTime) in clear_lockup()
263 && (tempFract == dev->testTable[vcc->vci]->fract)) in clear_lockup()
265 dev->testTable[vcc->vci]->lastTime = tempCellSlot; in clear_lockup()
266 dev->testTable[vcc->vci]->fract = tempFract; in clear_lockup()
269 vcstatus->cnt = 0; in clear_lockup()
270 } /* vcstatus->cnt */ in clear_lockup()
274 writew(0xFFFD, dev->seg_reg+MODE_REG_0); in clear_lockup()
277 abr_vc->status &= 0xFFF8; in clear_lockup()
278 abr_vc->status |= 0x0001; /* state is idle */ in clear_lockup()
279 shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR; in clear_lockup()
280 for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ ); in clear_lockup()
281 if (i < dev->num_vc) in clear_lockup()
282 shd_tbl[i] = vcc->vci; in clear_lockup()
284 IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);) in clear_lockup()
285 writew(T_ONLINE, dev->seg_reg+MODE_REG_0); in clear_lockup()
286 writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG); in clear_lockup()
287 writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG); in clear_lockup()
288 vcstatus->cnt = 0; in clear_lockup()
297 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
299 ** +----+----+------------------+-------------------------------+
300 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
301 ** +----+----+------------------+-------------------------------+
329 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK); in cellrate_to_float()
331 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK); in cellrate_to_float()
337 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
353 cps <<= (exp - M_BITS);
355 cps >>= (M_BITS - exp);
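
The two conversion routines listed above pack a 24-bit cell rate into the
16-bit float drawn in the diagram and unpack it again. A minimal standalone
sketch of both directions, assuming the NZ flag / 5-bit exponent / 9-bit
mantissa split exactly as drawn; the constant values and helper names below
are illustrative assumptions, not the driver's own definitions.

#include <stdint.h>

/* Assumed field layout:  | R | NZ | 5-bit exponent | 9-bit mantissa |  */
#define NZ_FLAG  0x4000u   /* assumed position of the NZ bit            */
#define M_BITS   9         /* mantissa width, from the diagram          */
#define M_MASK   0x1ffu    /* 9-bit mantissa mask                       */
#define E_MASK   0x1fu     /* 5-bit exponent mask                       */

/* 24-bit cells/sec -> 16-bit float; mirrors the two shift cases above. */
static uint16_t cellrate_to_float_sketch(uint32_t cr)
{
    int i = 23;

    if (cr == 0)
        return 0;                        /* zero rate leaves NZ clear   */
    while (i > 0 && !(cr & (1u << i)))   /* exponent = index of top set bit */
        i--;
    if (i < M_BITS)
        return NZ_FLAG | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
    return NZ_FLAG | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
}

/* 16-bit float -> cells/sec; mirrors the exp vs. M_BITS shifts above.  */
static uint32_t float_to_cellrate_sketch(uint16_t rate)
{
    uint32_t exp, cps;

    if (!(rate & NZ_FLAG))                  /* NZ clear means a zero rate */
        return 0;
    exp = (rate >> M_BITS) & E_MASK;
    cps = (1u << M_BITS) | (rate & M_MASK); /* restore the implied top bit */
    if (exp > M_BITS)
        cps <<= (exp - M_BITS);
    else if (exp < M_BITS)
        cps >>= (M_BITS - exp);
    return cps;
}

Round-tripping a rate such as 353207 cells/s (OC-3) through these two helpers
loses only the precision that does not fit in the 9-bit mantissa.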
361 srv_p->class_type = ATM_ABR; in init_abr_vc()
362 srv_p->pcr = dev->LineRate; in init_abr_vc()
363 srv_p->mcr = 0; in init_abr_vc()
364 srv_p->icr = 0x055cb7; in init_abr_vc()
365 srv_p->tbe = 0xffffff; in init_abr_vc()
366 srv_p->frtt = 0x3a; in init_abr_vc()
367 srv_p->rif = 0xf; in init_abr_vc()
368 srv_p->rdf = 0xb; in init_abr_vc()
369 srv_p->nrm = 0x4; in init_abr_vc()
370 srv_p->trm = 0x7; in init_abr_vc()
371 srv_p->cdf = 0x3; in init_abr_vc()
372 srv_p->adtf = 50; in init_abr_vc()
384 f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR; in ia_open_abr_vc()
385 f_abr_vc += vcc->vci; in ia_open_abr_vc()
389 if (srv_p->pcr == 0) in ia_open_abr_vc()
391 if (srv_p->pcr > dev->LineRate) in ia_open_abr_vc()
392 srv_p->pcr = dev->LineRate; in ia_open_abr_vc()
393 if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate) in ia_open_abr_vc()
395 if (srv_p->mcr > srv_p->pcr) in ia_open_abr_vc()
397 if (!(srv_p->icr)) in ia_open_abr_vc()
398 srv_p->icr = srv_p->pcr; in ia_open_abr_vc()
399 if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr)) in ia_open_abr_vc()
401 if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE)) in ia_open_abr_vc()
403 if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT)) in ia_open_abr_vc()
405 if (srv_p->nrm > MAX_NRM) in ia_open_abr_vc()
407 if (srv_p->trm > MAX_TRM) in ia_open_abr_vc()
409 if (srv_p->adtf > MAX_ADTF) in ia_open_abr_vc()
411 else if (srv_p->adtf == 0) in ia_open_abr_vc()
412 srv_p->adtf = 1; in ia_open_abr_vc()
413 if (srv_p->cdf > MAX_CDF) in ia_open_abr_vc()
415 if (srv_p->rif > MAX_RIF) in ia_open_abr_vc()
417 if (srv_p->rdf > MAX_RDF) in ia_open_abr_vc()
421 f_abr_vc->f_vc_type = ABR; in ia_open_abr_vc()
422 nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */ in ia_open_abr_vc()
423 /* i.e. 2**n = 2 << (n-1) */ in ia_open_abr_vc()
424 f_abr_vc->f_nrm = nrm << 8 | nrm; in ia_open_abr_vc()
425 trm = 100000/(2 << (16 - srv_p->trm)); in ia_open_abr_vc()
427 f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm; in ia_open_abr_vc()
428 crm = srv_p->tbe / nrm; in ia_open_abr_vc()
430 f_abr_vc->f_crm = crm & 0xff; in ia_open_abr_vc()
431 f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr); in ia_open_abr_vc()
432 icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ? in ia_open_abr_vc()
433 ((srv_p->tbe/srv_p->frtt)*1000000) : in ia_open_abr_vc()
434 (1000000/(srv_p->frtt/srv_p->tbe))); in ia_open_abr_vc()
435 f_abr_vc->f_icr = cellrate_to_float(icr); in ia_open_abr_vc()
436 adtf = (10000 * srv_p->adtf)/8192; in ia_open_abr_vc()
438 f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff; in ia_open_abr_vc()
439 f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr); in ia_open_abr_vc()
440 f_abr_vc->f_acr = f_abr_vc->f_icr; in ia_open_abr_vc()
441 f_abr_vc->f_status = 0x0042; in ia_open_abr_vc()
444 ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); in ia_open_abr_vc()
445 *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR; in ia_open_abr_vc()
446 r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize); in ia_open_abr_vc()
447 r_abr_vc += vcc->vci; in ia_open_abr_vc()
448 r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f; in ia_open_abr_vc()
449 air = srv_p->pcr << (15 - srv_p->rif); in ia_open_abr_vc()
451 r_abr_vc->r_air = cellrate_to_float(air); in ia_open_abr_vc()
452 dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR; in ia_open_abr_vc()
453 dev->sum_mcr += srv_p->mcr; in ia_open_abr_vc()
454 dev->n_abr++; in ia_open_abr_vc()
467 u32 spacing; in ia_cbr_setup() local
475 if (vcc->qos.txtp.max_pcr <= 0) { in ia_cbr_setup()
477 return -1; in ia_cbr_setup()
479 rate = vcc->qos.txtp.max_pcr; in ia_cbr_setup()
480 entries = rate / dev->Granularity; in ia_cbr_setup()
482 entries, rate, dev->Granularity);) in ia_cbr_setup()
485 rateLow = entries * dev->Granularity; in ia_cbr_setup()
486 rateHigh = (entries + 1) * dev->Granularity; in ia_cbr_setup()
487 if (3*(rate - rateLow) > (rateHigh - rate)) in ia_cbr_setup()
489 if (entries > dev->CbrRemEntries) { in ia_cbr_setup()
492 entries, dev->CbrRemEntries);) in ia_cbr_setup()
493 return -EBUSY; in ia_cbr_setup()
497 ia_vcc->NumCbrEntry = entries; in ia_cbr_setup()
498 dev->sum_mcr += entries * dev->Granularity; in ia_cbr_setup()
503 spacing = dev->CbrTotEntries / entries; in ia_cbr_setup()
504 sp_mod = dev->CbrTotEntries % entries; // get modulo in ia_cbr_setup()
507 vcIndex = vcc->vci; in ia_cbr_setup()
508 IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);) in ia_cbr_setup()
515 idealSlot = dev->CbrEntryPt; in ia_cbr_setup()
516 dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping in ia_cbr_setup()
517 if (dev->CbrEntryPt >= dev->CbrTotEntries) in ia_cbr_setup()
518 dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary in ia_cbr_setup()
520 idealSlot += (u32)(spacing + fracSlot); // Point to the next location in ia_cbr_setup()
525 if (idealSlot >= (int)dev->CbrTotEntries) in ia_cbr_setup()
526 idealSlot -= dev->CbrTotEntries; in ia_cbr_setup()
529 SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); in ia_cbr_setup()
539 testSlot = idealSlot - inc; in ia_cbr_setup()
541 testSlot += dev->CbrTotEntries; in ia_cbr_setup()
550 if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary in ia_cbr_setup()
551 testSlot -= dev->CbrTotEntries; in ia_cbr_setup()
552 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);) in ia_cbr_setup()
564 dev->CbrRemEntries--; in ia_cbr_setup()
565 toBeAssigned--; in ia_cbr_setup()
569 dev->NumEnabledCBR++; in ia_cbr_setup()
570 if (dev->NumEnabledCBR == 1) { in ia_cbr_setup()
571 writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS); in ia_cbr_setup()
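
The ia_cbr_setup() lines above work out how many schedule-table entries a CBR
VC needs (rate divided by the device granularity, rounded up when the rate
sits well above the lower multiple), space them total/entries slots apart
with the remainder carried as a fraction, and then hunt for a free slot near
each ideal position. A compact, self-contained sketch of that placement idea,
assuming free slots hold 0; the helper name is invented and it probes forward
only, where the driver searches in both directions around the ideal slot.

#include <stdint.h>

/* Spread `entries` occurrences of `vci` evenly over a table of `total`
 * slots: integer spacing plus a fractional remainder, skipping occupied
 * slots.  Simplified illustration, not the driver's code. */
static int cbr_spread_sketch(uint16_t *tbl, int total, int entries, uint16_t vci)
{
    int spacing = total / entries;   /* whole slots between entries      */
    int sp_mod  = total % entries;   /* remainder, spread fractionally   */
    int frac = 0, ideal = 0;
    int placed, probe, inc;

    for (placed = 0; placed < entries; placed++) {
        for (inc = 0; inc < total; inc++) {    /* find a free slot       */
            probe = (ideal + inc) % total;
            if (tbl[probe] == 0)
                break;
        }
        if (inc == total)
            return -1;                         /* table is full          */
        tbl[probe] = vci;

        ideal += spacing;                      /* advance by the spacing */
        frac  += sp_mod;
        if (frac >= entries) {                 /* carry one extra slot   */
            ideal++;
            frac -= entries;
        }
        if (ideal >= total)
            ideal -= total;                    /* wrap around the table  */
    }
    return 0;
}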
581 iadev = INPH_IA_DEV(vcc->dev); in ia_cbrVc_close()
582 iadev->NumEnabledCBR--; in ia_cbrVc_close()
583 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize); in ia_cbrVc_close()
584 if (iadev->NumEnabledCBR == 0) { in ia_cbrVc_close()
585 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS); in ia_cbrVc_close()
589 for (i=0; i < iadev->CbrTotEntries; i++) in ia_cbrVc_close()
591 if (*SchedTbl == vcc->vci) { in ia_cbrVc_close()
592 iadev->CbrRemEntries++; in ia_cbrVc_close()
604 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd) in ia_avail_descs()
605 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2; in ia_avail_descs()
607 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr - in ia_avail_descs()
608 iadev->ffL.tcq_st) / 2; in ia_avail_descs()
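
ia_avail_descs() above counts the free transmit descriptors as the number of
16-bit entries sitting between the TCQ read and write offsets, with a second
case for when the write pointer has already wrapped past the end of the
queue. The same two-case ring arithmetic in isolation, as an illustrative
helper (names are made up):

#include <stdint.h>

/* Entries between rd and wr in a circular queue occupying byte offsets
 * [st, ed], two bytes per entry; mirrors the arithmetic quoted above.  */
static unsigned int ring_avail_sketch(uint32_t wr, uint32_t rd,
                                      uint32_t st, uint32_t ed)
{
    if (wr >= rd)                     /* no wrap: plain difference       */
        return (wr - rd) / 2;
    /* wrapped: entries from rd up to ed, plus from st up to wr          */
    return (ed - rd + 2 + wr - st) / 2;
}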
620 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) { in ia_que_tx()
621 if (!(vcc = ATM_SKB(skb)->vcc)) { in ia_que_tx()
626 if (!test_bit(ATM_VF_READY,&vcc->flags)) { in ia_que_tx()
628 printk("Free the SKB on closed vci %d \n", vcc->vci); in ia_que_tx()
632 skb_queue_head(&iadev->tx_backlog, skb); in ia_que_tx()
634 num_desc--; in ia_que_tx()
646 while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) { in ia_tx_poll()
647 skb = rtne->data.txskb; in ia_tx_poll()
652 vcc = ATM_SKB(skb)->vcc; in ia_tx_poll()
666 skb1 = skb_dequeue(&iavcc->txing_skb); in ia_tx_poll()
669 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci); in ia_tx_poll()
672 if ((vcc->pop) && (skb1->len != 0)) in ia_tx_poll()
674 vcc->pop(vcc, skb1); in ia_tx_poll()
675 IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", in ia_tx_poll()
680 skb1 = skb_dequeue(&iavcc->txing_skb); in ia_tx_poll()
683 IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);) in ia_tx_poll()
684 ia_enque_head_rtn_q (&iadev->tx_return_q, rtne); in ia_tx_poll()
687 if ((vcc->pop) && (skb->len != 0)) in ia_tx_poll()
689 vcc->pop(vcc, skb); in ia_tx_poll()
690 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);) in ia_tx_poll()
717 for (i=15; i>=0; i--) {
723 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
725 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
752 for (i=15; i>=0; i--) { in ia_eeprom_get()
763 iadev->memType = memType;
765 iadev->num_tx_desc = IA_TX_BUF;
766 iadev->tx_buf_sz = IA_TX_BUF_SZ;
767 iadev->num_rx_desc = IA_RX_BUF;
768 iadev->rx_buf_sz = IA_RX_BUF_SZ;
771 iadev->num_tx_desc = IA_TX_BUF / 2;
773 iadev->num_tx_desc = IA_TX_BUF;
774 iadev->tx_buf_sz = IA_TX_BUF_SZ;
776 iadev->num_rx_desc = IA_RX_BUF / 2;
778 iadev->num_rx_desc = IA_RX_BUF;
779 iadev->rx_buf_sz = IA_RX_BUF_SZ;
783 iadev->num_tx_desc = IA_TX_BUF / 8;
785 iadev->num_tx_desc = IA_TX_BUF;
786 iadev->tx_buf_sz = IA_TX_BUF_SZ;
788 iadev->num_rx_desc = IA_RX_BUF / 8;
790 iadev->num_rx_desc = IA_RX_BUF;
791 iadev->rx_buf_sz = IA_RX_BUF_SZ;
793 iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
795 iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
796 iadev->rx_buf_sz, iadev->rx_pkt_ram);)
800 iadev->phy_type = PHY_OC3C_S;
802 iadev->phy_type = PHY_UTP155;
804 iadev->phy_type = PHY_OC3C_M;
807 iadev->phy_type = memType & FE_MASK;
808 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
809 memType,iadev->phy_type);)
810 if (iadev->phy_type == FE_25MBIT_PHY)
811 iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
812 else if (iadev->phy_type == FE_DS3_PHY)
813 iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
814 else if (iadev->phy_type == FE_E3_PHY)
815 iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
817 iadev->LineRate = (u32)(ATM_OC3_PCR);
818 IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
822 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg) argument
824 return readl(ia->phy + (reg >> 2));
827 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val) argument
829 writel(val, ia->phy + (reg >> 2));
836 if (iadev->phy_type & FE_25MBIT_PHY) {
838 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
839 } else if (iadev->phy_type & FE_DS3_PHY) {
842 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
843 } else if (iadev->phy_type & FE_E3_PHY) {
846 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
849 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
853 iadev->carrier_detect ? "detected" : "lost signal");
859 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
864 iadev->carrier_detect =
869 u16 reg; member
876 while (len--) {
877 ia_phy_write32(iadev, regs->reg, regs->val);
895 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
916 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
958 if (iadev->phy_type & FE_DS3_PHY)
969 suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
1018 RAM_BASE*((iadev->mem)/(128 * 1024))
1020 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1022 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1025 /*-- some utilities and memory allocation stuff will come here -------------*/
1032 // regval = readl((u32)ia_cmds->maddr);
1033 tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
1035 tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1036 readw(iadev->seg_ram+tcq_wr_ptr-2));
1037 printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
1038 iadev->ffL.tcq_rd);
1039 tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
1040 tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
1044 tmp = iadev->seg_ram+tcq_st_ptr;
1048 for(i=0; i <iadev->num_tx_desc; i++)
1049 printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1053 /*----------------------------- Receiving side stuff --------------------------*/
1064 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1067 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1069 if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1072 vci = readw(iadev->reass_ram+excpq_rd_ptr);
1073 error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1076 if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1077 excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1078 writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1079 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1088 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1089 iadev->rfL.fdq_wr +=2;
1090 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1091 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1092 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1109 if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1111 printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1112 return -EINVAL;
1115 desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1116 IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1117 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1119 readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1120 /* update the read pointer - maybe we should do this at the end */
1121 if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1122 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1124 iadev->rfL.pcq_rd += 2;
1125 writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1128 update stuff. - doesn't seem to be any update necessary
1130 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1133 if (!desc || (desc > iadev->num_rx_desc) ||
1134 ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
1137 return -1;
1139 vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1144 return -1;
1149 status = (u_short) (buf_desc_ptr->desc_mode);
1152 atomic_inc(&vcc->stats->rx_err);
1170 buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1171 dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1172 len = dma_addr - buf_addr;
1173 if (len > iadev->rx_buf_sz) {
1174 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1175 atomic_inc(&vcc->stats->rx_err);
1180 if (vcc->vci < 32)
1186 ATM_SKB(skb)->vcc = vcc;
1188 skb_queue_tail(&iadev->rx_dma_q, skb);
1191 wr_ptr = iadev->rx_dle_q.write;
1192 wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
1194 wr_ptr->local_pkt_addr = buf_addr;
1195 wr_ptr->bytes = len; /* We don't know this, do we? */
1196 wr_ptr->mode = DMA_INT_ENABLE;
1199 if(++wr_ptr == iadev->rx_dle_q.end)
1200 wr_ptr = iadev->rx_dle_q.start;
1201 iadev->rx_dle_q.write = wr_ptr;
1204 writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
1218 status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1228 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1233 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1235 iadev->rxing = 1;
1239 if (iadev->rxing) {
1240 iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1241 iadev->rx_tmp_jif = jiffies;
1242 iadev->rxing = 0;
1244 else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1245 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1246 for (i = 1; i <= iadev->num_rx_desc; i++)
1249 writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1250 iadev->rxing = 1;
1286 - do we really need to do this. Think not. */
1290 dle = iadev->rx_dle_q.read;
1291 dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1292 cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1296 skb = skb_dequeue(&iadev->rx_dma_q);
1302 if (!(len = skb->len))
1313 dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
1316 vcc = ATM_SKB(skb)->vcc;
1325 atomic_inc(&vcc->stats->rx_err);
1326 atm_return(vcc, skb->truesize);
1331 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1332 skb->len - sizeof(*trailer));
1333 length = swap_byte_order(trailer->length);
1334 if ((length > iadev->rx_buf_sz) || (length >
1335 (skb->len - sizeof(struct cpcs_trailer))))
1337 atomic_inc(&vcc->stats->rx_err);
1339 length, skb->len);)
1340 atm_return(vcc, skb->truesize);
1347 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
1348 xdump(skb->data, skb->len, "RX: ");
1352 vcc->push(vcc,skb);
1353 atomic_inc(&vcc->stats->rx);
1354 iadev->rx_pkt_cnt++;
1357 if (++dle == iadev->rx_dle_q.end)
1358 dle = iadev->rx_dle_q.start;
1360 iadev->rx_dle_q.read = dle;
1364 if (!iadev->rxing) {
1365 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1367 state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1369 iadev->reass_reg+REASS_MASK_REG);
1370 iadev->rxing++;
1381 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1383 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1384 iadev = INPH_IA_DEV(vcc->dev);
1385 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1386 if (iadev->phy_type & FE_25MBIT_PHY) {
1388 return -EINVAL;
1393 vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1394 vc_table += vcc->vci;
1397 *vc_table = vcc->vci << 6;
1400 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1401 (vcc->qos.txtp.traffic_class == ATM_ABR))
1408 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1409 reass_ptr += vcc->vci;
1413 if (iadev->rx_open[vcc->vci])
1415 vcc->dev->number, vcc->vci);
1416 iadev->rx_open[vcc->vci] = vcc;
1434 // spin_lock_init(&iadev->rx_lock);
1436 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1437 dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1438 &iadev->rx_dle_dma, GFP_KERNEL);
1443 iadev->rx_dle_q.start = (struct dle *)dle_addr;
1444 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1445 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1446 iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1452 writel(iadev->rx_dle_dma & 0xfffff000,
1453 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1455 iadev->dma+IPHASE5575_TX_LIST_ADDR,
1456 readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1458 iadev->dma+IPHASE5575_RX_LIST_ADDR,
1459 readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1461 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1462 writew(0, iadev->reass_reg+MODE_REG);
1463 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1466 -------------------------------
1468 Buffer descr 0x0000 (736 - 23K)
1469 VP Table 0x5c00 (256 - 512)
1470 Except q 0x5e00 (128 - 512)
1471 Free buffer q 0x6000 (1K - 2K)
1472 Packet comp q 0x6800 (1K - 2K)
1473 Reass Table 0x7000 (1K - 2K)
1474 VC Table 0x7800 (1K - 2K)
1475 ABR VC Table 0x8000 (1K - 32K)
1479 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1481 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1484 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1485 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1488 rx_pkt_start = iadev->rx_pkt_ram;
1489 for(i=1; i<=iadev->num_rx_desc; i++)
1492 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1493 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1495 rx_pkt_start += iadev->rx_buf_sz;
1498 i = FREE_BUF_DESC_Q*iadev->memSize;
1499 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1500 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1501 writew(i+iadev->num_rx_desc*sizeof(u_short),
1502 iadev->reass_reg+FREEQ_ED_ADR);
1503 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1504 writew(i+iadev->num_rx_desc*sizeof(u_short),
1505 iadev->reass_reg+FREEQ_WR_PTR);
1507 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1508 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1509 for(i=1; i<=iadev->num_rx_desc; i++)
1516 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1517 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1518 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1519 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1520 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1523 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1524 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1526 iadev->reass_reg+EXCP_Q_ED_ADR);
1527 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1528 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1531 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1532 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1533 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1534 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1535 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1536 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1537 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1538 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1541 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1542 iadev->rfL.pcq_wr);)
1543 /* just for check - no VP TBL */
1545 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1547 - I guess we can write all 1s or 0x000f in the entire memory
1552 i = REASS_TABLE * iadev->memSize;
1553 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1555 reass_table = (u16 *)(iadev->reass_ram+i);
1556 j = REASS_TABLE_SZ * iadev->memSize;
1561 while (i != iadev->num_vc) {
1565 i = RX_VC_TABLE * iadev->memSize;
1566 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1567 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1568 j = RX_VC_TABLE_SZ * iadev->memSize;
1580 i = ABR_VC_TABLE * iadev->memSize;
1581 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1583 i = ABR_VC_TABLE * iadev->memSize;
1584 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1585 j = REASS_TABLE_SZ * iadev->memSize;
1588 abr_vc_table->rdf = 0x0003;
1589 abr_vc_table->air = 0x5eb1;
1596 writew(0xff00, iadev->reass_reg+VP_FILTER);
1597 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1598 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1604 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1607 j += 2 * (j - 1);
1609 writew(i, iadev->reass_reg+TMOUT_RANGE);
1612 for(i=0; i<iadev->num_tx_desc;i++)
1613 iadev->desc_tbl[i].timestamp = 0;
1615 /* to clear the interrupt status register - read it */
1616 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1618 /* Mask Register - clear it */
1619 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1621 skb_queue_head_init(&iadev->rx_dma_q);
1622 iadev->rx_free_desc_qhead = NULL;
1624 iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
1625 if (!iadev->rx_open) {
1627 dev->number);
1631 iadev->rxing = 1;
1632 iadev->rx_pkt_cnt = 0;
1634 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1638 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1639 iadev->rx_dle_dma);
1641 return -ENOMEM;
1649 Buffer descr 0x0000 (128 - 4K)
1650 UBR sched 0x1000 (1K - 4K)
1651 UBR Wait q 0x2000 (1K - 4K)
1653 (128 - 256) each
1654 extended VC 0x4000 (1K - 8K)
1655 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1657 VC table 0x8000 (1K - 32K)
1668 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1672 spin_lock_irqsave(&iadev->tx_lock, flags);
1674 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1675 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1676 if (iadev->close_pending)
1677 wake_up(&iadev->close_wait);
1696 spin_lock_irqsave(&iadev->tx_lock, flags);
1697 dle = iadev->tx_dle_q.read;
1698 dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1699 (sizeof(struct dle)*DLE_ENTRIES - 1);
1700 cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1704 skb = skb_dequeue(&iadev->tx_dma_q);
1708 if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1709 dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
1712 vcc = ATM_SKB(skb)->vcc;
1715 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1723 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1727 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1728 if ((vcc->pop) && (skb->len != 0))
1730 vcc->pop(vcc, skb);
1736 else { /* Hold the rate-limited skb for flow control */
1738 skb_queue_tail(&iavcc->txing_skb, skb);
1741 if (++dle == iadev->tx_dle_q.end)
1742 dle = iadev->tx_dle_q.start;
1744 iadev->tx_dle_q.read = dle;
1745 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1755 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1756 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1757 iadev = INPH_IA_DEV(vcc->dev);
1759 if (iadev->phy_type & FE_25MBIT_PHY) {
1760 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1762 return -EINVAL;
1764 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1766 return -EINVAL;
1771 if (vcc->qos.txtp.max_sdu >
1772 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1774 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1775 vcc->dev_data = NULL;
1777 return -EINVAL;
1779 ia_vcc->vc_desc_cnt = 0;
1780 ia_vcc->txing = 1;
1783 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1784 vcc->qos.txtp.pcr = iadev->LineRate;
1785 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1786 vcc->qos.txtp.pcr = iadev->LineRate;
1787 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1788 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1789 if (vcc->qos.txtp.pcr > iadev->LineRate)
1790 vcc->qos.txtp.pcr = iadev->LineRate;
1791 ia_vcc->pcr = vcc->qos.txtp.pcr;
1793 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1794 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1795 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1796 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1797 if (ia_vcc->pcr < iadev->rate_limit)
1798 skb_queue_head_init (&ia_vcc->txing_skb);
1799 if (ia_vcc->pcr < iadev->rate_limit) {
1802 if (vcc->qos.txtp.max_sdu != 0) {
1803 if (ia_vcc->pcr > 60000)
1804 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1805 else if (ia_vcc->pcr > 2000)
1806 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1808 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1811 sk->sk_sndbuf = 24576;
1814 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1815 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1816 vc += vcc->vci;
1817 evc += vcc->vci;
1826 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1827 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1830 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1832 vc->type = UBR;
1833 vc->status = CRC_APPEND;
1834 vc->acr = cellrate_to_float(iadev->LineRate);
1835 if (vcc->qos.txtp.pcr > 0)
1836 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1838 vcc->qos.txtp.max_pcr,vc->acr);)
1840 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1844 if (vcc->qos.txtp.pcr > 0)
1845 srv_p.pcr = vcc->qos.txtp.pcr;
1846 if (vcc->qos.txtp.min_pcr > 0) {
1847 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1848 if (tmpsum > iadev->LineRate)
1849 return -EBUSY;
1850 srv_p.mcr = vcc->qos.txtp.min_pcr;
1851 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1854 if (vcc->qos.txtp.icr)
1855 srv_p.icr = vcc->qos.txtp.icr;
1856 if (vcc->qos.txtp.tbe)
1857 srv_p.tbe = vcc->qos.txtp.tbe;
1858 if (vcc->qos.txtp.frtt)
1859 srv_p.frtt = vcc->qos.txtp.frtt;
1860 if (vcc->qos.txtp.rif)
1861 srv_p.rif = vcc->qos.txtp.rif;
1862 if (vcc->qos.txtp.rdf)
1863 srv_p.rdf = vcc->qos.txtp.rdf;
1864 if (vcc->qos.txtp.nrm_pres)
1865 srv_p.nrm = vcc->qos.txtp.nrm;
1866 if (vcc->qos.txtp.trm_pres)
1867 srv_p.trm = vcc->qos.txtp.trm;
1868 if (vcc->qos.txtp.adtf_pres)
1869 srv_p.adtf = vcc->qos.txtp.adtf;
1870 if (vcc->qos.txtp.cdf_pres)
1871 srv_p.cdf = vcc->qos.txtp.cdf;
1874 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1877 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1878 if (iadev->phy_type & FE_25MBIT_PHY) {
1880 return -EINVAL;
1882 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1884 return -1;
1886 vc->type = CBR;
1887 vc->status = CRC_APPEND;
1895 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1918 spin_lock_init(&iadev->tx_lock);
1920 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1921 readw(iadev->seg_reg+SEG_MASK_REG));)
1924 dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1925 &iadev->tx_dle_dma, GFP_KERNEL);
1930 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1931 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1932 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1933 iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1936 writel(iadev->tx_dle_dma & 0xfffff000,
1937 iadev->dma + IPHASE5575_TX_LIST_ADDR);
1938 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1939 writew(0, iadev->seg_reg+MODE_REG_0);
1940 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1941 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1942 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1943 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1947 --------------------------------
1948 Buffer descr 0x0000 (128 - 4K)
1950 (512 - 1K) each
1951 TCQ - 4K, PRQ - 5K
1952 CBR Table 0x1800 (as needed) - 6K
1953 UBR Table 0x3000 (1K - 4K) - 12K
1954 UBR Wait queue 0x4000 (1K - 4K) - 16K
1955 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1956 ABR Tbl - 20K, ABR Wq - 22K
1957 extended VC 0x6000 (1K - 8K) - 24K
1958 VC Table 0x8000 (1K - 32K) - 32K
1965 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1968 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1972 for(i=1; i<=iadev->num_tx_desc; i++)
1975 buf_desc_ptr->desc_mode = AAL5;
1976 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1977 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1979 tx_pkt_start += iadev->tx_buf_sz;
1981 iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
1982 sizeof(*iadev->tx_buf),
1984 if (!iadev->tx_buf) {
1988 for (i= 0; i< iadev->num_tx_desc; i++)
1997 iadev->tx_buf[i].cpcs = cpcs;
1998 iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
2003 iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
2004 sizeof(*iadev->desc_tbl),
2006 if (!iadev->desc_tbl) {
2012 i = TX_COMP_Q * iadev->memSize;
2013 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
2016 writew(i, iadev->seg_reg+TCQ_ST_ADR);
2017 writew(i, iadev->seg_reg+TCQ_RD_PTR);
2018 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
2019 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2020 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2021 iadev->seg_reg+TCQ_ED_ADR);
2023 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2024 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2025 for(i=1; i<=iadev->num_tx_desc; i++)
2032 i = PKT_RDY_Q * iadev->memSize;
2033 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2034 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2035 iadev->seg_reg+PRQ_ED_ADR);
2036 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2037 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2040 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2041 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2042 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2044 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2045 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2046 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2050 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2051 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2052 for(i=1; i<=iadev->num_tx_desc; i++)
2060 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2062 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2064 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2068 readw(iadev->seg_reg+CBR_PTR_BASE));)
2069 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2070 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2071 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2072 readw(iadev->seg_reg+CBR_TAB_BEG));)
2073 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2074 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2075 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2076 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2077 iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2079 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2080 readw(iadev->seg_reg+CBR_TAB_END+1));)
2083 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2084 0, iadev->num_vc*6);
2085 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2086 iadev->CbrEntryPt = 0;
2087 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2088 iadev->NumEnabledCBR = 0;
2092 - SCHEDSZ is 1K (# of entries).
2093 - UBR Table size is 4K
2094 - UBR wait queue is 4K
2101 while (i != iadev->num_vc) {
2106 i = MAIN_VC_TABLE * iadev->memSize;
2107 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2108 i = EXT_VC_TABLE * iadev->memSize;
2109 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2110 i = UBR_SCHED_TABLE * iadev->memSize;
2111 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2112 i = UBR_WAIT_Q * iadev->memSize;
2113 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2114 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2115 0, iadev->num_vc*8);
2116 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2118 - SCHEDSZ is 1K (# of entries).
2119 - ABR Table size is 2K
2120 - ABR wait queue is 2K
2124 i = ABR_SCHED_TABLE * iadev->memSize;
2125 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2126 i = ABR_WAIT_Q * iadev->memSize;
2127 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2129 i = ABR_SCHED_TABLE*iadev->memSize;
2130 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2131 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2132 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2133 iadev->testTable = kmalloc_array(iadev->num_vc,
2134 sizeof(*iadev->testTable),
2136 if (!iadev->testTable) {
2140 for(i=0; i<iadev->num_vc; i++)
2144 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2146 if (!iadev->testTable[i])
2148 iadev->testTable[i]->lastTime = 0;
2149 iadev->testTable[i]->fract = 0;
2150 iadev->testTable[i]->vc_status = VC_UBR;
2158 if (iadev->phy_type & FE_25MBIT_PHY) {
2159 writew(RATE25, iadev->seg_reg+MAXRATE);
2160 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2163 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2164 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2167 writew(0, iadev->seg_reg+IDLEHEADHI);
2168 writew(0, iadev->seg_reg+IDLEHEADLO);
2171 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2173 iadev->close_pending = 0;
2174 init_waitqueue_head(&iadev->close_wait);
2175 init_waitqueue_head(&iadev->timeout_wait);
2176 skb_queue_head_init(&iadev->tx_dma_q);
2177 ia_init_rtn_q(&iadev->tx_return_q);
2180 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2181 skb_queue_head_init (&iadev->tx_backlog);
2184 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2187 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2189 /* Interrupt Status Register - read to clear */
2190 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2192 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2193 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2194 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2195 iadev->tx_pkt_cnt = 0;
2196 iadev->rate_limit = iadev->LineRate / 3;
2201 while (--i >= 0)
2202 kfree(iadev->testTable[i]);
2203 kfree(iadev->testTable);
2205 kfree(iadev->desc_tbl);
2207 i = iadev->num_tx_desc;
2209 while (--i >= 0) {
2210 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2212 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2213 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2214 kfree(desc->cpcs);
2216 kfree(iadev->tx_buf);
2218 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2219 iadev->tx_dle_dma);
2221 return -ENOMEM;
2233 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2240 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2246 writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2257 writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2271 /*----------------------------- entries --------------------------------*/
2281 iadev->reg+IPHASE5575_MAC1)));
2282 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2285 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2288 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2300 error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
2304 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2306 error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
2328 dev->ci_range.vpi_bits = 0;
2329 dev->ci_range.vci_bits = NR_VCI_LD;
2332 real_base = pci_resource_start (iadev->pci, 0);
2333 iadev->irq = iadev->pci->irq;
2335 error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2338 dev->number,error);
2339 return -EINVAL;
2342 dev->number, iadev->pci->revision, real_base, iadev->irq);)
2346 iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2348 if (iadev->pci_map_size == 0x100000){
2349 iadev->num_vc = 4096;
2350 dev->ci_range.vci_bits = NR_VCI_4K_LD;
2351 iadev->memSize = 4;
2353 else if (iadev->pci_map_size == 0x40000) {
2354 iadev->num_vc = 1024;
2355 iadev->memSize = 1;
2358 printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2359 return -EINVAL;
2361 IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2364 pci_set_master(iadev->pci);
2372 base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
2377 dev->number);
2378 return -ENOMEM;
2381 dev->number, iadev->pci->revision, base, iadev->irq);)
2384 iadev->mem = iadev->pci_map_size /2;
2385 iadev->real_base = real_base;
2386 iadev->base = base;
2389 iadev->reg = base + REG_BASE;
2391 iadev->seg_reg = base + SEG_BASE;
2393 iadev->reass_reg = base + REASS_BASE;
2395 iadev->phy = base + PHY_BASE;
2396 iadev->dma = base + PHY_BASE;
2397 /* RAM - Segmentation RAm and Reassembly RAM */
2398 iadev->ram = base + ACTUAL_RAM_BASE;
2399 iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
2400 iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
2404 iadev->reg,iadev->seg_reg,iadev->reass_reg,
2405 iadev->phy, iadev->ram, iadev->seg_ram,
2406 iadev->reass_ram);)
2411 iounmap(iadev->base);
2416 printk("%s%02X",i ? "-" : "",dev->esi[i]);
2421 iounmap(iadev->base);
2429 if (!iadev->carrier_detect)
2431 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2432 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2433 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2434 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2435 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2436 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2447 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2451 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2457 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2458 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2459 if (ia_dev[i]->close_pending)
2460 wake_up(&ia_dev[i]->close_wait);
2462 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2473 writel(value, INPH_IA_DEV(dev)->phy+addr);
2478 return readl(INPH_IA_DEV(dev)->phy+addr);
2485 kfree(iadev->desc_tbl);
2486 for (i = 0; i < iadev->num_vc; i++)
2487 kfree(iadev->testTable[i]);
2488 kfree(iadev->testTable);
2489 for (i = 0; i < iadev->num_tx_desc; i++) {
2490 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2492 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2493 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2494 kfree(desc->cpcs);
2496 kfree(iadev->tx_buf);
2497 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2498 iadev->tx_dle_dma);
2503 kfree(iadev->rx_open);
2504 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2505 iadev->rx_dle_dma);
2516 if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2518 dev->number, iadev->irq);
2519 error = -EAGAIN;
2524 if ((error = pci_write_config_word(iadev->pci,
2529 "master (0x%x)\n",dev->number, error);
2530 error = -EIO;
2538 IF_INIT(printk("Bus ctrl reg: %08x\n",
2539 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2540 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2556 writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2558 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2559 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2560 printk("Bus status reg after init: %08x\n",
2561 readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2571 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2572 writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2573 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2574 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2582 if (iadev->phy_type & FE_25MBIT_PHY)
2584 else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2590 if (dev->phy->start) {
2591 error = dev->phy->start(dev);
2595 /* Get iadev->carrier_detect status */
2605 free_irq(iadev->irq, dev);
2620 iadev = INPH_IA_DEV(vcc->dev);
2624 IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2625 ia_vcc->vc_desc_cnt,vcc->vci);)
2626 clear_bit(ATM_VF_READY,&vcc->flags);
2629 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2630 iadev->close_pending++;
2631 prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2633 finish_wait(&iadev->timeout_wait, &wait);
2634 spin_lock_irqsave(&iadev->tx_lock, flags);
2635 while((skb = skb_dequeue(&iadev->tx_backlog))) {
2636 if (ATM_SKB(skb)->vcc == vcc){
2637 if (vcc->pop) vcc->pop(vcc, skb);
2644 skb_queue_tail(&iadev->tx_backlog, skb);
2645 IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2646 closetime = 300000 / ia_vcc->pcr;
2649 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2650 wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2651 spin_lock_irqsave(&iadev->tx_lock, flags);
2652 iadev->close_pending--;
2653 iadev->testTable[vcc->vci]->lastTime = 0;
2654 iadev->testTable[vcc->vci]->fract = 0;
2655 iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2656 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2657 if (vcc->qos.txtp.min_pcr > 0)
2658 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2660 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2662 iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2665 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2668 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2670 vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2671 vc_table += vcc->vci;
2674 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2675 vc_table += vcc->vci;
2676 *vc_table = (vcc->vci << 6) | 15;
2677 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2679 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2680 abr_vc_table += vcc->vci;
2681 abr_vc_table->rdf = 0x0003;
2682 abr_vc_table->air = 0x5eb1;
2685 rx_dle_intr(vcc->dev);
2686 iadev->rx_open[vcc->vci] = NULL;
2690 vcc->dev_data = NULL;
2691 clear_bit(ATM_VF_ADDR,&vcc->flags);
2699 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2702 vcc->dev_data = NULL;
2704 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2707 set_bit(ATM_VF_ADDR,&vcc->flags);
2709 if (vcc->qos.aal != ATM_AAL5)
2710 return -EINVAL;
2712 vcc->dev->number, vcc->vpi, vcc->vci);)
2716 if (!ia_vcc) return -ENOMEM;
2717 vcc->dev_data = ia_vcc;
2733 set_bit(ATM_VF_READY,&vcc->flags);
2763 if (!dev->phy->ioctl) return -EINVAL;
2764 return dev->phy->ioctl(dev,cmd,arg);
2766 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2779 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2782 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2787 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2790 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2800 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2802 if (!regs_local) return -ENOMEM;
2803 ffL = &regs_local->ffredn;
2804 rfL = &regs_local->rfredn;
2807 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2810 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2814 return -EFAULT;
2823 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2831 printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2832 printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2838 stats = &PRIV(_ia_dev[board])->sonet_stats;
2839 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2840 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2841 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2842 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2843 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2844 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2845 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2846 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2847 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2852 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2853 for (i = 1; i <= iadev->num_rx_desc; i++)
2856 iadev->reass_reg+REASS_MASK_REG);
2857 iadev->rxing = 1;
2863 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2867 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2897 iadev = INPH_IA_DEV(vcc->dev);
2899 if (!iavcc->txing) {
2901 if (vcc->pop)
2902 vcc->pop(vcc, skb);
2908 if (skb->len > iadev->tx_buf_sz - 8) {
2910 if (vcc->pop)
2911 vcc->pop(vcc, skb);
2916 if ((unsigned long)skb->data & 3) {
2918 if (vcc->pop)
2919 vcc->pop(vcc, skb);
2935 if ((desc == 0) || (desc > iadev->num_tx_desc))
2938 atomic_inc(&vcc->stats->tx);
2939 if (vcc->pop)
2940 vcc->pop(vcc, skb);
2953 iavcc->vc_desc_cnt++;
2954 iadev->desc_tbl[desc-1].iavcc = iavcc;
2955 iadev->desc_tbl[desc-1].txskb = skb;
2958 iadev->ffL.tcq_rd += 2;
2959 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2960 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2961 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2966 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2968 iadev->ffL.prq_wr += 2;
2969 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2970 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2974 total_len = skb->len + sizeof(struct cpcs_trailer);
2976 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2979 trailer = iadev->tx_buf[desc-1].cpcs;
2980 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2981 skb, skb->data, skb->len, desc);)
2982 trailer->control = 0;
2984 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2985 trailer->crc32 = 0; /* not needed - dummy bytes */
2989 skb->len, tcnter++);
2990 xdump(skb->data, skb->len, "TX: ");
2994 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2996 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2997 /* Huh? p. 115 of the user's guide describes this as a read-only register */
2998 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2999 buf_desc_ptr->vc_index = vcc->vci;
3000 buf_desc_ptr->bytes = total_len;
3002 if (vcc->qos.txtp.traffic_class == ATM_ABR)
3006 wr_ptr = iadev->tx_dle_q.write;
3008 wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3009 skb->len, DMA_TO_DEVICE);
3010 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3011 buf_desc_ptr->buf_start_lo;
3012 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to have any effect?? */
3013 wr_ptr->bytes = skb->len;
3015 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3016 if ((wr_ptr->bytes >> 2) == 0xb)
3017 wr_ptr->bytes = 0x30;
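
The test above catches every DLE byte count whose value shifted right by two
equals 0xb, i.e. 0x2c through 0x2f, so the three lockup lengths 0x2d-0x2f
(and, harmlessly, 0x2c) all get rounded up to 0x30. A throwaway check of that
range, purely illustrative:

#include <stdio.h>

int main(void)
{
    unsigned int b;

    /* byte counts caught by (bytes >> 2) == 0xb: 0x2c ... 0x2f */
    for (b = 0x28; b <= 0x33; b++)
        if ((b >> 2) == 0xb)
            printf("0x%02x would be padded to 0x30\n", b);
    return 0;
}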
3019 wr_ptr->mode = TX_DLE_PSI;
3020 wr_ptr->prq_wr_ptr_data = 0;
3023 if (++wr_ptr == iadev->tx_dle_q.end)
3024 wr_ptr = iadev->tx_dle_q.start;
3027 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3028 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3029 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3031 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3032 wr_ptr->mode = DMA_INT_ENABLE;
3033 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3036 if (++wr_ptr == iadev->tx_dle_q.end)
3037 wr_ptr = iadev->tx_dle_q.start;
3039 iadev->tx_dle_q.write = wr_ptr;
3040 ATM_DESC(skb) = vcc->vci;
3041 skb_queue_tail(&iadev->tx_dma_q, skb);
3043 atomic_inc(&vcc->stats->tx);
3044 iadev->tx_pkt_cnt++;
3046 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3050 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3051 if (iavcc->vc_desc_cnt > 10) {
3052 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3053 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3054 iavcc->flow_inc = -1;
3055 iavcc->saved_tx_quota = vcc->tx_quota;
3056 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3057 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3058 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3059 iavcc->flow_inc = 0;
3072 iadev = INPH_IA_DEV(vcc->dev);
3073 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3078 return -EINVAL;
3080 spin_lock_irqsave(&iadev->tx_lock, flags);
3081 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3083 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3084 return -EINVAL;
3086 ATM_SKB(skb)->vcc = vcc;
3088 if (skb_peek(&iadev->tx_backlog)) {
3089 skb_queue_tail(&iadev->tx_backlog, skb);
3093 skb_queue_tail(&iadev->tx_backlog, skb);
3096 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3106 if(!left--) {
3107 if (iadev->phy_type == FE_25MBIT_PHY) {
3108 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3111 if (iadev->phy_type == FE_DS3_PHY)
3112 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3113 else if (iadev->phy_type == FE_E3_PHY)
3114 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3115 else if (iadev->phy_type == FE_UTP_OPTION)
3116 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3118 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3120 if (iadev->pci_map_size == 0x40000)
3121 n += sprintf(tmpPtr, "-1KVC-");
3123 n += sprintf(tmpPtr, "-4KVC-");
3125 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3127 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3144 iadev->num_tx_desc, iadev->tx_buf_sz,
3145 iadev->num_rx_desc, iadev->rx_buf_sz,
3146 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3147 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3148 iadev->drop_rxcell, iadev->drop_rxpkt);
3173 ret = -ENOMEM;
3177 iadev->pci = pdev;
3180 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3182 ret = -ENODEV;
3185 dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3187 ret = -ENOMEM;
3190 dev->dev_data = iadev;
3191 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3192 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3193 iadev->LineRate);)
3202 iadev_count--;
3205 ret = -EINVAL;
3210 iadev->next_board = ia_boards;
3235 if (dev->phy && dev->phy->stop)
3236 dev->phy->stop(dev);
3238 /* De-register device */
3239 free_irq(iadev->irq, dev);
3240 iadev_count--;
3243 IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3246 iounmap(iadev->base);