Lines Matching +full:6 +full:bd
80 [UCC_GETH_NUM_OF_THREADS_6] = 6, in ucc_geth_thread_count()
226 u8 __iomem *bd) in get_new_skb() argument
244 out_be32(&((struct qe_bd __iomem *)bd)->buf, in get_new_skb()
251 out_be32((u32 __iomem *)bd, in get_new_skb()
252 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); in get_new_skb()
259 u8 __iomem *bd; in rx_bd_buffer_set() local
264 bd = ugeth->p_rx_bd_ring[rxQ]; in rx_bd_buffer_set()
268 bd_status = in_be32((u32 __iomem *)bd); in rx_bd_buffer_set()
269 skb = get_new_skb(ugeth, bd); in rx_bd_buffer_set()
277 /* advance the BD pointer */ in rx_bd_buffer_set()
278 bd += sizeof(struct qe_bd); in rx_bd_buffer_set()
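The get_new_skb() and rx_bd_buffer_set() hits above all show the same re-arm pattern: the status and length fields at the front of struct qe_bd are updated together through a (u32 __iomem *) cast, the new buffer's DMA address is written into the buf field, and the status word is set to Empty plus Interrupt while preserving whatever Wrap bit was already there. A minimal host-side sketch of that step, assuming illustrative bit values and a hypothetical rearm_rx_bd() helper (the driver's R_E/R_I/R_W macros and accessors are the authority):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Host-side stand-in for a QE buffer descriptor: the status/length half is
     * modelled as one 32-bit word, matching the (u32 __iomem *)bd casts above. */
    struct model_bd {
            uint32_t status_len;  /* what the driver reads/writes as a single word */
            uint32_t buf;         /* buffer (DMA) address */
    };

    /* Illustrative bit values only; not taken from ucc_geth.h. */
    #define MODEL_R_E 0x80000000u  /* Empty: owned by hardware, ready to receive */
    #define MODEL_R_W 0x20000000u  /* Wrap: last descriptor in the ring */
    #define MODEL_R_I 0x10000000u  /* Interrupt when this BD completes */

    /* Attach a fresh buffer and hand the BD back to hardware, keeping only the
     * Wrap bit from the previous status word, as get_new_skb() does. */
    static void rearm_rx_bd(struct model_bd *bd, uint32_t dma_addr)
    {
            bd->buf = dma_addr;
            bd->status_len = MODEL_R_E | MODEL_R_I | (bd->status_len & MODEL_R_W);
    }

    int main(void)
    {
            struct model_bd last = { .status_len = MODEL_R_W, .buf = 0 };

            rearm_rx_bd(&last, 0x1000);
            printf("status_len=0x%08" PRIx32 " buf=0x%08" PRIx32 "\n",
                   last.status_len, last.buf);
            return 0;
    }

Keeping the Wrap bit across the re-arm is what lets the last descriptor in the ring stay the last one even as its buffer is replaced.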
764 pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n", in dump_regs()
765 (u32)&ugeth->p_tx_glbl_pram->iphoffset[6], in dump_regs()
766 ugeth->p_tx_glbl_pram->iphoffset[6]); in dump_regs()
788 pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n", in dump_regs()
789 (u32)&ugeth->p_tx_glbl_pram->vtagtable[6], in dump_regs()
790 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); in dump_regs()
864 pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n", in dump_regs()
865 (u32)&ugeth->p_rx_glbl_pram->l3qt[6], in dump_regs()
866 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); in dump_regs()
940 pr_info("RX BD QS tables:\n"); in dump_regs()
943 pr_info("RX BD QS table[%d]:\n", i); in dump_regs()
1812 u8 __iomem *bd; in ucc_geth_free_rx() local
1821 bd = ugeth->p_rx_bd_ring[i]; in ucc_geth_free_rx()
1825 in_be32(&((struct qe_bd __iomem *)bd)->buf), in ucc_geth_free_rx()
1834 bd += sizeof(struct qe_bd); in ucc_geth_free_rx()
1851 u8 __iomem *bd; in ucc_geth_free_tx() local
1859 bd = ugeth->p_tx_bd_ring[i]; in ucc_geth_free_tx()
1860 if (!bd) in ucc_geth_free_tx()
1865 in_be32(&((struct qe_bd __iomem *)bd)->buf), in ucc_geth_free_tx()
1866 (in_be32((u32 __iomem *)bd) & in ucc_geth_free_tx()
2033 /* Rx BD lengths */ in ucc_struct_init()
2039 pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n"); in ucc_struct_init()
2044 /* Tx BD lengths */ in ucc_struct_init()
2048 pr_err("Tx BD ring length must be no smaller than 2\n"); in ucc_struct_init()
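The ucc_struct_init() hits record the ring-size constraints the driver reports on: the Rx BD ring length must be a multiple of 4 and no smaller than 8, and the Tx BD ring length no smaller than 2. A tiny standalone check encoding just those two quoted rules (function name is illustrative; only the matching lines are shown here, so any further checks in the driver are omitted):

    #include <stdbool.h>

    static bool bd_ring_lengths_ok(unsigned int rx_len, unsigned int tx_len)
    {
            if (rx_len < 8 || (rx_len % 4) != 0)
                    return false;  /* "multiple of 4, no smaller than 8" */
            if (tx_len < 2)
                    return false;  /* "no smaller than 2" */
            return true;
    }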
2145 u8 __iomem *bd; in ucc_geth_alloc_tx() local
2164 pr_err("Can not allocate memory for Tx bd rings\n"); in ucc_geth_alloc_tx()
2167 /* Zero unused end of bd ring, according to spec */ in ucc_geth_alloc_tx()
2185 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; in ucc_geth_alloc_tx()
2187 /* clear bd buffer */ in ucc_geth_alloc_tx()
2188 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); in ucc_geth_alloc_tx()
2189 /* set bd status and length */ in ucc_geth_alloc_tx()
2190 out_be32((u32 __iomem *)bd, 0); in ucc_geth_alloc_tx()
2191 bd += sizeof(struct qe_bd); in ucc_geth_alloc_tx()
2193 bd -= sizeof(struct qe_bd); in ucc_geth_alloc_tx()
2194 /* set bd status and length */ in ucc_geth_alloc_tx()
2195 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ in ucc_geth_alloc_tx()
2207 u8 __iomem *bd; in ucc_geth_alloc_rx() local
2224 pr_err("Can not allocate memory for Rx bd rings\n"); in ucc_geth_alloc_rx()
2243 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; in ucc_geth_alloc_rx()
2245 /* set bd status and length */ in ucc_geth_alloc_rx()
2246 out_be32((u32 __iomem *)bd, R_I); in ucc_geth_alloc_rx()
2247 /* clear bd buffer */ in ucc_geth_alloc_rx()
2248 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); in ucc_geth_alloc_rx()
2249 bd += sizeof(struct qe_bd); in ucc_geth_alloc_rx()
2251 bd -= sizeof(struct qe_bd); in ucc_geth_alloc_rx()
2252 /* set bd status and length */ in ucc_geth_alloc_rx()
2253 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ in ucc_geth_alloc_rx()
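Both allocation paths, ucc_geth_alloc_tx() and ucc_geth_alloc_rx(), walk the freshly allocated ring once, zeroing each descriptor's buffer pointer and status/length word, then step back one descriptor and write only the Wrap bit (T_W or R_W) into the last BD so the hardware loops back to the ring base. A host-side sketch of that walk, under the same kind of stand-in model as above (struct, bit value and helper name are illustrative assumptions):

    #include <stdint.h>

    struct model_bd {
            uint32_t status_len;  /* status + length, handled as one word */
            uint32_t buf;         /* buffer address */
    };

    #define MODEL_WRAP 0x20000000u  /* stand-in for the driver's T_W / R_W bit */

    /* Clear a freshly allocated ring and mark its last descriptor as the wrap point. */
    static void init_bd_ring(struct model_bd *ring, unsigned int len)
    {
            for (unsigned int i = 0; i < len; i++) {
                    ring[i].buf = 0;         /* no buffer attached yet */
                    ring[i].status_len = 0;  /* not owned by hardware */
            }
            ring[len - 1].status_len = MODEL_WRAP;  /* last BD: wrap back to ring[0] */
    }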
2448 /* Assume BD rings are already established */ in ucc_geth_startup()
2500 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); in ucc_geth_startup()
2674 /* Assume BD rings are already established */ in ucc_geth_startup()
2920 u8 __iomem *bd; /* BD pointer */ in ucc_geth_start_xmit() local
2932 /* Start from the next BD that should be filled */ in ucc_geth_start_xmit()
2933 bd = ugeth->txBd[txQ]; in ucc_geth_start_xmit()
2934 bd_status = in_be32((u32 __iomem *)bd); in ucc_geth_start_xmit()
2944 out_be32(&((struct qe_bd __iomem *)bd)->buf, in ucc_geth_start_xmit()
2952 /* set bd status and length */ in ucc_geth_start_xmit()
2953 out_be32((u32 __iomem *)bd, bd_status); in ucc_geth_start_xmit()
2955 /* Move to next BD in the ring */ in ucc_geth_start_xmit()
2957 bd += sizeof(struct qe_bd); in ucc_geth_start_xmit()
2959 bd = ugeth->p_tx_bd_ring[txQ]; in ucc_geth_start_xmit()
2961 /* If the next BD still needs to be cleaned up, then the bds in ucc_geth_start_xmit()
2963 if (bd == ugeth->confBd[txQ]) { in ucc_geth_start_xmit()
2968 ugeth->txBd[txQ] = bd; in ucc_geth_start_xmit()
2976 /* This is done by writing a running counter of the bd in ucc_geth_start_xmit()
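The ucc_geth_start_xmit() hits show the transmit-side ring discipline: the buffer address is written into the current BD, the status word is written back to hand the descriptor to hardware, the BD pointer is advanced by sizeof(struct qe_bd) and reset to the ring base when it runs off the end, and the queue is treated as full when the advanced pointer catches up with the confirmation pointer (confBd). A compact host-side model of advance-and-wrap plus the full test; all names here are illustrative, not the driver's:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct model_bd {
            uint32_t status_len;
            uint32_t buf;
    };

    struct model_txq {
            struct model_bd *ring;    /* base of the BD ring */
            size_t len;               /* number of descriptors */
            struct model_bd *cur_tx;  /* next BD to fill (txBd in the hits) */
            struct model_bd *conf;    /* next BD to confirm (confBd in the hits) */
    };

    /* Advance a BD pointer one descriptor, wrapping at the end of the ring. */
    static struct model_bd *bd_advance(const struct model_txq *q, struct model_bd *bd)
    {
            bd++;
            if (bd == q->ring + q->len)
                    bd = q->ring;
            return bd;
    }

    /* Fill the current BD and report whether the ring is now full, i.e. the
     * next free BD is the one still awaiting transmit confirmation. */
    static bool txq_full_after_fill(struct model_txq *q, uint32_t dma_addr,
                                    uint32_t ready_status)
    {
            q->cur_tx->buf = dma_addr;
            q->cur_tx->status_len = ready_status;  /* hand the BD to hardware */
            q->cur_tx = bd_advance(q, q->cur_tx);
            return q->cur_tx == q->conf;           /* caught up with confBd */
    }

Detecting "full" as the fill pointer meeting the confirmation pointer mirrors the `bd == ugeth->confBd[txQ]` comparison in the hits above.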
2993 u8 __iomem *bd; in ucc_geth_rx() local
3004 bd = ugeth->rxBd[rxQ]; in ucc_geth_rx()
3006 bd_status = in_be32((u32 __iomem *)bd); in ucc_geth_rx()
3008 /* while there are received buffers and BD is full (~R_E) */ in ucc_geth_rx()
3010 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); in ucc_geth_rx()
3041 skb = get_new_skb(ugeth, bd); in ucc_geth_rx()
3057 bd = ugeth->p_rx_bd_ring[rxQ]; in ucc_geth_rx()
3059 bd += sizeof(struct qe_bd); in ucc_geth_rx()
3061 bd_status = in_be32((u32 __iomem *)bd); in ucc_geth_rx()
3064 ugeth->rxBd[rxQ] = bd; in ucc_geth_rx()
3070 /* Start from the next BD that should be filled */ in ucc_geth_tx()
3074 u8 __iomem *bd; /* BD pointer */ in ucc_geth_tx() local
3077 bd = ugeth->confBd[txQ]; in ucc_geth_tx()
3078 bd_status = in_be32((u32 __iomem *)bd); in ucc_geth_tx()
3084 /* BD contains already transmitted buffer. */ in ucc_geth_tx()
3086 /* the BD to be used with the current frame */ in ucc_geth_tx()
3106 /* Advance the confirmation BD pointer */ in ucc_geth_tx()
3108 bd += sizeof(struct qe_bd); in ucc_geth_tx()
3110 bd = ugeth->p_tx_bd_ring[txQ]; in ucc_geth_tx()
3111 bd_status = in_be32((u32 __iomem *)bd); in ucc_geth_tx()
3113 ugeth->confBd[txQ] = bd; in ucc_geth_tx()
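The ucc_geth_rx() and ucc_geth_tx() hits complete the picture: both sides resume from a saved ring position (rxBd or confBd), re-read each BD's status word, stop as soon as a descriptor is still owned by hardware, and otherwise process its buffer, advance with the same wrap rule, and store the new position back. A host-side sketch of that polling loop, with an assumed own_bit parameter standing in for the driver's ownership flag and a full-lap guard added for the standalone model:

    #include <stddef.h>
    #include <stdint.h>

    struct model_bd {
            uint32_t status_len;
            uint32_t buf;
    };

    /* Process completed descriptors starting at *pos until one is still owned
     * by hardware (own_bit set); update *pos and return how many were handled. */
    static unsigned int bd_reap(struct model_bd *ring, size_t len,
                                struct model_bd **pos, uint32_t own_bit,
                                void (*handle)(struct model_bd *bd))
    {
            struct model_bd *bd = *pos;
            unsigned int done = 0;

            while (!(bd->status_len & own_bit)) {
                    handle(bd);       /* e.g. pass the buffer up or free it */
                    bd++;             /* advance, wrapping at the ring end */
                    if (bd == ring + len)
                            bd = ring;
                    done++;
                    if (done == len)  /* full lap: every BD was completed */
                            break;
            }
            *pos = bd;
            return done;
    }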