Lines Matching defs:desc

65 #define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
201 struct desc *desc_tab; /* coherent */
222 struct desc {
260 (n) * sizeof(struct desc))
264 ((n) + RX_DESCS) * sizeof(struct desc))
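
The matches at 65, 201, 260 and 264 point at a single DMA-coherent table holding RX_DESCS receive descriptors followed by TX_DESCS transmit descriptors (hence POOL_ALLOC_SIZE). Below is a minimal sketch of the addressing helpers those offset expressions appear to belong to; the names rx_desc_phys/rx_desc_ptr/tx_desc_phys/tx_desc_ptr show up in later matches, but the desc_tab_phys field is an assumption.

    /* Sketch only: reconstructed from the offset arithmetic at 260/264.
     * desc_tab_phys (the bus address of the table) is an assumption;
     * desc_tab itself is the coherent pointer from line 201. */
    #define rx_desc_phys(port, n)   ((port)->desc_tab_phys + \
                                     (n) * sizeof(struct desc))
    #define rx_desc_ptr(port, n)    (&(port)->desc_tab[n])

    #define tx_desc_phys(port, n)   ((port)->desc_tab_phys + \
                                     ((n) + RX_DESCS) * sizeof(struct desc))
    #define tx_desc_ptr(port, n)    (&(port)->desc_tab[(n) + RX_DESCS])

Because the table is one contiguous block, a physical address handed back by the hardware converts to an index by subtracting the table base and dividing by sizeof(struct desc), which is exactly what lines 665 and 832 do.
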
638 static inline void debug_desc(u32 phys, struct desc *desc)
643 phys, desc->next, desc->buf_len, desc->pkt_len,
644 desc->data, desc->dest_id, desc->src_id, desc->flags,
645 desc->qos, desc->padlen, desc->vlan_tci,
646 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
647 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
648 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
649 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
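
debug_desc() (638-649) prints every field of the descriptor, which outlines the layout even though the definition at line 222 is not expanded in the matches. A hedged sketch of what that definition plausibly contains; the field names come from the print call, while the widths, ordering and any endian-specific variants are assumptions.

    /* Sketch only: field names taken from the debug_desc() print; the
     * widths, ordering and any endian-dependent variants of the real
     * definition at line 222 are assumptions. */
    struct desc {
            u32 next;       /* link/next pointer used by the hardware */
            u16 buf_len;    /* size of the attached data buffer */
            u16 pkt_len;    /* actual packet length in that buffer */
            u32 data;       /* bus (DMA) address of the data buffer */
            u8  dest_id, src_id;
            u16 flags;
            u8  qos, padlen;
            u16 vlan_tci;
            u8  dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3, dst_mac_4, dst_mac_5;
            u8  src_mac_0, src_mac_1, src_mac_2, src_mac_3, src_mac_4, src_mac_5;
    };
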
657 struct desc *tab;
665 n_desc = (phys - tab_phys) / sizeof(struct desc);
673 struct desc *desc)
675 debug_desc(phys, desc);
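
Lines 657-675 belong to the helpers that move descriptors through the hardware queues by physical address: the "get" side converts a popped address back into a table index (665), and queue_put_desc() logs the descriptor before pushing its address (673-675). A minimal sketch, assuming the get helper is named queue_get_desc() and that qmgr_get_entry()/qmgr_put_entry() are the underlying queue-manager primitives (neither name appears in the matches).

    /* Sketch only: queue_get_desc(), qmgr_get_entry() and qmgr_put_entry()
     * are assumed names; the index arithmetic mirrors line 665 and the
     * debug call mirrors line 675. */
    static inline int queue_get_desc(unsigned int queue, struct port *port,
                                     int is_tx)
    {
            u32 phys, tab_phys, n_desc;
            struct desc *tab;

            if (!(phys = qmgr_get_entry(queue)))
                    return -1;      /* queue empty */

            tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
            tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
            n_desc = (phys - tab_phys) / sizeof(struct desc);
            debug_desc(phys, &tab[n_desc]);
            return n_desc;
    }

    static inline void queue_put_desc(unsigned int queue, u32 phys,
                                      struct desc *desc)
    {
            debug_desc(phys, desc);
            qmgr_put_entry(queue, phys);  /* hand the address to the hardware queue */
    }
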
683 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
686 dma_unmap_single(&port->netdev->dev, desc->data,
687 desc->buf_len, DMA_TO_DEVICE);
689 dma_unmap_single(&port->netdev->dev, desc->data & ~3,
690 ALIGN((desc->data & 3) + desc->buf_len, 4),
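
dma_unmap_tx() (683-690) shows two unmap variants: one releases desc->data as-is (686-687), the other treats the low two bits of desc->data as a byte offset into a word-aligned mapping and unmaps the padded region (689-690). That only works because the transmit path stores phys + offset in desc->data with phys 4-byte aligned (line 918). A sketch of the offset-carrying variant only, with the arithmetic taken from lines 689-690.

    /* Sketch only: the offset-carrying variant.  phys is the word-aligned
     * address that was actually mapped, off the 0-3 byte misalignment that
     * was folded into desc->data, so the unmapped length is padded back
     * out to a multiple of 4 (lines 689-690). */
    static inline void dma_unmap_tx(struct port *port, struct desc *desc)
    {
            dma_addr_t phys = desc->data & ~3;
            unsigned int off = desc->data & 3;

            dma_unmap_single(&port->netdev->dev, phys,
                             ALIGN(off + desc->buf_len, 4), DMA_TO_DEVICE);
    }
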
721 struct desc *desc;
748 desc = rx_desc_ptr(port, n);
761 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
766 /* put the desc back on RX-ready queue */
767 desc->buf_len = MAX_MRU;
768 desc->pkt_len = 0;
769 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
777 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
780 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
783 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
786 skb_put(skb, desc->pkt_len);
799 desc->data = phys + NET_IP_ALIGN;
801 desc->buf_len = MAX_MRU;
802 desc->pkt_len = 0;
803 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
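
The receive fragments (721-803) show the two fates of an RX descriptor: on a drop it is recycled unchanged onto the RX-free queue (766-769), otherwise the old buffer is unmapped or synced, the packet is handed to the stack with skb_put(desc->pkt_len), and the descriptor is rearmed with a fresh buffer (799-803). A minimal sketch of the rearm step; the function wrapper rx_rearm() and the freshly mapped phys address are assumptions.

    /* Sketch only: rearm RX descriptor n with a freshly mapped buffer and
     * return it to the RX-free queue, mirroring lines 799-803.  The new
     * mapping (phys) is assumed to have been created by the caller. */
    static void rx_rearm(struct port *port, unsigned int rxfreeq, int n,
                         dma_addr_t phys)
    {
            struct desc *desc = rx_desc_ptr(port, n);

            desc->data = phys + NET_IP_ALIGN;  /* DMA starts after the alignment pad */
            desc->buf_len = MAX_MRU;           /* whole buffer available again */
            desc->pkt_len = 0;                 /* nothing received into it yet */
            queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
    }
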
824 struct desc *desc;
832 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
834 desc = tx_desc_ptr(port, n_desc);
835 debug_desc(phys, desc);
839 port->netdev->stats.tx_bytes += desc->pkt_len;
841 dma_unmap_tx(port, desc);
851 queue_put_desc(port->plat->txreadyq, phys, desc);
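
Lines 824-851 are transmit completion: a physical address popped from the TX-done queue is converted back into an index (832), statistics are updated, the DMA mapping is released and the descriptor returns to the per-port TX-ready queue. A condensed sketch of that loop; txdoneq, qmgr_get_entry(), the tx_packets counter and the tx_buff_tab[] skb table are assumptions.

    /* Sketch only: one pass of TX completion.  The index arithmetic and
     * the hand-back to txreadyq follow lines 832-851. */
    u32 phys;

    while ((phys = qmgr_get_entry(txdoneq)) != 0) {
            int n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
            struct desc *desc = tx_desc_ptr(port, n_desc);

            debug_desc(phys, desc);

            port->netdev->stats.tx_packets++;
            port->netdev->stats.tx_bytes += desc->pkt_len;

            dma_unmap_tx(port, desc);                         /* release the TX mapping */
            dev_kfree_skb_irq(port->tx_buff_tab[n_desc]);     /* assumed skb table */
            queue_put_desc(port->plat->txreadyq, phys, desc); /* descriptor free again */
    }
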
869 struct desc *desc;
911 desc = tx_desc_ptr(port, n);
918 desc->data = phys + offset;
919 desc->buf_len = desc->pkt_len = len;
923 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
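
The transmit fragments (869-923) fill a descriptor taken from the TX-ready queue and push it onto this port's TX queue: desc->data carries the mapped address plus any sub-word offset (later recovered by dma_unmap_tx), and buf_len and pkt_len are both set to the frame length. A sketch of the fill, assuming mem, bytes, offset, len and the free index n have already been prepared by the surrounding xmit code.

    /* Sketch only: fill and queue one TX descriptor as in lines 911-923.
     * offset is the 0-3 byte misalignment later recovered by dma_unmap_tx(). */
    phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
    /* dma_mapping_error() check elided */

    desc = tx_desc_ptr(port, n);
    desc->data = phys + offset;             /* aligned mapping + sub-word offset */
    desc->buf_len = desc->pkt_len = len;    /* frame length for the hardware */
    wmb();                                  /* descriptor visible before queueing */
    queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
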
1127 struct desc *desc = rx_desc_ptr(port, i);
1139 desc->buf_len = MAX_MRU;
1140 desc->data = dma_map_single(&port->netdev->dev, data,
1142 if (dma_mapping_error(&port->netdev->dev, desc->data)) {
1146 desc->data += NET_IP_ALIGN;
1159 struct desc *desc = rx_desc_ptr(port, i);
1163 desc->data - NET_IP_ALIGN,
1169 struct desc *desc = tx_desc_ptr(port, i);
1172 dma_unmap_tx(port, desc);
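
Lines 1127-1172 set up and tear down the descriptor table: each RX descriptor gets a MAX_MRU-capable buffer mapped for DMA (with the mapping checked) and desc->data advanced by NET_IP_ALIGN; teardown unmaps RX buffers from desc->data - NET_IP_ALIGN and runs TX descriptors through dma_unmap_tx(). A condensed sketch of the RX init loop; buffer_alloc(), free_buffer() and RX_BUFF_SIZE are assumptions.

    /* Sketch only: RX descriptor initialisation as in lines 1127-1146. */
    for (i = 0; i < RX_DESCS; i++) {
            struct desc *desc = rx_desc_ptr(port, i);
            void *data = buffer_alloc(RX_BUFF_SIZE);   /* assumed allocator */

            if (!data)
                    return -ENOMEM;

            desc->buf_len = MAX_MRU;
            desc->data = dma_map_single(&port->netdev->dev, data,
                                        RX_BUFF_SIZE, DMA_FROM_DEVICE);
            if (dma_mapping_error(&port->netdev->dev, desc->data)) {
                    free_buffer(data);                 /* assumed */
                    return -EIO;
            }
            desc->data += NET_IP_ALIGN; /* leave room so the IP header lands aligned */
    }
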
1371 struct desc *desc;
1375 desc = tx_desc_ptr(port, n);
1377 desc->buf_len = desc->pkt_len = 1;
1379 queue_put_desc(TX_QUEUE(port->id), phys, desc);
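
The final group (1371-1379) queues a descriptor with buf_len and pkt_len forced to 1. In context this reads like a shutdown/drain path injecting a minimal one-byte frame into the TX queue so the hardware keeps cycling while remaining buffers are reclaimed, though that purpose is an inference, not something the matches state. A minimal sketch of the injection, assuming a free index n has been obtained from the TX-ready queue.

    /* Sketch only: inject a dummy one-byte descriptor, as in 1375-1379. */
    desc = tx_desc_ptr(port, n);
    phys = tx_desc_phys(port, n);
    desc->buf_len = desc->pkt_len = 1;  /* minimal frame, contents irrelevant */
    wmb();
    queue_put_desc(TX_QUEUE(port->id), phys, desc);
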