Lines matching references to the identifier tf (a struct tbnet_frame pointer) in the Thunderbolt networking driver (tbnet), grouped below by enclosing function. Each hit keeps its source line number.

In tbnet_frame_size() (tf is a function argument):
  329  static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
  331          return tf->frame.size ? : TBNET_FRAME_SIZE;
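
The "? :" here is the GNU C two-operand conditional: "x ? : y" behaves like "x ? x : y" with x evaluated only once. A portable equivalent, assuming nothing beyond the two hits above (the helper name below is hypothetical):

  unsigned int tbnet_frame_size_portable(const struct tbnet_frame *tf)
  {
          /* A zero frame.size means "full frame", so fall back to the
           * compile-time maximum TBNET_FRAME_SIZE.
           */
          return tf->frame.size ? tf->frame.size : TBNET_FRAME_SIZE;
  }
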
In tbnet_free_buffers() (tf is a local variable):
  340          struct tbnet_frame *tf = &ring->frames[i];
  345          if (!tf->page)
  358          trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
  360          if (tf->frame.buffer_phy)
  361                  dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
  364          __free_pages(tf->page, order);
  365          tf->page = NULL;
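
Read together, these hits outline the teardown loop: skip slots without a page, unmap only pages that were actually DMA-mapped, then free and clear the slot. A minimal sketch of how they likely fit together; the dir/size/order selection and the tb_ring_dma_device() usage are assumptions inferred from the hits, not verbatim driver code:

  static void tbnet_free_buffers(struct tbnet_ring *ring)
  {
          /* Assumed: the DMA device comes from the underlying NHI ring */
          struct device *dma_dev = tb_ring_dma_device(ring->ring);
          unsigned int i;

          for (i = 0; i < TBNET_RING_SIZE; i++) {
                  struct tbnet_frame *tf = &ring->frames[i];
                  enum dma_data_direction dir;
                  unsigned int order;
                  size_t size;

                  if (!tf->page)
                          continue;  /* never allocated or already consumed */

                  /* Assumed: TX frames are single pages, RX frames are
                   * higher-order allocations (see TBNET_RX_PAGE_ORDER).
                   */
                  if (ring->ring->is_tx) {
                          dir = DMA_TO_DEVICE;
                          order = 0;
                          size = TBNET_FRAME_SIZE;
                  } else {
                          dir = DMA_FROM_DEVICE;
                          order = TBNET_RX_PAGE_ORDER;
                          size = PAGE_SIZE << TBNET_RX_PAGE_ORDER;
                  }

                  trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);

                  /* Only unmap if the mapping succeeded in the first place */
                  if (tf->frame.buffer_phy)
                          dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
                                         dir);

                  __free_pages(tf->page, order);
                  tf->page = NULL;
          }
  }
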
In tbnet_alloc_rx_buffers() (tf is a local variable):
  507          struct tbnet_frame *tf = &ring->frames[index];
  510          if (tf->page)
  517          tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
  518          if (!tf->page) {
  523          dma_addr = dma_map_page(dma_dev, tf->page, 0,
  530          tf->frame.buffer_phy = dma_addr;
  531          tf->dev = net->dev;
  533          trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
  536          tb_ring_rx(ring->ring, &tf->frame);
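
These hits trace the RX refill path: allocate a high-order page, DMA-map it toward the CPU, record the mapping in the frame descriptor, and queue the frame on the hardware RX ring. A sketch under those assumptions; the rx_ring field name, the producer-index bookkeeping, and the error unwinding are inferred, not taken from the listing:

  static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
  {
          struct tbnet_ring *ring = &net->rx_ring;  /* assumed field name */
          struct device *dma_dev = tb_ring_dma_device(ring->ring);
          int ret;

          while (nbuffers--) {
                  unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
                  struct tbnet_frame *tf = &ring->frames[index];
                  dma_addr_t dma_addr;

                  if (tf->page)
                          break;  /* slot still owned by the hardware */

                  tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
                  if (!tf->page) {
                          ret = -ENOMEM;
                          goto err_free;
                  }

                  dma_addr = dma_map_page(dma_dev, tf->page, 0,
                                          PAGE_SIZE << TBNET_RX_PAGE_ORDER,
                                          DMA_FROM_DEVICE);
                  if (dma_mapping_error(dma_dev, dma_addr)) {
                          ret = -ENOMEM;
                          goto err_free;
                  }

                  tf->frame.buffer_phy = dma_addr;
                  tf->dev = net->dev;

                  trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
                                             DMA_FROM_DEVICE);

                  /* Hand the descriptor to the hardware RX ring */
                  tb_ring_rx(ring->ring, &tf->frame);
                  ring->prod++;
          }

          return 0;

  err_free:
          tbnet_free_buffers(ring);
          return ret;
  }
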
In tbnet_get_tx_buffer() (tf is a local variable):
  552          struct tbnet_frame *tf;
  560          tf = &ring->frames[index];
  561          tf->frame.size = 0;
  563          dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
  564                                  tbnet_frame_size(tf), DMA_TO_DEVICE);
  566          return tf;
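
The detail worth noting: frame.size is zeroed before the sync, so tbnet_frame_size(tf) evaluates to TBNET_FRAME_SIZE and the entire buffer is handed back to the CPU before it is filled. A sketch; tbnet_available_buffers() and the consumer-index arithmetic are assumed helpers, not shown in the listing:

  static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
  {
          struct tbnet_ring *ring = &net->tx_ring;  /* assumed field name */
          struct device *dma_dev = tb_ring_dma_device(ring->ring);
          struct tbnet_frame *tf;
          unsigned int index;

          /* Assumed helper: free-slot count from prod/cons distance */
          if (!tbnet_available_buffers(ring))
                  return NULL;

          index = ring->cons++ & (TBNET_RING_SIZE - 1);

          tf = &ring->frames[index];
          tf->frame.size = 0;

          /* size == 0 makes tbnet_frame_size() return TBNET_FRAME_SIZE,
           * so the full buffer is synced back to the CPU for filling.
           */
          dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
                                  tbnet_frame_size(tf), DMA_TO_DEVICE);

          return tf;
  }
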
In tbnet_tx_callback() (tf is a local variable):
  572          struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
  573          struct tbnet *net = netdev_priv(tf->dev);
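
The completion callback receives the generic ring_frame and uses container_of() to recover the enclosing tbnet_frame; tf->dev then leads to the netdev private data. The queue-wake policy below is an assumption about what the completion path plausibly does, not something the two hits confirm:

  static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                                bool canceled)
  {
          struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
          struct tbnet *net = netdev_priv(tf->dev);

          /* Completed frame: return the slot to the producer side */
          net->tx_ring.prod++;

          /* Assumed policy: wake the queue once enough slots are free */
          if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
                  netif_wake_queue(net->dev);
  }
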
In tbnet_alloc_tx_buffers() (tf is a local variable):
  589          struct tbnet_frame *tf = &ring->frames[i];
  592          tf->page = alloc_page(GFP_KERNEL);
  593          if (!tf->page) {
  598          dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
  601                  __free_page(tf->page);
  602                  tf->page = NULL;
  607          tf->dev = net->dev;
  608          tf->frame.buffer_phy = dma_addr;
  609          tf->frame.callback = tbnet_tx_callback;
  610          tf->frame.sof = TBIP_PDF_FRAME_START;
  611          tf->frame.eof = TBIP_PDF_FRAME_END;
  613          trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
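
Unlike the RX path, every TX slot is populated up front: one page per frame, mapped toward the device, with the completion callback and the ThunderboltIP start/end-of-frame PDF values preset in the descriptor. A sketch assembling the hits; the loop structure, error unwinding, and final prod/cons initialization are assumptions:

  static int tbnet_alloc_tx_buffers(struct tbnet *net)
  {
          struct tbnet_ring *ring = &net->tx_ring;  /* assumed field name */
          struct device *dma_dev = tb_ring_dma_device(ring->ring);
          unsigned int i;

          for (i = 0; i < TBNET_RING_SIZE; i++) {
                  struct tbnet_frame *tf = &ring->frames[i];
                  dma_addr_t dma_addr;

                  tf->page = alloc_page(GFP_KERNEL);
                  if (!tf->page) {
                          tbnet_free_buffers(ring);
                          return -ENOMEM;
                  }

                  dma_addr = dma_map_page(dma_dev, tf->page, 0,
                                          TBNET_FRAME_SIZE, DMA_TO_DEVICE);
                  if (dma_mapping_error(dma_dev, dma_addr)) {
                          /* Clear the slot before unwinding so the free
                           * path does not unmap an invalid address.
                           */
                          __free_page(tf->page);
                          tf->page = NULL;
                          tbnet_free_buffers(ring);
                          return -ENOMEM;
                  }

                  tf->dev = net->dev;
                  tf->frame.buffer_phy = dma_addr;
                  tf->frame.callback = tbnet_tx_callback;
                  tf->frame.sof = TBIP_PDF_FRAME_START;
                  tf->frame.eof = TBIP_PDF_FRAME_END;

                  trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr,
                                             DMA_TO_DEVICE);
          }

          /* Assumed: start with all slots available to the producer */
          ring->cons = 0;
          ring->prod = TBNET_RING_SIZE - 1;

          return 0;
  }
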
In tbnet_check_frame() (tf is a function argument):
  731  static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
  737          if (tf->frame.flags & RING_DESC_CRC_ERROR) {
  740          } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
  746          size = tbnet_frame_size(tf);
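
Validation rejects frames whose ring descriptor carries hardware error flags before any header field is trusted. A sketch; the stats counter names and everything after the size read at line 746 are assumptions:

  static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
                                const struct thunderbolt_ip_frame_header *hdr)
  {
          unsigned int size;

          if (tf->frame.flags & RING_DESC_CRC_ERROR) {
                  net->stats.rx_crc_errors++;   /* assumed counter name */
                  return false;
          } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
                  net->stats.rx_over_errors++;  /* assumed counter name */
                  return false;
          }

          /* Only now is the descriptor trusted enough to compare the
           * received size against the ThunderboltIP header fields.
           */
          size = tbnet_frame_size(tf);
          /* ... assumed: frame_size/frame_count/frame_index checks ... */

          return true;
  }
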
In tbnet_poll() (tf is a local variable):
  814          struct tbnet_frame *tf;
  835          tf = container_of(frame, typeof(*tf), frame);
  837          page = tf->page;
  838          tf->page = NULL;
  843          if (!tbnet_check_frame(net, tf, hdr)) {
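
The hits at lines 837-838 show the ownership handoff: the page pointer is moved out of the tbnet_frame before the data goes to the network stack, so a later tbnet_free_buffers() pass cannot double-free it. A sketch of the per-frame handling inside the NAPI loop; the budget accounting, dma_dev lookup, and skb construction around it are assumed:

  /* Per-frame RX handling inside tbnet_poll() (sketch) */
  const struct thunderbolt_ip_frame_header *hdr;
  struct ring_frame *frame;
  struct tbnet_frame *tf;
  struct page *page;

  frame = tb_ring_poll(net->rx_ring.ring);
  if (frame) {
          dma_unmap_page(dma_dev, frame->buffer_phy,
                         PAGE_SIZE << TBNET_RX_PAGE_ORDER, DMA_FROM_DEVICE);

          tf = container_of(frame, typeof(*tf), frame);

          /* Take ownership of the page; the ring slot is now refillable
           * and tbnet_free_buffers() will skip it.
           */
          page = tf->page;
          tf->page = NULL;
          net->rx_ring.cons++;

          hdr = page_address(page);  /* assumed: header at start of page */
          if (!tbnet_check_frame(net, tf, hdr)) {
                  __free_pages(page, TBNET_RX_PAGE_ORDER);
                  /* ... count the error and poll the next frame ... */
          } else {
                  /* ... attach the page to an skb and GRO-receive it ... */
          }
  }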