Lines matching refs: rx_ring — all uses of rx_ring in the FreeBSD al_eth driver (Annapurna Labs Ethernet); each entry shows the source line number, the matching statement, and the enclosing function.

1000 		struct al_eth_ring *ring = &adapter->rx_ring[i];  in al_eth_init_rings()
1041 struct al_eth_ring *rx_ring, in al_eth_alloc_rx_buf() argument
1067 error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map, in al_eth_alloc_rx_buf()
1070 device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n", in al_eth_alloc_rx_buf()
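The fragments at 1041-1070 show the buffer-allocation helper: it takes the ring as an argument, loads a fresh mbuf into the ring's DMA tag with bus_dmamap_load_mbuf_sg(), and logs through the ring's device on failure. A minimal, compilable userland model of that allocate-then-map shape; all types and helper names below are simplified stand-ins, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the mbuf and the DMA-map handle. */
    struct buf  { void *data; };
    struct dmap { unsigned long paddr; };

    /* Hypothetical mapping step standing in for bus_dmamap_load_mbuf_sg(). */
    static int
    map_buf(struct buf *b, struct dmap *m)
    {
            if (b->data == NULL)
                    return (-1);    /* mapping failed */
            m->paddr = (unsigned long)b->data;      /* fake bus address */
            return (0);
    }

    /* Allocate one RX buffer and map it; undo the allocation on failure,
     * mirroring the error path visible at line 1070. */
    static int
    alloc_rx_buf(struct buf *b, struct dmap *m)
    {
            int error;

            b->data = malloc(2048); /* stand-in for the mbuf cluster */
            error = map_buf(b, m);
            if (error != 0) {
                    fprintf(stderr, "failed to map buf, error = %d\n", error);
                    free(b->data);
                    b->data = NULL;
            }
            return (error);
    }

    int
    main(void)
    {
            struct buf b;
            struct dmap m;

            return (alloc_rx_buf(&b, &m) == 0 ? 0 : 1);
    }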
1088 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; in al_eth_refill_rx_bufs() local
1092 next_to_use = rx_ring->next_to_use; in al_eth_refill_rx_bufs()
1097 &rx_ring->rx_buffer_info[next_to_use]; in al_eth_refill_rx_bufs()
1100 rx_ring, rx_info) < 0)) { in al_eth_refill_rx_bufs()
1106 rc = al_eth_rx_buffer_add(rx_ring->dma_q, in al_eth_refill_rx_bufs()
1114 next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use); in al_eth_refill_rx_bufs()
1120 qid, i, al_udma_available_get(rx_ring->dma_q)); in al_eth_refill_rx_bufs()
1123 al_eth_rx_buffer_action(rx_ring->dma_q, i); in al_eth_refill_rx_bufs()
1125 rx_ring->next_to_use = next_to_use; in al_eth_refill_rx_bufs()
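Lines 1088-1125 outline the refill path: starting from rx_ring->next_to_use, it allocates and posts buffers one by one (al_eth_rx_buffer_add()), advances the producer index with the wrap-around macro AL_ETH_RX_RING_IDX_NEXT, commits the whole batch to hardware with al_eth_rx_buffer_action(), and only then stores the new next_to_use. A minimal, compilable model of that producer loop; the ring size and helper names are illustrative, not the driver's:

    #include <stdio.h>

    #define RING_SIZE 8     /* illustrative; the driver sizes rings at init */

    /* Wrap-around increment, the role AL_ETH_RX_RING_IDX_NEXT plays. */
    static unsigned
    ring_idx_next(unsigned idx)
    {
            return ((idx + 1) % RING_SIZE);
    }

    /* Post up to 'num' buffers, then "commit" the batch in one go,
     * mirroring al_eth_rx_buffer_action(). */
    static unsigned
    refill_rx_bufs(unsigned *next_to_use, unsigned num)
    {
            unsigned i;

            for (i = 0; i < num; i++) {
                    /* alloc_rx_buf() would go here; assume success. */
                    *next_to_use = ring_idx_next(*next_to_use);
            }
            printf("committed %u buffers, next_to_use=%u\n", i, *next_to_use);
            return (i);
    }

    int
    main(void)
    {
            unsigned ntu = 6;

            refill_rx_bufs(&ntu, 4);        /* wraps past the ring end */
            return (0);
    }

Batching the commit keeps the doorbell write out of the per-buffer loop, which is why the driver counts buffers and calls al_eth_rx_buffer_action() once at the end.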
1451 struct al_eth_ring *rx_ring = arg; in al_eth_rx_recv_irq_filter() local
1455 device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__, in al_eth_rx_recv_irq_filter()
1456 rx_ring->ring_id); in al_eth_rx_recv_irq_filter()
1462 if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0)) in al_eth_rx_recv_irq_filter()
1463 taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task); in al_eth_rx_recv_irq_filter()
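The filter handler at 1451-1463 runs in interrupt context and only schedules the heavier receive work: unless the polling worker is already active (enqueue_is_running), it enqueues enqueue_task on the ring's taskqueue. Note the quoted condition (napi == 0) || (napi && enqueue_is_running == 0) simplifies to napi == 0 || enqueue_is_running == 0. A compilable sketch of that "schedule unless already running" gate, with the flag and queue as simplified stand-ins:

    #include <stdio.h>

    static int napi = 1;            /* polling mode enabled */
    static int enqueue_is_running;  /* set by the worker while it polls */

    /* Stand-in for taskqueue_enqueue(). */
    static void
    enqueue_task(void)
    {
            printf("rx work scheduled\n");
    }

    /* Interrupt filter: defer the real work, but avoid re-queueing while
     * the worker is still draining the ring. */
    static void
    rx_recv_irq_filter(void)
    {
            if (napi == 0 || enqueue_is_running == 0)
                    enqueue_task();
    }

    int
    main(void)
    {
            rx_recv_irq_filter();   /* schedules */
            enqueue_is_running = 1;
            rx_recv_irq_filter();   /* suppressed while the worker runs */
            return (0);
    }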
1516 struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt, in al_eth_rx_mbuf() argument
1521 &rx_ring->rx_buffer_info[*next_to_clean]; in al_eth_rx_mbuf()
1529 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, in al_eth_rx_mbuf()
1537 mbuf->m_pkthdr.rcvif = rx_ring->netdev; in al_eth_rx_mbuf()
1556 smbuf->m_pkthdr.rcvif = rx_ring->netdev; in al_eth_rx_mbuf()
1562 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, in al_eth_rx_mbuf()
1570 bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map); in al_eth_rx_mbuf()
1573 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); in al_eth_rx_mbuf()
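al_eth_rx_mbuf() (1516-1573) turns a completed descriptor chain back into an mbuf: it takes the buffer at *next_to_clean, sets m_pkthdr.rcvif so the stack knows the receiving interface, unloads the buffer's DMA map, and advances the consumer index once per descriptor the packet consumed. A small compilable model of consuming a multi-descriptor packet; the structures are simplified stand-ins:

    #include <stdio.h>

    #define RING_SIZE 8     /* illustrative */

    static unsigned
    ring_idx_next(unsigned idx)
    {
            return ((idx + 1) % RING_SIZE);
    }

    /* Consume 'descs' descriptors for one packet, starting at
     * *next_to_clean; each buffer would be unmapped and chained here. */
    static void
    rx_packet(unsigned *next_to_clean, unsigned descs)
    {
            unsigned d;

            for (d = 0; d < descs; d++) {
                    printf("unmap buffer %u\n", *next_to_clean);
                    *next_to_clean = ring_idx_next(*next_to_clean);
            }
    }

    int
    main(void)
    {
            unsigned ntc = 7;

            rx_packet(&ntc, 2);     /* a two-descriptor packet, wrapping */
            printf("next_to_clean=%u\n", ntc);
            return (0);
    }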
1581 struct al_eth_ring *rx_ring = arg; in al_eth_rx_recv_work() local
1583 unsigned int qid = rx_ring->ring_id; in al_eth_rx_recv_work()
1584 struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; in al_eth_rx_recv_work()
1585 uint16_t next_to_clean = rx_ring->next_to_clean; in al_eth_rx_recv_work()
1591 rx_ring->enqueue_is_running = 1; in al_eth_rx_recv_work()
1598 descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); in al_eth_rx_recv_work()
1602 device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet " in al_eth_rx_recv_work()
1604 device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. " in al_eth_rx_recv_work()
1611 device_printf(rx_ring->dev, "receive packet with error. " in al_eth_rx_recv_work()
1613 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, in al_eth_rx_recv_work()
1619 mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs, in al_eth_rx_recv_work()
1624 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, in al_eth_rx_recv_work()
1629 if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM || in al_eth_rx_recv_work()
1630 if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) { in al_eth_rx_recv_work()
1631 al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf); in al_eth_rx_recv_work()
1642 if ((rx_ring->lro_enabled != 0) && in al_eth_rx_recv_work()
1651 if (rx_ring->lro.lro_cnt != 0) { in al_eth_rx_recv_work()
1652 if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0) in al_eth_rx_recv_work()
1658 if_input(rx_ring->netdev, mbuf); in al_eth_rx_recv_work()
1662 rx_ring->next_to_clean = next_to_clean; in al_eth_rx_recv_work()
1664 refill_required = al_udma_available_get(rx_ring->dma_q); in al_eth_rx_recv_work()
1665 refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid, in al_eth_rx_recv_work()
1669 device_printf_dbg(rx_ring->dev, in al_eth_rx_recv_work()
1673 tcp_lro_flush_all(&rx_ring->lro); in al_eth_rx_recv_work()
1676 rx_ring->enqueue_is_running = 0; in al_eth_rx_recv_work()
1680 al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val); in al_eth_rx_recv_work()
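al_eth_rx_recv_work() (1581-1680) is the deferred poll loop: set enqueue_is_running, repeatedly pull completed packets with al_eth_pkt_rx(), skip errored packets by advancing next_to_clean past their descriptors, build mbufs, apply RX checksum offload when IFCAP_RXCSUM or IFCAP_RXCSUM_IPV6 is enabled, hand packets to LRO (tcp_lro_rx()) or directly to if_input(), refill as many descriptors as al_udma_available_get() reports free, flush LRO, clear the running flag, and re-enable the interrupt through the unmask register. A compilable skeleton of that control flow; every helper below is a simplified stand-in, not the driver's API:

    #include <stdio.h>

    static int enqueue_is_running;

    /* Stand-ins: report the next completed packet (0 when the ring is
     * drained), deliver a packet, refill the ring, re-enable the irq. */
    static int  next_packet(void) { static int n = 3; return (n-- > 0); }
    static void deliver(void)     { printf("packet up the stack\n"); }
    static void refill(void)      { printf("refill posted\n"); }
    static void unmask_irq(void)  { printf("irq re-enabled\n"); }

    static void
    rx_recv_work(void)
    {
            enqueue_is_running = 1;
            while (next_packet() != 0) {
                    /* checksum and LRO decisions would sit here */
                    deliver();
            }
            refill();
            enqueue_is_running = 0;
            unmask_irq();
    }

    int
    main(void)
    {
            rx_recv_work();
            return (0);
    }

Clearing the flag before unmasking matters: once the interrupt is re-armed, the filter at 1462 must be allowed to schedule the task again.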
2116 adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i]; in al_eth_setup_int_mode()
2448 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; in al_eth_setup_rx_resources() local
2449 device_t dev = rx_ring->dev; in al_eth_setup_rx_resources()
2450 struct al_udma_q_params *q_params = &rx_ring->q_params; in al_eth_setup_rx_resources()
2454 size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count; in al_eth_setup_rx_resources()
2459 rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); in al_eth_setup_rx_resources()
2460 rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc); in al_eth_setup_rx_resources()
2461 q_params->size = rx_ring->hw_count; in al_eth_setup_rx_resources()
2466 (void**)&q_params->desc_base, rx_ring->descs_size); in al_eth_setup_rx_resources()
2473 rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size; in al_eth_setup_rx_resources()
2477 (void**)&q_params->cdesc_base, rx_ring->cdescs_size); in al_eth_setup_rx_resources()
2483 NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); in al_eth_setup_rx_resources()
2484 rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT, in al_eth_setup_rx_resources()
2485 taskqueue_thread_enqueue, &rx_ring->enqueue_tq); in al_eth_setup_rx_resources()
2486 taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq", in al_eth_setup_rx_resources()
2501 &rx_ring->dma_buf_tag); in al_eth_setup_rx_resources()
2508 for (size = 0; size < rx_ring->sw_count; size++) { in al_eth_setup_rx_resources()
2509 ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0, in al_eth_setup_rx_resources()
2510 &rx_ring->rx_buffer_info[size].dma_map); in al_eth_setup_rx_resources()
2518 memset(q_params->cdesc_base, 0, rx_ring->cdescs_size); in al_eth_setup_rx_resources()
2522 int err = tcp_lro_init(&rx_ring->lro); in al_eth_setup_rx_resources()
2529 rx_ring->lro_enabled = true; in al_eth_setup_rx_resources()
2530 rx_ring->lro.ifp = adapter->netdev; in al_eth_setup_rx_resources()
2534 rx_ring->next_to_clean = 0; in al_eth_setup_rx_resources()
2535 rx_ring->next_to_use = 0; in al_eth_setup_rx_resources()
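al_eth_setup_rx_resources() (2448-2535) sizes everything from two counts: the software bookkeeping array holds sw_count entries of struct al_eth_rx_buffer, while the descriptor and completion-descriptor areas hold hw_count entries each. It then creates the per-buffer DMA maps, registers the receive task on a dedicated taskqueue, optionally initializes LRO, and resets both ring indices to 0. A compilable sketch of the sizing and index reset, with illustrative types and counts:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct rx_buffer { void *m; unsigned long map; }; /* simplified stand-in */

    int
    main(void)
    {
            unsigned sw_count = 512, hw_count = 512;  /* illustrative */
            size_t desc_size = 16, cdesc_size = 16;   /* illustrative */
            struct rx_buffer *info;
            void *descs, *cdescs;
            unsigned next_to_clean, next_to_use;

            /* Software array: one entry per slot, zeroed like M_ZERO. */
            info = calloc(sw_count, sizeof(*info));

            /* Hardware areas: hw_count descriptors and completions. */
            descs  = malloc(hw_count * desc_size);
            cdescs = malloc(hw_count * cdesc_size);
            memset(cdescs, 0, hw_count * cdesc_size); /* as at line 2518 */

            /* A fresh ring starts with both indices at the origin. */
            next_to_clean = 0;
            next_to_use = 0;

            printf("sw=%zu bytes, desc=%zu, cdesc=%zu, ntc=%u ntu=%u\n",
                sw_count * sizeof(*info), hw_count * desc_size,
                hw_count * cdesc_size, next_to_clean, next_to_use);

            free(info); free(descs); free(cdescs);
            return (0);
    }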
2550 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; in al_eth_free_rx_resources() local
2551 struct al_udma_q_params *q_params = &rx_ring->q_params; in al_eth_free_rx_resources()
2555 while (taskqueue_cancel(rx_ring->enqueue_tq, in al_eth_free_rx_resources()
2556 &rx_ring->enqueue_task, NULL)) { in al_eth_free_rx_resources()
2557 taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task); in al_eth_free_rx_resources()
2560 taskqueue_free(rx_ring->enqueue_tq); in al_eth_free_rx_resources()
2562 for (size = 0; size < rx_ring->sw_count; size++) { in al_eth_free_rx_resources()
2563 m_freem(rx_ring->rx_buffer_info[size].m); in al_eth_free_rx_resources()
2564 rx_ring->rx_buffer_info[size].m = NULL; in al_eth_free_rx_resources()
2565 bus_dmamap_unload(rx_ring->dma_buf_tag, in al_eth_free_rx_resources()
2566 rx_ring->rx_buffer_info[size].dma_map); in al_eth_free_rx_resources()
2567 bus_dmamap_destroy(rx_ring->dma_buf_tag, in al_eth_free_rx_resources()
2568 rx_ring->rx_buffer_info[size].dma_map); in al_eth_free_rx_resources()
2570 bus_dma_tag_destroy(rx_ring->dma_buf_tag); in al_eth_free_rx_resources()
2572 free(rx_ring->rx_buffer_info, M_IFAL); in al_eth_free_rx_resources()
2573 rx_ring->rx_buffer_info = NULL; in al_eth_free_rx_resources()
2594 tcp_lro_free(&rx_ring->lro); in al_eth_free_rx_resources()
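Teardown at 2550-2594 mirrors setup in reverse: cancel and drain the enqueue task until taskqueue_cancel() reports it gone, free the taskqueue, walk the buffer array freeing each mbuf and unloading and destroying its DMA map, destroy the DMA tag, free the bookkeeping array, and finally release the LRO state. A compilable sketch of that reverse-order teardown, again with simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_buffer { void *m; };  /* simplified stand-in */

    int
    main(void)
    {
            unsigned sw_count = 4, i;
            struct rx_buffer *info = calloc(sw_count, sizeof(*info));

            for (i = 0; i < sw_count; i++)
                    info[i].m = malloc(2048); /* pretend-populated ring */

            /* 1. stop deferred work (taskqueue cancel/drain goes here) */
            /* 2. release each buffer and its mapping                   */
            for (i = 0; i < sw_count; i++) {
                    free(info[i].m);          /* m_freem() in the driver */
                    info[i].m = NULL;
            }
            /* 3. destroy the DMA tag, then the bookkeeping array       */
            free(info);
            printf("rx resources released\n");
            return (0);
    }

Stopping the taskqueue first guarantees no receive worker can touch a buffer or map after it has been freed.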
2609 if (adapter->rx_ring[i].q_params.desc_base != 0) in al_eth_free_all_rx_resources()
3051 q_params = &adapter->rx_ring[qid].q_params; in al_eth_udma_queue_enable()