Lines matching full:com — whole-identifier occurrences of "com" in the gve driver's RX ring code
39 struct gve_ring_com *com = &rx->com; in gve_rx_free_ring_gqi() local
56 if (com->qpl != NULL) { in gve_rx_free_ring_gqi()
57 gve_free_qpl(priv, com->qpl); in gve_rx_free_ring_gqi()
58 com->qpl = NULL; in gve_rx_free_ring_gqi()
66 struct gve_ring_com *com = &rx->com; in gve_rx_free_ring() local
76 if (com->q_resources != NULL) { in gve_rx_free_ring()
77 gve_dma_free_coherent(&com->q_resources_mem); in gve_rx_free_ring()
78 com->q_resources = NULL; in gve_rx_free_ring()
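
The two free routines above follow the same teardown discipline: release a resource only if it was actually allocated, then clear the pointer so a repeated teardown is harmless. A minimal sketch of that pattern, with field and helper names taken from the listing rather than verified against the full source:

    if (com->qpl != NULL) {
            gve_free_qpl(priv, com->qpl);        /* return the ring's queue page list */
            com->qpl = NULL;                     /* guard against a double free */
    }
    if (com->q_resources != NULL) {
            gve_dma_free_coherent(&com->q_resources_mem);  /* release the coherent q_resources page */
            com->q_resources = NULL;
    }
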
85 struct gve_ring_com *com = &rx->com; in gve_prefill_rx_slots() local
89 for (i = 0; i < com->priv->rx_desc_cnt; i++) { in gve_prefill_rx_slots()
92 rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr; in gve_prefill_rx_slots()
93 rx->page_info[i].page = com->qpl->pages[i]; in gve_prefill_rx_slots()
95 dma = &com->qpl->dmas[i]; in gve_prefill_rx_slots()
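
The prefill loop above binds each RX slot to one page of the ring's queue page list, so refilling never allocates memory on the hot path. A sketch of the loop, assuming the page_info and QPL field names shown in the listing:

    for (i = 0; i < com->priv->rx_desc_cnt; i++) {
            /* Slot i is permanently backed by QPL page i. */
            rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr;
            rx->page_info[i].page = com->qpl->pages[i];
            dma = &com->qpl->dmas[i];
            /* The slot's device-visible bus address would be written to the
             * data ring here; that part of the loop is not shown in the listing. */
    }
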
107 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring_gqi() local
122 com->qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues, in gve_rx_alloc_ring_gqi()
124 if (com->qpl == NULL) { in gve_rx_alloc_ring_gqi()
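
The id passed to gve_alloc_qpl() above implies a single QPL id space shared by TX and RX rings, with RX ids placed after all possible TX ids. Illustrative arithmetic only; the remaining gve_alloc_qpl() arguments are truncated in the listing and are not reproduced:

    /* Inferred id layout (TX ring j -> id j is not shown in this listing):
     *   RX ring i -> QPL id priv->tx_cfg.max_queues + i
     */
    uint32_t rx_qpl_id = i + priv->tx_cfg.max_queues;
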
156 struct gve_ring_com *com = &rx->com; in gve_rx_alloc_ring() local
159 com->priv = priv; in gve_rx_alloc_ring()
160 com->id = i; in gve_rx_alloc_ring()
165 PAGE_SIZE, &com->q_resources_mem); in gve_rx_alloc_ring()
171 com->q_resources = com->q_resources_mem.cpu_addr; in gve_rx_alloc_ring()
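
gve_rx_alloc_ring() above stamps the shared ring state (owning priv and ring id) and allocates one coherent, PAGE_SIZE-aligned region for the queue resources the device will fill in. A sketch of that allocation, with the leading arguments of the truncated call assumed rather than verified:

    com->priv = priv;
    com->id = i;

    /* One coherent page holds the device-written queue resources (doorbell
     * index, counter index); the CPU pointer is cached for later lookups.
     * The first arguments of the call below are assumptions. */
    err = gve_dma_alloc_coherent(priv, sizeof(struct gve_queue_resources),
        PAGE_SIZE, &com->q_resources_mem);
    if (err != 0)
            return (err);
    com->q_resources = com->q_resources_mem.cpu_addr;
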
219 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_data_ring()
244 struct gve_priv *priv = rx->com.priv; in gve_rx_clear_desc_ring()
277 struct gve_ring_com *com = &rx->com; in gve_start_rx_ring() local
286 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx); in gve_start_rx_ring()
288 NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx); in gve_start_rx_ring()
289 com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK, in gve_start_rx_ring()
290 taskqueue_thread_enqueue, &com->cleanup_tq); in gve_start_rx_ring()
292 taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, in gve_start_rx_ring()
297 gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt); in gve_start_rx_ring()
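
gve_start_rx_ring() above wires up the ring's deferred-work machinery: a cleanup task bound to the GQI or DQO handler, a dedicated single-threaded fast taskqueue running at PI_NET, and an initial doorbell write publishing the fill count to the device. A sketch, where the gve_is_gqi() predicate and the thread-name arguments are assumptions (both are absent or truncated in the listing):

    if (gve_is_gqi(priv))
            NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx);
    else
            NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx);

    /* One fast taskqueue per RX ring, drained by a single thread at
     * network priority. */
    com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK,
        taskqueue_thread_enqueue, &com->cleanup_tq);
    taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET,
        "%s rxq %d", device_get_nameunit(priv->dev), com->id);

    /* Tell the NIC how many buffers have been posted so far. */
    gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
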
305 struct gve_ring_com *com; in gve_create_rx_rings() local
325 com = &rx->com; in gve_create_rx_rings()
327 com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index); in gve_create_rx_rings()
329 bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map, in gve_create_rx_rings()
331 com->db_offset = 4 * be32toh(com->q_resources->db_index); in gve_create_rx_rings()
332 com->counter_idx = be32toh(com->q_resources->counter_index); in gve_create_rx_rings()
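
gve_create_rx_rings() above reads back what the device wrote into the queue resources: the indices are 32-bit big-endian, and doorbell indices become byte offsets into the doorbell BAR by multiplying by 4 (sizeof(uint32_t)). A sketch, with the bus_dmamap_sync() operation flag assumed to be BUS_DMASYNC_POSTREAD since it is truncated in the listing:

    com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index);

    /* Pick up the device-written q_resources before reading it. */
    bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map,
        BUS_DMASYNC_POSTREAD);
    com->db_offset = 4 * be32toh(com->q_resources->db_index);     /* index -> byte offset */
    com->counter_idx = be32toh(com->q_resources->counter_index);  /* plain index, no scaling */
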
345 struct gve_ring_com *com = &rx->com; in gve_stop_rx_ring() local
347 if (com->cleanup_tq != NULL) { in gve_stop_rx_ring()
348 taskqueue_quiesce(com->cleanup_tq); in gve_stop_rx_ring()
349 taskqueue_free(com->cleanup_tq); in gve_stop_rx_ring()
350 com->cleanup_tq = NULL; in gve_stop_rx_ring()
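
gve_stop_rx_ring() above tears the taskqueue down in the safe order: quiesce first, so a task that re-enqueues itself has finished for good, then free, then clear the pointer so the stop path can run more than once. A commented sketch of the same sequence:

    if (com->cleanup_tq != NULL) {
            taskqueue_quiesce(com->cleanup_tq);   /* wait until nothing is running or queued */
            taskqueue_free(com->cleanup_tq);
            com->cleanup_tq = NULL;               /* make repeated stops a no-op */
    }
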
380 struct gve_priv *priv = rx->com.priv; in gve_rx_intr()
381 struct gve_ring_com *com = &rx->com; in gve_rx_intr() local
386 gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK); in gve_rx_intr()
387 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_intr()
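
gve_rx_intr() above is a minimal top half: it masks the ring's interrupt through the per-queue irq doorbell and punts all descriptor processing to the cleanup taskqueue. A sketch of the handler body; the FILTER_HANDLED return is the usual idiom for a FreeBSD interrupt filter and is an assumption here:

    /* Silence this queue's interrupt until the taskqueue re-arms it. */
    gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK);
    taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
    return (FILTER_HANDLED);
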
560 page_dma_handle = &(rx->com.qpl->dmas[idx]); in gve_rx()
690 gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt); in gve_rx_cleanup()
697 struct gve_priv *priv = rx->com.priv; in gve_rx_cleanup_tq()
704 gve_db_bar_write_4(priv, rx->com.irq_db_offset, in gve_rx_cleanup_tq()
715 gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK); in gve_rx_cleanup_tq()
716 taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task); in gve_rx_cleanup_tq()
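
gve_rx_cleanup_tq() above shows the re-arm handshake: after a cleanup pass, the irq doorbell write at line 704 (its value is truncated in the listing) re-enables the interrupt, and if completions slipped in during that window the handler masks the interrupt again and re-enqueues itself rather than waiting for an interrupt that may never fire. A sketch of that tail, where the work-pending helper name is an assumption used for illustration:

    /* After the re-arm write at line 704, close the race where new
     * descriptors completed while the interrupt was still masked. */
    if (gve_rx_work_pending(rx)) {            /* helper name assumed */
            gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
            taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
    }
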