Searched refs:qpl (Results 1 – 9 of 9) sorted by relevance
/freebsd/sys/dev/gve/
gve_qpl.c (all hits in gve_free_qpl):
    60  struct gve_queue_page_list *qpl = &priv->qpls[id];  (local)
    63  for (i = 0; i < qpl->num_dmas; i++) {
    64  gve_dmamap_destroy(&qpl->dmas[i]);
    67  if (qpl->kva) {
    68  pmap_qremove(qpl->kva, qpl->num_pages);
    69  kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
    72  for (i = 0; i < qpl->num_pages; i++) {
    79  if (vm_page_unwire_noq(qpl->pages[i])) {
    80  if (!qpl->kva) {
    81  pmap_qremove((vm_offset_t)qpl->dmas[i].cpu_addr, 1);
    [all …]
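The gve_qpl.c hits above trace the QPL teardown order: DMA maps are destroyed first, then the shared KVA mapping (when one exists) is removed in a single pmap_qremove/kva_free pair, and only then are the pages unwired, with a per-page pmap_qremove fallback when no shared KVA was made. Below is a minimal userland model of those two unmap paths, with mmap(2) standing in for the kernel mappings; every name in it is hypothetical.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define NUM_PAGES 4     /* hypothetical QPL size */

    /*
     * Model of the two unmap paths in gve_free_qpl(): either one
     * contiguous mapping covers the whole run (kva != NULL) and is
     * torn down in a single call, or each page was mapped on demand
     * and must be unmapped one at a time.
     */
    static void
    free_qpl_model(void *kva, void **per_page, long page_size)
    {
            if (kva != NULL) {
                    munmap(kva, (size_t)NUM_PAGES * page_size);
                    return;
            }
            for (int i = 0; i < NUM_PAGES; i++)
                    munmap(per_page[i], (size_t)page_size);
    }

    int
    main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            void *kva = mmap(NULL, (size_t)NUM_PAGES * psz,
                PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

            if (kva == MAP_FAILED)
                    return (1);
            free_qpl_model(kva, NULL, psz);
            printf("released %d pages\n", NUM_PAGES);
            return (0);
    }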
gve_tx.c:
    40   struct gve_queue_page_list *qpl = tx->com.qpl;  in gve_tx_fifo_init() (local)
    43   fifo->size = qpl->num_pages * PAGE_SIZE;  in gve_tx_fifo_init()
    44   fifo->base = qpl->kva;  in gve_tx_fifo_init()
    112  com->qpl = &priv->qpls[i];  in gve_tx_alloc_ring_gqi()
    113  if (com->qpl == NULL) {  in gve_tx_alloc_ring_gqi()
    438  gve_dma_sync_for_device(struct gve_queue_page_list *qpl,  in gve_dma_sync_for_device() (argument)
    447  dma = &(qpl->dmas[page]);  in gve_dma_sync_for_device()
    706  gve_dma_sync_for_device(tx->com.qpl,  in gve_xmit()
    726  gve_dma_sync_for_device(tx->com.qpl,  in gve_xmit()
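gve_tx_fifo_init() at lines 40-44 shows the GQI TX path treating the QPL's contiguous KVA mapping as one large FIFO: the size is simply num_pages * PAGE_SIZE and the base is the KVA itself. A sketch of that initialization, with heap memory standing in for the KVA mapping and every struct and name invented for illustration:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096          /* assumed page size */

    struct fifo_model {
            void   *base;           /* qpl->kva in the driver */
            size_t  size;           /* num_pages * PAGE_SIZE */
            size_t  head;           /* next byte to hand out */
            size_t  avail;          /* bytes still free */
    };

    /* Mirrors the sizing in gve_tx_fifo_init(): the FIFO spans the QPL. */
    static void
    fifo_init_model(struct fifo_model *f, void *kva, size_t num_pages)
    {
            f->base = kva;
            f->size = num_pages * PAGE_SIZE;
            f->head = 0;
            f->avail = f->size;
    }

    int
    main(void)
    {
            size_t num_pages = 8;   /* hypothetical qpl->num_pages */
            void *backing = malloc(num_pages * PAGE_SIZE);
            struct fifo_model f;

            fifo_init_model(&f, backing, num_pages);
            printf("fifo of %zu bytes at %p\n", f.size, f.base);
            free(backing);
            return (0);
    }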
gve_rx.c:
    86   rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr;  in gve_prefill_rx_slots()
    87   rx->page_info[i].page = com->qpl->pages[i];  in gve_prefill_rx_slots()
    89   dma = &com->qpl->dmas[i];  in gve_prefill_rx_slots()
    116  com->qpl = &priv->qpls[priv->tx_cfg.max_queues + i];  in gve_rx_alloc_ring_gqi()
    117  if (com->qpl == NULL) {  in gve_rx_alloc_ring_gqi()
    557  page_dma_handle = &(rx->com.qpl->dmas[idx]);  in gve_rx()
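Line 116 here, together with gve_tx.c line 112, exposes the QPL ownership layout: TX ring i takes priv->qpls[i], and RX ring i takes priv->qpls[tx_cfg.max_queues + i], so all TX entries precede all RX entries in the array. A trivial sketch of that indexing (queue counts are made up):

    #include <stdio.h>

    /*
     * QPL index layout implied by gve_tx.c / gve_rx.c: TX rings come
     * first in priv->qpls[], RX rings follow after max_tx_queues.
     */
    static int
    tx_qpl_index(int ring)
    {
            return (ring);
    }

    static int
    rx_qpl_index(int max_tx_queues, int ring)
    {
            return (max_tx_queues + ring);
    }

    int
    main(void)
    {
            int max_tx = 4, max_rx = 4;     /* hypothetical queue config */

            for (int i = 0; i < max_tx; i++)
                    printf("tx ring %d -> qpl %d\n", i, tx_qpl_index(i));
            for (int i = 0; i < max_rx; i++)
                    printf("rx ring %d -> qpl %d\n", i,
                        rx_qpl_index(max_tx, i));
            return (0);
    }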
gve_adminq.c:
    279  htobe32((rx->com.qpl)->id);  in gve_adminq_create_rx_queue()
    341  htobe32((tx->com.qpl)->id);  in gve_adminq_create_tx_queue()
    506  struct gve_queue_page_list *qpl)  in gve_adminq_register_page_list() (argument)
    509  uint32_t num_entries = qpl->num_pages;  in gve_adminq_register_page_list()
    510  uint32_t size = num_entries * sizeof(qpl->dmas[0].bus_addr);  in gve_adminq_register_page_list()
    523  page_list[i] = htobe64(qpl->dmas[i].bus_addr);  in gve_adminq_register_page_list()
    529  .page_list_id = htobe32(qpl->id),  in gve_adminq_register_page_list()
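gve_adminq_register_page_list() hands the device one big-endian 64-bit bus address per page, with the descriptor sized as num_entries * sizeof(bus_addr). A userland sketch of building such a list; the addresses are fabricated, and htobe64 comes from <sys/endian.h> on FreeBSD (<endian.h> on Linux):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/endian.h>         /* htobe64 */

    int
    main(void)
    {
            uint32_t num_entries = 4;       /* hypothetical qpl->num_pages */
            uint64_t bus_addr[] = {         /* fabricated per-page bus addresses */
                    0x100000, 0x101000, 0x102000, 0x103000
            };
            uint32_t size = num_entries * sizeof(bus_addr[0]);
            uint64_t *page_list = malloc(size);

            /* The device expects each address in big-endian byte order. */
            for (uint32_t i = 0; i < num_entries; i++)
                    page_list[i] = htobe64(bus_addr[i]);

            printf("registered %u pages, %u bytes of descriptor\n",
                num_entries, size);
            free(page_list);
            return (0);
    }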
gve_rx_dqo.c:
    126  rx->com.qpl = &priv->qpls[priv->tx_cfg.max_queues + i];  in gve_rx_alloc_ring_dqo()
    127  if (rx->com.qpl == NULL) {  in gve_rx_alloc_ring_dqo()
    226  vm_page_t page = rx->com.qpl->pages[buf - rx->dqo.bufs];  in gve_clear_rx_ring_dqo()
    346  return (&(rx->com.qpl->dmas[buf - rx->dqo.bufs]));  in gve_get_page_dma_handle()
    384  page = rx->com.qpl->pages[buf - rx->dqo.bufs];  in gve_rx_maybe_extract_from_used_bufs()
    734  void *va = rx->com.qpl->dmas[page_idx].cpu_addr;  in gve_get_cpu_addr_for_qpl_buf()
    801  page = rx->com.qpl->pages[page_idx];  in gve_rx_add_extmbuf_to_ctx()
    802  page_addr = rx->com.qpl->dmas[page_idx].cpu_addr;  in gve_rx_add_extmbuf_to_ctx()
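A recurring DQO idiom is visible at lines 226, 346, and 384: the buffer's index is recovered by pointer subtraction, buf - rx->dqo.bufs, and then used to index the QPL's parallel pages[] and dmas[] arrays. A standalone sketch of that parallel-array lookup, with stand-in types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_BUFS 8

    struct buf_model { int in_use; };        /* stands in for the rx buffer */
    struct dma_model { uint64_t bus_addr; }; /* stands in for the dma handle */

    int
    main(void)
    {
            struct buf_model bufs[NUM_BUFS]; /* rx->dqo.bufs */
            struct dma_model dmas[NUM_BUFS]; /* rx->com.qpl->dmas, parallel */

            for (int i = 0; i < NUM_BUFS; i++)
                    dmas[i].bus_addr = 0x200000 + (uint64_t)i * 4096;

            struct buf_model *buf = &bufs[5];   /* some buffer in flight */

            /*
             * Same trick as gve_get_page_dma_handle(): the element index
             * recovered by pointer arithmetic selects the parallel entry.
             */
            ptrdiff_t idx = buf - bufs;
            printf("buf %td -> bus_addr 0x%llx\n", idx,
                (unsigned long long)dmas[idx].bus_addr);
            return (0);
    }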
gve_tx_dqo.c:
    213   tx->com.qpl = &priv->qpls[i];  in gve_tx_alloc_ring_dqo()
    215   tx->com.qpl->num_pages;  in gve_tx_alloc_ring_dqo()
    629   *va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset;  in gve_tx_buf_get_addr_dqo()
    630   *dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset;  in gve_tx_buf_get_addr_dqo()
    638   return (&tx->com.qpl->dmas[page_id]);  in gve_get_page_dma_handle()
    1001  tx->com.qpl->num_pages;  in gve_clear_tx_ring_dqo()
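Lines 629-630 resolve one (page_id, offset) pair against the same QPL entry twice, once for the CPU-visible address and once for the bus address. The sketch below shows how such a pair might be derived and applied, assuming fixed-size buffers packed into each page; the buffer size and helper are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define BUF_SIZE  2048  /* assumed buffers-per-page packing */

    struct qpl_dma_model {
            char    *cpu_addr;      /* kernel VA of the page */
            uint64_t bus_addr;      /* device-visible address of the page */
    };

    /*
     * Translate a flat buffer index into (va, dma_addr), mirroring the
     * cpu_addr + offset / bus_addr + offset pattern in gve_tx_dqo.c.
     */
    static void
    buf_get_addr_model(struct qpl_dma_model *dmas, int buf_index,
        char **va, uint64_t *dma_addr)
    {
            int bufs_per_page = PAGE_SIZE / BUF_SIZE;
            int page_id = buf_index / bufs_per_page;
            int offset = (buf_index % bufs_per_page) * BUF_SIZE;

            *va = dmas[page_id].cpu_addr + offset;
            *dma_addr = dmas[page_id].bus_addr + offset;
    }

    int
    main(void)
    {
            static char page0[PAGE_SIZE], page1[PAGE_SIZE];
            struct qpl_dma_model dmas[] = {
                    { page0, 0x300000 }, { page1, 0x301000 },
            };
            char *va;
            uint64_t dma;

            buf_get_addr_model(dmas, 3, &va, &dma); /* 2nd buf of page 1 */
            printf("va=%p dma=0x%llx\n", (void *)va,
                (unsigned long long)dma);
            return (0);
    }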
gve_adminq.h:
    443  struct gve_queue_page_list *qpl);
gve.h:
    216  struct gve_queue_page_list *qpl;  (member)
/freebsd/contrib/unbound/services/
listen_dnsport.c (all hits in http2_req_header_cb):
    3085  size_t qpl = strlen(HTTP_QUERY_PARAM);  (local)
    3094  if(valuelen <= el+qpl ||
    3095  memcmp(HTTP_QUERY_PARAM, value+el, qpl) != 0) {
    3102  value+(el+qpl), valuelen-(el+qpl))) {
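This last hit is unrelated to the gve driver: here qpl is the length of unbound's DoH query-parameter prefix, and the test rejects a :path header unless the expected parameter follows the endpoint with at least one byte of payload (valuelen <= el+qpl catches the empty case before the memcmp). A standalone sketch of that bounds-then-memcmp pattern; the endpoint and parameter strings are assumptions modeled on DNS-over-HTTPS GET syntax:

    #include <stdio.h>
    #include <string.h>

    #define HTTP_ENDPOINT    "/dns-query"   /* assumed endpoint */
    #define HTTP_QUERY_PARAM "?dns="        /* assumed parameter prefix */

    /*
     * Returns 1 when path looks like ENDPOINT?dns=<payload>, mirroring
     * the length check before the memcmp in http2_req_header_cb().
     */
    static int
    path_has_query_param(const char *path, size_t pathlen)
    {
            size_t el = strlen(HTTP_ENDPOINT);
            size_t qpl = strlen(HTTP_QUERY_PARAM);

            if (pathlen <= el + qpl ||      /* too short: no payload */
                memcmp(path, HTTP_ENDPOINT, el) != 0 ||
                memcmp(path + el, HTTP_QUERY_PARAM, qpl) != 0)
                    return (0);
            return (1);
    }

    int
    main(void)
    {
            const char *ok = "/dns-query?dns=AAABAAABAAAAAAAA";
            const char *bad = "/dns-query?dns=";      /* empty payload */

            printf("%s -> %d\n", ok, path_has_query_param(ok, strlen(ok)));
            printf("%s -> %d\n", bad, path_has_query_param(bad, strlen(bad)));
            return (0);
    }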