Lines matching +full:com +full:-offset
(matched lines are grouped by function; elided context is marked "...")
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023-2024 Google LLC
 * ...
 */
/*
 * Register-BAR accessors: gve device registers are big-endian, so reads
 * and writes byte-swap at the boundary.
 */
uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
{
    return (be32toh(bus_read_4(priv->reg_bar, offset)));
}

void
gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
    bus_write_4(priv->reg_bar, offset, htobe32(val));
}

void
gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
    bus_write_4(priv->db_bar, offset, htobe32(val));
}

/* The DQO queue format takes doorbell values in host order: no swap. */
void
gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
    bus_write_4(priv->db_bar, offset, val);
}
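/*
 * Usage sketch (not part of the matched file): picking the right doorbell
 * helper per queue format. gve_is_gqi() is an assumption here, inferred
 * from the format-dependent branches in gve_unmask_all_queue_irqs() below.
 */
static void
gve_example_write_irq_db(struct gve_priv *priv, struct gve_ring_com *com,
    uint32_t val)
{
    if (gve_is_gqi(priv))
        /* GQI: the helper converts to the device's big-endian layout. */
        gve_db_bar_write_4(priv, com->irq_db_offset, val);
    else
        /* DQO: the value goes out exactly as given. */
        gve_db_bar_dqo_write_4(priv, com->irq_db_offset, val);
}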
/*
 * gve_dma_alloc_coherent(): create a DMA tag, allocate coherent memory,
 * and load the map to learn the device-visible bus address.
 */
    device_t dev = priv->dev;
    ...
        &dma->tag);        /* tail of the bus_dma_tag_create() call */
    ...
    err = bus_dmamem_alloc(dma->tag, (void **) &dma->cpu_addr,
        ...
        &dma->map);
    ...
    /* An address set by the callback will never be -1 */
    dma->bus_addr = (bus_addr_t)-1;
    err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
        gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_NOWAIT);
    if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
        ...

    /* error-unwind tail */
    bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
    ...
    bus_dma_tag_destroy(dma->tag);
    ...
    dma->tag = NULL;
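/*
 * Sketch of the load callback referenced above (its body lies outside the
 * matched lines): a standard single-segment busdma callback that stores
 * the segment's bus address through the arg pointer, leaving the
 * (bus_addr_t)-1 sentinel untouched on error.
 */
static void
gve_dmamap_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
    if (error == 0)
        *(bus_addr_t *)arg = segs[0].ds_addr;
}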
/*
 * gve_dma_free_coherent(): full body; teardown mirrors the allocation in
 * reverse (sync, unload, free, destroy tag).
 */
    bus_dmamap_sync(dma->tag, dma->map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->tag, dma->map);
    bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
    bus_dma_tag_destroy(dma->tag);
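/*
 * Usage sketch (hypothetical): the expected lifetime pairing of the two
 * helpers above. The (priv, size, align, dma) parameter order is an
 * assumption inferred from the matched lines, not confirmed by them.
 */
    struct gve_dma_handle dma;

    if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
        return (ENOMEM);
    /* ... use dma.cpu_addr (kernel VA) and dma.bus_addr (device address) ... */
    gve_dma_free_coherent(&dma);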
/*
 * gve_dmamap_create(): same shape as gve_dma_alloc_coherent(), but it maps
 * memory the caller already placed in dma->cpu_addr, so it creates a map
 * rather than allocating memory, and may sleep (BUS_DMA_WAITOK).
 */
    device_t dev = priv->dev;
    ...
        &dma->tag);        /* tail of the bus_dma_tag_create() call */
    ...
    err = bus_dmamap_create(dma->tag, BUS_DMA_COHERENT, &dma->map);
    ...
    /* An address set by the callback will never be -1 */
    dma->bus_addr = (bus_addr_t)-1;
    err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
        gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_WAITOK);
    if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
        ...

    /* error-unwind tail */
    bus_dmamap_destroy(dma->tag, dma->map);
    ...
    bus_dma_tag_destroy(dma->tag);
    ...
    dma->tag = NULL;
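/*
 * Usage sketch (hypothetical, same assumed signature as in the alloc
 * sketch above): wiring an existing buffer "buf" of "size" bytes through
 * gve_dmamap_create(). Both names are placeholders.
 */
    struct gve_dma_handle dma;

    dma.cpu_addr = buf;    /* caller-owned memory, at least size bytes */
    if (gve_dmamap_create(priv, size, PAGE_SIZE, &dma) != 0)
        return (ENOMEM);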
/* gve_dmamap_destroy(): full body; counterpart of gve_dmamap_create(). */
    bus_dmamap_sync(dma->tag, dma->map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->tag, dma->map);
    bus_dmamap_destroy(dma->tag, dma->map);
    bus_dma_tag_destroy(dma->tag);
/* gve_mgmnt_intr(): the management interrupt just kicks the service task. */
    taskqueue_enqueue(priv->service_tq, &priv->service_task);
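/*
 * Sketch (assumption, outside the matched lines) of the setup that gives
 * the one-liner above something to enqueue: a standard single-threaded
 * taskqueue. gve_service_task is a hypothetical handler name.
 */
    TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
    priv->service_tq = taskqueue_create("gve service", M_WAITOK,
        taskqueue_thread_enqueue, &priv->service_tq);
    taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "gve service tq");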
/* gve_free_irqs(): tear down and release every vector, then the table. */
    if (priv->irq_tbl == NULL) {
        device_printf(priv->dev, "No irq table, nothing to free\n");
        ...

    num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;
    ...
        irq = &priv->irq_tbl[i];
        if (irq->res == NULL)
            ...
        rid = rman_get_rid(irq->res);
        ...
        rc = bus_teardown_intr(priv->dev, irq->res, irq->cookie);
        ...
            device_printf(priv->dev, "Failed to teardown irq num %d\n",
                ...
        rc = bus_release_resource(priv->dev, SYS_RES_IRQ,
            rid, irq->res);
        ...
            device_printf(priv->dev, "Failed to release irq num %d\n",
                ...
        irq->res = NULL;
        irq->cookie = NULL;
    ...
    free(priv->irq_tbl, M_GVE);
    priv->irq_tbl = NULL;
    ...
    pci_release_msi(priv->dev);
/*
 * gve_alloc_irqs(): one MSI-X vector per Tx queue, one per Rx queue, and a
 * final vector for the management interrupt.
 */
    int num_tx = priv->tx_cfg.num_queues;
    int num_rx = priv->rx_cfg.num_queues;
    ...
    struct gve_ring_com *com;
    ...
    if (pci_alloc_msix(priv->dev, &got_nvecs) != 0) {
        device_printf(priv->dev, "Failed to acquire any msix vectors\n");
        ...
        device_printf(priv->dev, "Tried to acquire %d msix vectors, got only %d\n",
            ...
    device_printf(priv->dev, "Enabled MSIX with %d vectors\n", got_nvecs);

    priv->irq_tbl = malloc(sizeof(struct gve_irq) * req_nvecs, M_GVE,
        ...
    /* Tx queues: vector i serves Tx queue i. */
        irq = &priv->irq_tbl[i];
        tx = &priv->tx[i];
        com = &tx->com;
        ...
        irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
            ...
        if (irq->res == NULL) {
            device_printf(priv->dev, "Failed to alloc irq %d for Tx queue %d\n",
                ...
        err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
            ...
            &priv->tx[i], &irq->cookie);
        ...
            device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
                ...
        bus_describe_intr(priv->dev, irq->res, irq->cookie, "tx%d", i);
        com->ntfy_id = i;
    /* Rx queues: vector i + j serves Rx queue j. */
        irq = &priv->irq_tbl[i + j];
        rx = &priv->rx[j];
        com = &rx->com;
        ...
        irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
            ...
        if (irq->res == NULL) {
            device_printf(priv->dev,
                ...
        err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
            ...
            &priv->rx[j], &irq->cookie);
        ...
            device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
                ...
        bus_describe_intr(priv->dev, irq->res, irq->cookie, "rx%d", j);
        com->ntfy_id = i + j;
    /* Management interrupt: the last vector, registered as a filter only. */
    irq = &priv->irq_tbl[m];
    ...
    irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
        ...
    if (irq->res == NULL) {
        device_printf(priv->dev, "Failed to allocate irq %d for mgmnt queue\n", rid);
        ...
    err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
        gve_mgmnt_intr, NULL, priv, &irq->cookie);    /* filter, no handler */
    ...
        device_printf(priv->dev, "Failed to setup irq %d for mgmnt queue, err: %d\n",
            ...
    bus_describe_intr(priv->dev, irq->res, irq->cookie, "mgmnt");
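/*
 * Net effect, read off the three blocks above: MSI-X vector i serves Tx
 * queue i, vector num_tx + j serves Rx queue j, and the final vector
 * (num_tx + num_rx) is the management interrupt; each ring records its
 * vector in com->ntfy_id.
 */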
/*
 * gve_unmask_all_queue_irqs(): write each queue's irq doorbell; the two
 * helpers suggest a GQI branch (write 0) and a DQO branch (value elided).
 */
    for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
        tx = &priv->tx[idx];
        ...
            gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
        ...
            gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset,
                ...

    for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
        rx = &priv->rx[idx];
        ...
            gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
        ...
            gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,
                ...
/* gve_mask_all_queue_irqs(): GVE_IRQ_MASK goes to every queue's irq doorbell. */
    for (int idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
        struct gve_tx_ring *tx = &priv->tx[idx];
        gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
    }
    for (int idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
        struct gve_rx_ring *rx = &priv->rx[idx];
        gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
    }
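/*
 * Usage sketch (assumption, not from the matched lines): the mask/unmask
 * pair would typically bracket a reconfiguration, quiescing queue
 * interrupts while rings are torn down and rebuilt.
 */
    gve_mask_all_queue_irqs(priv);
    /* ... tear down and recreate the Tx/Rx rings ... */
    gve_unmask_all_queue_irqs(priv);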