/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "gve.h"

/* Device register and doorbell BARs hold big-endian values; convert on each access. */
uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
{
	return (be32toh(bus_read_4(priv->reg_bar, offset)));
}

void
gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->reg_bar, offset, htobe32(val));
}

void
gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->db_bar, offset, htobe32(val));
}

void
gve_alloc_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		stat[i] = counter_u64_alloc(M_WAITOK);
}

void
gve_free_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		counter_u64_free(stat[i]);
}
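
/*
 * DMA helpers. gve_dma_alloc_coherent() allocates device-visible memory and
 * maps it in one step; gve_dmamap_create() only creates a tag and map for
 * memory the caller has already placed in dma->cpu_addr. Both build a
 * single-segment tag, so each handle describes one contiguous region.
 */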

/* Currently assumes a single segment. */
static void
gve_dmamap_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	if (error == 0)
		*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamem_alloc(dma->tag, (void **) &dma->cpu_addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_NOWAIT);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err);
		goto free_mem;
	}

	return (0);

free_mem:
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}
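
/*
 * Usage sketch for the pair above (hypothetical caller; the size, alignment,
 * and error handling are illustrative only):
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *		return (ENOMEM);
 *	... CPU accesses go through dma.cpu_addr; the device is given
 *	    dma.bus_addr ...
 *	gve_dma_free_coherent(&dma);
 */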

void
gve_dma_free_coherent(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
	bus_dma_tag_destroy(dma->tag);
}

int
gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamap_create(dma->tag, BUS_DMA_COHERENT, &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_WAITOK);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto destroy_map;
	}

	return (0);

destroy_map:
	bus_dmamap_destroy(dma->tag, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}

void
gve_dmamap_destroy(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamap_destroy(dma->tag, dma->map);
	bus_dma_tag_destroy(dma->tag);
}
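
/*
 * The management interrupt runs as a filter (primary) handler: it only
 * enqueues the service task and returns FILTER_HANDLED, deferring all real
 * work to the service taskqueue.
 */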

static int
gve_mgmnt_intr(void *arg)
{
	struct gve_priv *priv = arg;

	taskqueue_enqueue(priv->service_tq, &priv->service_task);
	return (FILTER_HANDLED);
}

void
gve_free_irqs(struct gve_priv *priv)
{
	struct gve_irq *irq;
	int num_irqs;
	int rid;
	int rc;
	int i;

	if (priv->irq_tbl == NULL) {
		device_printf(priv->dev, "No irq table, nothing to free\n");
		return;
	}

	num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;

	for (i = 0; i < num_irqs; i++) {
		irq = &priv->irq_tbl[i];
		if (irq->res == NULL)
			continue;

		rid = rman_get_rid(irq->res);

		rc = bus_teardown_intr(priv->dev, irq->res, irq->cookie);
		if (rc != 0)
			device_printf(priv->dev, "Failed to teardown irq num %d\n",
			    rid);

		rc = bus_release_resource(priv->dev, SYS_RES_IRQ,
		    rid, irq->res);
		if (rc != 0)
			device_printf(priv->dev, "Failed to release irq num %d\n",
			    rid);

		irq->res = NULL;
		irq->cookie = NULL;
	}

	free(priv->irq_tbl, M_GVE);
	priv->irq_tbl = NULL;

	/* Safe to call even if msix was never alloced */
	pci_release_msi(priv->dev);
}
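
/*
 * MSI-X vector layout: one vector per Tx queue, then one per Rx queue, then a
 * final vector for the management interrupt, i.e. num_tx + num_rx + 1 vectors
 * in total. SYS_RES_IRQ rids are 1-based, hence the "+ 1" on each rid below.
 */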

int
gve_alloc_irqs(struct gve_priv *priv)
{
	int num_tx = priv->tx_cfg.num_queues;
	int num_rx = priv->rx_cfg.num_queues;
	int req_nvecs = num_tx + num_rx + 1;
	int got_nvecs = req_nvecs;
	struct gve_irq *irq;
	int i, j, m;
	int rid;
	int err;

	struct gve_ring_com *com;
	struct gve_rx_ring *rx;
	struct gve_tx_ring *tx;

	if (pci_alloc_msix(priv->dev, &got_nvecs) != 0) {
		device_printf(priv->dev, "Failed to acquire any msix vectors\n");
		err = ENXIO;
		goto abort;
	} else if (got_nvecs != req_nvecs) {
		device_printf(priv->dev, "Tried to acquire %d msix vectors, got only %d\n",
		    req_nvecs, got_nvecs);
		err = ENOSPC;
		goto abort;
	}

	if (bootverbose)
		device_printf(priv->dev, "Enabled MSIX with %d vectors\n", got_nvecs);

	priv->irq_tbl = malloc(sizeof(struct gve_irq) * req_nvecs, M_GVE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < num_tx; i++) {
		irq = &priv->irq_tbl[i];
		tx = &priv->tx[i];
		com = &tx->com;
		rid = i + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev, "Failed to alloc irq %d for Tx queue %d\n",
			    rid, i);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_tx_intr, NULL, &priv->tx[i], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
			    "err: %d\n", rid, i, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "tx%d", i);
		com->ntfy_id = i;
	}

	for (j = 0; j < num_rx; j++) {
		irq = &priv->irq_tbl[i + j];
		rx = &priv->rx[j];
		com = &rx->com;
		rid = i + j + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev,
			    "Failed to alloc irq %d for Rx queue %d\n", rid, j);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_rx_intr, NULL, &priv->rx[j], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
			    "err: %d\n", rid, j, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "rx%d", j);
		com->ntfy_id = i + j;
	}

	m = i + j;
	rid = m + 1;
	irq = &priv->irq_tbl[m];

	irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(priv->dev, "Failed to allocate irq %d for mgmnt queue\n", rid);
		err = ENOMEM;
		goto abort;
	}

	err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
	    gve_mgmnt_intr, NULL, priv, &irq->cookie);
	if (err != 0) {
		device_printf(priv->dev, "Failed to setup irq %d for mgmnt queue, err: %d\n",
		    rid, err);
		goto abort;
	}

	bus_describe_intr(priv->dev, irq->res, irq->cookie, "mgmnt");

	return (0);

abort:
	gve_free_irqs(priv);
	return (err);
}
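
/*
 * Queue interrupts are masked and unmasked through the doorbell BAR: writing
 * GVE_IRQ_MASK to a queue's irq_db_offset masks its interrupt, and writing 0
 * unmasks it again.
 */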

void
gve_unmask_all_queue_irqs(struct gve_priv *priv)
{
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;
	int idx;

	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		tx = &priv->tx[idx];
		gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		rx = &priv->rx[idx];
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
	}
}

void
gve_mask_all_queue_irqs(struct gve_priv *priv)
{
	for (int idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		struct gve_tx_ring *tx = &priv->tx[idx];

		gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
	}
	for (int idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		struct gve_rx_ring *rx = &priv->rx[idx];

		gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
	}
}