/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "gdma_util.h"
#include "mana.h"


static mana_vendor_id_t mana_id_table[] = {
	{ PCI_VENDOR_ID_MICROSOFT, PCI_DEV_ID_MANA_VF},
	/* Last entry */
	{ 0, 0}
};

static inline uint32_t
mana_gd_r32(struct gdma_context *g, uint64_t offset)
{
	uint32_t v = bus_space_read_4(g->gd_bus.bar0_t,
	    g->gd_bus.bar0_h, offset);
	rmb();
	return (v);
}

#if defined(__amd64__)
static inline uint64_t
mana_gd_r64(struct gdma_context *g, uint64_t offset)
{
	uint64_t v = bus_space_read_8(g->gd_bus.bar0_t,
	    g->gd_bus.bar0_h, offset);
	rmb();
	return (v);
}
#else
static inline uint64_t
mana_gd_r64(struct gdma_context *g, uint64_t offset)
{
	uint64_t v;
	uint32_t *vp = (uint32_t *)&v;

	*vp = mana_gd_r32(g, offset);
	*(vp + 1) = mana_gd_r32(g, offset + 4);
	rmb();
	return (v);
}
#endif

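/*
 * Query the device for the maximum numbers of MSI-X vectors, EQs, CQs, SQs
 * and RQs it supports, then clamp the usable MSI-X count and the per-port
 * queue count accordingly.
 */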
static int
mana_gd_query_max_resources(device_t dev)
{
	struct gdma_context *gc = device_get_softc(dev);
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
	    sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "Failed to query resource info: %d, 0x%x\n",
		    err, resp.hdr.status);
		return err ? err : EPROTO;
	}

	mana_dbg(NULL, "max_msix %u, max_eq %u, max_cq %u, "
	    "max_sq %u, max_rq %u\n",
	    resp.max_msix, resp.max_eq, resp.max_cq,
	    resp.max_sq, resp.max_rq);

	if (gc->num_msix_usable > resp.max_msix)
		gc->num_msix_usable = resp.max_msix;

	if (gc->num_msix_usable <= 1)
		return ENOSPC;

	gc->max_num_queues = mp_ncpus;
	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
		gc->max_num_queues = MANA_MAX_NUM_QUEUES;

	if (gc->max_num_queues > resp.max_eq)
		gc->max_num_queues = resp.max_eq;

	if (gc->max_num_queues > resp.max_cq)
		gc->max_num_queues = resp.max_cq;

	if (gc->max_num_queues > resp.max_sq)
		gc->max_num_queues = resp.max_sq;

	if (gc->max_num_queues > resp.max_rq)
		gc->max_num_queues = resp.max_rq;

	return 0;
}

static int
mana_gd_detect_devices(device_t dev)
{
	struct gdma_context *gc = device_get_softc(dev);
	struct gdma_list_devices_resp resp = {};
	struct gdma_general_req req = {};
	struct gdma_dev_id gd_dev;
	uint32_t i, max_num_devs;
	uint16_t dev_type;
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
	    sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "Failed to detect devices: %d, 0x%x\n", err,
		    resp.hdr.status);
		return err ? err : EPROTO;
	}

	max_num_devs = min_t(uint32_t, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);

	for (i = 0; i < max_num_devs; i++) {
		gd_dev = resp.devs[i];
		dev_type = gd_dev.type;

		mana_dbg(NULL, "gdma dev %d, type %u\n",
		    i, dev_type);

		/* HWC is already detected in mana_hwc_create_channel(). */
		if (dev_type == GDMA_DEVICE_HWC)
			continue;

		if (dev_type == GDMA_DEVICE_MANA) {
			gc->mana.gdma_context = gc;
			gc->mana.dev_id = gd_dev;
		}
	}

	return gc->mana.dev_id.type == 0 ? ENODEV : 0;
}

int
mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}

void
mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments %d!", nseg));
	*paddr = segs->ds_addr;
}

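/*
 * Allocate a physically contiguous, page-aligned DMA buffer for a GDMA
 * queue.  The length must be a power of two and at least PAGE_SIZE.
 */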
int
mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi)
{
	bus_addr_t dma_handle;
	void *buf;
	int err;

	if (!gc || !gmi)
		return EINVAL;

	if (length < PAGE_SIZE || (length != roundup_pow_of_two(length)))
		return EINVAL;

	err = bus_dma_tag_create(bus_get_dma_tag(gc->dev),	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary	*/
	    BUS_SPACE_MAXADDR,			/* lowaddr		*/
	    BUS_SPACE_MAXADDR,			/* highaddr		*/
	    NULL, NULL,				/* filter, filterarg	*/
	    length,				/* maxsize		*/
	    1,					/* nsegments		*/
	    length,				/* maxsegsize		*/
	    0,					/* flags		*/
	    NULL, NULL,				/* lockfunc, lockfuncarg*/
	    &gmi->dma_tag);
	if (err) {
		device_printf(gc->dev,
		    "failed to create dma tag, err: %d\n", err);
		return (err);
	}

	/*
	 * Must have BUS_DMA_ZERO flag to clear the dma memory.
	 * Otherwise the queue overflow detection mechanism does
	 * not work.
	 */
	err = bus_dmamem_alloc(gmi->dma_tag, &buf,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &gmi->dma_map);
	if (err) {
		device_printf(gc->dev,
		    "failed to alloc dma mem, err: %d\n", err);
		bus_dma_tag_destroy(gmi->dma_tag);
		return (err);
	}

	err = bus_dmamap_load(gmi->dma_tag, gmi->dma_map, buf,
	    length, mana_gd_dma_map_paddr, &dma_handle, BUS_DMA_NOWAIT);
	if (err) {
		device_printf(gc->dev,
		    "failed to load dma mem, err: %d\n", err);
		bus_dmamem_free(gmi->dma_tag, buf, gmi->dma_map);
		bus_dma_tag_destroy(gmi->dma_tag);
		return (err);
	}

	gmi->dev = gc->dev;
	gmi->dma_handle = dma_handle;
	gmi->virt_addr = buf;
	gmi->length = length;

	return 0;
}

void
mana_gd_free_memory(struct gdma_mem_info *gmi)
{
	bus_dmamap_unload(gmi->dma_tag, gmi->dma_map);
	bus_dmamem_free(gmi->dma_tag, gmi->virt_addr, gmi->dma_map);
	bus_dma_tag_destroy(gmi->dma_tag);
}

static int
mana_gd_create_hw_eq(struct gdma_context *gc,
    struct gdma_queue *queue)
{
	struct gdma_create_queue_resp resp = {};
	struct gdma_create_queue_req req = {};
	int err;

	if (queue->type != GDMA_EQ)
		return EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
	    sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doolbell_id = queue->gdma_dev->doorbell;
	req.gdma_region = queue->mem_info.gdma_region;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "Failed to create queue: %d, 0x%x\n",
		    err, resp.hdr.status);
		return err ? err : EPROTO;
	}

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
	return 0;
}

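/*
 * Ask the device to stop delivering events on the given queue.  Only EQs
 * are expected here; any other type is flagged with a warning.
 */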
static int
mana_gd_disable_queue(struct gdma_queue *queue)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	struct gdma_disable_queue_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (queue->type != GDMA_EQ)
		mana_warn(NULL, "Not event queue type 0x%x\n",
		    queue->type);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
	    sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.queue_index = queue->id;
	req.alloc_res_id_on_creation = 1;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "Failed to disable queue: %d, 0x%x\n", err,
		    resp.hdr.status);
		return err ? err : EPROTO;
	}

	return 0;
}

#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8

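/*
 * Format a doorbell entry for the given queue and write it to the doorbell
 * page.  The entry layout and the offset within the page depend on the
 * queue type; tail_ptr reports the driver's progress, and num_req carries
 * the WQE count for RQs or the arm bit for EQs and CQs.
 */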
static void
mana_gd_ring_doorbell(struct gdma_context *gc, uint32_t db_index,
    enum gdma_queue_type q_type, uint32_t qid,
    uint32_t tail_ptr, uint8_t num_req)
{
	union gdma_doorbell_entry e = {};
	void __iomem *addr;

	addr = (char *)gc->db_page_base + gc->db_page_size * db_index;
	switch (q_type) {
	case GDMA_EQ:
		e.eq.id = qid;
		e.eq.tail_ptr = tail_ptr;
		e.eq.arm = num_req;

		addr = (char *)addr + DOORBELL_OFFSET_EQ;
		break;

	case GDMA_CQ:
		e.cq.id = qid;
		e.cq.tail_ptr = tail_ptr;
		e.cq.arm = num_req;

		addr = (char *)addr + DOORBELL_OFFSET_CQ;
		break;

	case GDMA_RQ:
		e.rq.id = qid;
		e.rq.tail_ptr = tail_ptr;
		e.rq.wqe_cnt = num_req;

		addr = (char *)addr + DOORBELL_OFFSET_RQ;
		break;

	case GDMA_SQ:
		e.sq.id = qid;
		e.sq.tail_ptr = tail_ptr;

		addr = (char *)addr + DOORBELL_OFFSET_SQ;
		break;

	default:
		mana_warn(NULL, "Invalid queue type 0x%x\n", q_type);
		return;
	}

	/* Ensure all writes are done before ring doorbell */
	wmb();

#if defined(__amd64__)
	writeq(addr, e.as_uint64);
#else
	uint32_t *p = (uint32_t *)&e.as_uint64;
	writel(addr, *p);
	writel((char *)addr + 4, *(p + 1));
#endif
}

void
mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
	    queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}

void
mana_gd_arm_cq(struct gdma_queue *cq)
{
	struct gdma_context *gc = cq->gdma_dev->gdma_context;

	uint32_t num_cqe = cq->queue_size / GDMA_CQE_SIZE;

	uint32_t head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
	    head, SET_ARM_BIT);
}

static void
mana_gd_process_eqe(struct gdma_queue *eq)
{
	uint32_t head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
	struct gdma_context *gc = eq->gdma_dev->gdma_context;
	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
	union gdma_eqe_info eqe_info;
	enum gdma_eqe_type type;
	struct gdma_event event;
	struct gdma_queue *cq;
	struct gdma_eqe *eqe;
	uint32_t cq_id;

	eqe = &eq_eqe_ptr[head];
	eqe_info.as_uint32 = eqe->eqe_info;
	type = eqe_info.type;

	switch (type) {
	case GDMA_EQE_COMPLETION:
		cq_id = eqe->details[0] & 0xFFFFFF;
		if (cq_id >= gc->max_num_cqs) {
			mana_warn(NULL,
			    "failed: cq_id %u > max_num_cqs %u\n",
			    cq_id, gc->max_num_cqs);
			break;
		}

		cq = gc->cq_table[cq_id];
		if (!cq || cq->type != GDMA_CQ || cq->id != cq_id) {
			mana_warn(NULL,
			    "failed: invalid cq_id %u\n", cq_id);
			break;
		}

		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);

		break;

	case GDMA_EQE_TEST_EVENT:
		gc->test_event_eq_id = eq->id;

		mana_dbg(NULL,
		    "EQE TEST EVENT received for EQ %u\n", eq->id);

		complete(&gc->eq_test_event);
		break;

	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
	case GDMA_EQE_HWC_INIT_DATA:
	case GDMA_EQE_HWC_INIT_DONE:
		if (!eq->eq.callback)
			break;

		event.type = type;
		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
		eq->eq.callback(eq->eq.context, eq, &event);
		break;

	default:
		break;
	}
}

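/*
 * Drain pending entries from an event queue.  The owner bits in each EQE
 * distinguish new entries from stale ones and from an overflow condition.
 * After processing, the EQ doorbell is rung, re-arming the queue unless the
 * MANA poll loop still has work outstanding.
 */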
static void
mana_gd_process_eq_events(void *arg)
{
	uint32_t owner_bits, new_bits, old_bits;
	union gdma_eqe_info eqe_info;
	struct gdma_eqe *eq_eqe_ptr;
	struct gdma_queue *eq = arg;
	struct gdma_context *gc;
	uint32_t head, num_eqe;
	struct gdma_eqe *eqe;
	unsigned int arm_bit;
	int i, j;

	gc = eq->gdma_dev->gdma_context;

	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
	eq_eqe_ptr = eq->queue_mem_ptr;

	bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Process up to 5 EQEs at a time, and update the HW head. */
	for (i = 0; i < 5; i++) {
		eqe = &eq_eqe_ptr[eq->head % num_eqe];
		eqe_info.as_uint32 = eqe->eqe_info;
		owner_bits = eqe_info.owner_bits;

		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;

		/* No more entries */
		if (owner_bits == old_bits)
			break;

		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
		if (owner_bits != new_bits) {
			/* Something wrong. Log for debugging purpose */
			device_printf(gc->dev,
			    "EQ %d: overflow detected, "
			    "i = %d, eq->head = %u "
			    "got owner_bits = %u, new_bits = %u "
			    "eqe addr %p, eqe->eqe_info 0x%x, "
			    "eqe type = %x, reserved1 = %x, client_id = %x, "
			    "reserved2 = %x, owner_bits = %x\n",
			    eq->id, i, eq->head,
			    owner_bits, new_bits,
			    eqe, eqe->eqe_info,
			    eqe_info.type, eqe_info.reserved1,
			    eqe_info.client_id, eqe_info.reserved2,
			    eqe_info.owner_bits);

			uint32_t *eqe_dump = (uint32_t *) eq_eqe_ptr;
			for (j = 0; j < 20; j++) {
				device_printf(gc->dev, "%p: %x\t%x\t%x\t%x\n",
				    &eqe_dump[j * 4], eqe_dump[j * 4], eqe_dump[j * 4 + 1],
				    eqe_dump[j * 4 + 2], eqe_dump[j * 4 + 3]);
			}
			break;
		}

		mana_gd_process_eqe(eq);

		eq->head++;
	}

	bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
	    BUS_DMASYNC_PREREAD);

	/* Always rearm the EQ for HWC. */
	if (mana_gd_is_hwc(eq->gdma_dev)) {
		arm_bit = SET_ARM_BIT;
	} else if (eq->eq.work_done < eq->eq.budget &&
	    eq->eq.do_not_ring_db == false) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
	    head, arm_bit);
}

#define MANA_POLL_BUDGET	8
#define MANA_RX_BUDGET		256

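/*
 * Taskqueue handler for a MANA EQ.  Keep draining the EQ until either the
 * work done in one pass drops below the budget or the poll budget is
 * exhausted.
 */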
static void
mana_poll(void *arg, int pending)
{
	struct gdma_queue *eq = arg;
	int i;

	eq->eq.work_done = 0;
	eq->eq.budget = MANA_RX_BUDGET;

	for (i = 0; i < MANA_POLL_BUDGET; i++) {
		/*
		 * If this is the last loop, set the budget big enough
		 * so it will arm the EQ anyway.
		 */
		if (i == (MANA_POLL_BUDGET - 1))
			eq->eq.budget = CQE_POLLING_BUFFER + 1;

		mana_gd_process_eq_events(eq);

		if (eq->eq.work_done < eq->eq.budget)
			break;

		eq->eq.work_done = 0;
	}
}

static void
mana_gd_schedule_task(void *arg)
{
	struct gdma_queue *eq = arg;

	taskqueue_enqueue(eq->eq.cleanup_tq, &eq->eq.cleanup_task);
}

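/*
 * Reserve an MSI-X slot from the bitmap and attach this EQ to it.  For MANA
 * EQs a per-queue cleanup taskqueue is also created here and, when requested,
 * pinned to a CPU chosen round-robin; the HWC EQ is serviced directly from
 * the interrupt handler instead.
 */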
static int
mana_gd_register_irq(struct gdma_queue *queue,
    const struct gdma_queue_spec *spec)
{
	static int mana_last_bind_cpu = -1;
	struct gdma_dev *gd = queue->gdma_dev;
	bool is_mana = mana_gd_is_mana(gd);
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msi_index;
	int err;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	mtx_lock_spin(&r->lock_spin);

	msi_index = find_first_zero_bit(r->map, r->size);
	if (msi_index >= r->size) {
		err = ENOSPC;
	} else {
		bitmap_set(r->map, msi_index, 1);
		queue->eq.msix_index = msi_index;
		err = 0;
	}

	mtx_unlock_spin(&r->lock_spin);

	if (err)
		return err;

	if (unlikely(msi_index >= gc->num_msix_usable)) {
		device_printf(gc->dev,
		    "chose an invalid msix index %d, usable %d\n",
		    msi_index, gc->num_msix_usable);
		return ENOSPC;
	}

	gic = &gc->irq_contexts[msi_index];

	if (is_mana) {
		struct mana_port_context *apc = if_getsoftc(spec->eq.ndev);
		queue->eq.do_not_ring_db = false;

		NET_TASK_INIT(&queue->eq.cleanup_task, 0, mana_poll, queue);
		queue->eq.cleanup_tq =
		    taskqueue_create_fast("mana eq cleanup",
		    M_WAITOK, taskqueue_thread_enqueue,
		    &queue->eq.cleanup_tq);

		if (mana_last_bind_cpu < 0)
			mana_last_bind_cpu = CPU_FIRST();
		queue->eq.cpu = mana_last_bind_cpu;
		mana_last_bind_cpu = CPU_NEXT(mana_last_bind_cpu);

		/* XXX Name is not optimal. However we have to start
		 * the task here. Otherwise, test eq will have no
		 * handler.
		 */
		if (apc->bind_cleanup_thread_cpu) {
			cpuset_t cpu_mask;
			CPU_SETOF(queue->eq.cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(&queue->eq.cleanup_tq,
			    1, PI_NET, &cpu_mask,
			    "mana eq poll msix %u on cpu %d",
			    msi_index, queue->eq.cpu);
		} else {

			taskqueue_start_threads(&queue->eq.cleanup_tq, 1,
			    PI_NET, "mana eq poll on msix %u", msi_index);
		}
	}

	if (unlikely(gic->handler || gic->arg)) {
		device_printf(gc->dev,
		    "interrupt handler or arg already assigned, "
		    "msix index: %d\n", msi_index);
	}

	gic->arg = queue;

	if (is_mana)
		gic->handler = mana_gd_schedule_task;
	else
		gic->handler = mana_gd_process_eq_events;

	mana_dbg(NULL, "registered msix index %d vector %d irq %ju\n",
	    msi_index, gic->msix_e.vector, rman_get_start(gic->res));

	return 0;
}

static void
mana_gd_deregiser_irq(struct gdma_queue *queue)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msix_index;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	/* At most num_online_cpus() + 1 interrupts are used. */
	msix_index = queue->eq.msix_index;
	if (unlikely(msix_index >= gc->num_msix_usable))
		return;

	gic = &gc->irq_contexts[msix_index];
	gic->handler = NULL;
	gic->arg = NULL;

	mtx_lock_spin(&r->lock_spin);
	bitmap_clear(r->map, msix_index, 1);
	mtx_unlock_spin(&r->lock_spin);

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

	mana_dbg(NULL, "deregistered msix index %d vector %d irq %ju\n",
	    msix_index, gic->msix_e.vector, rman_get_start(gic->res));
}

int
mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};
	device_t dev = gc->dev;
	int err;

	sx_xlock(&gc->eq_test_event_sx);

	init_completion(&gc->eq_test_event);
	gc->test_event_eq_id = INVALID_QUEUE_ID;

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
	    sizeof(req), sizeof(resp));

	req.hdr.dev_id = eq->gdma_dev->dev_id;
	req.queue_index = eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req,
	    sizeof(resp), &resp);
	if (err) {
		device_printf(dev, "test_eq failed: %d\n", err);
		goto out;
	}

	err = EPROTO;

	if (resp.hdr.status) {
		device_printf(dev, "test_eq failed: 0x%x\n",
		    resp.hdr.status);
		goto out;
	}

	if (wait_for_completion_timeout(&gc->eq_test_event, 30 * hz)) {
		device_printf(dev, "test_eq timed out on queue %d\n",
		    eq->id);
		goto out;
	}

	if (eq->id != gc->test_event_eq_id) {
		device_printf(dev,
		    "test_eq got an event on wrong queue %d (%d)\n",
		    gc->test_event_eq_id, eq->id);
		goto out;
	}

	err = 0;
out:
	sx_xunlock(&gc->eq_test_event_sx);
	return err;
}

static void
mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
    struct gdma_queue *queue)
{
	int err;

	if (flush_evenets) {
		err = mana_gd_test_eq(gc, queue);
		if (err)
			device_printf(gc->dev,
			    "Failed to flush EQ: %d\n", err);
	}

	mana_gd_deregiser_irq(queue);

	if (mana_gd_is_mana(queue->gdma_dev)) {
		while (taskqueue_cancel(queue->eq.cleanup_tq,
		    &queue->eq.cleanup_task, NULL))
			taskqueue_drain(queue->eq.cleanup_tq,
			    &queue->eq.cleanup_task);

		taskqueue_free(queue->eq.cleanup_tq);
	}

	if (queue->eq.disable_needed)
		mana_gd_disable_queue(queue);
}

static int
mana_gd_create_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    bool create_hwq, struct gdma_queue *queue)
{
	struct gdma_context *gc = gd->gdma_context;
	device_t dev = gc->dev;
	uint32_t log2_num_entries;
	int err;

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

	if (spec->eq.log2_throttle_limit > log2_num_entries) {
		device_printf(dev,
		    "EQ throttling limit (%lu) > maximum EQE (%u)\n",
		    spec->eq.log2_throttle_limit, log2_num_entries);
		return EINVAL;
	}

	err = mana_gd_register_irq(queue, spec);
	if (err) {
		device_printf(dev, "Failed to register irq: %d\n", err);
		return err;
	}

	queue->eq.callback = spec->eq.callback;
	queue->eq.context = spec->eq.context;
	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

	if (create_hwq) {
		err = mana_gd_create_hw_eq(gc, queue);
		if (err)
			goto out;

		err = mana_gd_test_eq(gc, queue);
		if (err)
			goto out;
	}

	return 0;
out:
	device_printf(dev, "Failed to create EQ: %d\n", err);
	mana_gd_destroy_eq(gc, false, queue);
	return err;
}

static void
mana_gd_create_cq(const struct gdma_queue_spec *spec,
    struct gdma_queue *queue)
{
	uint32_t log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->cq.parent = spec->cq.parent_eq;
	queue->cq.context = spec->cq.context;
	queue->cq.callback = spec->cq.callback;
}

static void
mana_gd_destroy_cq(struct gdma_context *gc,
    struct gdma_queue *queue)
{
	uint32_t id = queue->id;

	if (id >= gc->max_num_cqs)
		return;

	if (!gc->cq_table[id])
		return;

	gc->cq_table[id] = NULL;
}

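/*
 * Create a queue for the HW channel.  The queue memory is allocated
 * directly (no GDMA DMA region is registered), and only EQ and CQ types
 * receive additional setup here.
 */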
int
mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!queue)
		return ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_EQ)
		err = mana_gd_create_eq(gd, spec, false, queue);
	else if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	free(queue, M_DEVBUF);
	return err;
}

static void
mana_gd_destroy_dma_region(struct gdma_context *gc, uint64_t gdma_region)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (gdma_region == GDMA_INVALID_DMA_REGION)
		return;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
	    sizeof(resp));
	req.gdma_region = gdma_region;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
	    &resp);
	if (err || resp.hdr.status)
		device_printf(gc->dev,
		    "Failed to destroy DMA region: %d, 0x%x\n",
		    err, resp.hdr.status);
}

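/*
 * Register the buffer described by gmi with the device as a DMA region by
 * sending the physical address of every 4K page through the HW channel.
 * On success the region handle is saved in gmi->gdma_region.
 */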
static int
mana_gd_create_dma_region(struct gdma_dev *gd,
    struct gdma_mem_info *gmi)
{
	unsigned int num_page = gmi->length / PAGE_SIZE;
	struct gdma_create_dma_region_req *req = NULL;
	struct gdma_create_dma_region_resp resp = {};
	struct gdma_context *gc = gd->gdma_context;
	struct hw_channel_context *hwc;
	uint32_t length = gmi->length;
	uint32_t req_msg_size;
	int err;
	int i;

	if (length < PAGE_SIZE || !is_power_of_2(length)) {
		mana_err(NULL, "gmi size incorrect: %u\n", length);
		return EINVAL;
	}

	if (offset_in_page((uint64_t)gmi->virt_addr) != 0) {
		mana_err(NULL, "gmi not page aligned: %p\n",
		    gmi->virt_addr);
		return EINVAL;
	}

	hwc = gc->hwc.driver_data;
	req_msg_size = sizeof(*req) + num_page * sizeof(uint64_t);
	if (req_msg_size > hwc->max_req_msg_size) {
		mana_err(NULL, "req msg size too large: %u, %u\n",
		    req_msg_size, hwc->max_req_msg_size);
		return EINVAL;
	}

	req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (!req)
		return ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
	    req_msg_size, sizeof(resp));
	req->length = length;
	req->offset_in_page = 0;
	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
	req->page_count = num_page;
	req->page_addr_list_len = num_page;

	for (i = 0; i < num_page; i++)
		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;

	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
	if (err)
		goto out;

	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
		device_printf(gc->dev, "Failed to create DMA region: 0x%x\n",
		    resp.hdr.status);
		err = EPROTO;
		goto out;
	}

	gmi->gdma_region = resp.gdma_region;
out:
	free(req, M_DEVBUF);
	return err;
}

int
mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_EQ)
		return EINVAL;

	queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!queue)
		return ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	err = mana_gd_create_eq(gd, spec, true, queue);
	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;

out:
	mana_gd_free_memory(gmi);
free_q:
	free(queue, M_DEVBUF);
	return err;
}

int
mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
	    spec->type != GDMA_RQ)
		return EINVAL;

	queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!queue)
		return ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	*queue_ptr = queue;
	return 0;

out:
	mana_gd_free_memory(gmi);
free_q:
	free(queue, M_DEVBUF);
	return err;
}

void
mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
	struct gdma_mem_info *gmi = &queue->mem_info;

	switch (queue->type) {
	case GDMA_EQ:
		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
		break;

	case GDMA_CQ:
		mana_gd_destroy_cq(gc, queue);
		break;

	case GDMA_RQ:
		break;

	case GDMA_SQ:
		break;

	default:
		device_printf(gc->dev,
		    "Can't destroy unknown queue: type = %d\n",
		    queue->type);
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
	mana_gd_free_memory(gmi);
	free(queue, M_DEVBUF);
}

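/*
 * Report the range of GDMA protocol versions this driver supports so the
 * device can verify that it can talk to us.
 */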
int
mana_gd_verify_vf_version(device_t dev)
{
	struct gdma_context *gc = device_get_softc(dev);
	struct gdma_verify_ver_resp resp = {};
	struct gdma_verify_ver_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
	    sizeof(req), sizeof(resp));

	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req.protocol_ver_max = GDMA_PROTOCOL_LAST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "VfVerifyVersionOutput: %d, status=0x%x\n",
		    err, resp.hdr.status);
		return err ? err : EPROTO;
	}

	return 0;
}

int
mana_gd_register_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_register_device_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
	    sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "gdma_register_device_resp failed: %d, 0x%x\n",
		    err, resp.hdr.status);
		return err ? err : EPROTO;
	}

	gd->pdid = resp.pdid;
	gd->gpa_mkey = resp.gpa_mkey;
	gd->doorbell = resp.db_id;

	mana_dbg(NULL, "mana device pdid %u, gpa_mkey %u, doorbell %u \n",
	    gd->pdid, gd->gpa_mkey, gd->doorbell);

	return 0;
}

int
mana_gd_deregister_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_general_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	if (gd->pdid == INVALID_PDID)
		return EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
	    sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		device_printf(gc->dev,
		    "Failed to deregister device: %d, 0x%x\n",
		    err, resp.hdr.status);
		if (!err)
			err = EPROTO;
	}

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	return err;
}

uint32_t
mana_gd_wq_avail_space(struct gdma_queue *wq)
{
	uint32_t used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
	uint32_t wq_size = wq->queue_size;

	if (used_space > wq_size) {
		mana_warn(NULL, "failed: used space %u > queue size %u\n",
		    used_space, wq_size);
	}

	return wq_size - used_space;
}

uint8_t *
mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset)
{
	uint32_t offset =
	    (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

	if ((offset + GDMA_WQE_BU_SIZE) > wq->queue_size) {
		mana_warn(NULL, "failed: write end out of queue bound %u, "
		    "queue size %u\n",
		    offset + GDMA_WQE_BU_SIZE, wq->queue_size);
	}

	return (uint8_t *)wq->queue_mem_ptr + offset;
}

static uint32_t
mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
    enum gdma_queue_type q_type,
    uint32_t client_oob_size, uint32_t sgl_data_size,
    uint8_t *wqe_ptr)
{
	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
	uint8_t *ptr;

	memset(header, 0, sizeof(struct gdma_wqe));
	header->num_sge = wqe_req->num_sge;
	header->inline_oob_size_div4 = client_oob_size / sizeof(uint32_t);

	if (oob_in_sgl) {
		if (!pad_data || wqe_req->num_sge < 2) {
			mana_warn(NULL, "no pad_data or num_sge < 2\n");
		}

		header->client_oob_in_sgl = 1;

		if (pad_data)
			header->last_vbytes = wqe_req->sgl[0].size;
	}

	if (q_type == GDMA_SQ)
		header->client_data_unit = wqe_req->client_data_unit;

	/*
	 * The size of gdma_wqe + client_oob_size must be less than or equal
	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
	 * the queue memory buffer boundary.
	 */
	ptr = wqe_ptr + sizeof(header);

	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

		if (client_oob_size > wqe_req->inline_oob_size)
			memset(ptr + wqe_req->inline_oob_size, 0,
			    client_oob_size - wqe_req->inline_oob_size);
	}

	return sizeof(header) + client_oob_size;
}

static void
mana_gd_write_sgl(struct gdma_queue *wq, uint8_t *wqe_ptr,
    const struct gdma_wqe_request *wqe_req)
{
	uint32_t sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	const uint8_t *address = (uint8_t *)wqe_req->sgl;
	uint8_t *base_ptr, *end_ptr;
	uint32_t size_to_end;

	base_ptr = wq->queue_mem_ptr;
	end_ptr = base_ptr + wq->queue_size;
	size_to_end = (uint32_t)(end_ptr - wqe_ptr);

	if (size_to_end < sgl_size) {
		memcpy(wqe_ptr, address, size_to_end);

		wqe_ptr = base_ptr;
		address += size_to_end;
		sgl_size -= size_to_end;
	}

	memcpy(wqe_ptr, address, sgl_size);
}

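/*
 * Build a WQE from the request (header, client OOB, then the SGL, padded to
 * basic-unit granularity), copy it into the work queue ring and advance the
 * queue head.  The doorbell is not rung here; see mana_gd_post_and_ring().
 */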
int
mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info)
{
	uint32_t client_oob_size = wqe_req->inline_oob_size;
	struct gdma_context *gc;
	uint32_t sgl_data_size;
	uint32_t max_wqe_size;
	uint32_t wqe_size;
	uint8_t *wqe_ptr;

	if (wqe_req->num_sge == 0)
		return EINVAL;

	if (wq->type == GDMA_RQ) {
		if (client_oob_size != 0)
			return EINVAL;

		client_oob_size = INLINE_OOB_SMALL_SIZE;

		max_wqe_size = GDMA_MAX_RQE_SIZE;
	} else {
		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
		    client_oob_size != INLINE_OOB_LARGE_SIZE)
			return EINVAL;

		max_wqe_size = GDMA_MAX_SQE_SIZE;
	}

	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
	    sgl_data_size, GDMA_WQE_BU_SIZE);
	if (wqe_size > max_wqe_size)
		return EINVAL;

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
		gc = wq->gdma_dev->gdma_context;
		device_printf(gc->dev, "unsuccessful flow control!\n");
		return ENOSPC;
	}

	if (wqe_info)
		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
	    sgl_data_size, wqe_ptr);
	if (wqe_ptr >= (uint8_t *)wq->queue_mem_ptr + wq->queue_size)
		wqe_ptr -= wq->queue_size;

	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

	wq->head += wqe_size / GDMA_WQE_BU_SIZE;

	bus_dmamap_sync(wq->mem_info.dma_tag, wq->mem_info.dma_map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

int
mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	int err;

	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
	if (err)
		return err;

	mana_gd_wq_ring_doorbell(gc, queue);

	return 0;
}

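/*
 * Read the next completion, if any, from the CQ into *comp.
 *
 * The owner bits of a CQE record which pass over the ring wrote it.  If
 * they still match the previous pass, nothing new has arrived (return 0);
 * if they match the current pass, a new completion is copied out
 * (return 1); any other value means the queue was not drained fast
 * enough and entries were overwritten, reported as -1 (overflow).
 */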
static int
mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
	uint32_t owner_bits, new_bits, old_bits;
	struct gdma_cqe *cqe;

	cqe = &cq_cqe[cq->head % num_cqe];
	owner_bits = cqe->cqe_info.owner_bits;

	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
	/* Return 0 if no more entries. */
	if (owner_bits == old_bits)
		return 0;

	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
	/* Return -1 if overflow detected. */
	if (owner_bits != new_bits)
		return -1;

	comp->wq_num = cqe->cqe_info.wq_num;
	comp->is_sq = cqe->cqe_info.is_sq;
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

	return 1;
}

int
mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
	int cqe_idx;
	int ret;

	bus_dmamap_sync(cq->mem_info.dma_tag, cq->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

		if (ret < 0) {
			cq->head -= cqe_idx;
			return ret;
		}

		if (ret == 0)
			break;

		cq->head++;
	}

	return cqe_idx;
}

static void
mana_gd_intr(void *arg)
{
	struct gdma_irq_context *gic = arg;

	if (gic->handler) {
		gic->handler(gic->arg);
	}
}

int
mana_gd_alloc_res_map(uint32_t res_avail,
    struct gdma_resource *r, const char *lock_name)
{
	int n = howmany(res_avail, BITS_PER_LONG);

	r->map =
	    malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!r->map)
		return ENOMEM;

	r->size = res_avail;
	mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);

	mana_dbg(NULL,
	    "total res %u, total number of unsigned longs %u\n",
	    r->size, n);
	return (0);
}

void
mana_gd_free_res_map(struct gdma_resource *r)
{
	if (!r || !r->map)
		return;

	free(r->map, M_DEVBUF);
	r->map = NULL;
	r->size = 0;
}

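/*
 * Cache the register state needed later: the doorbell page size, the
 * CPU-mapped address of the doorbell page region and the CPU-mapped
 * address of the shared-memory region, all read from fixed GDMA
 * registers in BAR0.
 */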
static void
mana_gd_init_registers(struct gdma_context *gc)
{
	uint64_t bar0_va = rman_get_bushandle(gc->bar0);

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

	gc->db_page_base =
	    (void *)(bar0_va + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET));

	gc->shm_base =
	    (void *)(bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET));

	mana_dbg(NULL, "db_page_size 0x%x, db_page_base %p,"
	    " shm_base %p\n",
	    gc->db_page_size, gc->db_page_base, gc->shm_base);
}

static struct resource *
mana_gd_alloc_bar(device_t dev, int bar)
{
	struct resource *res = NULL;
	struct pci_map *pm;
	int rid, type;

	if (bar < 0 || bar > PCIR_MAX_BAR_0)
		goto alloc_bar_out;

	pm = pci_find_bar(dev, PCIR_BAR(bar));
	if (!pm)
		goto alloc_bar_out;

	if (PCI_BAR_IO(pm->pm_value))
		type = SYS_RES_IOPORT;
	else
		type = SYS_RES_MEMORY;
	if (type < 0)
		goto alloc_bar_out;

	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE);
#if defined(__amd64__)
	if (res)
		mana_dbg(NULL, "bar %d: rid 0x%x, type 0x%jx,"
		    " handle 0x%jx\n",
		    bar, rid, res->r_bustag, res->r_bushandle);
#endif

alloc_bar_out:
	return (res);
}

static void
mana_gd_free_pci_res(struct gdma_context *gc)
{
	if (!gc || !gc->dev)
		return;

	if (gc->bar0 != NULL) {
		bus_release_resource(gc->dev, SYS_RES_MEMORY,
		    PCIR_BAR(GDMA_BAR0), gc->bar0);
	}

	if (gc->msix != NULL) {
		bus_release_resource(gc->dev, SYS_RES_MEMORY,
		    gc->msix_rid, gc->msix);
	}
}

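/*
 * Allocate and wire up MSI-X interrupts: up to one vector per CPU (capped
 * at MANA_MAX_NUM_QUEUES) for each possible port, plus one for the
 * Hardware Communication Channel (HWC).  If fewer vectors are granted the
 * driver continues with what it got, as long as at least two are
 * available; the vectors are then tracked in the msix_resource bitmap.
 */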
static int
mana_gd_setup_irqs(device_t dev)
{
	unsigned int max_queues_per_port = mp_ncpus;
	struct gdma_context *gc = device_get_softc(dev);
	struct gdma_irq_context *gic;
	unsigned int max_irqs;
	int nvec;
	int rc, rcc, i;

	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
		max_queues_per_port = MANA_MAX_NUM_QUEUES;

	max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;

	/* Need 1 interrupt for the Hardware Communication Channel (HWC) */
	max_irqs++;

	nvec = max_irqs;
	rc = pci_alloc_msix(dev, &nvec);
	if (unlikely(rc != 0)) {
		device_printf(dev,
		    "Failed to allocate MSI-X vectors: %d, error: %d\n",
		    nvec, rc);
		rc = ENOSPC;
		goto err_setup_irq_alloc;
	}

	if (nvec != max_irqs) {
		if (nvec == 1) {
			device_printf(dev,
			    "Not enough MSI-X vectors allocated: %d\n",
			    nvec);
			rc = ENOSPC;
			goto err_setup_irq_release;
		}
		device_printf(dev,
		    "Allocated only %d MSI-X vectors (%d requested)\n",
		    nvec, max_irqs);
	}

	gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!gc->irq_contexts) {
		rc = ENOMEM;
		goto err_setup_irq_release;
	}

	for (i = 0; i < nvec; i++) {
		gic = &gc->irq_contexts[i];
		gic->msix_e.entry = i;
		/* Vector starts from 1. */
		gic->msix_e.vector = i + 1;
		gic->handler = NULL;
		gic->arg = NULL;

		gic->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &gic->msix_e.vector, RF_ACTIVE | RF_SHAREABLE);
		if (unlikely(gic->res == NULL)) {
			rc = ENOMEM;
			device_printf(dev, "could not allocate resource "
			    "for irq vector %d\n", gic->msix_e.vector);
			goto err_setup_irq;
		}

		rc = bus_setup_intr(dev, gic->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, mana_gd_intr,
		    gic, &gic->cookie);
		if (unlikely(rc != 0)) {
			device_printf(dev, "failed to register interrupt "
			    "handler for irq %ju vector %d: error %d\n",
			    rman_get_start(gic->res), gic->msix_e.vector, rc);
			goto err_setup_irq;
		}
		gic->requested = true;

		mana_dbg(NULL, "added msix vector %d irq %ju\n",
		    gic->msix_e.vector, rman_get_start(gic->res));
	}

	rc = mana_gd_alloc_res_map(nvec, &gc->msix_resource,
	    "gdma msix res lock");
	if (rc != 0) {
		device_printf(dev, "failed to allocate memory "
		    "for msix bitmap\n");
		goto err_setup_irq;
	}

	gc->max_num_msix = nvec;
	gc->num_msix_usable = nvec;

	mana_dbg(NULL, "setup %d msix interrupts\n", nvec);

	return (0);

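	/*
	 * Error unwind: 'i' still indexes the vector that failed (or equals
	 * nvec if the failure came after the loop), so walk back down the
	 * array, tearing down any installed handler and releasing each IRQ
	 * resource, before dropping the context array and the MSI-X
	 * allocation.
	 */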
err_setup_irq:
	for (i = MIN(i, nvec - 1); i >= 0; i--) {
		gic = &gc->irq_contexts[i];
		rcc = 0;

		/*
		 * If gic->requested is true, we need to free both intr and
		 * resources.
		 */
		if (gic->requested)
			rcc = bus_teardown_intr(dev, gic->res, gic->cookie);
		if (unlikely(rcc != 0))
			device_printf(dev, "could not release "
			    "irq vector %d, error: %d\n",
			    gic->msix_e.vector, rcc);

		rcc = 0;
		if (gic->res != NULL) {
			rcc = bus_release_resource(dev, SYS_RES_IRQ,
			    gic->msix_e.vector, gic->res);
		}
		if (unlikely(rcc != 0))
			device_printf(dev, "dev has no parent while "
			    "releasing resource for irq vector %d\n",
			    gic->msix_e.vector);
		gic->requested = false;
		gic->res = NULL;
	}

	free(gc->irq_contexts, M_DEVBUF);
	gc->irq_contexts = NULL;
err_setup_irq_release:
	pci_release_msi(dev);
err_setup_irq_alloc:
	return (rc);
}

static void
mana_gd_remove_irqs(device_t dev)
{
	struct gdma_context *gc = device_get_softc(dev);
	struct gdma_irq_context *gic;
	int rc, i;

	mana_gd_free_res_map(&gc->msix_resource);

	for (i = 0; i < gc->max_num_msix; i++) {
		gic = &gc->irq_contexts[i];
		if (gic->requested) {
			rc = bus_teardown_intr(dev, gic->res, gic->cookie);
			if (unlikely(rc != 0)) {
				device_printf(dev, "failed to tear down "
				    "irq vector %d, error: %d\n",
				    gic->msix_e.vector, rc);
			}
			gic->requested = false;
		}

		if (gic->res != NULL) {
			rc = bus_release_resource(dev, SYS_RES_IRQ,
			    gic->msix_e.vector, gic->res);
			if (unlikely(rc != 0)) {
				device_printf(dev, "dev has no parent while "
				    "releasing resource for irq vector %d\n",
				    gic->msix_e.vector);
			}
			gic->res = NULL;
		}
	}

	gc->max_num_msix = 0;
	gc->num_msix_usable = 0;
	free(gc->irq_contexts, M_DEVBUF);
	gc->irq_contexts = NULL;

	pci_release_msi(dev);
}

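/*
 * Match the PCI vendor/device pair against mana_id_table.  On a match the
 * device description is set and BUS_PROBE_DEFAULT is returned; otherwise
 * ENXIO tells the bus this driver does not attach to the device.
 */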
static int
mana_gd_probe(device_t dev)
{
	mana_vendor_id_t *ent;
	char adapter_name[60];
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = mana_id_table;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			mana_dbg(NULL, "vendor=%x device=%x\n",
			    pci_vendor_id, pci_device_id);

			sprintf(adapter_name, DEVICE_DESC);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

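/*
 * Bring-up order used by mana_gd_attach() below: enable I/O decoding and
 * bus mastering, map BAR0 and the MSI-X table, latch the BAR0 register
 * state, initialize the shared-memory channel, set up MSI-X interrupts,
 * create the HWC channel, verify the VF version, query maximum resources,
 * detect child devices, and finally call mana_probe() for the ethernet
 * part.
 */
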
/**
 * mana_gd_attach - Device Initialization Routine
 * @dev: device information struct
 *
 * Returns 0 on success, an error code otherwise.
 *
 * mana_gd_attach initializes a GDMA adapter identified by a device structure.
 **/
static int
mana_gd_attach(device_t dev)
{
	struct gdma_context *gc;
	int msix_rid;
	int rc;

	gc = device_get_softc(dev);
	gc->dev = dev;

	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);

	pci_enable_busmaster(dev);

	gc->bar0 = mana_gd_alloc_bar(dev, GDMA_BAR0);
	if (unlikely(gc->bar0 == NULL)) {
		device_printf(dev,
		    "unable to allocate bus resource for bar0!\n");
		rc = ENOMEM;
		goto err_disable_dev;
	}

	/* Store bar0 tag and handle for quick access */
	gc->gd_bus.bar0_t = rman_get_bustag(gc->bar0);
	gc->gd_bus.bar0_h = rman_get_bushandle(gc->bar0);

	/* Map MSI-X vector table */
	msix_rid = pci_msix_table_bar(dev);

	mana_dbg(NULL, "msix_rid 0x%x\n", msix_rid);

	gc->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &msix_rid, RF_ACTIVE);
	if (unlikely(gc->msix == NULL)) {
		device_printf(dev,
		    "unable to allocate bus resource for msix!\n");
		rc = ENOMEM;
		goto err_free_pci_res;
	}
	gc->msix_rid = msix_rid;

	if (unlikely(gc->gd_bus.bar0_h == 0)) {
		device_printf(dev, "failed to map bar0!\n");
		rc = ENXIO;
		goto err_free_pci_res;
	}

	mana_gd_init_registers(gc);

	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

	rc = mana_gd_setup_irqs(dev);
	if (rc) {
		goto err_free_pci_res;
	}

	sx_init(&gc->eq_test_event_sx, "gdma test event sx");

	rc = mana_hwc_create_channel(gc);
	if (rc) {
		mana_dbg(NULL, "Failed to create hwc channel\n");
		if (rc == EIO)
			goto err_clean_up_gdma;
		else
			goto err_remove_irq;
	}

	rc = mana_gd_verify_vf_version(dev);
	if (rc) {
		mana_dbg(NULL, "Failed to verify vf version\n");
		goto err_clean_up_gdma;
	}

	rc = mana_gd_query_max_resources(dev);
	if (rc) {
		mana_dbg(NULL, "Failed to query max resources\n");
		goto err_clean_up_gdma;
	}

	rc = mana_gd_detect_devices(dev);
	if (rc) {
		mana_dbg(NULL, "Failed to detect mana device\n");
		goto err_clean_up_gdma;
	}

	rc = mana_probe(&gc->mana);
	if (rc) {
		mana_dbg(NULL, "Failed to probe mana device\n");
		goto err_clean_up_gdma;
	}

	return (0);

err_clean_up_gdma:
	mana_hwc_destroy_channel(gc);
	if (gc->cq_table)
		free(gc->cq_table, M_DEVBUF);
	gc->cq_table = NULL;
err_remove_irq:
	mana_gd_remove_irqs(dev);
err_free_pci_res:
	mana_gd_free_pci_res(gc);
err_disable_dev:
	pci_disable_busmaster(dev);

	return (rc);
}

/**
 * mana_gd_detach - Device Removal Routine
 * @dev: device information struct
 *
 * mana_gd_detach is called by the device subsystem to alert the driver
 * that it should release a PCI device.  Teardown happens in the reverse
 * order of attach.
 **/
static int
mana_gd_detach(device_t dev)
{
	struct gdma_context *gc = device_get_softc(dev);

	mana_remove(&gc->mana);

	mana_hwc_destroy_channel(gc);
	free(gc->cq_table, M_DEVBUF);
	gc->cq_table = NULL;

	mana_gd_remove_irqs(dev);

	mana_gd_free_pci_res(gc);

	pci_disable_busmaster(dev);

	return (bus_generic_detach(dev));
}


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t mana_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, mana_gd_probe),
	DEVMETHOD(device_attach, mana_gd_attach),
	DEVMETHOD(device_detach, mana_gd_detach),
	DEVMETHOD_END
};

static driver_t mana_driver = {
	"mana", mana_methods, sizeof(struct gdma_context),
};

devclass_t mana_devclass;
DRIVER_MODULE(mana, pci, mana_driver, mana_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, mana, mana_id_table,
    nitems(mana_id_table) - 1);
MODULE_DEPEND(mana, pci, 1, 1, 1);
MODULE_DEPEND(mana, ether, 1, 1, 1);

/*********************************************************************/