/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include <sys/limits.h>

#include "ixl.h"

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/

/*
 * Callback for bus_dmamap_load(): record the bus address of the single
 * segment backing the allocation.
 */
static void
i40e_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

i40e_status
i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
{
	mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	return (mem->va == NULL);
}

i40e_status
i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	free(mem->va, M_DEVBUF);
	mem->va = NULL;

	return (0);
}

i40e_status
i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
	enum i40e_memory_type type __unused, u64 size, u32 alignment)
{
	device_t dev = ((struct i40e_osdep *)hw->back)->dev;
	int err;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &mem->tag);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dma_tag_create failed, "
		    "error %u\n", err);
		goto fail_0;
	}
	err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dmamem_alloc failed, "
		    "error %u\n", err);
		goto fail_1;
	}
	err = bus_dmamap_load(mem->tag, mem->map, mem->va,
	    size,
	    i40e_dmamap_cb,
	    &mem->pa,
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dmamap_load failed, "
		    "error %u\n", err);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
fail_2:
	bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;
	return (err);
}

i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);
	return (0);
}
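
/*
 * Usage sketch (illustrative only, not upstream code): callers pair
 * i40e_allocate_dma_mem() with i40e_free_dma_mem() for rings and buffers
 * that hardware must address.  The memory type argument is ignored by this
 * osdep; the ring size and alignment below are assumptions for the example:
 *
 *	struct i40e_dma_mem ring;
 *	u64 len = 32 * sizeof(struct i40e_aq_desc);
 *
 *	if (i40e_allocate_dma_mem(hw, &ring, i40e_mem_reserved, len, 4096))
 *		return (I40E_ERR_NO_MEMORY);
 *	// hardware is handed ring.pa; the driver writes through ring.va
 *	i40e_free_dma_mem(hw, &ring);
 */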
void
i40e_init_spinlock(struct i40e_spinlock *lock)
{
	mtx_init(&lock->mutex, "mutex",
	    "ixl spinlock", MTX_DEF | MTX_DUPOK);
}

void
i40e_acquire_spinlock(struct i40e_spinlock *lock)
{
	mtx_lock(&lock->mutex);
}

void
i40e_release_spinlock(struct i40e_spinlock *lock)
{
	mtx_unlock(&lock->mutex);
}

void
i40e_destroy_spinlock(struct i40e_spinlock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
}
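
/*
 * Usage sketch (illustrative only): despite the name, these wrappers use a
 * default (sleepable) mutex, not a spin mutex.  The shared admin queue code
 * serializes send-queue access with them, e.g.:
 *
 *	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
 *	// ... post descriptors to the admin send queue ...
 *	i40e_release_spinlock(&hw->aq.asq_spinlock);
 */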
/*
 * Pause for at least msecs without busy-waiting: repeatedly yield the CPU
 * and check the global tick counter.  Before the scheduler is running
 * (early boot or panic), fall back to a spinning delay instead.
 */
void
i40e_msec_pause(int msecs)
{
	int ticks_to_pause = (msecs * hz) / 1000;
	int start_ticks = ticks;

	if (cold || SCHEDULER_STOPPED()) {
		i40e_msec_delay(msecs);
		return;
	}

	while (1) {
		kern_yield(PRI_USER);
		int yielded_ticks = ticks - start_ticks;
		if (yielded_ticks > ticks_to_pause)
			break;
		else if (yielded_ticks < 0
		    && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
			/* The signed tick counter wrapped; the wrapped-around
			 * distance already exceeds the requested pause. */
			break;
		}
	}
}

/*
 * Helper function for debug statement printing; the shared code reaches
 * this through the i40e_debug() macro, gated on hw->debug_mask.
 */
void
i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
	va_list args;
	device_t dev;

	if (!(mask & hw->debug_mask))
		return;

	dev = ((struct i40e_osdep *)hw->back)->dev;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

const char *
ixl_vc_opcode_str(uint16_t op)
{
	switch (op) {
	case VIRTCHNL_OP_VERSION:
		return ("VERSION");
	case VIRTCHNL_OP_RESET_VF:
		return ("RESET_VF");
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		return ("GET_VF_RESOURCES");
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		return ("CONFIG_TX_QUEUE");
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		return ("CONFIG_RX_QUEUE");
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return ("CONFIG_VSI_QUEUES");
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		return ("CONFIG_IRQ_MAP");
	case VIRTCHNL_OP_ENABLE_QUEUES:
		return ("ENABLE_QUEUES");
	case VIRTCHNL_OP_DISABLE_QUEUES:
		return ("DISABLE_QUEUES");
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		return ("ADD_ETH_ADDR");
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		return ("DEL_ETH_ADDR");
	case VIRTCHNL_OP_ADD_VLAN:
		return ("ADD_VLAN");
	case VIRTCHNL_OP_DEL_VLAN:
		return ("DEL_VLAN");
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		return ("CONFIG_PROMISCUOUS_MODE");
	case VIRTCHNL_OP_GET_STATS:
		return ("GET_STATS");
	case VIRTCHNL_OP_RSVD:
		return ("RSVD");
	case VIRTCHNL_OP_EVENT:
		return ("EVENT");
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		return ("CONFIG_RSS_KEY");
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		return ("CONFIG_RSS_LUT");
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		return ("GET_RSS_HENA_CAPS");
	case VIRTCHNL_OP_SET_RSS_HENA:
		return ("SET_RSS_HENA");
	default:
		return ("UNKNOWN");
	}
}

u16
i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
{
	u16 value;

	value = pci_read_config(((struct i40e_osdep *)hw->back)->dev,
	    reg, 2);

	return (value);
}

void
i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
{
	pci_write_config(((struct i40e_osdep *)hw->back)->dev,
	    reg, value, 2);
}
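
/*
 * Usage sketch (illustrative only): shared device code uses these shims to
 * read and write 16-bit PCI config words.  The register below is a standard
 * FreeBSD pcireg.h offset chosen for the example, not a value taken from
 * this file:
 *
 *	u16 cmd = i40e_read_pci_cfg(hw, PCIR_COMMAND);
 *	i40e_write_pci_cfg(hw, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN);
 */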