ena.c (755e60ca046390bdcfc097b6a8f1a032d47a7b65) | ena.c (82e558eacf222ac497bc11fa9f2c7778e97fbc7a) |
---|---|
1/*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions --- 22 unchanged lines hidden (view full) --- 31__FBSDID("$FreeBSD$"); 32 33#include "opt_rss.h" 34 35#include <sys/param.h> 36#include <sys/systm.h> 37#include <sys/bus.h> 38#include <sys/endian.h> | 1/*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions --- 22 unchanged lines hidden (view full) --- 31__FBSDID("$FreeBSD$"); 32 33#include "opt_rss.h" 34 35#include <sys/param.h> 36#include <sys/systm.h> 37#include <sys/bus.h> 38#include <sys/endian.h> |
| 39#include <sys/eventhandler.h> |
39#include <sys/kernel.h> 40#include <sys/kthread.h> 41#include <sys/malloc.h> 42#include <sys/mbuf.h> 43#include <sys/module.h> 44#include <sys/rman.h> 45#include <sys/smp.h> 46#include <sys/socket.h> 47#include <sys/sockio.h> 48#include <sys/sysctl.h> 49#include <sys/taskqueue.h> 50#include <sys/time.h> | 40#include <sys/kernel.h> 41#include <sys/kthread.h> 42#include <sys/malloc.h> 43#include <sys/mbuf.h> 44#include <sys/module.h> 45#include <sys/rman.h> 46#include <sys/smp.h> 47#include <sys/socket.h> 48#include <sys/sockio.h> 49#include <sys/sysctl.h> 50#include <sys/taskqueue.h> 51#include <sys/time.h> |
51#include <sys/eventhandler.h> | |
52 | 52 |
| 53#include <vm/vm.h> 54#include <vm/pmap.h> 55 |
53#include <machine/atomic.h> 54#include <machine/bus.h> | 56#include <machine/atomic.h> 57#include <machine/bus.h> |
55#include <machine/resource.h> | |
56#include <machine/in_cksum.h> | 58#include <machine/in_cksum.h> |
| 59#include <machine/resource.h> |
57 | 60 |
| 61#include <dev/pci/pcireg.h> 62#include <dev/pci/pcivar.h> 63 |
58#include <net/bpf.h> 59#include <net/ethernet.h> 60#include <net/if.h> | 64#include <net/bpf.h> 65#include <net/ethernet.h> 66#include <net/if.h> |
61#include <net/if_var.h> | |
62#include <net/if_arp.h> 63#include <net/if_dl.h> 64#include <net/if_media.h> 65#include <net/if_types.h> | 67#include <net/if_arp.h> 68#include <net/if_dl.h> 69#include <net/if_media.h> 70#include <net/if_types.h> |
| 71#include <net/if_var.h> |
66#include <net/if_vlan_var.h> | 72#include <net/if_vlan_var.h> |
67 68#include <netinet/in_systm.h> | |
69#include <netinet/in.h> | 73#include <netinet/in.h> |
| 74#include <netinet/in_systm.h> |
70#include <netinet/if_ether.h> 71#include <netinet/ip.h> 72#include <netinet/ip6.h> 73#include <netinet/tcp.h> 74#include <netinet/udp.h> 75 | 75#include <netinet/if_ether.h> 76#include <netinet/ip.h> 77#include <netinet/ip6.h> 78#include <netinet/tcp.h> 79#include <netinet/udp.h> 80 |
76#include <dev/pci/pcivar.h> 77#include <dev/pci/pcireg.h> 78 79#include <vm/vm.h> 80#include <vm/pmap.h> 81 82#include "ena_datapath.h" | |
83#include "ena.h" | 81#include "ena.h" |
84#include "ena_sysctl.h" | 82#include "ena_datapath.h" |
85#include "ena_rss.h" | 83#include "ena_rss.h" |
84#include "ena_sysctl.h" |
|
86 87#ifdef DEV_NETMAP 88#include "ena_netmap.h" 89#endif /* DEV_NETMAP */ 90 91/********************************************************* 92 * Function prototypes 93 *********************************************************/ | 85 86#ifdef DEV_NETMAP 87#include "ena_netmap.h" 88#endif /* DEV_NETMAP */ 89 90/********************************************************* 91 * Function prototypes 92 *********************************************************/ |
94static int ena_probe(device_t); 95static void ena_intr_msix_mgmnt(void *); 96static void ena_free_pci_resources(struct ena_adapter *); 97static int ena_change_mtu(if_t, int); | 93static int ena_probe(device_t); 94static void ena_intr_msix_mgmnt(void *); 95static void ena_free_pci_resources(struct ena_adapter *); 96static int ena_change_mtu(if_t, int); |
98static inline void ena_alloc_counters(counter_u64_t *, int); 99static inline void ena_free_counters(counter_u64_t *, int); 100static inline void ena_reset_counters(counter_u64_t *, int); | 97static inline void ena_alloc_counters(counter_u64_t *, int); 98static inline void ena_free_counters(counter_u64_t *, int); 99static inline void ena_reset_counters(counter_u64_t *, int); |
101static void ena_init_io_rings_common(struct ena_adapter *, 102 struct ena_ring *, uint16_t); 103static void ena_init_io_rings_basic(struct ena_adapter *); 104static void ena_init_io_rings_advanced(struct ena_adapter *); 105static void ena_init_io_rings(struct ena_adapter *); 106static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); 107static void ena_free_all_io_rings_resources(struct ena_adapter *); 108static int ena_setup_tx_dma_tag(struct ena_adapter *); 109static int ena_free_tx_dma_tag(struct ena_adapter *); 110static int ena_setup_rx_dma_tag(struct ena_adapter *); 111static int ena_free_rx_dma_tag(struct ena_adapter *); 112static void ena_release_all_tx_dmamap(struct ena_ring *); 113static int ena_setup_tx_resources(struct ena_adapter *, int); 114static void ena_free_tx_resources(struct ena_adapter *, int); 115static int ena_setup_all_tx_resources(struct ena_adapter *); 116static void ena_free_all_tx_resources(struct ena_adapter *); 117static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); 118static void ena_free_rx_resources(struct ena_adapter *, unsigned int); 119static int ena_setup_all_rx_resources(struct ena_adapter *); 120static void ena_free_all_rx_resources(struct ena_adapter *); | 100static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *, 101 uint16_t); 102static void ena_init_io_rings_basic(struct ena_adapter *); 103static void ena_init_io_rings_advanced(struct ena_adapter *); 104static void ena_init_io_rings(struct ena_adapter *); 105static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); 106static void ena_free_all_io_rings_resources(struct ena_adapter *); 107static int ena_setup_tx_dma_tag(struct ena_adapter *); 108static int ena_free_tx_dma_tag(struct ena_adapter *); 109static int ena_setup_rx_dma_tag(struct ena_adapter *); 110static int ena_free_rx_dma_tag(struct ena_adapter *); 111static void ena_release_all_tx_dmamap(struct ena_ring *); 112static int ena_setup_tx_resources(struct ena_adapter *, int); 113static void ena_free_tx_resources(struct ena_adapter *, int); 114static int ena_setup_all_tx_resources(struct ena_adapter *); 115static void ena_free_all_tx_resources(struct ena_adapter *); 116static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); 117static void ena_free_rx_resources(struct ena_adapter *, unsigned int); 118static int ena_setup_all_rx_resources(struct ena_adapter *); 119static void ena_free_all_rx_resources(struct ena_adapter *); |
121static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, 122 struct ena_rx_buffer *); | 120static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, 121 struct ena_rx_buffer *); |
123static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, | 122static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, |
124 struct ena_rx_buffer *); | 123 struct ena_rx_buffer *); |
125static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); 126static void ena_refill_all_rx_bufs(struct ena_adapter *); 127static void ena_free_all_rx_bufs(struct ena_adapter *); 128static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); 129static void ena_free_all_tx_bufs(struct ena_adapter *); 130static void ena_destroy_all_tx_queues(struct ena_adapter *); 131static void ena_destroy_all_rx_queues(struct ena_adapter *); 132static void ena_destroy_all_io_queues(struct ena_adapter *); 133static int ena_create_io_queues(struct ena_adapter *); 134static int ena_handle_msix(void *); 135static int ena_enable_msix(struct ena_adapter *); 136static void ena_setup_mgmnt_intr(struct ena_adapter *); 137static int ena_setup_io_intr(struct ena_adapter *); 138static int ena_request_mgmnt_irq(struct ena_adapter *); 139static int ena_request_io_irq(struct ena_adapter *); 140static void ena_free_mgmnt_irq(struct ena_adapter *); 141static void ena_free_io_irq(struct ena_adapter *); 142static void ena_free_irqs(struct ena_adapter*); 143static void ena_disable_msix(struct ena_adapter *); 144static void ena_unmask_all_io_irqs(struct ena_adapter *); 145static int ena_up_complete(struct ena_adapter *); 146static uint64_t ena_get_counter(if_t, ift_counter); 147static int ena_media_change(if_t); 148static void ena_media_status(if_t, struct ifmediareq *); 149static void ena_init(void *); 150static int ena_ioctl(if_t, u_long, caddr_t); 151static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); 152static void ena_update_host_info(struct ena_admin_host_info *, if_t); 153static void ena_update_hwassist(struct ena_adapter *); 154static int ena_setup_ifnet(device_t, struct ena_adapter *, | 124static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); 125static void ena_refill_all_rx_bufs(struct ena_adapter *); 126static void ena_free_all_rx_bufs(struct ena_adapter *); 127static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); 128static void ena_free_all_tx_bufs(struct ena_adapter *); 129static void ena_destroy_all_tx_queues(struct ena_adapter *); 130static void ena_destroy_all_rx_queues(struct ena_adapter *); 131static void ena_destroy_all_io_queues(struct ena_adapter *); 132static int ena_create_io_queues(struct ena_adapter *); 133static int ena_handle_msix(void *); 134static int ena_enable_msix(struct ena_adapter *); 135static void ena_setup_mgmnt_intr(struct ena_adapter *); 136static int ena_setup_io_intr(struct ena_adapter *); 137static int ena_request_mgmnt_irq(struct ena_adapter *); 138static int ena_request_io_irq(struct ena_adapter *); 139static void ena_free_mgmnt_irq(struct ena_adapter *); 140static void ena_free_io_irq(struct ena_adapter *); 141static void ena_free_irqs(struct ena_adapter *); 142static void ena_disable_msix(struct ena_adapter *); 143static void ena_unmask_all_io_irqs(struct ena_adapter *); 144static int ena_up_complete(struct ena_adapter *); 145static uint64_t ena_get_counter(if_t, ift_counter); 146static int ena_media_change(if_t); 147static void ena_media_status(if_t, struct ifmediareq *); 148static void ena_init(void *); 149static int ena_ioctl(if_t, u_long, caddr_t); 150static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); 151static void ena_update_host_info(struct ena_admin_host_info *, if_t); 152static void ena_update_hwassist(struct ena_adapter *); 153static int ena_setup_ifnet(device_t, struct ena_adapter *, |
155 struct ena_com_dev_get_features_ctx *); | 154 struct ena_com_dev_get_features_ctx *); |
156static int ena_enable_wc(device_t, struct resource *); 157static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, | 155static int ena_enable_wc(device_t, struct resource *); 156static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, |
158 struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); | 157 struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); |
159static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *); 160static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *, | 158static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *); 159static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *, |
161 struct ena_com_dev_get_features_ctx *); | 160 struct ena_com_dev_get_features_ctx *); |
162static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *); 163static void ena_config_host_info(struct ena_com_dev *, device_t); 164static int ena_attach(device_t); 165static int ena_detach(device_t); 166static int ena_device_init(struct ena_adapter *, device_t, | 161static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *); 162static void ena_config_host_info(struct ena_com_dev *, device_t); 163static int ena_attach(device_t); 164static int ena_detach(device_t); 165static int ena_device_init(struct ena_adapter *, device_t, |
167 struct ena_com_dev_get_features_ctx *, int *); | 166 struct ena_com_dev_get_features_ctx *, int *); |
168static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *); | 167static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *); |
169static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); | 168static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); |
170static void unimplemented_aenq_handler(void *, 171 struct ena_admin_aenq_entry *); 172static int ena_copy_eni_metrics(struct ena_adapter *); 173static void ena_timer_service(void *); | 169static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *); 170static int ena_copy_eni_metrics(struct ena_adapter *); 171static void ena_timer_service(void *); |
174 175static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; 176 177static ena_vendor_info_t ena_vendor_info_array[] = { | 172 173static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; 174 175static ena_vendor_info_t ena_vendor_info_array[] = { |
178 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0}, 179 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0}, 180 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0}, 181 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0}, 182 /* Last entry */ 183 { 0, 0, 0 } | 176 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 }, 177 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 }, 178 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 }, 179 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 }, 180 /* Last entry */ 181 { 0, 0, 0 } |
184}; 185 186struct sx ena_global_lock; 187 188/* 189 * Contains pointers to event handlers, e.g. link state chage. 190 */ 191static struct ena_aenq_handlers aenq_handlers; 192 193void 194ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 195{ 196 if (error != 0) 197 return; | 182}; 183 184struct sx ena_global_lock; 185 186/* 187 * Contains pointers to event handlers, e.g. link state chage. 188 */ 189static struct ena_aenq_handlers aenq_handlers; 190 191void 192ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 193{ 194 if (error != 0) 195 return; |
198 *(bus_addr_t *) arg = segs[0].ds_addr; | 196 *(bus_addr_t *)arg = segs[0].ds_addr; |
199} 200 201int | 197} 198 199int |
202ena_dma_alloc(device_t dmadev, bus_size_t size, 203 ena_mem_handle_t *dma, int mapflags, bus_size_t alignment, int domain) | 200ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma, 201 int mapflags, bus_size_t alignment, int domain) |
204{ | 202{ |
205 struct ena_adapter* adapter = device_get_softc(dmadev); | 203 struct ena_adapter *adapter = device_get_softc(dmadev); |
206 device_t pdev = adapter->pdev; 207 uint32_t maxsize; 208 uint64_t dma_space_addr; 209 int error; 210 211 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; 212 213 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); 214 if (unlikely(dma_space_addr == 0)) 215 dma_space_addr = BUS_SPACE_MAXADDR; 216 217 error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ | 204 device_t pdev = adapter->pdev; 205 uint32_t maxsize; 206 uint64_t dma_space_addr; 207 int error; 208 209 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; 210 211 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); 212 if (unlikely(dma_space_addr == 0)) 213 dma_space_addr = BUS_SPACE_MAXADDR; 214 215 error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ |
218 alignment, 0, /* alignment, bounds */ 219 dma_space_addr, /* lowaddr of exclusion window */ 220 BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ 221 NULL, NULL, /* filter, filterarg */ 222 maxsize, /* maxsize */ 223 1, /* nsegments */ 224 maxsize, /* maxsegsize */ 225 BUS_DMA_ALLOCNOW, /* flags */ 226 NULL, /* lockfunc */ 227 NULL, /* lockarg */ | 216 alignment, 0, /* alignment, bounds */ 217 dma_space_addr, /* lowaddr of exclusion window */ 218 BUS_SPACE_MAXADDR, /* highaddr of exclusion window */ 219 NULL, NULL, /* filter, filterarg */ 220 maxsize, /* maxsize */ 221 1, /* nsegments */ 222 maxsize, /* maxsegsize */ 223 BUS_DMA_ALLOCNOW, /* flags */ 224 NULL, /* lockfunc */ 225 NULL, /* lockarg */ |
228 &dma->tag); 229 if (unlikely(error != 0)) { 230 ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error); 231 goto fail_tag; 232 } 233 234 error = bus_dma_tag_set_domain(dma->tag, domain); 235 if (unlikely(error != 0)) { 236 ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n", 237 error); 238 goto fail_map_create; 239 } 240 | 226 &dma->tag); 227 if (unlikely(error != 0)) { 228 ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error); 229 goto fail_tag; 230 } 231 232 error = bus_dma_tag_set_domain(dma->tag, domain); 233 if (unlikely(error != 0)) { 234 ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n", 235 error); 236 goto fail_map_create; 237 } 238 |
241 error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, | 239 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, |
242 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); 243 if (unlikely(error != 0)) { 244 ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n", 245 (uintmax_t)size, error); 246 goto fail_map_create; 247 } 248 249 dma->paddr = 0; | 240 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); 241 if (unlikely(error != 0)) { 242 ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n", 243 (uintmax_t)size, error); 244 goto fail_map_create; 245 } 246 247 dma->paddr = 0; |
250 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 251 size, ena_dmamap_callback, &dma->paddr, mapflags); | 248 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 249 ena_dmamap_callback, &dma->paddr, mapflags); |
252 if (unlikely((error != 0) || (dma->paddr == 0))) { 253 ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error); 254 goto fail_map_load; 255 } 256 257 bus_dmamap_sync(dma->tag, dma->map, 258 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 259 --- 22 unchanged lines hidden (view full) --- 282 } 283 284 if (adapter->registers != NULL) { 285 bus_release_resource(pdev, SYS_RES_MEMORY, 286 PCIR_BAR(ENA_REG_BAR), adapter->registers); 287 } 288 289 if (adapter->msix != NULL) { | 250 if (unlikely((error != 0) || (dma->paddr == 0))) { 251 ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error); 252 goto fail_map_load; 253 } 254 255 bus_dmamap_sync(dma->tag, dma->map, 256 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 257 --- 22 unchanged lines hidden (view full) --- 280 } 281 282 if (adapter->registers != NULL) { 283 bus_release_resource(pdev, SYS_RES_MEMORY, 284 PCIR_BAR(ENA_REG_BAR), adapter->registers); 285 } 286 287 if (adapter->msix != NULL) { |
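The `ena_dma_alloc()` hunk above walks the usual busdma(9) sequence: create a tag sized for the request, allocate coherent zeroed memory against it, load the map, and capture the bus address in the load callback before syncing. A minimal standalone sketch of that sequence, not taken from the driver (`struct my_dma`, `my_dmamap_cb`, and `my_dma_alloc` are hypothetical names):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct my_dma {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	void		*vaddr;
	bus_addr_t	paddr;
};

/* Load callback: remember the single segment's bus address. */
static void
my_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
my_dma_alloc(device_t dev, bus_size_t size, struct my_dma *dma)
{
	int error;

	/* One physically contiguous segment, addressable anywhere. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size, 1, size,		/* maxsize, nsegments, maxsegsize */
	    BUS_DMA_ALLOCNOW, NULL, NULL, &dma->tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, &dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (error != 0)
		goto fail_tag;

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    my_dmamap_cb, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0 || dma->paddr == 0)
		goto fail_mem;

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);

fail_mem:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_tag:
	bus_dma_tag_destroy(dma->tag);
	return (error != 0 ? error : ENOMEM);
}
```

The callback is how busdma reports the segment list; for a single-segment coherent buffer the first segment's `ds_addr` is the device-visible address the driver stores in `dma->paddr`.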
290 bus_release_resource(pdev, SYS_RES_MEMORY, 291 adapter->msix_rid, adapter->msix); | 288 bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid, 289 adapter->msix); |
292 } 293} 294 295static int 296ena_probe(device_t dev) 297{ 298 ena_vendor_info_t *ent; | 290 } 291} 292 293static int 294ena_probe(device_t dev) 295{ 296 ena_vendor_info_t *ent; |
299 uint16_t pci_vendor_id = 0; 300 uint16_t pci_device_id = 0; | 297 uint16_t pci_vendor_id = 0; 298 uint16_t pci_device_id = 0; |
301 302 pci_vendor_id = pci_get_vendor(dev); 303 pci_device_id = pci_get_device(dev); 304 305 ent = ena_vendor_info_array; 306 while (ent->vendor_id != 0) { 307 if ((pci_vendor_id == ent->vendor_id) && 308 (pci_device_id == ent->device_id)) { | 299 300 pci_vendor_id = pci_get_vendor(dev); 301 pci_device_id = pci_get_device(dev); 302 303 ent = ena_vendor_info_array; 304 while (ent->vendor_id != 0) { 305 if ((pci_vendor_id == ent->vendor_id) && 306 (pci_device_id == ent->device_id)) { |
309 ena_log_raw(DBG, "vendor=%x device=%x\n", 310 pci_vendor_id, pci_device_id); | 307 ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id, 308 pci_device_id); |
311 312 device_set_desc(dev, DEVICE_DESC); 313 return (BUS_PROBE_DEFAULT); 314 } 315 316 ent++; | 309 310 device_set_desc(dev, DEVICE_DESC); 311 return (BUS_PROBE_DEFAULT); 312 } 313 314 ent++; |
317 | |
318 } 319 320 return (ENXIO); 321} 322 323static int 324ena_change_mtu(if_t ifp, int new_mtu) 325{ 326 struct ena_adapter *adapter = if_getsoftc(ifp); 327 device_t pdev = adapter->pdev; 328 int rc; 329 330 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { | 315 } 316 317 return (ENXIO); 318} 319 320static int 321ena_change_mtu(if_t ifp, int new_mtu) 322{ 323 struct ena_adapter *adapter = if_getsoftc(ifp); 324 device_t pdev = adapter->pdev; 325 int rc; 326 327 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { |
331 ena_log(pdev, ERR, "Invalid MTU setting. " 332 "new_mtu: %d max mtu: %d min mtu: %d\n", | 328 ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n", |
333 new_mtu, adapter->max_mtu, ENA_MIN_MTU); 334 return (EINVAL); 335 } 336 337 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); 338 if (likely(rc == 0)) { 339 ena_log(pdev, DBG, "set MTU to %d\n", new_mtu); 340 if_setmtu(ifp, new_mtu); --- 30 unchanged lines hidden (view full) --- 371 for (; begin < end; ++begin) 372 counter_u64_zero(*begin); 373} 374 375static void 376ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, 377 uint16_t qid) 378{ | 329 new_mtu, adapter->max_mtu, ENA_MIN_MTU); 330 return (EINVAL); 331 } 332 333 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); 334 if (likely(rc == 0)) { 335 ena_log(pdev, DBG, "set MTU to %d\n", new_mtu); 336 if_setmtu(ifp, new_mtu); --- 30 unchanged lines hidden (view full) --- 367 for (; begin < end; ++begin) 368 counter_u64_zero(*begin); 369} 370 371static void 372ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, 373 uint16_t qid) 374{ |
379 | |
380 ring->qid = qid; 381 ring->adapter = adapter; 382 ring->ena_dev = adapter->ena_dev; 383 atomic_store_8(&ring->first_interrupt, false); 384 ring->no_interrupt_event_cnt = 0; 385} 386 387static void --- 39 unchanged lines hidden (view full) --- 427 int i; 428 429 for (i = 0; i < adapter->num_io_queues; i++) { 430 txr = &adapter->tx_ring[i]; 431 rxr = &adapter->rx_ring[i]; 432 433 /* Allocate a buf ring */ 434 txr->buf_ring_size = adapter->buf_ring_size; | 375 ring->qid = qid; 376 ring->adapter = adapter; 377 ring->ena_dev = adapter->ena_dev; 378 atomic_store_8(&ring->first_interrupt, false); 379 ring->no_interrupt_event_cnt = 0; 380} 381 382static void --- 39 unchanged lines hidden (view full) --- 422 int i; 423 424 for (i = 0; i < adapter->num_io_queues; i++) { 425 txr = &adapter->tx_ring[i]; 426 rxr = &adapter->rx_ring[i]; 427 428 /* Allocate a buf ring */ 429 txr->buf_ring_size = adapter->buf_ring_size; |
435 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, 436 M_WAITOK, &txr->ring_mtx); | 430 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK, 431 &txr->ring_mtx); |
437 438 /* Allocate Tx statistics. */ 439 ena_alloc_counters((counter_u64_t *)&txr->tx_stats, 440 sizeof(txr->tx_stats)); 441 txr->tx_last_cleanup_ticks = ticks; 442 443 /* Allocate Rx statistics. */ 444 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, --- 42 unchanged lines hidden (view full) --- 487 488static void 489ena_free_all_io_rings_resources(struct ena_adapter *adapter) 490{ 491 int i; 492 493 for (i = 0; i < adapter->num_io_queues; i++) 494 ena_free_io_ring_resources(adapter, i); | 432 433 /* Allocate Tx statistics. */ 434 ena_alloc_counters((counter_u64_t *)&txr->tx_stats, 435 sizeof(txr->tx_stats)); 436 txr->tx_last_cleanup_ticks = ticks; 437 438 /* Allocate Rx statistics. */ 439 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, --- 42 unchanged lines hidden (view full) --- 482 483static void 484ena_free_all_io_rings_resources(struct ena_adapter *adapter) 485{ 486 int i; 487 488 for (i = 0; i < adapter->num_io_queues; i++) 489 ena_free_io_ring_resources(adapter, i); |
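The visible body of `ena_reset_counters()` above iterates over a stats structure as a flat array of `counter_u64_t`, and the `ena_alloc_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats))` calls rely on the same layout. A sketch of that counter(9) pattern under that assumption (`struct my_tx_stats`, `my_alloc_counters`, and `my_free_counters` are hypothetical):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/malloc.h>

/* Hypothetical stats struct: every member must be a counter_u64_t. */
struct my_tx_stats {
	counter_u64_t	cnt;
	counter_u64_t	bytes;
	counter_u64_t	dma_mapping_err;
};

static void
my_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static void
my_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

/*
 * Usage mirrors the hunk above:
 *	my_alloc_counters((counter_u64_t *)&stats, sizeof(stats));
 */
```

The cast-and-size trick only works if every member of the stats struct really is a `counter_u64_t`, which is why the driver passes the struct address and its full size.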
495 | |
496} 497 498static int 499ena_setup_tx_dma_tag(struct ena_adapter *adapter) 500{ 501 int ret; 502 503 /* Create DMA tag for Tx buffers */ 504 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 505 1, 0, /* alignment, bounds */ 506 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ | 490} 491 492static int 493ena_setup_tx_dma_tag(struct ena_adapter *adapter) 494{ 495 int ret; 496 497 /* Create DMA tag for Tx buffers */ 498 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 499 1, 0, /* alignment, bounds */ 500 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ |
507 BUS_SPACE_MAXADDR, /* highaddr of excl window */ | 501 BUS_SPACE_MAXADDR, /* highaddr of excl window */ |
508 NULL, NULL, /* filter, filterarg */ 509 ENA_TSO_MAXSIZE, /* maxsize */ 510 adapter->max_tx_sgl_size - 1, /* nsegments */ 511 ENA_TSO_MAXSIZE, /* maxsegsize */ 512 0, /* flags */ 513 NULL, /* lockfunc */ 514 NULL, /* lockfuncarg */ 515 &adapter->tx_buf_tag); --- 18 unchanged lines hidden (view full) --- 534ena_setup_rx_dma_tag(struct ena_adapter *adapter) 535{ 536 int ret; 537 538 /* Create DMA tag for Rx buffers*/ 539 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 540 1, 0, /* alignment, bounds */ 541 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ | 502 NULL, NULL, /* filter, filterarg */ 503 ENA_TSO_MAXSIZE, /* maxsize */ 504 adapter->max_tx_sgl_size - 1, /* nsegments */ 505 ENA_TSO_MAXSIZE, /* maxsegsize */ 506 0, /* flags */ 507 NULL, /* lockfunc */ 508 NULL, /* lockfuncarg */ 509 &adapter->tx_buf_tag); --- 18 unchanged lines hidden (view full) --- 528ena_setup_rx_dma_tag(struct ena_adapter *adapter) 529{ 530 int ret; 531 532 /* Create DMA tag for Rx buffers*/ 533 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 534 1, 0, /* alignment, bounds */ 535 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ |
542 BUS_SPACE_MAXADDR, /* highaddr of excl window */ | 536 BUS_SPACE_MAXADDR, /* highaddr of excl window */ |
543 NULL, NULL, /* filter, filterarg */ 544 ena_mbuf_sz, /* maxsize */ 545 adapter->max_rx_sgl_size, /* nsegments */ 546 ena_mbuf_sz, /* maxsegsize */ 547 0, /* flags */ 548 NULL, /* lockfunc */ 549 NULL, /* lockarg */ 550 &adapter->rx_buf_tag); --- 14 unchanged lines hidden (view full) --- 565 return (ret); 566} 567 568static void 569ena_release_all_tx_dmamap(struct ena_ring *tx_ring) 570{ 571 struct ena_adapter *adapter = tx_ring->adapter; 572 struct ena_tx_buffer *tx_info; | 537 NULL, NULL, /* filter, filterarg */ 538 ena_mbuf_sz, /* maxsize */ 539 adapter->max_rx_sgl_size, /* nsegments */ 540 ena_mbuf_sz, /* maxsegsize */ 541 0, /* flags */ 542 NULL, /* lockfunc */ 543 NULL, /* lockarg */ 544 &adapter->rx_buf_tag); --- 14 unchanged lines hidden (view full) --- 559 return (ret); 560} 561 562static void 563ena_release_all_tx_dmamap(struct ena_ring *tx_ring) 564{ 565 struct ena_adapter *adapter = tx_ring->adapter; 566 struct ena_tx_buffer *tx_info; |
573 bus_dma_tag_t tx_tag = adapter->tx_buf_tag;; | 567 bus_dma_tag_t tx_tag = adapter->tx_buf_tag; |
574 int i; 575#ifdef DEV_NETMAP 576 struct ena_netmap_tx_info *nm_info; 577 int j; 578#endif /* DEV_NETMAP */ 579 580 for (i = 0; i < tx_ring->ring_size; ++i) { 581 tx_info = &tx_ring->tx_buffer_info[i]; --- 74 unchanged lines hidden (view full) --- 656 ENA_RING_MTX_UNLOCK(tx_ring); 657 658 /* ... and create the buffer DMA maps */ 659 for (i = 0; i < tx_ring->ring_size; i++) { 660 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 661 &tx_ring->tx_buffer_info[i].dmamap); 662 if (unlikely(err != 0)) { 663 ena_log(pdev, ERR, | 568 int i; 569#ifdef DEV_NETMAP 570 struct ena_netmap_tx_info *nm_info; 571 int j; 572#endif /* DEV_NETMAP */ 573 574 for (i = 0; i < tx_ring->ring_size; ++i) { 575 tx_info = &tx_ring->tx_buffer_info[i]; --- 74 unchanged lines hidden (view full) --- 650 ENA_RING_MTX_UNLOCK(tx_ring); 651 652 /* ... and create the buffer DMA maps */ 653 for (i = 0; i < tx_ring->ring_size; i++) { 654 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 655 &tx_ring->tx_buffer_info[i].dmamap); 656 if (unlikely(err != 0)) { 657 ena_log(pdev, ERR, |
664 "Unable to create Tx DMA map for buffer %d\n", 665 i); | 658 "Unable to create Tx DMA map for buffer %d\n", i); |
666 goto err_map_release; 667 } 668 669#ifdef DEV_NETMAP 670 if (adapter->ifp->if_capenable & IFCAP_NETMAP) { 671 map = tx_ring->tx_buffer_info[i].nm_info.map_seg; 672 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { 673 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 674 &map[j]); 675 if (unlikely(err != 0)) { 676 ena_log(pdev, ERR, | 659 goto err_map_release; 660 } 661 662#ifdef DEV_NETMAP 663 if (adapter->ifp->if_capenable & IFCAP_NETMAP) { 664 map = tx_ring->tx_buffer_info[i].nm_info.map_seg; 665 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { 666 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 667 &map[j]); 668 if (unlikely(err != 0)) { 669 ena_log(pdev, ERR, |
677 "Unable to create " 678 "Tx DMA for buffer %d %d\n", i, j); | 670 "Unable to create Tx DMA for buffer %d %d\n", 671 i, j); |
679 goto err_map_release; 680 } 681 } 682 } 683#endif /* DEV_NETMAP */ 684 } 685 686 /* Allocate taskqueues */ --- 45 unchanged lines hidden (view full) --- 732ena_free_tx_resources(struct ena_adapter *adapter, int qid) 733{ 734 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 735#ifdef DEV_NETMAP 736 struct ena_netmap_tx_info *nm_info; 737 int j; 738#endif /* DEV_NETMAP */ 739 | 672 goto err_map_release; 673 } 674 } 675 } 676#endif /* DEV_NETMAP */ 677 } 678 679 /* Allocate taskqueues */ --- 45 unchanged lines hidden (view full) --- 725ena_free_tx_resources(struct ena_adapter *adapter, int qid) 726{ 727 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 728#ifdef DEV_NETMAP 729 struct ena_netmap_tx_info *nm_info; 730 int j; 731#endif /* DEV_NETMAP */ 732 |
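`ena_setup_tx_resources()` above creates one DMA map per TX slot from the shared `tx_buf_tag` and releases the already-created maps if any creation fails. A small sketch of that create/unwind loop (`struct my_tx_slot` and `my_create_tx_maps` are hypothetical):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

struct my_tx_slot {
	bus_dmamap_t	dmamap;
};

static int
my_create_tx_maps(bus_dma_tag_t tag, struct my_tx_slot *slots, int count)
{
	int error, i;

	for (i = 0; i < count; i++) {
		error = bus_dmamap_create(tag, 0, &slots[i].dmamap);
		if (error != 0)
			goto fail;
	}
	return (0);

fail:
	/* Unwind only the maps that were successfully created. */
	while (i--)
		bus_dmamap_destroy(tag, slots[i].dmamap);
	return (error);
}
```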
740 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, 741 NULL)) | 733 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) |
742 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 743 744 taskqueue_free(tx_ring->enqueue_tq); 745 746 ENA_RING_MTX_LOCK(tx_ring); 747 /* Flush buffer ring, */ 748 drbr_flush(adapter->ifp, tx_ring->br); 749 --- 240 unchanged lines hidden (view full) --- 990{ 991 int i; 992 993 for (i = 0; i < adapter->num_io_queues; i++) 994 ena_free_rx_resources(adapter, i); 995} 996 997static inline int | 734 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 735 736 taskqueue_free(tx_ring->enqueue_tq); 737 738 ENA_RING_MTX_LOCK(tx_ring); 739 /* Flush buffer ring, */ 740 drbr_flush(adapter->ifp, tx_ring->br); 741 --- 240 unchanged lines hidden (view full) --- 982{ 983 int i; 984 985 for (i = 0; i < adapter->num_io_queues; i++) 986 ena_free_rx_resources(adapter, i); 987} 988 989static inline int |
998ena_alloc_rx_mbuf(struct ena_adapter *adapter, 999 struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) | 990ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, 991 struct ena_rx_buffer *rx_info) |
1000{ 1001 device_t pdev = adapter->pdev; 1002 struct ena_com_buf *ena_buf; 1003 bus_dma_segment_t segs[1]; 1004 int nsegs, error; 1005 int mlen; 1006 1007 /* if previous allocated frag is not used */ --- 14 unchanged lines hidden (view full) --- 1022 mlen = MCLBYTES; 1023 } else { 1024 mlen = rx_ring->rx_mbuf_sz; 1025 } 1026 /* Set mbuf length*/ 1027 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; 1028 1029 /* Map packets for DMA */ | 992{ 993 device_t pdev = adapter->pdev; 994 struct ena_com_buf *ena_buf; 995 bus_dma_segment_t segs[1]; 996 int nsegs, error; 997 int mlen; 998 999 /* if previous allocated frag is not used */ --- 14 unchanged lines hidden (view full) --- 1014 mlen = MCLBYTES; 1015 } else { 1016 mlen = rx_ring->rx_mbuf_sz; 1017 } 1018 /* Set mbuf length*/ 1019 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; 1020 1021 /* Map packets for DMA */ |
1030 ena_log(pdev, DBG, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", 1031 adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); | 1022 ena_log(pdev, DBG, 1023 "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", 1024 adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len); |
1032 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, 1033 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1034 if (unlikely((error != 0) || (nsegs != 1))) { 1035 ena_log(pdev, WARN, 1036 "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs); 1037 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); 1038 goto exit; | 1025 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, 1026 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1027 if (unlikely((error != 0) || (nsegs != 1))) { 1028 ena_log(pdev, WARN, 1029 "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs); 1030 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); 1031 goto exit; |
1039 | |
1040 } 1041 1042 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); 1043 1044 ena_buf = &rx_info->ena_buf; 1045 ena_buf->paddr = segs[0].ds_addr; 1046 ena_buf->len = mlen; 1047 | 1032 } 1033 1034 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); 1035 1036 ena_buf = &rx_info->ena_buf; 1037 ena_buf->paddr = segs[0].ds_addr; 1038 ena_buf->len = mlen; 1039 |
1048 ena_log(pdev, DBG, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", 1049 rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); | 1040 ena_log(pdev, DBG, 1041 "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", 1042 rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr); |
1050 1051 return (0); 1052 1053exit: 1054 m_freem(rx_info->mbuf); 1055 rx_info->mbuf = NULL; 1056 return (EFAULT); 1057} 1058 1059static void 1060ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, 1061 struct ena_rx_buffer *rx_info) 1062{ | 1043 1044 return (0); 1045 1046exit: 1047 m_freem(rx_info->mbuf); 1048 rx_info->mbuf = NULL; 1049 return (EFAULT); 1050} 1051 1052static void 1053ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, 1054 struct ena_rx_buffer *rx_info) 1055{ |
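`ena_alloc_rx_mbuf()` above loads a freshly allocated cluster mbuf with `bus_dmamap_load_mbuf_sg()`, requires exactly one segment, and publishes its bus address and length to the device; the matching free path syncs, unloads, and frees the mbuf. A reduced sketch of that cycle (`struct my_rx_buf` and both helpers are hypothetical, and the 9 KB cluster size is chosen for illustration, not taken from the driver):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

struct my_rx_buf {
	struct mbuf	*mbuf;
	bus_dmamap_t	map;	/* created earlier with bus_dmamap_create() */
	bus_addr_t	paddr;
	uint32_t	len;
};

static int
my_rx_buf_fill(bus_dma_tag_t rx_tag, struct my_rx_buf *rx)
{
	bus_dma_segment_t segs[1];
	int error, nsegs;

	rx->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (rx->mbuf == NULL)
		return (ENOMEM);
	rx->mbuf->m_pkthdr.len = rx->mbuf->m_len = MJUM9BYTES;

	error = bus_dmamap_load_mbuf_sg(rx_tag, rx->map, rx->mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		m_freem(rx->mbuf);
		rx->mbuf = NULL;
		return (EFAULT);
	}
	bus_dmamap_sync(rx_tag, rx->map, BUS_DMASYNC_PREREAD);

	rx->paddr = segs[0].ds_addr;
	rx->len = rx->mbuf->m_len;
	return (0);
}

static void
my_rx_buf_free(bus_dma_tag_t rx_tag, struct my_rx_buf *rx)
{
	if (rx->mbuf == NULL)
		return;
	bus_dmamap_sync(rx_tag, rx->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rx_tag, rx->map);
	m_freem(rx->mbuf);
	rx->mbuf = NULL;
}
```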
1063 | |
1064 if (rx_info->mbuf == NULL) { 1065 ena_log(adapter->pdev, WARN, 1066 "Trying to free unallocated buffer\n"); 1067 return; 1068 } 1069 1070 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1071 BUS_DMASYNC_POSTREAD); --- 26 unchanged lines hidden (view full) --- 1098 1099 ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 1100 next_to_use); 1101 1102 req_id = rx_ring->free_rx_ids[next_to_use]; 1103 rx_info = &rx_ring->rx_buffer_info[req_id]; 1104#ifdef DEV_NETMAP 1105 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) | 1056 if (rx_info->mbuf == NULL) { 1057 ena_log(adapter->pdev, WARN, 1058 "Trying to free unallocated buffer\n"); 1059 return; 1060 } 1061 1062 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1063 BUS_DMASYNC_POSTREAD); --- 26 unchanged lines hidden (view full) --- 1090 1091 ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 1092 next_to_use); 1093 1094 req_id = rx_ring->free_rx_ids[next_to_use]; 1095 rx_info = &rx_ring->rx_buffer_info[req_id]; 1096#ifdef DEV_NETMAP 1097 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) |
1106 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); | 1098 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, 1099 rx_info); |
1107 else 1108#endif /* DEV_NETMAP */ 1109 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 1110 if (unlikely(rc != 0)) { 1111 ena_log_io(pdev, WARN, 1112 "failed to alloc buffer for rx queue %d\n", 1113 rx_ring->qid); 1114 break; --- 8 unchanged lines hidden (view full) --- 1123 } 1124 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 1125 rx_ring->ring_size); 1126 } 1127 1128 if (unlikely(i < num)) { 1129 counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 1130 ena_log_io(pdev, WARN, | 1100 else 1101#endif /* DEV_NETMAP */ 1102 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 1103 if (unlikely(rc != 0)) { 1104 ena_log_io(pdev, WARN, 1105 "failed to alloc buffer for rx queue %d\n", 1106 rx_ring->qid); 1107 break; --- 8 unchanged lines hidden (view full) --- 1116 } 1117 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 1118 rx_ring->ring_size); 1119 } 1120 1121 if (unlikely(i < num)) { 1122 counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 1123 ena_log_io(pdev, WARN, |
1131 "refilled rx qid %d with only %d mbufs (from %d)\n", 1132 rx_ring->qid, i, num); | 1124 "refilled rx qid %d with only %d mbufs (from %d)\n", 1125 rx_ring->qid, i, num); |
1133 } 1134 1135 if (likely(i != 0)) 1136 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 1137 1138 rx_ring->next_to_use = next_to_use; 1139 return (i); 1140} --- 31 unchanged lines hidden (view full) --- 1172 /* Revert old size and trigger the reset */ 1173 adapter->buf_ring_size = old_buf_ring_size; 1174 ena_free_all_io_rings_resources(adapter); 1175 ena_init_io_rings_advanced(adapter); 1176 1177 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 1178 adapter); 1179 ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); | 1126 } 1127 1128 if (likely(i != 0)) 1129 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 1130 1131 rx_ring->next_to_use = next_to_use; 1132 return (i); 1133} --- 31 unchanged lines hidden (view full) --- 1165 /* Revert old size and trigger the reset */ 1166 adapter->buf_ring_size = old_buf_ring_size; 1167 ena_free_all_io_rings_resources(adapter); 1168 ena_init_io_rings_advanced(adapter); 1169 1170 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 1171 adapter); 1172 ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); |
1180 | |
1181 } 1182 } 1183 1184 return (rc); 1185} 1186 1187int 1188ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, --- 132 unchanged lines hidden (view full) --- 1321 1322 for (i = 0; i < adapter->num_io_queues; i++) { 1323 rx_ring = &adapter->rx_ring[i]; 1324 bufs_num = rx_ring->ring_size - 1; 1325 rc = ena_refill_rx_bufs(rx_ring, bufs_num); 1326 if (unlikely(rc != bufs_num)) 1327 ena_log_io(adapter->pdev, WARN, 1328 "refilling Queue %d failed. " | 1173 } 1174 } 1175 1176 return (rc); 1177} 1178 1179int 1180ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, --- 132 unchanged lines hidden (view full) --- 1313 1314 for (i = 0; i < adapter->num_io_queues; i++) { 1315 rx_ring = &adapter->rx_ring[i]; 1316 bufs_num = rx_ring->ring_size - 1; 1317 rc = ena_refill_rx_bufs(rx_ring, bufs_num); 1318 if (unlikely(rc != bufs_num)) 1319 ena_log_io(adapter->pdev, WARN, 1320 "refilling Queue %d failed. " |
1329 "Allocated %d buffers from: %d\n", i, rc, bufs_num); | 1321 "Allocated %d buffers from: %d\n", 1322 i, rc, bufs_num); |
1330#ifdef DEV_NETMAP 1331 rx_ring->initialized = true; 1332#endif /* DEV_NETMAP */ 1333 } 1334} 1335 1336static void 1337ena_free_all_rx_bufs(struct ena_adapter *adapter) --- 19 unchanged lines hidden (view full) --- 1357 for (int i = 0; i < tx_ring->ring_size; i++) { 1358 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 1359 1360 if (tx_info->mbuf == NULL) 1361 continue; 1362 1363 if (print_once) { 1364 ena_log(adapter->pdev, WARN, | 1323#ifdef DEV_NETMAP 1324 rx_ring->initialized = true; 1325#endif /* DEV_NETMAP */ 1326 } 1327} 1328 1329static void 1330ena_free_all_rx_bufs(struct ena_adapter *adapter) --- 19 unchanged lines hidden (view full) --- 1350 for (int i = 0; i < tx_ring->ring_size; i++) { 1351 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 1352 1353 if (tx_info->mbuf == NULL) 1354 continue; 1355 1356 if (print_once) { 1357 ena_log(adapter->pdev, WARN, |
1365 "free uncompleted tx mbuf qid %d idx 0x%x\n", 1366 qid, i); | 1358 "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 1359 i); |
1367 print_once = false; 1368 } else { 1369 ena_log(adapter->pdev, DBG, | 1360 print_once = false; 1361 } else { 1362 ena_log(adapter->pdev, DBG, |
1370 "free uncompleted tx mbuf qid %d idx 0x%x\n", 1371 qid, i); | 1363 "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 1364 i); |
1372 } 1373 1374 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1375 BUS_DMASYNC_POSTWRITE); 1376 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 1377 1378 m_free(tx_info->mbuf); 1379 tx_info->mbuf = NULL; 1380 } 1381 ENA_RING_MTX_UNLOCK(tx_ring); 1382} 1383 1384static void 1385ena_free_all_tx_bufs(struct ena_adapter *adapter) 1386{ | 1365 } 1366 1367 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1368 BUS_DMASYNC_POSTWRITE); 1369 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 1370 1371 m_free(tx_info->mbuf); 1372 tx_info->mbuf = NULL; 1373 } 1374 ENA_RING_MTX_UNLOCK(tx_ring); 1375} 1376 1377static void 1378ena_free_all_tx_bufs(struct ena_adapter *adapter) 1379{ |
1387 | |
1388 for (int i = 0; i < adapter->num_io_queues; i++) 1389 ena_free_tx_bufs(adapter, i); 1390} 1391 1392static void 1393ena_destroy_all_tx_queues(struct ena_adapter *adapter) 1394{ 1395 uint16_t ena_qid; --- 20 unchanged lines hidden (view full) --- 1416static void 1417ena_destroy_all_io_queues(struct ena_adapter *adapter) 1418{ 1419 struct ena_que *queue; 1420 int i; 1421 1422 for (i = 0; i < adapter->num_io_queues; i++) { 1423 queue = &adapter->que[i]; | 1380 for (int i = 0; i < adapter->num_io_queues; i++) 1381 ena_free_tx_bufs(adapter, i); 1382} 1383 1384static void 1385ena_destroy_all_tx_queues(struct ena_adapter *adapter) 1386{ 1387 uint16_t ena_qid; --- 20 unchanged lines hidden (view full) --- 1408static void 1409ena_destroy_all_io_queues(struct ena_adapter *adapter) 1410{ 1411 struct ena_que *queue; 1412 int i; 1413 1414 for (i = 0; i < adapter->num_io_queues; i++) { 1415 queue = &adapter->que[i]; |
1424 while (taskqueue_cancel(queue->cleanup_tq, 1425 &queue->cleanup_task, NULL)) 1426 taskqueue_drain(queue->cleanup_tq, 1427 &queue->cleanup_task); | 1416 while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) 1417 taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); |
1428 taskqueue_free(queue->cleanup_tq); 1429 } 1430 1431 ena_destroy_all_tx_queues(adapter); 1432 ena_destroy_all_rx_queues(adapter); 1433} 1434 1435static int --- 22 unchanged lines hidden (view full) --- 1458 rc = ena_com_create_io_queue(ena_dev, &ctx); 1459 if (rc != 0) { 1460 ena_log(adapter->pdev, ERR, 1461 "Failed to create io TX queue #%d rc: %d\n", i, rc); 1462 goto err_tx; 1463 } 1464 ring = &adapter->tx_ring[i]; 1465 rc = ena_com_get_io_handlers(ena_dev, ena_qid, | 1418 taskqueue_free(queue->cleanup_tq); 1419 } 1420 1421 ena_destroy_all_tx_queues(adapter); 1422 ena_destroy_all_rx_queues(adapter); 1423} 1424 1425static int --- 22 unchanged lines hidden (view full) --- 1448 rc = ena_com_create_io_queue(ena_dev, &ctx); 1449 if (rc != 0) { 1450 ena_log(adapter->pdev, ERR, 1451 "Failed to create io TX queue #%d rc: %d\n", i, rc); 1452 goto err_tx; 1453 } 1454 ring = &adapter->tx_ring[i]; 1455 rc = ena_com_get_io_handlers(ena_dev, ena_qid, |
1466 &ring->ena_com_io_sq, 1467 &ring->ena_com_io_cq); | 1456 &ring->ena_com_io_sq, &ring->ena_com_io_cq); |
1468 if (rc != 0) { 1469 ena_log(adapter->pdev, ERR, 1470 "Failed to get TX queue handlers. TX queue num" | 1457 if (rc != 0) { 1458 ena_log(adapter->pdev, ERR, 1459 "Failed to get TX queue handlers. TX queue num" |
1471 " %d rc: %d\n", i, rc); | 1460 " %d rc: %d\n", 1461 i, rc); |
1472 ena_com_destroy_io_queue(ena_dev, ena_qid); 1473 goto err_tx; 1474 } 1475 1476 if (ctx.numa_node >= 0) { 1477 ena_com_update_numa_node(ring->ena_com_io_cq, 1478 ctx.numa_node); 1479 } --- 14 unchanged lines hidden (view full) --- 1494 if (unlikely(rc != 0)) { 1495 ena_log(adapter->pdev, ERR, 1496 "Failed to create io RX queue[%d] rc: %d\n", i, rc); 1497 goto err_rx; 1498 } 1499 1500 ring = &adapter->rx_ring[i]; 1501 rc = ena_com_get_io_handlers(ena_dev, ena_qid, | 1462 ena_com_destroy_io_queue(ena_dev, ena_qid); 1463 goto err_tx; 1464 } 1465 1466 if (ctx.numa_node >= 0) { 1467 ena_com_update_numa_node(ring->ena_com_io_cq, 1468 ctx.numa_node); 1469 } --- 14 unchanged lines hidden (view full) --- 1484 if (unlikely(rc != 0)) { 1485 ena_log(adapter->pdev, ERR, 1486 "Failed to create io RX queue[%d] rc: %d\n", i, rc); 1487 goto err_rx; 1488 } 1489 1490 ring = &adapter->rx_ring[i]; 1491 rc = ena_com_get_io_handlers(ena_dev, ena_qid, |
1502 &ring->ena_com_io_sq, 1503 &ring->ena_com_io_cq); | 1492 &ring->ena_com_io_sq, &ring->ena_com_io_cq); |
1504 if (unlikely(rc != 0)) { 1505 ena_log(adapter->pdev, ERR, 1506 "Failed to get RX queue handlers. RX queue num" | 1493 if (unlikely(rc != 0)) { 1494 ena_log(adapter->pdev, ERR, 1495 "Failed to get RX queue handlers. RX queue num" |
1507 " %d rc: %d\n", i, rc); | 1496 " %d rc: %d\n", 1497 i, rc); |
1508 ena_com_destroy_io_queue(ena_dev, ena_qid); 1509 goto err_rx; 1510 } 1511 1512 if (ctx.numa_node >= 0) { 1513 ena_com_update_numa_node(ring->ena_com_io_cq, 1514 ctx.numa_node); 1515 } --- 5 unchanged lines hidden (view full) --- 1521 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 1522 queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 1523 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 1524 1525#ifdef RSS 1526 cpu_mask = &queue->cpu_mask; 1527#endif 1528 taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, | 1498 ena_com_destroy_io_queue(ena_dev, ena_qid); 1499 goto err_rx; 1500 } 1501 1502 if (ctx.numa_node >= 0) { 1503 ena_com_update_numa_node(ring->ena_com_io_cq, 1504 ctx.numa_node); 1505 } --- 5 unchanged lines hidden (view full) --- 1511 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 1512 queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 1513 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 1514 1515#ifdef RSS 1516 cpu_mask = &queue->cpu_mask; 1517#endif 1518 taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, |
1529 cpu_mask, 1530 "%s queue %d cleanup", | 1519 cpu_mask, "%s queue %d cleanup", |
1531 device_get_nameunit(adapter->pdev), i); 1532 } 1533 1534 return (0); 1535 1536err_rx: 1537 while (i--) 1538 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); --- 57 unchanged lines hidden (view full) --- 1596 } 1597 1598 /* Reserved the max msix vectors we might need */ 1599 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 1600 1601 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1602 M_DEVBUF, M_WAITOK | M_ZERO); 1603 | 1520 device_get_nameunit(adapter->pdev), i); 1521 } 1522 1523 return (0); 1524 1525err_rx: 1526 while (i--) 1527 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); --- 57 unchanged lines hidden (view full) --- 1585 } 1586 1587 /* Reserved the max msix vectors we might need */ 1588 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 1589 1590 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1591 M_DEVBUF, M_WAITOK | M_ZERO); 1592 |
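The IO-queue setup above gives every queue pair its own cleanup task on a fast taskqueue whose worker thread is pinned with a cpuset under RSS. A trimmed sketch of that taskqueue(9) usage with the plain (non-cpuset, non-NET_TASK) variants (`my_queue`, `my_cleanup`, `my_queue_start`, and `my_queue_stop` are hypothetical):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct my_queue {
	struct task		cleanup_task;
	struct taskqueue	*cleanup_tq;
};

static void
my_cleanup(void *arg, int pending)
{
	/* Process TX/RX completions for the queue passed in arg. */
}

static void
my_queue_start(device_t dev, struct my_queue *q, int qid)
{
	TASK_INIT(&q->cleanup_task, 0, my_cleanup, q);
	q->cleanup_tq = taskqueue_create_fast("my cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &q->cleanup_tq);
	taskqueue_start_threads(&q->cleanup_tq, 1, PI_NET,
	    "%s queue %d cleanup", device_get_nameunit(dev), qid);
}

static void
my_queue_stop(struct my_queue *q)
{
	/* Cancel first; drain any instance that is already running. */
	while (taskqueue_cancel(q->cleanup_tq, &q->cleanup_task, NULL) != 0)
		taskqueue_drain(q->cleanup_tq, &q->cleanup_task);
	taskqueue_free(q->cleanup_tq);
}
```

The cancel-then-drain loop on teardown mirrors the loops in `ena_free_tx_resources()` and `ena_destroy_all_io_queues()` above: cancel removes a queued task, and drain waits out one that has already started.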
1604 ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", 1605 msix_vecs); | 1593 ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); |
1606 1607 for (i = 0; i < msix_vecs; i++) { 1608 adapter->msix_entries[i].entry = i; 1609 /* Vectors must start from 1 */ 1610 adapter->msix_entries[i].vector = i + 1; 1611 } 1612 1613 msix_req = msix_vecs; 1614 rc = pci_alloc_msix(dev, &msix_vecs); 1615 if (unlikely(rc != 0)) { | 1594 1595 for (i = 0; i < msix_vecs; i++) { 1596 adapter->msix_entries[i].entry = i; 1597 /* Vectors must start from 1 */ 1598 adapter->msix_entries[i].vector = i + 1; 1599 } 1600 1601 msix_req = msix_vecs; 1602 rc = pci_alloc_msix(dev, &msix_vecs); 1603 if (unlikely(rc != 0)) { |
1616 ena_log(dev, ERR, 1617 "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); | 1604 ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n", 1605 msix_vecs, rc); |
1618 1619 rc = ENOSPC; 1620 goto err_msix_free; 1621 } 1622 1623 if (msix_vecs != msix_req) { 1624 if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 1625 ena_log(dev, ERR, 1626 "Not enough number of MSI-x allocated: %d\n", 1627 msix_vecs); 1628 pci_release_msi(dev); 1629 rc = ENOSPC; 1630 goto err_msix_free; 1631 } | 1606 1607 rc = ENOSPC; 1608 goto err_msix_free; 1609 } 1610 1611 if (msix_vecs != msix_req) { 1612 if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 1613 ena_log(dev, ERR, 1614 "Not enough number of MSI-x allocated: %d\n", 1615 msix_vecs); 1616 pci_release_msi(dev); 1617 rc = ENOSPC; 1618 goto err_msix_free; 1619 } |
1632 ena_log(dev, ERR, "Enable only %d MSI-x (out of %d), reduce " 1633 "the number of queues\n", msix_vecs, msix_req); | 1620 ena_log(dev, ERR, 1621 "Enable only %d MSI-x (out of %d), reduce " 1622 "the number of queues\n", 1623 msix_vecs, msix_req); |
1634 } 1635 1636 adapter->msix_vecs = msix_vecs; 1637 ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1638 1639 return (0); 1640 1641err_msix_free: 1642 free(adapter->msix_entries, M_DEVBUF); 1643 adapter->msix_entries = NULL; 1644 1645 return (rc); 1646} 1647 1648static void 1649ena_setup_mgmnt_intr(struct ena_adapter *adapter) 1650{ | 1624 } 1625 1626 adapter->msix_vecs = msix_vecs; 1627 ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1628 1629 return (0); 1630 1631err_msix_free: 1632 free(adapter->msix_entries, M_DEVBUF); 1633 adapter->msix_entries = NULL; 1634 1635 return (rc); 1636} 1637 1638static void 1639ena_setup_mgmnt_intr(struct ena_adapter *adapter) 1640{ |
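`ena_enable_msix()` above requests one vector per IO queue plus the management vector and tolerates a smaller grant, later sizing the queue count to what was actually allocated. A bare-bones sketch of that pci(9) negotiation (`my_enable_msix` is hypothetical):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

static int
my_enable_msix(device_t dev, int num_queues, int *granted)
{
	int error, have, want;

	want = num_queues + 1;		/* IO vectors + one admin vector */
	have = pci_msix_count(dev);	/* what the device advertises */
	if (have < 2)
		return (ENOSPC);
	if (want > have)
		want = have;

	*granted = want;
	error = pci_alloc_msix(dev, granted);
	if (error != 0)
		return (error);

	if (*granted < 2) {
		/* Not even one IO vector next to the admin one: give up. */
		pci_release_msi(dev);
		return (ENOSPC);
	}
	/* *granted may be smaller than want; reduce the queue count to it. */
	return (0);
}
```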
1651 1652 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 1653 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 1654 device_get_nameunit(adapter->pdev)); | 1641 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, 1642 "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); |
1655 /* 1656 * Handler is NULL on purpose, it will be set 1657 * when mgmnt interrupt is acquired 1658 */ 1659 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 1660 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 1661 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 1662 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; --- 68 unchanged lines hidden (view full) --- 1731 1732 if (unlikely(irq->res == NULL)) { 1733 ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 1734 irq->vector); 1735 return (ENXIO); 1736 } 1737 1738 rc = bus_setup_intr(adapter->pdev, irq->res, | 1643 /* 1644 * Handler is NULL on purpose, it will be set 1645 * when mgmnt interrupt is acquired 1646 */ 1647 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 1648 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 1649 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 1650 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; --- 68 unchanged lines hidden (view full) --- 1719 1720 if (unlikely(irq->res == NULL)) { 1721 ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 1722 irq->vector); 1723 return (ENXIO); 1724 } 1725 1726 rc = bus_setup_intr(adapter->pdev, irq->res, |
1739 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 1740 irq->data, &irq->cookie); | 1727 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, 1728 &irq->cookie); |
1741 if (unlikely(rc != 0)) { | 1729 if (unlikely(rc != 0)) { |
1742 ena_log(pdev, ERR, "failed to register " 1743 "interrupt handler for irq %ju: %d\n", | 1730 ena_log(pdev, ERR, 1731 "failed to register interrupt handler for irq %ju: %d\n", |
1744 rman_get_start(irq->res), rc); 1745 goto err_res_free; 1746 } 1747 irq->requested = true; 1748 1749 return (rc); 1750 1751err_res_free: 1752 ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); | 1732 rman_get_start(irq->res), rc); 1733 goto err_res_free; 1734 } 1735 irq->requested = true; 1736 1737 return (rc); 1738 1739err_res_free: 1740 ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); |
1753 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1754 irq->vector, irq->res); | 1741 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, 1742 irq->res); |
1755 if (unlikely(rcc != 0)) | 1743 if (unlikely(rcc != 0)) |
1756 ena_log(pdev, ERR, "dev has no parent while " 1757 "releasing res for irq: %d\n", irq->vector); | 1744 ena_log(pdev, ERR, 1745 "dev has no parent while releasing res for irq: %d\n", 1746 irq->vector); |
1758 irq->res = NULL; 1759 1760 return (rc); 1761} 1762 1763static int 1764ena_request_io_irq(struct ena_adapter *adapter) 1765{ --- 15 unchanged lines hidden (view full) --- 1781 1782 if (unlikely(irq->requested)) 1783 continue; 1784 1785 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 1786 &irq->vector, flags); 1787 if (unlikely(irq->res == NULL)) { 1788 rc = ENOMEM; | 1747 irq->res = NULL; 1748 1749 return (rc); 1750} 1751 1752static int 1753ena_request_io_irq(struct ena_adapter *adapter) 1754{ --- 15 unchanged lines hidden (view full) --- 1770 1771 if (unlikely(irq->requested)) 1772 continue; 1773 1774 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 1775 &irq->vector, flags); 1776 if (unlikely(irq->res == NULL)) { 1777 rc = ENOMEM; |
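`ena_request_mgmnt_irq()` above pairs `bus_alloc_resource_any(SYS_RES_IRQ)` with `bus_setup_intr()`, and its error path releases the resource again; the free paths further down reverse both steps in the same order. A compact sketch of that sequence (`my_intr`, `my_setup_irq`, and `my_teardown_irq` are hypothetical):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>

static void
my_intr(void *arg)
{
	/* Handle the interrupt for the softc passed in arg. */
}

static int
my_setup_irq(device_t dev, int rid, void *arg, struct resource **resp,
    void **cookiep)
{
	int error;

	*resp = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (*resp == NULL)
		return (ENXIO);

	error = bus_setup_intr(dev, *resp, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, my_intr, arg, cookiep);
	if (error != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, *resp);
		*resp = NULL;
	}
	return (error);
}

static void
my_teardown_irq(device_t dev, int rid, struct resource *res, void *cookie)
{
	if (cookie != NULL)
		bus_teardown_intr(dev, res, cookie);
	if (res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, res);
}
```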
1789 ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 1790 irq->vector); | 1778 ena_log(pdev, ERR, 1779 "could not allocate irq vector: %d\n", irq->vector); |
1791 goto err; 1792 } 1793 1794 rc = bus_setup_intr(adapter->pdev, irq->res, | 1780 goto err; 1781 } 1782 1783 rc = bus_setup_intr(adapter->pdev, irq->res, |
1795 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, 1796 irq->data, &irq->cookie); 1797 if (unlikely(rc != 0)) { 1798 ena_log(pdev, ERR, "failed to register " 1799 "interrupt handler for irq %ju: %d\n", | 1784 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, 1785 &irq->cookie); 1786 if (unlikely(rc != 0)) { 1787 ena_log(pdev, ERR, 1788 "failed to register interrupt handler for irq %ju: %d\n", |
1800 rman_get_start(irq->res), rc); 1801 goto err; 1802 } 1803 irq->requested = true; 1804 1805#ifdef RSS 1806 rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 1807 if (unlikely(rc != 0)) { | 1789 rman_get_start(irq->res), rc); 1790 goto err; 1791 } 1792 irq->requested = true; 1793 1794#ifdef RSS 1795 rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 1796 if (unlikely(rc != 0)) { |
1808 ena_log(pdev, ERR, "failed to bind " 1809 "interrupt handler for irq %ju to cpu %d: %d\n", | 1797 ena_log(pdev, ERR, 1798 "failed to bind interrupt handler for irq %ju to cpu %d: %d\n", |
1810 rman_get_start(irq->res), irq->cpu, rc); 1811 goto err; 1812 } 1813 1814 ena_log(pdev, INFO, "queue %d - cpu %d\n", 1815 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 1816#endif 1817 } --- 4 unchanged lines hidden (view full) --- 1822 1823 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 1824 irq = &adapter->irq_tbl[i]; 1825 rcc = 0; 1826 1827 /* Once we entered err: section and irq->requested is true we 1828 free both intr and resources */ 1829 if (irq->requested) | 1799 rman_get_start(irq->res), irq->cpu, rc); 1800 goto err; 1801 } 1802 1803 ena_log(pdev, INFO, "queue %d - cpu %d\n", 1804 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 1805#endif 1806 } --- 4 unchanged lines hidden (view full) --- 1811 1812 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 1813 irq = &adapter->irq_tbl[i]; 1814 rcc = 0; 1815 1816 /* Once we entered err: section and irq->requested is true we 1817 free both intr and resources */ 1818 if (irq->requested) |
1830 rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); | 1819 rcc = bus_teardown_intr(adapter->pdev, irq->res, 1820 irq->cookie); |
1831 if (unlikely(rcc != 0)) | 1821 if (unlikely(rcc != 0)) |
1832 ena_log(pdev, ERR, "could not release irq: %d, error: %d\n", | 1822 ena_log(pdev, ERR, 1823 "could not release irq: %d, error: %d\n", |
1833 irq->vector, rcc); 1834 1835 /* If we entered err: section without irq->requested set we know 1836 it was bus_alloc_resource_any() that needs cleanup, provided 1837 res is not NULL. In case res is NULL no work in needed in 1838 this iteration */ 1839 rcc = 0; 1840 if (irq->res != NULL) { 1841 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1842 irq->vector, irq->res); 1843 } 1844 if (unlikely(rcc != 0)) | 1824 irq->vector, rcc); 1825 1826 /* If we entered err: section without irq->requested set we know 1827 it was bus_alloc_resource_any() that needs cleanup, provided 1828 res is not NULL. In case res is NULL no work in needed in 1829 this iteration */ 1830 rcc = 0; 1831 if (irq->res != NULL) { 1832 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1833 irq->vector, irq->res); 1834 } 1835 if (unlikely(rcc != 0)) |
1845 ena_log(pdev, ERR, "dev has no parent while " 1846 "releasing res for irq: %d\n", irq->vector); | 1836 ena_log(pdev, ERR, 1837 "dev has no parent while releasing res for irq: %d\n", 1838 irq->vector); |
1847 irq->requested = false; 1848 irq->res = NULL; 1849 } 1850 1851 return (rc); 1852} 1853 1854static void --- 14 unchanged lines hidden (view full) --- 1869 } 1870 1871 if (irq->res != NULL) { 1872 ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 1873 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1874 irq->vector, irq->res); 1875 irq->res = NULL; 1876 if (unlikely(rc != 0)) | 1839 irq->requested = false; 1840 irq->res = NULL; 1841 } 1842 1843 return (rc); 1844} 1845 1846static void --- 14 unchanged lines hidden (view full) --- 1861 } 1862 1863 if (irq->res != NULL) { 1864 ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 1865 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1866 irq->vector, irq->res); 1867 irq->res = NULL; 1868 if (unlikely(rc != 0)) |
1877 ena_log(pdev, ERR, "dev has no parent while " 1878 "releasing res for irq: %d\n", irq->vector); | 1869 ena_log(pdev, ERR, 1870 "dev has no parent while releasing res for irq: %d\n", 1871 irq->vector); |
1879 } 1880} 1881 1882static void 1883ena_free_io_irq(struct ena_adapter *adapter) 1884{ 1885 device_t pdev = adapter->pdev; 1886 struct ena_irq *irq; 1887 int rc; 1888 1889 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1890 irq = &adapter->irq_tbl[i]; 1891 if (irq->requested) { 1892 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 1893 rc = bus_teardown_intr(adapter->pdev, irq->res, 1894 irq->cookie); 1895 if (unlikely(rc != 0)) { | 1872 } 1873} 1874 1875static void 1876ena_free_io_irq(struct ena_adapter *adapter) 1877{ 1878 device_t pdev = adapter->pdev; 1879 struct ena_irq *irq; 1880 int rc; 1881 1882 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1883 irq = &adapter->irq_tbl[i]; 1884 if (irq->requested) { 1885 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 1886 rc = bus_teardown_intr(adapter->pdev, irq->res, 1887 irq->cookie); 1888 if (unlikely(rc != 0)) { |
1896 ena_log(pdev, ERR, "failed to tear down irq: %d\n", | 1889 ena_log(pdev, ERR, 1890 "failed to tear down irq: %d\n", |
1897 irq->vector); 1898 } 1899 irq->requested = 0; 1900 } 1901 1902 if (irq->res != NULL) { 1903 ena_log(pdev, DBG, "release resource irq: %d\n", 1904 irq->vector); 1905 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1906 irq->vector, irq->res); 1907 irq->res = NULL; 1908 if (unlikely(rc != 0)) { | 1891 irq->vector); 1892 } 1893 irq->requested = 0; 1894 } 1895 1896 if (irq->res != NULL) { 1897 ena_log(pdev, DBG, "release resource irq: %d\n", 1898 irq->vector); 1899 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1900 irq->vector, irq->res); 1901 irq->res = NULL; 1902 if (unlikely(rc != 0)) { |
1909 ena_log(pdev, ERR, "dev has no parent" 1910 " while releasing res for irq: %d\n", | 1903 ena_log(pdev, ERR, 1904 "dev has no parent while releasing res for irq: %d\n", |
1911 irq->vector); 1912 } 1913 } 1914 } 1915} 1916 1917static void | 1905 irq->vector); 1906 } 1907 } 1908 } 1909} 1910 1911static void |
1918ena_free_irqs(struct ena_adapter* adapter) | 1912ena_free_irqs(struct ena_adapter *adapter) |
1919{ | 1913{ |
1920 | |
1921 ena_free_io_irq(adapter); 1922 ena_free_mgmnt_irq(adapter); 1923 ena_disable_msix(adapter); 1924} 1925 1926static void 1927ena_disable_msix(struct ena_adapter *adapter) 1928{ | 1914 ena_free_io_irq(adapter); 1915 ena_free_mgmnt_irq(adapter); 1916 ena_disable_msix(adapter); 1917} 1918 1919static void 1920ena_disable_msix(struct ena_adapter *adapter) 1921{ |
1929 | |
1930 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1931 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1932 pci_release_msi(adapter->pdev); 1933 } 1934 1935 adapter->msix_vecs = 0; 1936 free(adapter->msix_entries, M_DEVBUF); 1937 adapter->msix_entries = NULL; 1938} 1939 1940static void 1941ena_unmask_all_io_irqs(struct ena_adapter *adapter) 1942{ | 1922 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1923 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1924 pci_release_msi(adapter->pdev); 1925 } 1926 1927 adapter->msix_vecs = 0; 1928 free(adapter->msix_entries, M_DEVBUF); 1929 adapter->msix_entries = NULL; 1930} 1931 1932static void 1933ena_unmask_all_io_irqs(struct ena_adapter *adapter) 1934{ |
1943 struct ena_com_io_cq* io_cq; | 1935 struct ena_com_io_cq *io_cq; |
1944 struct ena_eth_io_intr_reg intr_reg; 1945 struct ena_ring *tx_ring; 1946 uint16_t ena_qid; 1947 int i; 1948 1949 /* Unmask interrupts for all queues */ 1950 for (i = 0; i < adapter->num_io_queues; i++) { 1951 ena_qid = ENA_IO_TXQ_IDX(i); --- 26 unchanged lines hidden (view full) --- 1978 ena_refill_all_rx_bufs(adapter); 1979 ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 1980 sizeof(adapter->hw_stats)); 1981 1982 return (0); 1983} 1984 1985static void | 1936 struct ena_eth_io_intr_reg intr_reg; 1937 struct ena_ring *tx_ring; 1938 uint16_t ena_qid; 1939 int i; 1940 1941 /* Unmask interrupts for all queues */ 1942 for (i = 0; i < adapter->num_io_queues; i++) { 1943 ena_qid = ENA_IO_TXQ_IDX(i); --- 26 unchanged lines hidden (view full) --- 1970 ena_refill_all_rx_bufs(adapter); 1971 ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 1972 sizeof(adapter->hw_stats)); 1973 1974 return (0); 1975} 1976 1977static void |
1986set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, 1987 int new_rx_size) | 1978set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) |
1988{ 1989 int i; 1990 1991 for (i = 0; i < adapter->num_io_queues; i++) { 1992 adapter->tx_ring[i].ring_size = new_tx_size; 1993 adapter->rx_ring[i].ring_size = new_rx_size; 1994 } 1995} --- 26 unchanged lines hidden (view full) --- 2022 if (unlikely(rc != 0)) { 2023 ena_log(pdev, ERR, "err_setup_rx\n"); 2024 goto err_setup_rx; 2025 } 2026 2027 /* Create IO queues for Rx & Tx */ 2028 rc = ena_create_io_queues(adapter); 2029 if (unlikely(rc != 0)) { | 1979{ 1980 int i; 1981 1982 for (i = 0; i < adapter->num_io_queues; i++) { 1983 adapter->tx_ring[i].ring_size = new_tx_size; 1984 adapter->rx_ring[i].ring_size = new_rx_size; 1985 } 1986} --- 26 unchanged lines hidden (view full) --- 2013 if (unlikely(rc != 0)) { 2014 ena_log(pdev, ERR, "err_setup_rx\n"); 2015 goto err_setup_rx; 2016 } 2017 2018 /* Create IO queues for Rx & Tx */ 2019 rc = ena_create_io_queues(adapter); 2020 if (unlikely(rc != 0)) { |
2030 ena_log(pdev, ERR, 2031 "create IO queues failed\n"); | 2021 ena_log(pdev, ERR, "create IO queues failed\n"); |
2032 goto err_io_que; 2033 } 2034 2035 return (0); 2036 2037err_io_que: 2038 ena_free_all_rx_resources(adapter); 2039err_setup_rx: --- 15 unchanged lines hidden (view full) --- 2055 ena_log(pdev, ERR, 2056 "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 2057 cur_tx_ring_size, cur_rx_ring_size); 2058 2059 new_tx_ring_size = cur_tx_ring_size; 2060 new_rx_ring_size = cur_rx_ring_size; 2061 2062 /* | 2022 goto err_io_que; 2023 } 2024 2025 return (0); 2026 2027err_io_que: 2028 ena_free_all_rx_resources(adapter); 2029err_setup_rx: --- 15 unchanged lines hidden (view full) --- 2045 ena_log(pdev, ERR, 2046 "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 2047 cur_tx_ring_size, cur_rx_ring_size); 2048 2049 new_tx_ring_size = cur_tx_ring_size; 2050 new_rx_ring_size = cur_rx_ring_size; 2051 2052 /* |
2063 * Decrease the size of a larger queue, or decrease both if they are 2064 * the same size. | 2053 * Decrease the size of a larger queue, or decrease both if they 2054 * are the same size. |
2065 */ 2066 if (cur_rx_ring_size <= cur_tx_ring_size) 2067 new_tx_ring_size = cur_tx_ring_size / 2; 2068 if (cur_rx_ring_size >= cur_tx_ring_size) 2069 new_rx_ring_size = cur_rx_ring_size / 2; 2070 2071 if (new_tx_ring_size < ENA_MIN_RING_SIZE || 2072 new_rx_ring_size < ENA_MIN_RING_SIZE) { --- 37 unchanged lines hidden (view full) --- 2110 } 2111 rc = ena_request_io_irq(adapter); 2112 if (unlikely(rc != 0)) { 2113 ena_log(adapter->pdev, ERR, "err_req_irq\n"); 2114 goto error; 2115 } 2116 2117 ena_log(adapter->pdev, INFO, | 2055 */ 2056 if (cur_rx_ring_size <= cur_tx_ring_size) 2057 new_tx_ring_size = cur_tx_ring_size / 2; 2058 if (cur_rx_ring_size >= cur_tx_ring_size) 2059 new_rx_ring_size = cur_rx_ring_size / 2; 2060 2061 if (new_tx_ring_size < ENA_MIN_RING_SIZE || 2062 new_rx_ring_size < ENA_MIN_RING_SIZE) { --- 37 unchanged lines hidden (view full) --- 2100 } 2101 rc = ena_request_io_irq(adapter); 2102 if (unlikely(rc != 0)) { 2103 ena_log(adapter->pdev, ERR, "err_req_irq\n"); 2104 goto error; 2105 } 2106 2107 ena_log(adapter->pdev, INFO, |
2118 "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, " 2119 "LLQ is %s\n", | 2108 "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n", |
2120 adapter->num_io_queues, 2121 adapter->requested_rx_ring_size, 2122 adapter->requested_tx_ring_size, 2123 (adapter->ena_dev->tx_mem_queue_type == | 2109 adapter->num_io_queues, 2110 adapter->requested_rx_ring_size, 2111 adapter->requested_tx_ring_size, 2112 (adapter->ena_dev->tx_mem_queue_type == |
2124 ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); | 2113 ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); |
2125 2126 rc = create_queues_with_size_backoff(adapter); 2127 if (unlikely(rc != 0)) { 2128 ena_log(adapter->pdev, ERR, 2129 "error creating queues with size backoff\n"); 2130 goto err_create_queues_with_backoff; 2131 } 2132 2133 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 2134 if_link_state_change(adapter->ifp, LINK_STATE_UP); 2135 2136 rc = ena_up_complete(adapter); 2137 if (unlikely(rc != 0)) 2138 goto err_up_complete; 2139 2140 counter_u64_add(adapter->dev_stats.interface_up, 1); 2141 2142 ena_update_hwassist(adapter); 2143 | 2114 2115 rc = create_queues_with_size_backoff(adapter); 2116 if (unlikely(rc != 0)) { 2117 ena_log(adapter->pdev, ERR, 2118 "error creating queues with size backoff\n"); 2119 goto err_create_queues_with_backoff; 2120 } 2121 2122 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 2123 if_link_state_change(adapter->ifp, LINK_STATE_UP); 2124 2125 rc = ena_up_complete(adapter); 2126 if (unlikely(rc != 0)) 2127 goto err_up_complete; 2128 2129 counter_u64_add(adapter->dev_stats.interface_up, 1); 2130 2131 ena_update_hwassist(adapter); 2132 |
2144 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, 2145 IFF_DRV_OACTIVE); | 2133 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); |
2146 2147 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 2148 2149 ena_unmask_all_io_irqs(adapter); 2150 2151 return (0); 2152 2153err_up_complete: --- 100 unchanged lines hidden (view full) --- 2254 2255 rc = ena_up(adapter); 2256 ENA_LOCK_UNLOCK(); 2257 break; 2258 2259 case SIOCSIFFLAGS: 2260 if ((ifp->if_flags & IFF_UP) != 0) { 2261 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { | 2134 2135 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 2136 2137 ena_unmask_all_io_irqs(adapter); 2138 2139 return (0); 2140 2141err_up_complete: --- 100 unchanged lines hidden (view full) --- 2242 2243 rc = ena_up(adapter); 2244 ENA_LOCK_UNLOCK(); 2245 break; 2246 2247 case SIOCSIFFLAGS: 2248 if ((ifp->if_flags & IFF_UP) != 0) { 2249 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { |
2262 if ((ifp->if_flags & (IFF_PROMISC | 2263 IFF_ALLMULTI)) != 0) { | 2250 if ((ifp->if_flags & 2251 (IFF_PROMISC | IFF_ALLMULTI)) != 0) { |
2264 ena_log(adapter->pdev, INFO, 2265 "ioctl promisc/allmulti\n"); 2266 } 2267 } else { 2268 ENA_LOCK_LOCK(); 2269 rc = ena_up(adapter); 2270 ENA_LOCK_UNLOCK(); 2271 } --- 53 unchanged lines hidden (view full) --- 2325 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 2326 caps |= IFCAP_TXCSUM; 2327 2328 if ((feat->offload.tx & 2329 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 2330 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 2331 caps |= IFCAP_TXCSUM_IPV6; 2332 | 2252 ena_log(adapter->pdev, INFO, 2253 "ioctl promisc/allmulti\n"); 2254 } 2255 } else { 2256 ENA_LOCK_LOCK(); 2257 rc = ena_up(adapter); 2258 ENA_LOCK_UNLOCK(); 2259 } --- 53 unchanged lines hidden (view full) --- 2313 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 2314 caps |= IFCAP_TXCSUM; 2315 2316 if ((feat->offload.tx & 2317 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 2318 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 2319 caps |= IFCAP_TXCSUM_IPV6; 2320 |
2333 if ((feat->offload.tx & 2334 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) | 2321 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) |
2335 caps |= IFCAP_TSO4; 2336 | 2322 caps |= IFCAP_TSO4; 2323 |
2337 if ((feat->offload.tx & 2338 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) | 2324 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) |
2339 caps |= IFCAP_TSO6; 2340 2341 if ((feat->offload.rx_supported & 2342 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 2343 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 2344 caps |= IFCAP_RXCSUM; 2345 2346 if ((feat->offload.rx_supported & 2347 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 2348 caps |= IFCAP_RXCSUM_IPV6; 2349 2350 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 2351 2352 return (caps); 2353} 2354 2355static void 2356ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 2357{ | 2325 caps |= IFCAP_TSO6; 2326 2327 if ((feat->offload.rx_supported & 2328 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 2329 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 2330 caps |= IFCAP_RXCSUM; 2331 2332 if ((feat->offload.rx_supported & 2333 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 2334 caps |= IFCAP_RXCSUM_IPV6; 2335 2336 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 2337 2338 return (caps); 2339} 2340 2341static void 2342ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 2343{ |
2358 2359 host_info->supported_network_features[0] = 2360 (uint32_t)if_getcapabilities(ifp); | 2344 host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); |
2361} 2362 2363static void 2364ena_update_hwassist(struct ena_adapter *adapter) 2365{ 2366 if_t ifp = adapter->ifp; 2367 uint32_t feat = adapter->tx_offload_cap; 2368 int cap = if_getcapenable(ifp); --- 34 unchanged lines hidden (view full) --- 2403 if (unlikely(ifp == NULL)) { 2404 ena_log(pdev, ERR, "can not allocate ifnet structure\n"); 2405 return (ENXIO); 2406 } 2407 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 2408 if_setdev(ifp, pdev); 2409 if_setsoftc(ifp, adapter); 2410 | 2345} 2346 2347static void 2348ena_update_hwassist(struct ena_adapter *adapter) 2349{ 2350 if_t ifp = adapter->ifp; 2351 uint32_t feat = adapter->tx_offload_cap; 2352 int cap = if_getcapenable(ifp); --- 34 unchanged lines hidden (view full) --- 2387 if (unlikely(ifp == NULL)) { 2388 ena_log(pdev, ERR, "can not allocate ifnet structure\n"); 2389 return (ENXIO); 2390 } 2391 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 2392 if_setdev(ifp, pdev); 2393 if_setsoftc(ifp, adapter); 2394 |
2411 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | 2412 IFF_KNOWSEPOCH); | 2395 if_setflags(ifp, 2396 IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH); |
2413 if_setinitfn(ifp, ena_init); 2414 if_settransmitfn(ifp, ena_mq_start); 2415 if_setqflushfn(ifp, ena_qflush); 2416 if_setioctlfn(ifp, ena_ioctl); 2417 if_setgetcounterfn(ifp, ena_get_counter); 2418 2419 if_setsendqlen(ifp, adapter->requested_tx_ring_size); 2420 if_setsendqready(ifp); --- 15 unchanged lines hidden (view full) --- 2436 2437 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2438 if_setcapenable(ifp, if_getcapabilities(ifp)); 2439 2440 /* 2441 * Specify the media types supported by this adapter and register 2442 * callbacks to update media and link information 2443 */ | 2397 if_setinitfn(ifp, ena_init); 2398 if_settransmitfn(ifp, ena_mq_start); 2399 if_setqflushfn(ifp, ena_qflush); 2400 if_setioctlfn(ifp, ena_ioctl); 2401 if_setgetcounterfn(ifp, ena_get_counter); 2402 2403 if_setsendqlen(ifp, adapter->requested_tx_ring_size); 2404 if_setsendqready(ifp); --- 15 unchanged lines hidden (view full) --- 2420 2421 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2422 if_setcapenable(ifp, if_getcapabilities(ifp)); 2423 2424 /* 2425 * Specify the media types supported by this adapter and register 2426 * callbacks to update media and link information 2427 */ |
2444 ifmedia_init(&adapter->media, IFM_IMASK, 2445 ena_media_change, ena_media_status); | 2428 ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, 2429 ena_media_status); |
2446 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2447 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2448 2449 ether_ifattach(ifp, adapter->mac_addr); 2450 2451 return (0); 2452} 2453 --- 5 unchanged lines hidden (view full) --- 2459 ENA_LOCK_ASSERT(); 2460 2461 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2462 return; 2463 2464 ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 2465 2466 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); | 2430 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2431 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2432 2433 ether_ifattach(ifp, adapter->mac_addr); 2434 2435 return (0); 2436} 2437 --- 5 unchanged lines hidden (view full) --- 2443 ENA_LOCK_ASSERT(); 2444 2445 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2446 return; 2447 2448 ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 2449 2450 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); |
2467 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, 2468 IFF_DRV_RUNNING); | 2451 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); |
2469 2470 ena_free_io_irq(adapter); 2471 2472 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { | 2452 2453 ena_free_io_irq(adapter); 2454 2455 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { |
2473 rc = ena_com_dev_reset(adapter->ena_dev, 2474 adapter->reset_reason); | 2456 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
2475 if (unlikely(rc != 0)) | 2457 if (unlikely(rc != 0)) |
2476 ena_log(adapter->pdev, ERR, 2477 "Device reset failed\n"); | 2458 ena_log(adapter->pdev, ERR, "Device reset failed\n"); |
2478 } 2479 2480 ena_destroy_all_io_queues(adapter); 2481 2482 ena_free_all_tx_bufs(adapter); 2483 ena_free_all_rx_bufs(adapter); 2484 ena_free_all_tx_resources(adapter); 2485 ena_free_all_rx_resources(adapter); --- 7 unchanged lines hidden (view full) --- 2493{ 2494 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 2495 2496 /* Regular queues capabilities */ 2497 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2498 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2499 &get_feat_ctx->max_queue_ext.max_queue_ext; 2500 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, | 2459 } 2460 2461 ena_destroy_all_io_queues(adapter); 2462 2463 ena_free_all_tx_bufs(adapter); 2464 ena_free_all_rx_bufs(adapter); 2465 ena_free_all_tx_resources(adapter); 2466 ena_free_all_rx_resources(adapter); --- 7 unchanged lines hidden (view full) --- 2474{ 2475 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 2476 2477 /* Regular queues capabilities */ 2478 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2479 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2480 &get_feat_ctx->max_queue_ext.max_queue_ext; 2481 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, |
2501 max_queue_ext->max_rx_cq_num); | 2482 max_queue_ext->max_rx_cq_num); |
2502 2503 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 2504 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2505 } else { 2506 struct ena_admin_queue_feature_desc *max_queues = 2507 &get_feat_ctx->max_queues; 2508 io_tx_sq_num = max_queues->max_sq_num; 2509 io_tx_cq_num = max_queues->max_cq_num; --- 61 unchanged lines hidden (view full) --- 2571 ena_log(pdev, WARN, 2572 "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 2573 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2574 return (0); 2575 } 2576 2577 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 2578 if (unlikely(rc != 0)) { | 2483 2484 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 2485 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2486 } else { 2487 struct ena_admin_queue_feature_desc *max_queues = 2488 &get_feat_ctx->max_queues; 2489 io_tx_sq_num = max_queues->max_sq_num; 2490 io_tx_cq_num = max_queues->max_cq_num; --- 61 unchanged lines hidden (view full) --- 2552 ena_log(pdev, WARN, 2553 "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 2554 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2555 return (0); 2556 } 2557 2558 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 2559 if (unlikely(rc != 0)) { |
2579 ena_log(pdev, WARN, "Failed to configure the device mode. " | 2560 ena_log(pdev, WARN, 2561 "Failed to configure the device mode. " |
2580 "Fallback to host mode policy.\n"); 2581 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2582 } 2583 2584 return (0); 2585} 2586 2587static int 2588ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 2589{ 2590 struct ena_adapter *adapter = device_get_softc(pdev); 2591 int rc, rid; 2592 2593 /* Try to allocate resources for LLQ bar */ 2594 rid = PCIR_BAR(ENA_MEM_BAR); | 2562 "Fallback to host mode policy.\n"); 2563 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2564 } 2565 2566 return (0); 2567} 2568 2569static int 2570ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 2571{ 2572 struct ena_adapter *adapter = device_get_softc(pdev); 2573 int rc, rid; 2574 2575 /* Try to allocate resources for LLQ bar */ 2576 rid = PCIR_BAR(ENA_MEM_BAR); |
2595 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 2596 &rid, RF_ACTIVE); | 2577 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 2578 RF_ACTIVE); |
2597 if (unlikely(adapter->memory == NULL)) { | 2579 if (unlikely(adapter->memory == NULL)) { |
2598 ena_log(pdev, WARN, "unable to allocate LLQ bar resource. " 2599 "Fallback to host mode policy.\n"); | 2580 ena_log(pdev, WARN, 2581 "unable to allocate LLQ bar resource. Fallback to host mode policy.\n"); |
2600 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2601 return (0); 2602 } 2603 2604 /* Enable write combining for better LLQ performance */ 2605 rc = ena_enable_wc(adapter->pdev, adapter->memory); 2606 if (unlikely(rc != 0)) { 2607 ena_log(pdev, ERR, "failed to enable write combining.\n"); --- 4 unchanged lines hidden (view full) --- 2612 * Save virtual address of the device's memory region 2613 * for the ena_com layer. 2614 */ 2615 ena_dev->mem_bar = rman_get_virtual(adapter->memory); 2616 2617 return (0); 2618} 2619 | 2582 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2583 return (0); 2584 } 2585 2586 /* Enable write combining for better LLQ performance */ 2587 rc = ena_enable_wc(adapter->pdev, adapter->memory); 2588 if (unlikely(rc != 0)) { 2589 ena_log(pdev, ERR, "failed to enable write combining.\n"); --- 4 unchanged lines hidden (view full) --- 2594 * Save virtual address of the device's memory region 2595 * for the ena_com layer. 2596 */ 2597 ena_dev->mem_bar = rman_get_virtual(adapter->memory); 2598 2599 return (0); 2600} 2601 |
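The LLQ bar mapping above follows the standard newbus/rman pattern for a PCI memory BAR: allocate the SYS_RES_MEMORY resource for the BAR's rid with RF_ACTIVE, then use the rman(9) accessors to obtain either a bus_space tag/handle pair or, as here, a kernel virtual address. A minimal sketch of that pattern, with an illustrative function name and BAR index that are not taken from the driver:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/* Map one PCI memory BAR and report its kernel virtual address. */
static int
xx_map_bar(device_t dev, int bar, struct resource **resp)
{
	int rid;

	rid = PCIR_BAR(bar);
	*resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (*resp == NULL)
		return (ENXIO);

	/*
	 * rman_get_bustag()/rman_get_bushandle() provide bus_space access;
	 * rman_get_virtual() provides a directly usable KVA, which is what
	 * the LLQ path stores in ena_dev->mem_bar.
	 */
	device_printf(dev, "BAR%d mapped at %p\n", bar,
	    rman_get_virtual(*resp));
	return (0);
}

On detach, or on an error path like the one above, the mapping is undone with bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(*resp), *resp).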
2620static inline 2621void set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2622 struct ena_admin_feature_llq_desc *llq) | 2602static inline void 2603set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2604 struct ena_admin_feature_llq_desc *llq) |
2623{ | 2605{ |
2624 | |
2625 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 2626 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 2627 llq_config->llq_num_decs_before_header = 2628 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; | 2606 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 2607 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 2608 llq_config->llq_num_decs_before_header = 2609 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
2629 if ((llq->entry_size_ctrl_supported & 2630 ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && 2631 ena_force_large_llq_header) { | 2610 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 2611 0 && ena_force_large_llq_header) { |
2632 llq_config->llq_ring_entry_size = 2633 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2634 llq_config->llq_ring_entry_size_value = 256; 2635 } else { 2636 llq_config->llq_ring_entry_size = 2637 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 2638 llq_config->llq_ring_entry_size_value = 128; 2639 } --- 27 unchanged lines hidden (view full) --- 2667 2668 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2669 max_queue_ext->max_per_packet_tx_descs); 2670 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2671 max_queue_ext->max_per_packet_rx_descs); 2672 } else { 2673 struct ena_admin_queue_feature_desc *max_queues = 2674 &ctx->get_feat_ctx->max_queues; | 2612 llq_config->llq_ring_entry_size = 2613 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2614 llq_config->llq_ring_entry_size_value = 256; 2615 } else { 2616 llq_config->llq_ring_entry_size = 2617 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 2618 llq_config->llq_ring_entry_size_value = 128; 2619 } --- 27 unchanged lines hidden (view full) --- 2647 2648 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2649 max_queue_ext->max_per_packet_tx_descs); 2650 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2651 max_queue_ext->max_per_packet_rx_descs); 2652 } else { 2653 struct ena_admin_queue_feature_desc *max_queues = 2654 &ctx->get_feat_ctx->max_queues; |
2675 max_rx_queue_size = min_t(uint32_t, 2676 max_queues->max_cq_depth, | 2655 max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth, |
2677 max_queues->max_sq_depth); 2678 max_tx_queue_size = max_queues->max_cq_depth; 2679 2680 if (ena_dev->tx_mem_queue_type == 2681 ENA_ADMIN_PLACEMENT_POLICY_DEV) 2682 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 2683 llq->max_llq_depth); 2684 else --- 12 unchanged lines hidden (view full) --- 2697 2698 /* 2699 * When forcing large headers, we multiply the entry size by 2, 2700 * and therefore divide the queue size by 2, leaving the amount 2701 * of memory used by the queues unchanged. 2702 */ 2703 if (ena_force_large_llq_header) { 2704 if ((llq->entry_size_ctrl_supported & | 2656 max_queues->max_sq_depth); 2657 max_tx_queue_size = max_queues->max_cq_depth; 2658 2659 if (ena_dev->tx_mem_queue_type == 2660 ENA_ADMIN_PLACEMENT_POLICY_DEV) 2661 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 2662 llq->max_llq_depth); 2663 else --- 12 unchanged lines hidden (view full) --- 2676 2677 /* 2678 * When forcing large headers, we multiply the entry size by 2, 2679 * and therefore divide the queue size by 2, leaving the amount 2680 * of memory used by the queues unchanged. 2681 */ 2682 if (ena_force_large_llq_header) { 2683 if ((llq->entry_size_ctrl_supported & |
2705 ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && | 2684 ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && |
2706 ena_dev->tx_mem_queue_type == | 2685 ena_dev->tx_mem_queue_type == |
2707 ENA_ADMIN_PLACEMENT_POLICY_DEV) { | 2686 ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
2708 max_tx_queue_size /= 2; 2709 ena_log(ctx->pdev, INFO, 2710 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 2711 max_tx_queue_size); 2712 } else { 2713 ena_log(ctx->pdev, WARN, 2714 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 2715 } --- 36 unchanged lines hidden (view full) --- 2752 host_info->os_type = ENA_ADMIN_OS_FREEBSD; 2753 host_info->kernel_ver = osreldate; 2754 2755 sprintf(host_info->kernel_ver_str, "%d", osreldate); 2756 host_info->os_dist = 0; 2757 strncpy(host_info->os_dist_str, osrelease, 2758 sizeof(host_info->os_dist_str) - 1); 2759 | 2687 max_tx_queue_size /= 2; 2688 ena_log(ctx->pdev, INFO, 2689 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 2690 max_tx_queue_size); 2691 } else { 2692 ena_log(ctx->pdev, WARN, 2693 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 2694 } --- 36 unchanged lines hidden (view full) --- 2731 host_info->os_type = ENA_ADMIN_OS_FREEBSD; 2732 host_info->kernel_ver = osreldate; 2733 2734 sprintf(host_info->kernel_ver_str, "%d", osreldate); 2735 host_info->os_dist = 0; 2736 strncpy(host_info->os_dist_str, osrelease, 2737 sizeof(host_info->os_dist_str) - 1); 2738 |
2760 host_info->driver_version = 2761 (DRV_MODULE_VER_MAJOR) | 2762 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2763 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); | 2739 host_info->driver_version = (DRV_MODULE_VER_MAJOR) | 2740 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2741 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); |
2764 host_info->num_cpus = mp_ncpus; 2765 host_info->driver_supported_features = 2766 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 2767 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 2768 2769 rc = ena_com_set_host_attributes(ena_dev); 2770 if (unlikely(rc != 0)) { 2771 if (rc == EOPNOTSUPP) --- 9 unchanged lines hidden (view full) --- 2781err: 2782 ena_com_delete_host_info(ena_dev); 2783} 2784 2785static int 2786ena_device_init(struct ena_adapter *adapter, device_t pdev, 2787 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 2788{ | 2742 host_info->num_cpus = mp_ncpus; 2743 host_info->driver_supported_features = 2744 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 2745 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 2746 2747 rc = ena_com_set_host_attributes(ena_dev); 2748 if (unlikely(rc != 0)) { 2749 if (rc == EOPNOTSUPP) --- 9 unchanged lines hidden (view full) --- 2759err: 2760 ena_com_delete_host_info(ena_dev); 2761} 2762 2763static int 2764ena_device_init(struct ena_adapter *adapter, device_t pdev, 2765 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 2766{ |
2789 struct ena_com_dev* ena_dev = adapter->ena_dev; | 2767 struct ena_com_dev *ena_dev = adapter->ena_dev; |
2790 bool readless_supported; 2791 uint32_t aenq_groups; 2792 int dma_width; 2793 int rc; 2794 2795 rc = ena_com_mmio_reg_read_request_init(ena_dev); 2796 if (unlikely(rc != 0)) { 2797 ena_log(pdev, ERR, "failed to init mmio read less\n"); --- 73 unchanged lines hidden (view full) --- 2871 ena_com_delete_host_info(ena_dev); 2872 ena_com_admin_destroy(ena_dev); 2873err_mmio_read_less: 2874 ena_com_mmio_reg_read_request_destroy(ena_dev); 2875 2876 return (rc); 2877} 2878 | 2768 bool readless_supported; 2769 uint32_t aenq_groups; 2770 int dma_width; 2771 int rc; 2772 2773 rc = ena_com_mmio_reg_read_request_init(ena_dev); 2774 if (unlikely(rc != 0)) { 2775 ena_log(pdev, ERR, "failed to init mmio read less\n"); --- 73 unchanged lines hidden (view full) --- 2849 ena_com_delete_host_info(ena_dev); 2850 ena_com_admin_destroy(ena_dev); 2851err_mmio_read_less: 2852 ena_com_mmio_reg_read_request_destroy(ena_dev); 2853 2854 return (rc); 2855} 2856 |
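A few hunks above, ena_set_host_attributes() packs the driver version into host_info->driver_version as major | (minor << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | (subminor << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT). A small worked example of that packing, using hypothetical shift values of 8 and 16 and a hypothetical 2.6.2 version (the real constants live in ena_admin_defs.h and may differ):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the ENA_ADMIN_HOST_INFO_*_SHIFT constants. */
#define XX_MINOR_SHIFT		8
#define XX_SUB_MINOR_SHIFT	16

int
main(void)
{
	uint32_t major = 2, minor = 6, subminor = 2;	/* hypothetical version */
	uint32_t packed;

	packed = major | (minor << XX_MINOR_SHIFT) |
	    (subminor << XX_SUB_MINOR_SHIFT);
	printf("driver_version = 0x%08x\n", packed);	/* prints 0x00020602 */
	return (0);
}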
2879static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) | 2857static int 2858ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) |
2880{ 2881 struct ena_com_dev *ena_dev = adapter->ena_dev; 2882 int rc; 2883 2884 rc = ena_enable_msix(adapter); 2885 if (unlikely(rc != 0)) { 2886 ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 2887 return (rc); --- 15 unchanged lines hidden (view full) --- 2903 2904err_disable_msix: 2905 ena_disable_msix(adapter); 2906 2907 return (rc); 2908} 2909 2910/* Function called on ENA_ADMIN_KEEP_ALIVE event */ | 2859{ 2860 struct ena_com_dev *ena_dev = adapter->ena_dev; 2861 int rc; 2862 2863 rc = ena_enable_msix(adapter); 2864 if (unlikely(rc != 0)) { 2865 ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 2866 return (rc); --- 15 unchanged lines hidden (view full) --- 2882 2883err_disable_msix: 2884 ena_disable_msix(adapter); 2885 2886 return (rc); 2887} 2888 2889/* Function called on ENA_ADMIN_KEEP_ALIVE event */ |
2911static void ena_keep_alive_wd(void *adapter_data, 2912 struct ena_admin_aenq_entry *aenq_e) | 2890static void 2891ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) |
2913{ 2914 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 2915 struct ena_admin_aenq_keep_alive_desc *desc; 2916 sbintime_t stime; 2917 uint64_t rx_drops; 2918 uint64_t tx_drops; 2919 2920 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; --- 5 unchanged lines hidden (view full) --- 2926 counter_u64_zero(adapter->hw_stats.tx_drops); 2927 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 2928 2929 stime = getsbinuptime(); 2930 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 2931} 2932 2933/* Check for keep alive expiration */ | 2892{ 2893 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 2894 struct ena_admin_aenq_keep_alive_desc *desc; 2895 sbintime_t stime; 2896 uint64_t rx_drops; 2897 uint64_t tx_drops; 2898 2899 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; --- 5 unchanged lines hidden (view full) --- 2905 counter_u64_zero(adapter->hw_stats.tx_drops); 2906 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 2907 2908 stime = getsbinuptime(); 2909 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 2910} 2911 2912/* Check for keep alive expiration */ |
2934static void check_for_missing_keep_alive(struct ena_adapter *adapter) | 2913static void 2914check_for_missing_keep_alive(struct ena_adapter *adapter) |
2935{ 2936 sbintime_t timestamp, time; 2937 2938 if (adapter->wd_active == 0) 2939 return; 2940 2941 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2942 return; 2943 2944 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 2945 time = getsbinuptime() - timestamp; 2946 if (unlikely(time > adapter->keep_alive_timeout)) { 2947 ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 2948 counter_u64_add(adapter->dev_stats.wd_expired, 1); 2949 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 2950 } 2951} 2952 2953/* Check if admin queue is enabled */ | 2915{ 2916 sbintime_t timestamp, time; 2917 2918 if (adapter->wd_active == 0) 2919 return; 2920 2921 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2922 return; 2923 2924 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 2925 time = getsbinuptime() - timestamp; 2926 if (unlikely(time > adapter->keep_alive_timeout)) { 2927 ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 2928 counter_u64_add(adapter->dev_stats.wd_expired, 1); 2929 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 2930 } 2931} 2932 2933/* Check if admin queue is enabled */ |
2954static void check_for_admin_com_state(struct ena_adapter *adapter) | 2934static void 2935check_for_admin_com_state(struct ena_adapter *adapter) |
2955{ | 2936{ |
2956 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 2957 false)) { | 2937 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { |
2958 ena_log(adapter->pdev, ERR, 2959 "ENA admin queue is not in running state!\n"); 2960 counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 2961 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 2962 } 2963} 2964 2965static int 2966check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2967 struct ena_ring *rx_ring) 2968{ 2969 if (likely(atomic_load_8(&rx_ring->first_interrupt))) 2970 return (0); 2971 2972 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2973 return (0); 2974 2975 rx_ring->no_interrupt_event_cnt++; 2976 | 2938 ena_log(adapter->pdev, ERR, 2939 "ENA admin queue is not in running state!\n"); 2940 counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 2941 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 2942 } 2943} 2944 2945static int 2946check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2947 struct ena_ring *rx_ring) 2948{ 2949 if (likely(atomic_load_8(&rx_ring->first_interrupt))) 2950 return (0); 2951 2952 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2953 return (0); 2954 2955 rx_ring->no_interrupt_event_cnt++; 2956 |
2977 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 2978 ena_log(adapter->pdev, ERR, "Potential MSIX issue on Rx side " 2979 "Queue = %d. Reset the device\n", rx_ring->qid); | 2957 if (rx_ring->no_interrupt_event_cnt == 2958 ENA_MAX_NO_INTERRUPT_ITERATIONS) { 2959 ena_log(adapter->pdev, ERR, 2960 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", 2961 rx_ring->qid); |
2980 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 2981 return (EIO); 2982 } 2983 2984 return (0); 2985} 2986 2987static int --- 24 unchanged lines hidden (view full) --- 3012 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && 3013 time_offset > 2 * adapter->missing_tx_timeout)) { 3014 /* 3015 * If after graceful period interrupt is still not 3016 * received, we schedule a reset. 3017 */ 3018 ena_log(pdev, ERR, 3019 "Potential MSIX issue on Tx side Queue = %d. " | 2962 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 2963 return (EIO); 2964 } 2965 2966 return (0); 2967} 2968 2969static int --- 24 unchanged lines hidden (view full) --- 2994 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && 2995 time_offset > 2 * adapter->missing_tx_timeout)) { 2996 /* 2997 * If after graceful period interrupt is still not 2998 * received, we schedule a reset. 2999 */ 3000 ena_log(pdev, ERR, 3001 "Potential MSIX issue on Tx side Queue = %d. " |
3020 "Reset the device\n", tx_ring->qid); | 3002 "Reset the device\n", 3003 tx_ring->qid); |
3021 ena_trigger_reset(adapter, 3022 ENA_REGS_RESET_MISS_INTERRUPT); 3023 return (EIO); 3024 } 3025 3026 /* Check again if packet is still waiting */ 3027 if (unlikely(time_offset > adapter->missing_tx_timeout)) { 3028 3029 if (!tx_buf->print_once) { 3030 time_since_last_cleanup = TICKS_2_USEC(ticks - 3031 tx_ring->tx_last_cleanup_ticks); | 3004 ena_trigger_reset(adapter, 3005 ENA_REGS_RESET_MISS_INTERRUPT); 3006 return (EIO); 3007 } 3008 3009 /* Check again if packet is still waiting */ 3010 if (unlikely(time_offset > adapter->missing_tx_timeout)) { 3011 3012 if (!tx_buf->print_once) { 3013 time_since_last_cleanup = TICKS_2_USEC(ticks - 3014 tx_ring->tx_last_cleanup_ticks); |
3032 missing_tx_comp_to = 3033 sbttoms(adapter->missing_tx_timeout); 3034 ena_log(pdev, WARN, "Found a Tx that wasn't " 3035 "completed on time, qid %d, index %d." 3036 "%d usecs have passed since last cleanup." 3037 "Missing Tx timeout value %d msecs.\n", | 3015 missing_tx_comp_to = sbttoms( 3016 adapter->missing_tx_timeout); 3017 ena_log(pdev, WARN, 3018 "Found a Tx that wasn't completed on time, qid %d, index %d." 3019 "%d usecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n", |
3038 tx_ring->qid, i, time_since_last_cleanup, 3039 missing_tx_comp_to); 3040 } 3041 3042 tx_buf->print_once = true; 3043 missed_tx++; 3044 } 3045 } --- 83 unchanged lines hidden (view full) --- 3129 return; 3130 3131 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3132 return; 3133 3134 for (i = 0; i < adapter->num_io_queues; i++) { 3135 rx_ring = &adapter->rx_ring[i]; 3136 | 3020 tx_ring->qid, i, time_since_last_cleanup, 3021 missing_tx_comp_to); 3022 } 3023 3024 tx_buf->print_once = true; 3025 missed_tx++; 3026 } 3027 } --- 83 unchanged lines hidden (view full) --- 3111 return; 3112 3113 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3114 return; 3115 3116 for (i = 0; i < adapter->num_io_queues; i++) { 3117 rx_ring = &adapter->rx_ring[i]; 3118 |
3137 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); | 3119 refill_required = ena_com_free_q_entries( 3120 rx_ring->ena_com_io_sq); |
3138 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3139 rx_ring->empty_rx_queue++; 3140 | 3121 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3122 rx_ring->empty_rx_queue++; 3123 |
3141 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { | 3124 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { |
3142 counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3143 1); 3144 3145 ena_log(adapter->pdev, WARN, 3146 "Rx ring %d is stalled. Triggering the refill function\n", 3147 i); 3148 3149 taskqueue_enqueue(rx_ring->que->cleanup_tq, 3150 &rx_ring->que->cleanup_task); 3151 rx_ring->empty_rx_queue = 0; 3152 } 3153 } else { 3154 rx_ring->empty_rx_queue = 0; 3155 } 3156 } 3157} 3158 | 3125 counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3126 1); 3127 3128 ena_log(adapter->pdev, WARN, 3129 "Rx ring %d is stalled. Triggering the refill function\n", 3130 i); 3131 3132 taskqueue_enqueue(rx_ring->que->cleanup_tq, 3133 &rx_ring->que->cleanup_task); 3134 rx_ring->empty_rx_queue = 0; 3135 } 3136 } else { 3137 rx_ring->empty_rx_queue = 0; 3138 } 3139 } 3140} 3141 |
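The watchdog helpers in this area, check_for_missing_keep_alive() above and the hardware-hint handling below, do their time math in sbintime_t units: timestamps are taken with getsbinuptime(), millisecond hints are scaled with SBT_1MS, and sbttoms() converts back for log messages. A minimal sketch of that pattern with a hypothetical watchdog structure (the struct and function names are illustrative, not the driver's):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>

/* Hypothetical watchdog state, for illustration only. */
struct xx_watchdog {
	sbintime_t last_heartbeat;	/* stored by the event handler */
	sbintime_t timeout;		/* e.g. SBT_1MS * hint_in_msecs */
};

/* Return true when the heartbeat is overdue. */
static bool
xx_watchdog_expired(const struct xx_watchdog *wd)
{
	sbintime_t elapsed;

	elapsed = getsbinuptime() - wd->last_heartbeat;
	if (elapsed > wd->timeout) {
		printf("watchdog expired, %jd ms since last heartbeat\n",
		    (intmax_t)sbttoms(elapsed));
		return (true);
	}
	return (false);
}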
3159static void ena_update_hints(struct ena_adapter *adapter, 3160 struct ena_admin_ena_hw_hints *hints) | 3142static void 3143ena_update_hints(struct ena_adapter *adapter, 3144 struct ena_admin_ena_hw_hints *hints) |
3161{ 3162 struct ena_com_dev *ena_dev = adapter->ena_dev; 3163 3164 if (hints->admin_completion_tx_timeout) 3165 ena_dev->admin_queue.completion_timeout = 3166 hints->admin_completion_tx_timeout * 1000; 3167 3168 if (hints->mmio_read_timeout) 3169 /* convert to usec */ | 3145{ 3146 struct ena_com_dev *ena_dev = adapter->ena_dev; 3147 3148 if (hints->admin_completion_tx_timeout) 3149 ena_dev->admin_queue.completion_timeout = 3150 hints->admin_completion_tx_timeout * 1000; 3151 3152 if (hints->mmio_read_timeout) 3153 /* convert to usec */ |
3170 ena_dev->mmio_read.reg_read_to = 3171 hints->mmio_read_timeout * 1000; | 3154 ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; |
3172 3173 if (hints->missed_tx_completion_count_threshold_to_reset) 3174 adapter->missing_tx_threshold = 3175 hints->missed_tx_completion_count_threshold_to_reset; 3176 3177 if (hints->missing_tx_completion_timeout) { 3178 if (hints->missing_tx_completion_timeout == | 3155 3156 if (hints->missed_tx_completion_count_threshold_to_reset) 3157 adapter->missing_tx_threshold = 3158 hints->missed_tx_completion_count_threshold_to_reset; 3159 3160 if (hints->missing_tx_completion_timeout) { 3161 if (hints->missing_tx_completion_timeout == |
3179 ENA_HW_HINTS_NO_TIMEOUT) | 3162 ENA_HW_HINTS_NO_TIMEOUT) |
3180 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 3181 else | 3163 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 3164 else |
3182 adapter->missing_tx_timeout = 3183 SBT_1MS * hints->missing_tx_completion_timeout; | 3165 adapter->missing_tx_timeout = SBT_1MS * 3166 hints->missing_tx_completion_timeout; |
3184 } 3185 3186 if (hints->driver_watchdog_timeout) { 3187 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 3188 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 3189 else | 3167 } 3168 3169 if (hints->driver_watchdog_timeout) { 3170 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 3171 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 3172 else |
3190 adapter->keep_alive_timeout = 3191 SBT_1MS * hints->driver_watchdog_timeout; | 3173 adapter->keep_alive_timeout = SBT_1MS * 3174 hints->driver_watchdog_timeout; |
3192 } 3193} 3194 3195/** 3196 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3197 * @adapter: ENA device adapter 3198 * 3199 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics --- 146 unchanged lines hidden (view full) --- 3346 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3347 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3348} 3349 3350static int 3351ena_device_validate_params(struct ena_adapter *adapter, 3352 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3353{ | 3175 } 3176} 3177 3178/** 3179 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3180 * @adapter: ENA device adapter 3181 * 3182 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics --- 146 unchanged lines hidden (view full) --- 3329 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3330 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3331} 3332 3333static int 3334ena_device_validate_params(struct ena_adapter *adapter, 3335 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3336{ |
3354 | |
3355 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 3356 ETHER_ADDR_LEN) != 0) { 3357 ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); 3358 return (EINVAL); 3359 } 3360 3361 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 3362 ena_log(adapter->pdev, ERR, --- 46 unchanged lines hidden (view full) --- 3409 } 3410 3411 /* 3412 * Effective value of used MSIX vectors should be the same as before 3413 * `ena_destroy_device()`, if possible, or closest to it if less vectors 3414 * are available. 3415 */ 3416 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues) | 3337 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 3338 ETHER_ADDR_LEN) != 0) { 3339 ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); 3340 return (EINVAL); 3341 } 3342 3343 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 3344 ena_log(adapter->pdev, ERR, --- 46 unchanged lines hidden (view full) --- 3391 } 3392 3393 /* 3394 * Effective value of used MSIX vectors should be the same as before 3395 * `ena_destroy_device()`, if possible, or closest to it if less vectors 3396 * are available. 3397 */ 3398 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues) |
3417 adapter->num_io_queues = 3418 adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; | 3399 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; |
3419 3420 /* Re-initialize rings basic information */ 3421 ena_init_io_rings(adapter); 3422 3423 /* If the interface was up before the reset bring it up */ 3424 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 3425 rc = ena_up(adapter); 3426 if (rc != 0) { --- 108 unchanged lines hidden (view full) --- 3535 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3536 M_WAITOK | M_ZERO); 3537 3538 adapter->ena_dev = ena_dev; 3539 ena_dev->dmadev = pdev; 3540 3541 rid = PCIR_BAR(ENA_REG_BAR); 3542 adapter->memory = NULL; | 3400 3401 /* Re-initialize rings basic information */ 3402 ena_init_io_rings(adapter); 3403 3404 /* If the interface was up before the reset bring it up */ 3405 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 3406 rc = ena_up(adapter); 3407 if (rc != 0) { --- 108 unchanged lines hidden (view full) --- 3516 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3517 M_WAITOK | M_ZERO); 3518 3519 adapter->ena_dev = ena_dev; 3520 ena_dev->dmadev = pdev; 3521 3522 rid = PCIR_BAR(ENA_REG_BAR); 3523 adapter->memory = NULL; |
3543 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 3544 &rid, RF_ACTIVE); | 3524 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 3525 RF_ACTIVE); |
3545 if (unlikely(adapter->registers == NULL)) { 3546 ena_log(pdev, ERR, 3547 "unable to allocate bus resource: registers!\n"); 3548 rc = ENOMEM; 3549 goto err_dev_free; 3550 } 3551 3552 /* MSIx vector table may reside on BAR0 with registers or on BAR1. */ --- 9 unchanged lines hidden (view full) --- 3562 } 3563 adapter->msix_rid = msix_rid; 3564 } 3565 3566 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 3567 M_WAITOK | M_ZERO); 3568 3569 /* Store register resources */ | 3526 if (unlikely(adapter->registers == NULL)) { 3527 ena_log(pdev, ERR, 3528 "unable to allocate bus resource: registers!\n"); 3529 rc = ENOMEM; 3530 goto err_dev_free; 3531 } 3532 3533 /* MSIx vector table may reside on BAR0 with registers or on BAR1. */ --- 9 unchanged lines hidden (view full) --- 3543 } 3544 adapter->msix_rid = msix_rid; 3545 } 3546 3547 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 3548 M_WAITOK | M_ZERO); 3549 3550 /* Store register resources */ |
3570 ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 3571 rman_get_bustag(adapter->registers); 3572 ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 3573 rman_get_bushandle(adapter->registers); | 3551 ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag( 3552 adapter->registers); 3553 ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle( 3554 adapter->registers); |
3574 | 3555 |
3575 if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { | 3556 if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) { |
3576 ena_log(pdev, ERR, "failed to pmap registers bar\n"); 3577 rc = ENXIO; 3578 goto err_bus_free; 3579 } 3580 3581 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3582 3583 /* Initially clear all the flags */ --- 11 unchanged lines hidden (view full) --- 3595 3596 rc = ena_map_llq_mem_bar(pdev, ena_dev); 3597 if (unlikely(rc != 0)) { 3598 ena_log(pdev, ERR, "failed to map ENA mem bar"); 3599 goto err_com_free; 3600 } 3601 3602 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, | 3557 ena_log(pdev, ERR, "failed to pmap registers bar\n"); 3558 rc = ENXIO; 3559 goto err_bus_free; 3560 } 3561 3562 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3563 3564 /* Initially clear all the flags */ --- 11 unchanged lines hidden (view full) --- 3576 3577 rc = ena_map_llq_mem_bar(pdev, ena_dev); 3578 if (unlikely(rc != 0)) { 3579 ena_log(pdev, ERR, "failed to map ENA mem bar"); 3580 goto err_com_free; 3581 } 3582 3583 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, |
3603 &llq_config); | 3584 &llq_config); |
3604 if (unlikely(rc != 0)) { 3605 ena_log(pdev, ERR, "failed to set placement policy\n"); 3606 goto err_com_free; 3607 } 3608 3609 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | 3585 if (unlikely(rc != 0)) { 3586 ena_log(pdev, ERR, "failed to set placement policy\n"); 3587 goto err_com_free; 3588 } 3589 3590 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
3610 adapter->disable_meta_caching = 3611 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & | 3591 adapter->disable_meta_caching = !!( 3592 get_feat_ctx.llq.accel_mode.u.get.supported_flags & |
3612 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 3613 3614 adapter->keep_alive_timestamp = getsbinuptime(); 3615 3616 adapter->tx_offload_cap = get_feat_ctx.offload.tx; 3617 3618 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 3619 ETHER_ADDR_LEN); --- 66 unchanged lines hidden (view full) --- 3686 ena_log(pdev, ERR, "Error with network interface setup\n"); 3687 goto err_msix_free; 3688 } 3689 3690 /* Initialize reset task queue */ 3691 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3692 adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3693 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); | 3593 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 3594 3595 adapter->keep_alive_timestamp = getsbinuptime(); 3596 3597 adapter->tx_offload_cap = get_feat_ctx.offload.tx; 3598 3599 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 3600 ETHER_ADDR_LEN); --- 66 unchanged lines hidden (view full) --- 3667 ena_log(pdev, ERR, "Error with network interface setup\n"); 3668 goto err_msix_free; 3669 } 3670 3671 /* Initialize reset task queue */ 3672 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3673 adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3674 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); |
3694 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 3695 "%s rstq", device_get_nameunit(adapter->pdev)); | 3675 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", 3676 device_get_nameunit(adapter->pdev)); |
3696 3697 /* Initialize metrics task queue */ 3698 TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter); 3699 adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue", 3700 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq); | 3677 3678 /* Initialize metrics task queue */ 3679 TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter); 3680 adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue", 3681 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq); |
3701 taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, 3702 "%s metricsq", device_get_nameunit(adapter->pdev)); | 3682 taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq", 3683 device_get_nameunit(adapter->pdev)); |
3703 3704 /* Initialize statistics */ 3705 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 3706 sizeof(struct ena_stats_dev)); 3707 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 3708 sizeof(struct ena_hw_stats)); 3709 ena_sysctl_add_nodes(adapter); 3710 --- 155 unchanged lines hidden (view full) --- 3866 if_link_state_change(ifp, LINK_STATE_UP); 3867 } else { 3868 ena_log(adapter->pdev, INFO, "link is DOWN\n"); 3869 if_link_state_change(ifp, LINK_STATE_DOWN); 3870 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); 3871 } 3872} 3873 | 3684 3685 /* Initialize statistics */ 3686 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 3687 sizeof(struct ena_stats_dev)); 3688 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 3689 sizeof(struct ena_hw_stats)); 3690 ena_sysctl_add_nodes(adapter); 3691 --- 155 unchanged lines hidden (view full) --- 3847 if_link_state_change(ifp, LINK_STATE_UP); 3848 } else { 3849 ena_log(adapter->pdev, INFO, "link is DOWN\n"); 3850 if_link_state_change(ifp, LINK_STATE_DOWN); 3851 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); 3852 } 3853} 3854 |
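The attach path above sets up its deferred reset and metrics work with the stock taskqueue(9) recipe: TASK_INIT() the task, create a queue backed by taskqueue_thread_enqueue, start its worker thread(s), and later hand work to it with taskqueue_enqueue(). A minimal sketch of that recipe (the xx_* names are placeholders, not the driver's):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *xx_tq;
static struct task xx_task;

/* Runs in the taskqueue's worker thread. */
static void
xx_task_fn(void *ctx, int pending)
{
	/* deferred work goes here */
}

static void
xx_tq_setup(void)
{
	TASK_INIT(&xx_task, 0, xx_task_fn, NULL);
	xx_tq = taskqueue_create("xx_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &xx_tq);
	taskqueue_start_threads(&xx_tq, 1, PI_NET, "xx taskq");
}

Work is then queued from another context with taskqueue_enqueue(xx_tq, &xx_task), much as the stalled-Rx check earlier queues the per-queue cleanup task; teardown drains and frees the queue with taskqueue_drain() and taskqueue_free().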
3874static void ena_notification(void *adapter_data, 3875 struct ena_admin_aenq_entry *aenq_e) | 3855static void 3856ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) |
3876{ 3877 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3878 struct ena_admin_ena_hw_hints *hints; 3879 | 3857{ 3858 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3859 struct ena_admin_ena_hw_hints *hints; 3860 |
3880 ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, adapter->ena_dev, 3881 "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, 3882 ENA_ADMIN_NOTIFICATION); | 3861 ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 3862 adapter->ena_dev, "Invalid group(%x) expected %x\n", 3863 aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); |
3883 3884 switch (aenq_e->aenq_common_desc.syndrome) { 3885 case ENA_ADMIN_UPDATE_HINTS: 3886 hints = 3887 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 3888 ena_update_hints(adapter, hints); 3889 break; 3890 default: --- 38 unchanged lines hidden (view full) --- 3929 }, 3930 .unimplemented_handler = unimplemented_aenq_handler 3931}; 3932 3933/********************************************************************* 3934 * FreeBSD Device Interface Entry Points 3935 *********************************************************************/ 3936 | 3864 3865 switch (aenq_e->aenq_common_desc.syndrome) { 3866 case ENA_ADMIN_UPDATE_HINTS: 3867 hints = 3868 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 3869 ena_update_hints(adapter, hints); 3870 break; 3871 default: --- 38 unchanged lines hidden (view full) --- 3910 }, 3911 .unimplemented_handler = unimplemented_aenq_handler 3912}; 3913 3914/********************************************************************* 3915 * FreeBSD Device Interface Entry Points 3916 *********************************************************************/ 3917 |
3937static device_method_t ena_methods[] = { 3938 /* Device interface */ 3939 DEVMETHOD(device_probe, ena_probe), 3940 DEVMETHOD(device_attach, ena_attach), 3941 DEVMETHOD(device_detach, ena_detach), 3942 DEVMETHOD_END | 3918static device_method_t ena_methods[] = { /* Device interface */ 3919 DEVMETHOD(device_probe, ena_probe), 3920 DEVMETHOD(device_attach, ena_attach), 3921 DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END |
3943}; 3944 3945static driver_t ena_driver = { | 3922}; 3923 3924static driver_t ena_driver = { |
3946 "ena", ena_methods, sizeof(struct ena_adapter), | 3925 "ena", 3926 ena_methods, 3927 sizeof(struct ena_adapter), |
3947}; 3948 3949DRIVER_MODULE(ena, pci, ena_driver, 0, 0); 3950MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 3951 nitems(ena_vendor_info_array) - 1); 3952MODULE_DEPEND(ena, pci, 1, 1, 1); 3953MODULE_DEPEND(ena, ether, 1, 1, 1); 3954#ifdef DEV_NETMAP 3955MODULE_DEPEND(ena, netmap, 1, 1, 1); 3956#endif /* DEV_NETMAP */ 3957 3958/*********************************************************************/ | 3928}; 3929 3930DRIVER_MODULE(ena, pci, ena_driver, 0, 0); 3931MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 3932 nitems(ena_vendor_info_array) - 1); 3933MODULE_DEPEND(ena, pci, 1, 1, 1); 3934MODULE_DEPEND(ena, ether, 1, 1, 1); 3935#ifdef DEV_NETMAP 3936MODULE_DEPEND(ena, netmap, 1, 1, 1); 3937#endif /* DEV_NETMAP */ 3938 3939/*********************************************************************/ |