xref: /linux/drivers/net/ethernet/amazon/ena/ena_netdev.c (revision 6b8e327cfa2dfb9da2bd70326494a1f5ca9968f7)
/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

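/* ena_tx_timeout - netdev watchdog callback, invoked when a Tx queue
 * is considered hung
 * @dev: network interface device structure
 *
 * Schedules a device reset by setting ENA_FLAG_TRIGGER_RESET and
 * recording the reset reason; the reset itself is performed
 * asynchronously.
 */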
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a reset or
	 * that one was already triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timeout\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

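/* ena_change_mtu - Change the Maximum Transfer Unit
 * @dev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Notifies the device of the new MTU and, on success, propagates it
 * to the Rx rings and the netdev.
 *
 * Return 0 on success, negative on failure
 */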
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

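/* ena_init_rx_cpu_rmap - allocate the reverse IRQ-to-CPU map used by
 * accelerated RFS and register every I/O queue vector in it
 * @adapter: board private structure
 *
 * No-op when CONFIG_RFS_ACCEL is not set.
 *
 * Return 0 on success, negative on failure
 */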
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for all queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

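/* validate_rx_req_id - check that an Rx request id is within the ring
 * bounds; an out-of-range id indicates a device error and triggers a
 * device reset
 */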
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc an extra element so that the rx path
	 * can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_rx_ids = vzalloc_node(size, node);
	if (!rx_ring->free_rx_ids) {
		rx_ring->free_rx_ids = vzalloc(size);
		if (!rx_ring->free_rx_ids) {
			vfree(rx_ring->rx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_rx_ids);
	rx_ring->free_rx_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

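/* ena_alloc_rx_page - allocate and DMA-map a page for an Rx buffer
 * @rx_ring: the ring the buffer belongs to
 * @rx_info: buffer descriptor to fill
 * @gfp: allocation flags
 *
 * Return 0 on success (or if a page is already attached), negative on
 * failure
 */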
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if the previously allocated page has not been used yet */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = ENA_PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

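/* ena_refill_rx_bufs - allocate up to @num Rx buffers and post them to
 * the device
 * @rx_ring: Rx ring to refill
 * @num: number of buffers to allocate
 *
 * Return the number of buffers actually posted; the doorbell is rung
 * only if at least one buffer was added.
 */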
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_rx_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (out of %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate Rx buffers for all queues
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling queue %d failed. allocated %d buffers out of %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

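/* validate_tx_req_id - check that a Tx completion request id is in
 * range and refers to an in-flight skb; any violation indicates a
 * device error and triggers a device reset
 */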
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}

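/* ena_clean_tx_irq - reclaim completed Tx descriptors
 * @tx_ring: Tx ring to clean
 * @budget: maximum number of packets to reclaim
 *
 * Unmaps and frees completed skbs, acknowledges the completions to the
 * device and wakes the Tx queue if enough space became available.
 *
 * Return the number of reclaimed packets.
 */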
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the ring's circular update visible to
	 * ena_start_xmit() before checking netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

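/* ena_rx_skb - build an skb from received descriptors
 * @rx_ring: Rx ring the packet arrived on
 * @ena_bufs: array of received buffer descriptors
 * @descs: number of descriptors the packet spans
 * @next_to_clean: ring position, advanced as buffers are consumed
 *
 * Small packets (up to rx_copybreak) are copied into a freshly
 * allocated linear skb so their page can be reused in place; larger
 * packets are attached to the skb as page fragments.
 */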
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the ring on which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets the driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_rx_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* The MSI-X vector is shared: the Tx and Rx CQs both hold a
	 * pointer to it, so use either one to reach the interrupt
	 * register.
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

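/* ena_update_ring_numa_node - migrate the rings' NUMA affinity to the
 * CPU currently running the napi handler, if that CPU has changed
 */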
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

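/* ena_io_poll - NAPI poll handler shared by the Tx and Rx rings of a
 * queue pair
 * @napi: napi handler
 * @budget: Rx packet budget granted by the stack
 *
 * Cleans the Tx and Rx rings and, once all pending work is done,
 * re-enables the shared interrupt after optionally updating the
 * adaptive moderation interval.
 */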
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or is down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from interrupt context (vs. from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}

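/* ena_intr_msix_mgmnt - MSI-X Interrupt Handler for the management
 * vector (admin queue completions and async event notifications)
 * @irq: interrupt number
 * @data: pointer to adapter private structure
 */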
1255 
1256 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1257 {
1258 	struct ena_adapter *adapter = (struct ena_adapter *)data;
1259 
1260 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1261 
1262 	/* Don't call the aenq handler before probe is done */
1263 	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1264 		ena_com_aenq_intr_handler(adapter->ena_dev, data);
1265 
1266 	return IRQ_HANDLED;
1267 }
1268 
1269 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1270  * @irq: interrupt number
1271  * @data: pointer to a network interface private napi device structure
1272  */
1273 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1274 {
1275 	struct ena_napi *ena_napi = data;
1276 
1277 	ena_napi->tx_ring->first_interrupt = true;
1278 	ena_napi->rx_ring->first_interrupt = true;
1279 
1280 	napi_schedule_irqoff(&ena_napi->napi);
1281 
1282 	return IRQ_HANDLED;
1283 }
1284 
1285 /* Reserve a single MSI-X vector for management (admin + aenq).
1286  * plus reserve one vector for each potential io queue.
1287  * the number of potential io queues is the minimum of what the device
1288  * supports and the number of vCPUs.
1289  */
1290 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1291 {
1292 	int msix_vecs, irq_cnt;
1293 
1294 	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1295 		netif_err(adapter, probe, adapter->netdev,
1296 			  "Error, MSI-X is already enabled\n");
1297 		return -EPERM;
1298 	}
1299 
1300 	/* Reserved the max msix vectors we might need */
1301 	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1302 
1303 	netif_dbg(adapter, probe, adapter->netdev,
1304 		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
1305 
1306 	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1307 					msix_vecs, PCI_IRQ_MSIX);
1308 
1309 	if (irq_cnt < 0) {
1310 		netif_err(adapter, probe, adapter->netdev,
1311 			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1312 		return -ENOSPC;
1313 	}
1314 
1315 	if (irq_cnt != msix_vecs) {
1316 		netif_notice(adapter, probe, adapter->netdev,
1317 			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1318 			     irq_cnt, msix_vecs);
1319 		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1320 	}
1321 
1322 	if (ena_init_rx_cpu_rmap(adapter))
1323 		netif_warn(adapter, probe, adapter->netdev,
1324 			   "Failed to map IRQs to CPUs\n");
1325 
1326 	adapter->msix_vecs = irq_cnt;
1327 	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1328 
1329 	return 0;
1330 }
1331 
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				   i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}

static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}

/* Configure the Rx forwarding (RSS): indirection table, hash function
 * and hash inputs
 */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	return 0;
}

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}

static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}

static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}

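/* ena_up - bring the interface to an operational state: request I/O
 * IRQs, allocate ring resources, create the device I/O queues, fill
 * the Rx rings and enable napi and the Tx queues
 *
 * Return 0 on success, negative on failure
 */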
static int ena_up(struct ena_adapter *adapter)
{
	int rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}

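/* ena_down - tear down the data path: stop napi and the Tx queues,
 * reset the device if a reset was triggered, destroy the I/O queues
 * and release all ring resources and I/O IRQs
 */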
static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After the queues are destroyed there won't be any new interrupts */
	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}

/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	return ena_up(adapter);
}

1871 /* ena_close - Disables a network interface
1872  * @netdev: network interface device structure
1873  *
1874  * Returns 0, this is not allowed to fail
1875  *
1876  * The close entry point is called when an interface is de-activated
1877  * by the OS.  The hardware is still under the drivers control, but
1878  * needs to be disabled.  A global MAC reset is issued to stop the
1879  * hardware, and all transmit and receive resources are freed.
1880  */
1881 static int ena_close(struct net_device *netdev)
1882 {
1883 	struct ena_adapter *adapter = netdev_priv(netdev);
1884 
1885 	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1886 
1887 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1888 		ena_down(adapter);
1889 
1890 	/* Check the device status and issue a reset if needed */
1891 	check_for_admin_com_state(adapter);
1892 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893 		netif_err(adapter, ifdown, adapter->netdev,
1894 			  "Destroy failure, restarting device\n");
1895 		ena_dump_stats_to_dmesg(adapter);
1896 		/* rtnl lock already obtained in dev_ioctl() layer */
1897 		ena_destroy_device(adapter, false);
1898 		ena_restore_device(adapter);
1899 	}
1900 
1901 	return 0;
1902 }
1903 
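/* ena_tx_csum - Fill the checksum/TSO metadata of a TX context
 * @ena_tx_ctx: TX context later handed to ena_com_prepare_tx()
 * @skb: packet being transmitted
 *
 * For TSO packets (gso_size != 0) TSO is enabled with full L4 checksum
 * offload; for CHECKSUM_PARTIAL packets partial L4 checksum offload is
 * used instead. The L3/L4 protocols and header offsets are derived
 * from the skb headers.
 */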
1904 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
1905 {
1906 	u32 mss = skb_shinfo(skb)->gso_size;
1907 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
1908 	u8 l4_protocol = 0;
1909 
1910 	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
1911 		ena_tx_ctx->l4_csum_enable = 1;
1912 		if (mss) {
1913 			ena_tx_ctx->tso_enable = 1;
1914 			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
1915 			ena_tx_ctx->l4_csum_partial = 0;
1916 		} else {
1917 			ena_tx_ctx->tso_enable = 0;
1918 			ena_meta->l4_hdr_len = 0;
1919 			ena_tx_ctx->l4_csum_partial = 1;
1920 		}
1921 
1922 		switch (ip_hdr(skb)->version) {
1923 		case IPVERSION:
1924 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
1925 			if (ip_hdr(skb)->frag_off & htons(IP_DF))
1926 				ena_tx_ctx->df = 1;
1927 			if (mss)
1928 				ena_tx_ctx->l3_csum_enable = 1;
1929 			l4_protocol = ip_hdr(skb)->protocol;
1930 			break;
1931 		case 6:
1932 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
1933 			l4_protocol = ipv6_hdr(skb)->nexthdr;
1934 			break;
1935 		default:
1936 			break;
1937 		}
1938 
1939 		if (l4_protocol == IPPROTO_TCP)
1940 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
1941 		else
1942 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
1943 
1944 		ena_meta->mss = mss;
1945 		ena_meta->l3_hdr_len = skb_network_header_len(skb);
1946 		ena_meta->l3_hdr_offset = skb_network_offset(skb);
1947 		ena_tx_ctx->meta_valid = 1;
1948 
1949 	} else {
1950 		ena_tx_ctx->meta_valid = 0;
1951 	}
1952 }
1953 
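/* Linearize the skb when its fragment count exceeds what the HW
 * scatter-gather list can describe (sgl_size), unless the header is
 * small enough to be sent separately. Linearization attempts and
 * failures are counted in the per-ring statistics.
 */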
1954 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1955 				       struct sk_buff *skb)
1956 {
1957 	int num_frags, header_len, rc;
1958 
1959 	num_frags = skb_shinfo(skb)->nr_frags;
1960 	header_len = skb_headlen(skb);
1961 
1962 	if (num_frags < tx_ring->sgl_size)
1963 		return 0;
1964 
1965 	if ((num_frags == tx_ring->sgl_size) &&
1966 	    (header_len < tx_ring->tx_max_header_size))
1967 		return 0;
1968 
1969 	u64_stats_update_begin(&tx_ring->syncp);
1970 	tx_ring->tx_stats.linearize++;
1971 	u64_stats_update_end(&tx_ring->syncp);
1972 
1973 	rc = skb_linearize(skb);
1974 	if (unlikely(rc)) {
1975 		u64_stats_update_begin(&tx_ring->syncp);
1976 		tx_ring->tx_stats.linearize_failed++;
1977 		u64_stats_update_end(&tx_ring->syncp);
1978 	}
1979 
1980 	return rc;
1981 }
1982 
1983 /* Called with netif_tx_lock. */
1984 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
1985 {
1986 	struct ena_adapter *adapter = netdev_priv(dev);
1987 	struct ena_tx_buffer *tx_info;
1988 	struct ena_com_tx_ctx ena_tx_ctx;
1989 	struct ena_ring *tx_ring;
1990 	struct netdev_queue *txq;
1991 	struct ena_com_buf *ena_buf;
1992 	void *push_hdr;
1993 	u32 len, last_frag;
1994 	u16 next_to_use;
1995 	u16 req_id;
1996 	u16 push_len;
1997 	u16 header_len;
1998 	dma_addr_t dma;
1999 	int qid, rc, nb_hw_desc;
2000 	int i = -1;
2001 
2002 	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2003 	/* Determine which tx ring the packet will be placed on */
2004 	qid = skb_get_queue_mapping(skb);
2005 	tx_ring = &adapter->tx_ring[qid];
2006 	txq = netdev_get_tx_queue(dev, qid);
2007 
2008 	rc = ena_check_and_linearize_skb(tx_ring, skb);
2009 	if (unlikely(rc))
2010 		goto error_drop_packet;
2011 
2012 	skb_tx_timestamp(skb);
2013 	len = skb_headlen(skb);
2014 
2015 	next_to_use = tx_ring->next_to_use;
2016 	req_id = tx_ring->free_tx_ids[next_to_use];
2017 	tx_info = &tx_ring->tx_buffer_info[req_id];
2018 	tx_info->num_of_bufs = 0;
2019 
2020 	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2021 	ena_buf = tx_info->bufs;
2022 	tx_info->skb = skb;
2023 
2024 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2025 		/* prepare the push buffer */
2026 		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
2027 		header_len = push_len;
2028 		push_hdr = skb->data;
2029 	} else {
2030 		push_len = 0;
2031 		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
2032 		push_hdr = NULL;
2033 	}
2034 
2035 	netif_dbg(adapter, tx_queued, dev,
2036 		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2037 		  push_hdr, push_len);
2038 
2039 	if (len > push_len) {
2040 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2041 				     len - push_len, DMA_TO_DEVICE);
2042 		if (dma_mapping_error(tx_ring->dev, dma))
2043 			goto error_report_dma_error;
2044 
2045 		ena_buf->paddr = dma;
2046 		ena_buf->len = len - push_len;
2047 
2048 		ena_buf++;
2049 		tx_info->num_of_bufs++;
2050 	}
2051 
2052 	last_frag = skb_shinfo(skb)->nr_frags;
2053 
2054 	for (i = 0; i < last_frag; i++) {
2055 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2056 
2057 		len = skb_frag_size(frag);
2058 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
2059 				       DMA_TO_DEVICE);
2060 		if (dma_mapping_error(tx_ring->dev, dma))
2061 			goto error_report_dma_error;
2062 
2063 		ena_buf->paddr = dma;
2064 		ena_buf->len = len;
2065 		ena_buf++;
2066 	}
2067 
2068 	tx_info->num_of_bufs += last_frag;
2069 
2070 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2071 	ena_tx_ctx.ena_bufs = tx_info->bufs;
2072 	ena_tx_ctx.push_header = push_hdr;
2073 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2074 	ena_tx_ctx.req_id = req_id;
2075 	ena_tx_ctx.header_len = header_len;
2076 
2077 	/* set flags and meta data */
2078 	ena_tx_csum(&ena_tx_ctx, skb);
2079 
2080 	/* prepare the packet's descriptors for the dma engine */
2081 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2082 				&nb_hw_desc);
2083 
2084 	if (unlikely(rc)) {
2085 		netif_err(adapter, tx_queued, dev,
2086 			  "failed to prepare tx bufs\n");
2087 		u64_stats_update_begin(&tx_ring->syncp);
2088 		tx_ring->tx_stats.queue_stop++;
2089 		tx_ring->tx_stats.prepare_ctx_err++;
2090 		u64_stats_update_end(&tx_ring->syncp);
2091 		netif_tx_stop_queue(txq);
2092 		goto error_unmap_dma;
2093 	}
2094 
2095 	netdev_tx_sent_queue(txq, skb->len);
2096 
2097 	u64_stats_update_begin(&tx_ring->syncp);
2098 	tx_ring->tx_stats.cnt++;
2099 	tx_ring->tx_stats.bytes += skb->len;
2100 	u64_stats_update_end(&tx_ring->syncp);
2101 
2102 	tx_info->tx_descs = nb_hw_desc;
2103 	tx_info->last_jiffies = jiffies;
2104 	tx_info->print_once = 0;
2105 
2106 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2107 		tx_ring->ring_size);
2108 
2109 	/* Stop the queue when no more space is available; the packet can need
2110 	 * up to sgl_size + 2 descriptors: one for the meta descriptor and one
2111 	 * for the header (if the header is larger than tx_max_header_size).
2112 	 */
2113 	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
2114 		     (tx_ring->sgl_size + 2))) {
2115 		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2116 			  __func__, qid);
2117 
2118 		netif_tx_stop_queue(txq);
2119 		u64_stats_update_begin(&tx_ring->syncp);
2120 		tx_ring->tx_stats.queue_stop++;
2121 		u64_stats_update_end(&tx_ring->syncp);
2122 
2123 		/* There is a rare condition where this function decides to
2124 		 * stop the queue but meanwhile clean_tx_irq updates
2125 		 * next_to_completion and terminates.
2126 		 * The queue would then remain stopped forever.
2127 		 * To solve this issue add an smp_mb() to make sure that the
2128 		 * netif_tx_stop_queue() write is visible before checking if
2129 		 * there is additional space in the queue.
2130 		 */
2131 		smp_mb();
2132 
2133 		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2134 				> ENA_TX_WAKEUP_THRESH) {
2135 			netif_tx_wake_queue(txq);
2136 			u64_stats_update_begin(&tx_ring->syncp);
2137 			tx_ring->tx_stats.queue_wakeup++;
2138 			u64_stats_update_end(&tx_ring->syncp);
2139 		}
2140 	}
2141 
2142 	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2143 		/* trigger the dma engine; ena_com_write_sq_doorbell()
2144 		 * has an mb.
2145 		 */
2146 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2147 		u64_stats_update_begin(&tx_ring->syncp);
2148 		tx_ring->tx_stats.doorbells++;
2149 		u64_stats_update_end(&tx_ring->syncp);
2150 	}
2151 
2152 	return NETDEV_TX_OK;
2153 
2154 error_report_dma_error:
2155 	u64_stats_update_begin(&tx_ring->syncp);
2156 	tx_ring->tx_stats.dma_mapping_err++;
2157 	u64_stats_update_end(&tx_ring->syncp);
2158 	netdev_warn(adapter->netdev, "failed to map skb\n");
2159 
2160 	tx_info->skb = NULL;
2161 
2162 error_unmap_dma:
2163 	if (i >= 0) {
2164 		/* save value of frag that failed */
2165 		last_frag = i;
2166 
2167 		/* start back at beginning and unmap skb */
2168 		tx_info->skb = NULL;
2169 		ena_buf = tx_info->bufs;
2170 		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2171 				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2172 
2173 		/* unmap remaining mapped pages */
2174 		for (i = 0; i < last_frag; i++) {
2175 			ena_buf++;
2176 			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2177 				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2178 		}
2179 	}
2180 
2181 error_drop_packet:
2182 
2183 	dev_kfree_skb(skb);
2184 	return NETDEV_TX_OK;
2185 }
2186 
2187 #ifdef CONFIG_NET_POLL_CONTROLLER
2188 static void ena_netpoll(struct net_device *netdev)
2189 {
2190 	struct ena_adapter *adapter = netdev_priv(netdev);
2191 	int i;
2192 
2193 	/* Don't schedule NAPI if the driver is in the middle of a reset
2194 	 * or the netdev is down.
2195 	 */
2196 
2197 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2198 	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2199 		return;
2200 
2201 	for (i = 0; i < adapter->num_queues; i++)
2202 		napi_schedule(&adapter->ena_napi[i].napi);
2203 }
2204 #endif /* CONFIG_NET_POLL_CONTROLLER */
2205 
2206 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2207 			    struct net_device *sb_dev,
2208 			    select_queue_fallback_t fallback)
2209 {
2210 	u16 qid;
2211 	/* We suspect that this is good for in-kernel network services that
2212 	 * want to loop an incoming skb's rx queue back to tx; with normal
2213 	 * user-generated traffic we will most probably not get here.
2214 	 */
2215 	if (skb_rx_queue_recorded(skb))
2216 		qid = skb_get_rx_queue(skb);
2217 	else
2218 		qid = fallback(dev, skb, NULL);
2219 
2220 	return qid;
2221 }
2222 
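/* Allocate the host info structure and report the OS type, kernel
 * version and driver version to the device. This is best effort: on
 * failure the host info is freed again and the driver continues
 * without it.
 */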
2223 static void ena_config_host_info(struct ena_com_dev *ena_dev)
2224 {
2225 	struct ena_admin_host_info *host_info;
2226 	int rc;
2227 
2228 	/* Allocate only the host info */
2229 	rc = ena_com_allocate_host_info(ena_dev);
2230 	if (rc) {
2231 		pr_err("Cannot allocate host info\n");
2232 		return;
2233 	}
2234 
2235 	host_info = ena_dev->host_attr.host_info;
2236 
2237 	host_info->os_type = ENA_ADMIN_OS_LINUX;
2238 	host_info->kernel_ver = LINUX_VERSION_CODE;
2239 	strncpy(host_info->kernel_ver_str, utsname()->version,
2240 		sizeof(host_info->kernel_ver_str) - 1);
2241 	host_info->os_dist = 0;
2242 	strncpy(host_info->os_dist_str, utsname()->release,
2243 		sizeof(host_info->os_dist_str) - 1);
2244 	host_info->driver_version =
2245 		(DRV_MODULE_VER_MAJOR) |
2246 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2247 		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2248 
2249 	rc = ena_com_set_host_attributes(ena_dev);
2250 	if (rc) {
2251 		if (rc == -EOPNOTSUPP)
2252 			pr_warn("Cannot set host attributes\n");
2253 		else
2254 			pr_err("Cannot set host attributes\n");
2255 
2256 		goto err;
2257 	}
2258 
2259 	return;
2260 
2261 err:
2262 	ena_com_delete_host_info(ena_dev);
2263 }
2264 
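/* Allocate a debug area large enough to hold every ethtool stat
 * string and its 64-bit value and register it with the device. This is
 * best effort: on failure the debug area is released and the driver
 * continues without it.
 */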
2265 static void ena_config_debug_area(struct ena_adapter *adapter)
2266 {
2267 	u32 debug_area_size;
2268 	int rc, ss_count;
2269 
2270 	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2271 	if (ss_count <= 0) {
2272 		netif_err(adapter, drv, adapter->netdev,
2273 			  "SS count is negative\n");
2274 		return;
2275 	}
2276 
2277 	/* Allocate 32 bytes for each stat string and a 64-bit value per stat */
2278 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2279 
2280 	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2281 	if (rc) {
2282 		pr_err("Cannot allocate debug area\n");
2283 		return;
2284 	}
2285 
2286 	rc = ena_com_set_host_attributes(adapter->ena_dev);
2287 	if (rc) {
2288 		if (rc == -EOPNOTSUPP)
2289 			netif_warn(adapter, drv, adapter->netdev,
2290 				   "Cannot set host attributes\n");
2291 		else
2292 			netif_err(adapter, drv, adapter->netdev,
2293 				  "Cannot set host attributes\n");
2294 		goto err;
2295 	}
2296 
2297 	return;
2298 err:
2299 	ena_com_delete_debug_area(adapter->ena_dev);
2300 }
2301 
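/* Aggregate the per-ring TX/RX packet and byte counters into @stats,
 * using u64_stats sequence counters so the 64-bit values are read
 * consistently on 32-bit systems. rx_dropped comes from the
 * device-reported drop counter. Stats are reported only while the
 * device is up.
 */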
2302 static void ena_get_stats64(struct net_device *netdev,
2303 			    struct rtnl_link_stats64 *stats)
2304 {
2305 	struct ena_adapter *adapter = netdev_priv(netdev);
2306 	struct ena_ring *rx_ring, *tx_ring;
2307 	unsigned int start;
2308 	u64 rx_drops;
2309 	int i;
2310 
2311 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2312 		return;
2313 
2314 	for (i = 0; i < adapter->num_queues; i++) {
2315 		u64 bytes, packets;
2316 
2317 		tx_ring = &adapter->tx_ring[i];
2318 
2319 		do {
2320 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2321 			packets = tx_ring->tx_stats.cnt;
2322 			bytes = tx_ring->tx_stats.bytes;
2323 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2324 
2325 		stats->tx_packets += packets;
2326 		stats->tx_bytes += bytes;
2327 
2328 		rx_ring = &adapter->rx_ring[i];
2329 
2330 		do {
2331 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2332 			packets = rx_ring->rx_stats.cnt;
2333 			bytes = rx_ring->rx_stats.bytes;
2334 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2335 
2336 		stats->rx_packets += packets;
2337 		stats->rx_bytes += bytes;
2338 	}
2339 
2340 	do {
2341 		start = u64_stats_fetch_begin_irq(&adapter->syncp);
2342 		rx_drops = adapter->dev_stats.rx_drops;
2343 	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2344 
2345 	stats->rx_dropped = rx_drops;
2346 
2347 	stats->multicast = 0;
2348 	stats->collisions = 0;
2349 
2350 	stats->rx_length_errors = 0;
2351 	stats->rx_crc_errors = 0;
2352 	stats->rx_frame_errors = 0;
2353 	stats->rx_fifo_errors = 0;
2354 	stats->rx_missed_errors = 0;
2355 	stats->tx_window_errors = 0;
2356 
2357 	stats->rx_errors = 0;
2358 	stats->tx_errors = 0;
2359 }
2360 
2361 static const struct net_device_ops ena_netdev_ops = {
2362 	.ndo_open		= ena_open,
2363 	.ndo_stop		= ena_close,
2364 	.ndo_start_xmit		= ena_start_xmit,
2365 	.ndo_select_queue	= ena_select_queue,
2366 	.ndo_get_stats64	= ena_get_stats64,
2367 	.ndo_tx_timeout		= ena_tx_timeout,
2368 	.ndo_change_mtu		= ena_change_mtu,
2369 	.ndo_set_mac_address	= NULL,
2370 	.ndo_validate_addr	= eth_validate_addr,
2371 #ifdef CONFIG_NET_POLL_CONTROLLER
2372 	.ndo_poll_controller	= ena_netpoll,
2373 #endif /* CONFIG_NET_POLL_CONTROLLER */
2374 };
2375 
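/* After a reset, verify that the device still reports the same MAC
 * address, enough SQs/CQs for the configured number of queues and a
 * max MTU no smaller than the current netdev MTU.
 */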
2376 static int ena_device_validate_params(struct ena_adapter *adapter,
2377 				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
2378 {
2379 	struct net_device *netdev = adapter->netdev;
2380 	int rc;
2381 
2382 	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2383 			      adapter->mac_addr);
2384 	if (!rc) {
2385 		netif_err(adapter, drv, netdev,
2386 			  "Error, mac address are different\n");
2387 		return -EINVAL;
2388 	}
2389 
2390 	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2391 	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2392 		netif_err(adapter, drv, netdev,
2393 			  "Error, device doesn't support enough queues\n");
2394 		return -EINVAL;
2395 	}
2396 
2397 	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2398 		netif_err(adapter, drv, netdev,
2399 			  "Error, device max mtu is smaller than netdev MTU\n");
2400 		return -EINVAL;
2401 	}
2402 
2403 	return 0;
2404 }
2405 
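/* Bring the device to a state where IO queues can be created: set up
 * mmio (readless) register access, reset the device, validate its
 * version, set the DMA masks according to the reported DMA width,
 * initialize the admin queue in polling mode, fetch the device
 * features and configure the AENQ groups. *wd_state reflects whether
 * the keep-alive AENQ group was enabled.
 */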
2406 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2407 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
2408 			   bool *wd_state)
2409 {
2410 	struct device *dev = &pdev->dev;
2411 	bool readless_supported;
2412 	u32 aenq_groups;
2413 	int dma_width;
2414 	int rc;
2415 
2416 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
2417 	if (rc) {
2418 		dev_err(dev, "failed to init mmio read less\n");
2419 		return rc;
2420 	}
2421 
2422 	/* The PCIe configuration space revision id indicates whether mmio
2423 	 * register read is disabled
2424 	 */
2425 	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2426 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2427 
2428 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2429 	if (rc) {
2430 		dev_err(dev, "Can not reset device\n");
2431 		goto err_mmio_read_less;
2432 	}
2433 
2434 	rc = ena_com_validate_version(ena_dev);
2435 	if (rc) {
2436 		dev_err(dev, "device version is too low\n");
2437 		goto err_mmio_read_less;
2438 	}
2439 
2440 	dma_width = ena_com_get_dma_width(ena_dev);
2441 	if (dma_width < 0) {
2442 		dev_err(dev, "Invalid dma width value %d", dma_width);
2443 		rc = dma_width;
2444 		goto err_mmio_read_less;
2445 	}
2446 
2447 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2448 	if (rc) {
2449 		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2450 		goto err_mmio_read_less;
2451 	}
2452 
2453 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2454 	if (rc) {
2455 		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
2456 			rc);
2457 		goto err_mmio_read_less;
2458 	}
2459 
2460 	/* ENA admin level init */
2461 	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
2462 	if (rc) {
2463 		dev_err(dev,
2464 			"Can not initialize ena admin queue with device\n");
2465 		goto err_mmio_read_less;
2466 	}
2467 
2468 	/* To enable the msix interrupts the driver needs to know the number
2469 	 * of queues. So the driver uses polling mode to retrieve this
2470 	 * information
2471 	 */
2472 	ena_com_set_admin_polling_mode(ena_dev, true);
2473 
2474 	ena_config_host_info(ena_dev);
2475 
2476 	/* Get Device Attributes */
2477 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2478 	if (rc) {
2479 		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2480 		goto err_admin_init;
2481 	}
2482 
2483 	/* Try to turn on all the available aenq groups */
2484 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2485 		BIT(ENA_ADMIN_FATAL_ERROR) |
2486 		BIT(ENA_ADMIN_WARNING) |
2487 		BIT(ENA_ADMIN_NOTIFICATION) |
2488 		BIT(ENA_ADMIN_KEEP_ALIVE);
2489 
2490 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
2491 
2492 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2493 	if (rc) {
2494 		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2495 		goto err_admin_init;
2496 	}
2497 
2498 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2499 
2500 	return 0;
2501 
2502 err_admin_init:
2503 	ena_com_delete_host_info(ena_dev);
2504 	ena_com_admin_destroy(ena_dev);
2505 err_mmio_read_less:
2506 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2507 
2508 	return rc;
2509 }
2510 
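/* Reserve the MSI-X vectors (one for management plus @io_vectors),
 * request the management interrupt, then switch the admin queue from
 * polling to interrupt mode and enable AENQ notifications.
 */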
2511 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2512 						    int io_vectors)
2513 {
2514 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2515 	struct device *dev = &adapter->pdev->dev;
2516 	int rc;
2517 
2518 	rc = ena_enable_msix(adapter, io_vectors);
2519 	if (rc) {
2520 		dev_err(dev, "Can not reserve msix vectors\n");
2521 		return rc;
2522 	}
2523 
2524 	ena_setup_mgmnt_intr(adapter);
2525 
2526 	rc = ena_request_mgmnt_irq(adapter);
2527 	if (rc) {
2528 		dev_err(dev, "Can not setup management interrupts\n");
2529 		goto err_disable_msix;
2530 	}
2531 
2532 	ena_com_set_admin_polling_mode(ena_dev, false);
2533 
2534 	ena_com_admin_aenq_enable(ena_dev);
2535 
2536 	return 0;
2537 
2538 err_disable_msix:
2539 	ena_disable_msix(adapter);
2540 
2541 	return rc;
2542 }
2543 
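/* Tear down the device in preparation for a reset or removal: stop
 * the timer service, bring the interface down if it was up, reset the
 * device so it stops accessing host memory, then release the
 * management IRQ, the MSI-X vectors and the admin queue resources.
 * When @graceful is false the admin queue is first marked as not
 * running. Callers are expected to hold the rtnl lock.
 */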
2544 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2545 {
2546 	struct net_device *netdev = adapter->netdev;
2547 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2548 	bool dev_up;
2549 
2550 	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2551 		return;
2552 
2553 	netif_carrier_off(netdev);
2554 
2555 	del_timer_sync(&adapter->timer_service);
2556 
2557 	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2558 	adapter->dev_up_before_reset = dev_up;
2559 
2560 	if (!graceful)
2561 		ena_com_set_admin_running_state(ena_dev, false);
2562 
2563 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2564 		ena_down(adapter);
2565 
2566 	/* Before releasing the ENA resources, a device reset is required
2567 	 * (to prevent the device from accessing them).
2568 	 * In case the reset flag is set and the device is up, ena_down()
2569 	 * already performs the reset, so it can be skipped.
2570 	 */
2571 	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2572 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2573 
2574 	ena_free_mgmnt_irq(adapter);
2575 
2576 	ena_disable_msix(adapter);
2577 
2578 	ena_com_abort_admin_commands(ena_dev);
2579 
2580 	ena_com_wait_for_abort_completion(ena_dev);
2581 
2582 	ena_com_admin_destroy(ena_dev);
2583 
2584 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2585 
2586 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2587 
2588 	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2589 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2590 }
2591 
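/* Counterpart of ena_destroy_device(): re-initialize the device,
 * validate that its parameters still match the adapter configuration,
 * re-enable MSI-X and the admin interrupts and bring the interface
 * back up if it was up before the reset. The ONGOING_RESET flag guards
 * against racing with the AENQ link state handler. Callers are
 * expected to hold the rtnl lock.
 */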
2592 static int ena_restore_device(struct ena_adapter *adapter)
2593 {
2594 	struct ena_com_dev_get_features_ctx get_feat_ctx;
2595 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2596 	struct pci_dev *pdev = adapter->pdev;
2597 	bool wd_state;
2598 	int rc;
2599 
2600 	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2601 	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2602 	if (rc) {
2603 		dev_err(&pdev->dev, "Can not initialize device\n");
2604 		goto err;
2605 	}
2606 	adapter->wd_state = wd_state;
2607 
2608 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
2609 	if (rc) {
2610 		dev_err(&pdev->dev, "Validation of device parameters failed\n");
2611 		goto err_device_destroy;
2612 	}
2613 
2614 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2615 	/* Make sure we don't have a race with the AENQ link state handler */
2616 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2617 		netif_carrier_on(adapter->netdev);
2618 
2619 	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2620 						      adapter->num_queues);
2621 	if (rc) {
2622 		dev_err(&pdev->dev, "Enable MSI-X failed\n");
2623 		goto err_device_destroy;
2624 	}
2625 	/* If the interface was up before the reset, bring it up */
2626 	if (adapter->dev_up_before_reset) {
2627 		rc = ena_up(adapter);
2628 		if (rc) {
2629 			dev_err(&pdev->dev, "Failed to create I/O queues\n");
2630 			goto err_disable_msix;
2631 		}
2632 	}
2633 
2634 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2635 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2636 	dev_err(&pdev->dev, "Device reset completed successfully\n");
2637 
2638 	return rc;
2639 err_disable_msix:
2640 	ena_free_mgmnt_irq(adapter);
2641 	ena_disable_msix(adapter);
2642 err_device_destroy:
2643 	ena_com_admin_destroy(ena_dev);
2644 err:
2645 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2646 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2647 	dev_err(&pdev->dev,
2648 		"Reset attempt failed. Can not reset the device\n");
2649 
2650 	return rc;
2651 }
2652 
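/* Reset work item, queued when the trigger-reset flag is set: destroy
 * and restore the device under the rtnl lock.
 */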
2653 static void ena_fw_reset_device(struct work_struct *work)
2654 {
2655 	struct ena_adapter *adapter =
2656 		container_of(work, struct ena_adapter, reset_task);
2657 	struct pci_dev *pdev = adapter->pdev;
2658 
2659 	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2660 		dev_err(&pdev->dev,
2661 			"device reset schedule while reset bit is off\n");
2662 		return;
2663 	}
2664 	rtnl_lock();
2665 	ena_destroy_device(adapter, false);
2666 	ena_restore_device(adapter);
2667 	rtnl_unlock();
2668 }
2669 
2670 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2671 					struct ena_ring *rx_ring)
2672 {
2673 	if (likely(rx_ring->first_interrupt))
2674 		return 0;
2675 
2676 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2677 		return 0;
2678 
2679 	rx_ring->no_interrupt_event_cnt++;
2680 
2681 	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2682 		netif_err(adapter, rx_err, adapter->netdev,
2683 			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2684 			  rx_ring->qid);
2685 		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2686 		smp_mb__before_atomic();
2687 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2688 		return -EIO;
2689 	}
2690 
2691 	return 0;
2692 }
2693 
2694 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2695 					  struct ena_ring *tx_ring)
2696 {
2697 	struct ena_tx_buffer *tx_buf;
2698 	unsigned long last_jiffies;
2699 	u32 missed_tx = 0;
2700 	int i, rc = 0;
2701 
2702 	for (i = 0; i < tx_ring->ring_size; i++) {
2703 		tx_buf = &tx_ring->tx_buffer_info[i];
2704 		last_jiffies = tx_buf->last_jiffies;
2705 
2706 		if (last_jiffies == 0)
2707 			/* no pending Tx at this location */
2708 			continue;
2709 
2710 		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2711 			     2 * adapter->missing_tx_completion_to))) {
2712 			/* If after the grace period the interrupt is still
2713 			 * not received, we schedule a reset.
2714 			 */
2715 			netif_err(adapter, tx_err, adapter->netdev,
2716 				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2717 				  tx_ring->qid);
2718 			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2719 			smp_mb__before_atomic();
2720 			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2721 			return -EIO;
2722 		}
2723 
2724 		if (unlikely(time_is_before_jiffies(last_jiffies +
2725 				adapter->missing_tx_completion_to))) {
2726 			if (!tx_buf->print_once)
2727 				netif_notice(adapter, tx_err, adapter->netdev,
2728 					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2729 					     tx_ring->qid, i);
2730 
2731 			tx_buf->print_once = 1;
2732 			missed_tx++;
2733 		}
2734 	}
2735 
2736 	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2737 		netif_err(adapter, tx_err, adapter->netdev,
2738 			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2739 			  missed_tx,
2740 			  adapter->missing_tx_completion_threshold);
2741 		adapter->reset_reason =
2742 			ENA_REGS_RESET_MISS_TX_CMPL;
2743 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2744 		rc = -EIO;
2745 	}
2746 
2747 	u64_stats_update_begin(&tx_ring->syncp);
2748 	tx_ring->tx_stats.missed_tx = missed_tx;
2749 	u64_stats_update_end(&tx_ring->syncp);
2750 
2751 	return rc;
2752 }
2753 
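/* Scan up to ENA_MONITORED_TX_QUEUES queue pairs per invocation
 * (round robin across invocations) for TX completions and RX
 * interrupts that never arrived; the helpers above trigger a device
 * reset once the configured thresholds are crossed.
 */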
2754 static void check_for_missing_completions(struct ena_adapter *adapter)
2755 {
2756 	struct ena_ring *tx_ring;
2757 	struct ena_ring *rx_ring;
2758 	int i, budget, rc;
2759 
2760 	/* Make sure the driver isn't turning the device off in another process */
2761 	smp_rmb();
2762 
2763 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2764 		return;
2765 
2766 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2767 		return;
2768 
2769 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2770 		return;
2771 
2772 	budget = ENA_MONITORED_TX_QUEUES;
2773 
2774 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2775 		tx_ring = &adapter->tx_ring[i];
2776 		rx_ring = &adapter->rx_ring[i];
2777 
2778 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2779 		if (unlikely(rc))
2780 			return;
2781 
2782 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2783 		if (unlikely(rc))
2784 			return;
2785 
2786 		budget--;
2787 		if (!budget)
2788 			break;
2789 	}
2790 
2791 	adapter->last_monitored_tx_qid = i % adapter->num_queues;
2792 }
2793 
2794 /* trigger napi schedule after 2 consecutive detections */
2795 #define EMPTY_RX_REFILL 2
2796 /* For the rare case where the device runs out of Rx descriptors and the
2797  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2798  * for example).
2799  * This case will lead to a deadlock:
2800  * the device won't send interrupts since all the new Rx packets will be
2801  * dropped, and the napi handler won't allocate new Rx descriptors so the
2802  * device won't be able to send new packets.
2803  *
2804  * This scenario can happen when the kernel's vm.min_free_kbytes is too
2805  * small (it is recommended to have at least 512MB, with a minimum of
2806  * 128MB for constrained environments).
2807  *
2808  * When such a situation is detected - reschedule napi.
2809  */
2810 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2811 {
2812 	struct ena_ring *rx_ring;
2813 	int i, refill_required;
2814 
2815 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2816 		return;
2817 
2818 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2819 		return;
2820 
2821 	for (i = 0; i < adapter->num_queues; i++) {
2822 		rx_ring = &adapter->rx_ring[i];
2823 
2824 		refill_required =
2825 			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2826 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2827 			rx_ring->empty_rx_queue++;
2828 
2829 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2830 				u64_stats_update_begin(&rx_ring->syncp);
2831 				rx_ring->rx_stats.empty_rx_ring++;
2832 				u64_stats_update_end(&rx_ring->syncp);
2833 
2834 				netif_err(adapter, drv, adapter->netdev,
2835 					  "trigger refill for ring %d\n", i);
2836 
2837 				napi_schedule(rx_ring->napi);
2838 				rx_ring->empty_rx_queue = 0;
2839 			}
2840 		} else {
2841 			rx_ring->empty_rx_queue = 0;
2842 		}
2843 	}
2844 }
2845 
2846 /* Check for keep alive expiration */
2847 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2848 {
2849 	unsigned long keep_alive_expired;
2850 
2851 	if (!adapter->wd_state)
2852 		return;
2853 
2854 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2855 		return;
2856 
2857 	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2858 					   adapter->keep_alive_timeout);
2859 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2860 		netif_err(adapter, drv, adapter->netdev,
2861 			  "Keep alive watchdog timeout.\n");
2862 		u64_stats_update_begin(&adapter->syncp);
2863 		adapter->dev_stats.wd_expired++;
2864 		u64_stats_update_end(&adapter->syncp);
2865 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2866 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2867 	}
2868 }
2869 
2870 static void check_for_admin_com_state(struct ena_adapter *adapter)
2871 {
2872 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2873 		netif_err(adapter, drv, adapter->netdev,
2874 			  "ENA admin queue is not in running state!\n");
2875 		u64_stats_update_begin(&adapter->syncp);
2876 		adapter->dev_stats.admin_q_pause++;
2877 		u64_stats_update_end(&adapter->syncp);
2878 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2879 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2880 	}
2881 }
2882 
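/* Apply runtime hints received from the device: admin completion and
 * mmio read timeouts, missing-TX-completion thresholds and watchdog
 * timeouts. A hint of ENA_HW_HINTS_NO_TIMEOUT disables the
 * corresponding check.
 */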
2883 static void ena_update_hints(struct ena_adapter *adapter,
2884 			     struct ena_admin_ena_hw_hints *hints)
2885 {
2886 	struct net_device *netdev = adapter->netdev;
2887 
2888 	if (hints->admin_completion_tx_timeout)
2889 		adapter->ena_dev->admin_queue.completion_timeout =
2890 			hints->admin_completion_tx_timeout * 1000;
2891 
2892 	if (hints->mmio_read_timeout)
2893 		/* convert to usec */
2894 		adapter->ena_dev->mmio_read.reg_read_to =
2895 			hints->mmio_read_timeout * 1000;
2896 
2897 	if (hints->missed_tx_completion_count_threshold_to_reset)
2898 		adapter->missing_tx_completion_threshold =
2899 			hints->missed_tx_completion_count_threshold_to_reset;
2900 
2901 	if (hints->missing_tx_completion_timeout) {
2902 		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2903 			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2904 		else
2905 			adapter->missing_tx_completion_to =
2906 				msecs_to_jiffies(hints->missing_tx_completion_timeout);
2907 	}
2908 
2909 	if (hints->netdev_wd_timeout)
2910 		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2911 
2912 	if (hints->driver_watchdog_timeout) {
2913 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2914 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2915 		else
2916 			adapter->keep_alive_timeout =
2917 				msecs_to_jiffies(hints->driver_watchdog_timeout);
2918 	}
2919 }
2920 
2921 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2922 				 struct net_device *netdev)
2923 {
2924 	host_info->supported_network_features[0] =
2925 		netdev->features & GENMASK_ULL(31, 0);
2926 	host_info->supported_network_features[1] =
2927 		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
2928 }
2929 
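/* Periodic (1 Hz) health check: watches the keep-alive watchdog, the
 * admin queue state, missing TX/RX completions and empty RX rings,
 * refreshes the debug area and host info, and queues the reset work
 * when a reset has been triggered.
 */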
2930 static void ena_timer_service(struct timer_list *t)
2931 {
2932 	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
2933 	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
2934 	struct ena_admin_host_info *host_info =
2935 		adapter->ena_dev->host_attr.host_info;
2936 
2937 	check_for_missing_keep_alive(adapter);
2938 
2939 	check_for_admin_com_state(adapter);
2940 
2941 	check_for_missing_completions(adapter);
2942 
2943 	check_for_empty_rx_ring(adapter);
2944 
2945 	if (debug_area)
2946 		ena_dump_stats_to_buf(adapter, debug_area);
2947 
2948 	if (host_info)
2949 		ena_update_host_info(host_info, adapter->netdev);
2950 
2951 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2952 		netif_err(adapter, drv, adapter->netdev,
2953 			  "Trigger reset is on\n");
2954 		ena_dump_stats_to_dmesg(adapter);
2955 		queue_work(ena_wq, &adapter->reset_task);
2956 		return;
2957 	}
2958 
2959 	/* Reset the timer */
2960 	mod_timer(&adapter->timer_service, jiffies + HZ);
2961 }
2962 
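/* The number of IO queues is the minimum of the online CPU count, the
 * device's SQ/CQ limits (the LLQ count when push mode is used) and the
 * available MSI-X vectors minus one (reserved for management).
 */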
2963 static int ena_calc_io_queue_num(struct pci_dev *pdev,
2964 				 struct ena_com_dev *ena_dev,
2965 				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2966 {
2967 	int io_sq_num, io_queue_num;
2968 
2969 	/* In case of LLQ use the llq number in the get feature cmd */
2970 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2971 		io_sq_num = get_feat_ctx->max_queues.max_llq_num;
2972 
2973 		if (io_sq_num == 0) {
2974 			dev_err(&pdev->dev,
2975 				"Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
2976 
2977 			ena_dev->tx_mem_queue_type =
2978 				ENA_ADMIN_PLACEMENT_POLICY_HOST;
2979 			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2980 		}
2981 	} else {
2982 		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2983 	}
2984 
2985 	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
2986 	io_queue_num = min_t(int, io_queue_num, io_sq_num);
2987 	io_queue_num = min_t(int, io_queue_num,
2988 			     get_feat_ctx->max_queues.max_cq_num);
2989 	/* 1 IRQ for mgmnt and 1 IRQ for each IO queue pair */
2990 	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
2991 	if (unlikely(!io_queue_num)) {
2992 		dev_err(&pdev->dev, "The device doesn't have io queues\n");
2993 		return -EFAULT;
2994 	}
2995 
2996 	return io_queue_num;
2997 }
2998 
2999 static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3000 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
3001 {
3002 	bool has_mem_bar;
3003 
3004 	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3005 
3006 	/* Enable push mode if device supports LLQ */
3007 	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
3008 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3009 	else
3010 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3011 }
3012 
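/* Translate the device's reported TX/RX offload capabilities into
 * netdev feature flags (TX checksum offloads, the TSO variants and RX
 * checksum) and mirror them into hw_features and vlan_features.
 */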
3013 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3014 				 struct net_device *netdev)
3015 {
3016 	netdev_features_t dev_features = 0;
3017 
3018 	/* Set offload features */
3019 	if (feat->offload.tx &
3020 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3021 		dev_features |= NETIF_F_IP_CSUM;
3022 
3023 	if (feat->offload.tx &
3024 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3025 		dev_features |= NETIF_F_IPV6_CSUM;
3026 
3027 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3028 		dev_features |= NETIF_F_TSO;
3029 
3030 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3031 		dev_features |= NETIF_F_TSO6;
3032 
3033 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3034 		dev_features |= NETIF_F_TSO_ECN;
3035 
3036 	if (feat->offload.rx_supported &
3037 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3038 		dev_features |= NETIF_F_RXCSUM;
3039 
3040 	if (feat->offload.rx_supported &
3041 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3042 		dev_features |= NETIF_F_RXCSUM;
3043 
3044 	netdev->features =
3045 		dev_features |
3046 		NETIF_F_SG |
3047 		NETIF_F_RXHASH |
3048 		NETIF_F_HIGHDMA;
3049 
3050 	netdev->hw_features |= netdev->features;
3051 	netdev->vlan_features |= netdev->features;
3052 }
3053 
3054 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3055 				     struct ena_com_dev_get_features_ctx *feat)
3056 {
3057 	struct net_device *netdev = adapter->netdev;
3058 
3059 	/* Copy mac address */
3060 	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3061 		eth_hw_addr_random(netdev);
3062 		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3063 	} else {
3064 		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3065 		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3066 	}
3067 
3068 	/* Set offload features */
3069 	ena_set_dev_offloads(feat, netdev);
3070 
3071 	adapter->max_mtu = feat->dev_attr.max_mtu;
3072 	netdev->max_mtu = adapter->max_mtu;
3073 	netdev->min_mtu = ENA_MIN_MTU;
3074 }
3075 
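/* Set up a default RSS configuration: an indirection table that
 * spreads flows across all IO queues, a CRC32 hash function and the
 * default hash control. -EOPNOTSUPP from the device is tolerated for
 * each of the hash configuration steps.
 */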
3076 static int ena_rss_init_default(struct ena_adapter *adapter)
3077 {
3078 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3079 	struct device *dev = &adapter->pdev->dev;
3080 	int rc, i;
3081 	u32 val;
3082 
3083 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3084 	if (unlikely(rc)) {
3085 		dev_err(dev, "Cannot init indirect table\n");
3086 		goto err_rss_init;
3087 	}
3088 
3089 	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3090 		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3091 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3092 						       ENA_IO_RXQ_IDX(val));
3093 		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3094 			dev_err(dev, "Cannot fill indirect table\n");
3095 			goto err_fill_indir;
3096 		}
3097 	}
3098 
3099 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3100 					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3101 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3102 		dev_err(dev, "Cannot fill hash function\n");
3103 		goto err_fill_indir;
3104 	}
3105 
3106 	rc = ena_com_set_default_hash_ctrl(ena_dev);
3107 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3108 		dev_err(dev, "Cannot fill hash control\n");
3109 		goto err_fill_indir;
3110 	}
3111 
3112 	return 0;
3113 
3114 err_fill_indir:
3115 	ena_com_rss_destroy(ena_dev);
3116 err_rss_init:
3117 
3118 	return rc;
3119 }
3120 
3121 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3122 {
3123 	int release_bars;
3124 
3125 	if (ena_dev->mem_bar)
3126 		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3127 
3128 	if (ena_dev->reg_bar)
3129 		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3130 
3131 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3132 	pci_release_selected_regions(pdev, release_bars);
3133 }
3134 
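/* The ring size is the default clamped to the device's SQ/CQ depth
 * limits (and the LLQ depth in push mode), rounded down to a power of
 * two. The maximum TX/RX scatter-gather list sizes are derived from
 * the device's per-packet descriptor limits.
 */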
3135 static int ena_calc_queue_size(struct pci_dev *pdev,
3136 			       struct ena_com_dev *ena_dev,
3137 			       u16 *max_tx_sgl_size,
3138 			       u16 *max_rx_sgl_size,
3139 			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
3140 {
3141 	u32 queue_size = ENA_DEFAULT_RING_SIZE;
3142 
3143 	queue_size = min_t(u32, queue_size,
3144 			   get_feat_ctx->max_queues.max_cq_depth);
3145 	queue_size = min_t(u32, queue_size,
3146 			   get_feat_ctx->max_queues.max_sq_depth);
3147 
3148 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3149 		queue_size = min_t(u32, queue_size,
3150 				   get_feat_ctx->max_queues.max_llq_depth);
3151 
3152 	queue_size = rounddown_pow_of_two(queue_size);
3153 
3154 	if (unlikely(!queue_size)) {
3155 		dev_err(&pdev->dev, "Invalid queue size\n");
3156 		return -EFAULT;
3157 	}
3158 
3159 	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3160 				 get_feat_ctx->max_queues.max_packet_tx_descs);
3161 	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3162 				 get_feat_ctx->max_queues.max_packet_rx_descs);
3163 
3164 	return queue_size;
3165 }
3166 
3167 /* ena_probe - Device Initialization Routine
3168  * @pdev: PCI device information struct
3169  * @ent: entry in ena_pci_tbl
3170  *
3171  * Returns 0 on success, negative on failure
3172  *
3173  * ena_probe initializes an adapter identified by a pci_dev structure.
3174  * The OS initialization, configuring of the adapter private structure,
3175  * and a hardware reset occur.
3176  */
3177 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3178 {
3179 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3180 	static int version_printed;
3181 	struct net_device *netdev;
3182 	struct ena_adapter *adapter;
3183 	struct ena_com_dev *ena_dev = NULL;
3184 	static int adapters_found;
3185 	int io_queue_num, bars, rc;
3186 	int queue_size;
3187 	u16 tx_sgl_size = 0;
3188 	u16 rx_sgl_size = 0;
3189 	bool wd_state;
3190 
3191 	dev_dbg(&pdev->dev, "%s\n", __func__);
3192 
3193 	if (version_printed++ == 0)
3194 		dev_info(&pdev->dev, "%s", version);
3195 
3196 	rc = pci_enable_device_mem(pdev);
3197 	if (rc) {
3198 		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3199 		return rc;
3200 	}
3201 
3202 	pci_set_master(pdev);
3203 
3204 	ena_dev = vzalloc(sizeof(*ena_dev));
3205 	if (!ena_dev) {
3206 		rc = -ENOMEM;
3207 		goto err_disable_device;
3208 	}
3209 
3210 	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3211 	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3212 	if (rc) {
3213 		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3214 			rc);
3215 		goto err_free_ena_dev;
3216 	}
3217 
3218 	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3219 					pci_resource_start(pdev, ENA_REG_BAR),
3220 					pci_resource_len(pdev, ENA_REG_BAR));
3221 	if (!ena_dev->reg_bar) {
3222 		dev_err(&pdev->dev, "failed to remap regs bar\n");
3223 		rc = -EFAULT;
3224 		goto err_free_region;
3225 	}
3226 
3227 	ena_dev->dmadev = &pdev->dev;
3228 
3229 	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3230 	if (rc) {
3231 		dev_err(&pdev->dev, "ena device init failed\n");
3232 		if (rc == -ETIME)
3233 			rc = -EPROBE_DEFER;
3234 		goto err_free_region;
3235 	}
3236 
3237 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
3238 
3239 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3240 		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3241 						   pci_resource_start(pdev, ENA_MEM_BAR),
3242 						   pci_resource_len(pdev, ENA_MEM_BAR));
3243 		if (!ena_dev->mem_bar) {
3244 			rc = -EFAULT;
3245 			goto err_device_destroy;
3246 		}
3247 	}
3248 
3249 	/* Initial Tx interrupt delay. Assumes 1 usec granularity.
3250 	 * Updated during device initialization with the real granularity.
3251 	 */
3252 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3253 	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3254 	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
3255 					 &rx_sgl_size, &get_feat_ctx);
3256 	if ((queue_size <= 0) || (io_queue_num <= 0)) {
3257 		rc = -EFAULT;
3258 		goto err_device_destroy;
3259 	}
3260 
3261 	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
3262 		 io_queue_num, queue_size);
3263 
3264 	/* dev zeroed in alloc_etherdev_mq() */
3265 	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3266 	if (!netdev) {
3267 		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3268 		rc = -ENOMEM;
3269 		goto err_device_destroy;
3270 	}
3271 
3272 	SET_NETDEV_DEV(netdev, &pdev->dev);
3273 
3274 	adapter = netdev_priv(netdev);
3275 	pci_set_drvdata(pdev, adapter);
3276 
3277 	adapter->ena_dev = ena_dev;
3278 	adapter->netdev = netdev;
3279 	adapter->pdev = pdev;
3280 
3281 	ena_set_conf_feat_params(adapter, &get_feat_ctx);
3282 
3283 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3284 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3285 
3286 	adapter->tx_ring_size = queue_size;
3287 	adapter->rx_ring_size = queue_size;
3288 
3289 	adapter->max_tx_sgl_size = tx_sgl_size;
3290 	adapter->max_rx_sgl_size = rx_sgl_size;
3291 
3292 	adapter->num_queues = io_queue_num;
3293 	adapter->last_monitored_tx_qid = 0;
3294 
3295 	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3296 	adapter->wd_state = wd_state;
3297 
3298 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3299 
3300 	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3301 	if (rc) {
3302 		dev_err(&pdev->dev,
3303 			"Failed to query interrupt moderation feature\n");
3304 		goto err_netdev_destroy;
3305 	}
3306 	ena_init_io_rings(adapter);
3307 
3308 	netdev->netdev_ops = &ena_netdev_ops;
3309 	netdev->watchdog_timeo = TX_TIMEOUT;
3310 	ena_set_ethtool_ops(netdev);
3311 
3312 	netdev->priv_flags |= IFF_UNICAST_FLT;
3313 
3314 	u64_stats_init(&adapter->syncp);
3315 
3316 	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3317 	if (rc) {
3318 		dev_err(&pdev->dev,
3319 			"Failed to enable and set the admin interrupts\n");
3320 		goto err_worker_destroy;
3321 	}
3322 	rc = ena_rss_init_default(adapter);
3323 	if (rc && (rc != -EOPNOTSUPP)) {
3324 		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3325 		goto err_free_msix;
3326 	}
3327 
3328 	ena_config_debug_area(adapter);
3329 
3330 	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3331 
3332 	netif_carrier_off(netdev);
3333 
3334 	rc = register_netdev(netdev);
3335 	if (rc) {
3336 		dev_err(&pdev->dev, "Cannot register net device\n");
3337 		goto err_rss;
3338 	}
3339 
3340 	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3341 
3342 	adapter->last_keep_alive_jiffies = jiffies;
3343 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3344 	adapter->missing_tx_completion_to = TX_TIMEOUT;
3345 	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3346 
3347 	ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3348 
3349 	timer_setup(&adapter->timer_service, ena_timer_service, 0);
3350 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3351 
3352 	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
3353 		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3354 		 netdev->dev_addr, io_queue_num);
3355 
3356 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3357 
3358 	adapters_found++;
3359 
3360 	return 0;
3361 
3362 err_rss:
3363 	ena_com_delete_debug_area(ena_dev);
3364 	ena_com_rss_destroy(ena_dev);
3365 err_free_msix:
3366 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3367 	ena_free_mgmnt_irq(adapter);
3368 	ena_disable_msix(adapter);
3369 err_worker_destroy:
3370 	ena_com_destroy_interrupt_moderation(ena_dev);
3371 	del_timer(&adapter->timer_service);
3372 err_netdev_destroy:
3373 	free_netdev(netdev);
3374 err_device_destroy:
3375 	ena_com_delete_host_info(ena_dev);
3376 	ena_com_admin_destroy(ena_dev);
3377 err_free_region:
3378 	ena_release_bars(ena_dev, pdev);
3379 err_free_ena_dev:
3380 	vfree(ena_dev);
3381 err_disable_device:
3382 	pci_disable_device(pdev);
3383 	return rc;
3384 }
3385 
3386 /*****************************************************************************/
3387 
3388 /* ena_remove - Device Removal Routine
3389  * @pdev: PCI device information struct
3390  *
3391  * ena_remove is called by the PCI subsystem to alert the driver
3392  * that it should release a PCI device.
3393  */
3394 static void ena_remove(struct pci_dev *pdev)
3395 {
3396 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3397 	struct ena_com_dev *ena_dev;
3398 	struct net_device *netdev;
3399 
3400 	ena_dev = adapter->ena_dev;
3401 	netdev = adapter->netdev;
3402 
3403 #ifdef CONFIG_RFS_ACCEL
3404 	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3405 		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3406 		netdev->rx_cpu_rmap = NULL;
3407 	}
3408 #endif /* CONFIG_RFS_ACCEL */
3409 	del_timer_sync(&adapter->timer_service);
3410 
3411 	cancel_work_sync(&adapter->reset_task);
3412 
3413 	unregister_netdev(netdev);
3414 
3415 	/* If the device is running then we want to make sure it is reset so
3416 	 * that no more events will be issued by the device.
3417 	 */
3418 	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3419 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3420 
3421 	rtnl_lock();
3422 	ena_destroy_device(adapter, true);
3423 	rtnl_unlock();
3424 
3425 	free_netdev(netdev);
3426 
3427 	ena_com_rss_destroy(ena_dev);
3428 
3429 	ena_com_delete_debug_area(ena_dev);
3430 
3431 	ena_com_delete_host_info(ena_dev);
3432 
3433 	ena_release_bars(ena_dev, pdev);
3434 
3435 	pci_disable_device(pdev);
3436 
3437 	ena_com_destroy_interrupt_moderation(ena_dev);
3438 
3439 	vfree(ena_dev);
3440 }
3441 
3442 #ifdef CONFIG_PM
3443 /* ena_suspend - PM suspend callback
3444  * @pdev: PCI device information struct
3445  * @state: power state
3446  */
3447 static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
3448 {
3449 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3450 
3451 	u64_stats_update_begin(&adapter->syncp);
3452 	adapter->dev_stats.suspend++;
3453 	u64_stats_update_end(&adapter->syncp);
3454 
3455 	rtnl_lock();
3456 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3457 		dev_err(&pdev->dev,
3458 			"ignoring device reset request as the device is being suspended\n");
3459 		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3460 	}
3461 	ena_destroy_device(adapter, true);
3462 	rtnl_unlock();
3463 	return 0;
3464 }
3465 
3466 /* ena_resume - PM resume callback
3467  * @pdev: PCI device information struct
3468  *
3469  */
3470 static int ena_resume(struct pci_dev *pdev)
3471 {
3472 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3473 	int rc;
3474 
3475 	u64_stats_update_begin(&adapter->syncp);
3476 	adapter->dev_stats.resume++;
3477 	u64_stats_update_end(&adapter->syncp);
3478 
3479 	rtnl_lock();
3480 	rc = ena_restore_device(adapter);
3481 	rtnl_unlock();
3482 	return rc;
3483 }
3484 #endif
3485 
3486 static struct pci_driver ena_pci_driver = {
3487 	.name		= DRV_MODULE_NAME,
3488 	.id_table	= ena_pci_tbl,
3489 	.probe		= ena_probe,
3490 	.remove		= ena_remove,
3491 #ifdef CONFIG_PM
3492 	.suspend    = ena_suspend,
3493 	.resume     = ena_resume,
3494 #endif
3495 	.sriov_configure = pci_sriov_configure_simple,
3496 };
3497 
3498 static int __init ena_init(void)
3499 {
3500 	pr_info("%s", version);
3501 
3502 	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3503 	if (!ena_wq) {
3504 		pr_err("Failed to create workqueue\n");
3505 		return -ENOMEM;
3506 	}
3507 
3508 	return pci_register_driver(&ena_pci_driver);
3509 }
3510 
3511 static void __exit ena_cleanup(void)
3512 {
3513 	pci_unregister_driver(&ena_pci_driver);
3514 
3515 	if (ena_wq) {
3516 		destroy_workqueue(ena_wq);
3517 		ena_wq = NULL;
3518 	}
3519 }
3520 
3521 /******************************************************************************
3522  ******************************** AENQ Handlers *******************************
3523  *****************************************************************************/
3524 /* ena_update_on_link_change:
3525  * Notify the network interface about the change in link status
3526  */
3527 static void ena_update_on_link_change(void *adapter_data,
3528 				      struct ena_admin_aenq_entry *aenq_e)
3529 {
3530 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3531 	struct ena_admin_aenq_link_change_desc *aenq_desc =
3532 		(struct ena_admin_aenq_link_change_desc *)aenq_e;
3533 	int status = aenq_desc->flags &
3534 		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3535 
3536 	if (status) {
3537 		netdev_dbg(adapter->netdev, "%s\n", __func__);
3538 		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3539 		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3540 			netif_carrier_on(adapter->netdev);
3541 	} else {
3542 		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3543 		netif_carrier_off(adapter->netdev);
3544 	}
3545 }
3546 
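/* Keep-alive AENQ handler: refresh the watchdog timestamp and record
 * the RX drop count reported by the device.
 */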
3547 static void ena_keep_alive_wd(void *adapter_data,
3548 			      struct ena_admin_aenq_entry *aenq_e)
3549 {
3550 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3551 	struct ena_admin_aenq_keep_alive_desc *desc;
3552 	u64 rx_drops;
3553 
3554 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3555 	adapter->last_keep_alive_jiffies = jiffies;
3556 
3557 	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
3558 
3559 	u64_stats_update_begin(&adapter->syncp);
3560 	adapter->dev_stats.rx_drops = rx_drops;
3561 	u64_stats_update_end(&adapter->syncp);
3562 }
3563 
3564 static void ena_notification(void *adapter_data,
3565 			     struct ena_admin_aenq_entry *aenq_e)
3566 {
3567 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3568 	struct ena_admin_ena_hw_hints *hints;
3569 
3570 	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3571 	     "Invalid group(%x) expected %x\n",
3572 	     aenq_e->aenq_common_desc.group,
3573 	     ENA_ADMIN_NOTIFICATION);
3574 
3575 	switch (aenq_e->aenq_common_desc.syndrom) {
3576 	case ENA_ADMIN_UPDATE_HINTS:
3577 		hints = (struct ena_admin_ena_hw_hints *)
3578 			(&aenq_e->inline_data_w4);
3579 		ena_update_hints(adapter, hints);
3580 		break;
3581 	default:
3582 		netif_err(adapter, drv, adapter->netdev,
3583 			  "Invalid aenq notification link state %d\n",
3584 			  aenq_e->aenq_common_desc.syndrom);
3585 	}
3586 }
3587 
3588 /* This handler will be called for an unknown event group or unimplemented handlers */
3589 static void unimplemented_aenq_handler(void *data,
3590 				       struct ena_admin_aenq_entry *aenq_e)
3591 {
3592 	struct ena_adapter *adapter = (struct ena_adapter *)data;
3593 
3594 	netif_err(adapter, drv, adapter->netdev,
3595 		  "Unknown event was received or event with unimplemented handler\n");
3596 }
3597 
3598 static struct ena_aenq_handlers aenq_handlers = {
3599 	.handlers = {
3600 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3601 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3602 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3603 	},
3604 	.unimplemented_handler = unimplemented_aenq_handler
3605 };
3606 
3607 module_init(ena_init);
3608 module_exit(ena_cleanup);
3609