xref: /linux/drivers/net/ethernet/pensando/ionic/ionic_lif.c (revision 93a3545d812ae7cfe4426374e00a7d8f64ac02e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 /* queuetype support level */
21 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
22 	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
23 	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
24 	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
25 	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
26 				      * 1 =   ... with Tx SG version 1
27 				      */
28 };
29 
30 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
31 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
32 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
33 static void ionic_link_status_check(struct ionic_lif *lif);
34 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
36 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
37 
38 static int ionic_start_queues(struct ionic_lif *lif);
39 static void ionic_stop_queues(struct ionic_lif *lif);
40 static void ionic_lif_queue_identify(struct ionic_lif *lif);
41 
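/* Deferred work lets events noticed in atomic context (notifyq
 * processing, filter changes from softirq, etc.) be handled later in
 * process context, where adminq commands are allowed to sleep.  Each
 * pass below pops a single entry and then reschedules itself, so the
 * spinlock is held only long enough to unlink one item.
 */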
42 static void ionic_lif_deferred_work(struct work_struct *work)
43 {
44 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
45 	struct ionic_deferred *def = &lif->deferred;
46 	struct ionic_deferred_work *w = NULL;
47 
48 	spin_lock_bh(&def->lock);
49 	if (!list_empty(&def->list)) {
50 		w = list_first_entry(&def->list,
51 				     struct ionic_deferred_work, list);
52 		list_del(&w->list);
53 	}
54 	spin_unlock_bh(&def->lock);
55 
56 	if (w) {
57 		switch (w->type) {
58 		case IONIC_DW_TYPE_RX_MODE:
59 			ionic_lif_rx_mode(lif, w->rx_mode);
60 			break;
61 		case IONIC_DW_TYPE_RX_ADDR_ADD:
62 			ionic_lif_addr_add(lif, w->addr);
63 			break;
64 		case IONIC_DW_TYPE_RX_ADDR_DEL:
65 			ionic_lif_addr_del(lif, w->addr);
66 			break;
67 		case IONIC_DW_TYPE_LINK_STATUS:
68 			ionic_link_status_check(lif);
69 			break;
70 		case IONIC_DW_TYPE_LIF_RESET:
71 			if (w->fw_status)
72 				ionic_lif_handle_fw_up(lif);
73 			else
74 				ionic_lif_handle_fw_down(lif);
75 			break;
76 		default:
77 			break;
78 		}
79 		kfree(w);
80 		schedule_work(&def->work);
81 	}
82 }
83 
84 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
85 				struct ionic_deferred_work *work)
86 {
87 	spin_lock_bh(&def->lock);
88 	list_add_tail(&work->list, &def->list);
89 	spin_unlock_bh(&def->lock);
90 	schedule_work(&def->work);
91 }
92 
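/* Runs in process context, either directly or via the deferred work
 * queue; bails out early if no check was requested or if a queue
 * reset is in flight.
 */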
93 static void ionic_link_status_check(struct ionic_lif *lif)
94 {
95 	struct net_device *netdev = lif->netdev;
96 	u16 link_status;
97 	bool link_up;
98 
99 	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
100 	    test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
101 		return;
102 
103 	link_status = le16_to_cpu(lif->info->status.link_status);
104 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
105 
106 	if (link_up) {
107 		if (!netif_carrier_ok(netdev)) {
108 			u32 link_speed;
109 
110 			ionic_port_identify(lif->ionic);
111 			link_speed = le32_to_cpu(lif->info->status.link_speed);
112 			netdev_info(netdev, "Link up - %d Gbps\n",
113 				    link_speed / 1000);
114 			netif_carrier_on(netdev);
115 		}
116 
117 		if ((lif->netdev->flags & IFF_UP) && netif_running(lif->netdev))
118 			ionic_start_queues(lif);
119 	} else {
120 		if (netif_carrier_ok(netdev)) {
121 			netdev_info(netdev, "Link down\n");
122 			netif_carrier_off(netdev);
123 		}
124 
125 		if ((lif->netdev->flags & IFF_UP) && netif_running(lif->netdev))
126 			ionic_stop_queues(lif);
127 	}
128 
129 	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
130 }
131 
132 void ionic_link_status_check_request(struct ionic_lif *lif)
133 {
134 	struct ionic_deferred_work *work;
135 
136 	/* we only need one request outstanding at a time */
137 	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
138 		return;
139 
140 	if (in_interrupt()) {
141 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
142 		if (!work)
143 			return;
144 
145 		work->type = IONIC_DW_TYPE_LINK_STATUS;
146 		ionic_lif_deferred_enqueue(&lif->deferred, work);
147 	} else {
148 		ionic_link_status_check(lif);
149 	}
150 }
151 
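/* Hard interrupt handler: the NAPI struct was registered as the
 * devm_request_irq() cookie, so all the handler has to do is schedule
 * NAPI with interrupts still disabled.
 */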
152 static irqreturn_t ionic_isr(int irq, void *data)
153 {
154 	struct napi_struct *napi = data;
155 
156 	napi_schedule_irqoff(napi);
157 
158 	return IRQ_HANDLED;
159 }
160 
161 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
162 {
163 	struct ionic_intr_info *intr = &qcq->intr;
164 	struct device *dev = lif->ionic->dev;
165 	struct ionic_queue *q = &qcq->q;
166 	const char *name;
167 
168 	if (lif->registered)
169 		name = lif->netdev->name;
170 	else
171 		name = dev_name(dev);
172 
173 	snprintf(intr->name, sizeof(intr->name),
174 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
175 
176 	return devm_request_irq(dev, intr->vector, ionic_isr,
177 				0, intr->name, &qcq->napi);
178 }
179 
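/* Interrupt slots are handed out from a bitmap in the ionic device;
 * find_first_zero_bit() returns nintrs when the map is full, which we
 * report as -ENOSPC.
 */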
180 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
181 {
182 	struct ionic *ionic = lif->ionic;
183 	int index;
184 
185 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
186 	if (index == ionic->nintrs) {
187 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
188 			    __func__, index, ionic->nintrs);
189 		return -ENOSPC;
190 	}
191 
192 	set_bit(index, ionic->intrs);
193 	ionic_intr_init(&ionic->idev, intr, index);
194 
195 	return 0;
196 }
197 
198 static void ionic_intr_free(struct ionic *ionic, int index)
199 {
200 	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
201 		clear_bit(index, ionic->intrs);
202 }
203 
204 static int ionic_qcq_enable(struct ionic_qcq *qcq)
205 {
206 	struct ionic_queue *q = &qcq->q;
207 	struct ionic_lif *lif = q->lif;
208 	struct ionic_dev *idev;
209 	struct device *dev;
210 
211 	struct ionic_admin_ctx ctx = {
212 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
213 		.cmd.q_control = {
214 			.opcode = IONIC_CMD_Q_CONTROL,
215 			.lif_index = cpu_to_le16(lif->index),
216 			.type = q->type,
217 			.index = cpu_to_le32(q->index),
218 			.oper = IONIC_Q_ENABLE,
219 		},
220 	};
221 
222 	idev = &lif->ionic->idev;
223 	dev = lif->ionic->dev;
224 
225 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
226 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
227 
228 	if (qcq->flags & IONIC_QCQ_F_INTR) {
229 		irq_set_affinity_hint(qcq->intr.vector,
230 				      &qcq->intr.affinity_mask);
231 		napi_enable(&qcq->napi);
232 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
233 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
234 				IONIC_INTR_MASK_CLEAR);
235 	}
236 
237 	return ionic_adminq_post_wait(lif, &ctx);
238 }
239 
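/* Mirror image of ionic_qcq_enable() above: mask the interrupt and
 * quiesce NAPI before asking the device to stop the queue.
 */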
240 static int ionic_qcq_disable(struct ionic_qcq *qcq)
241 {
242 	struct ionic_queue *q = &qcq->q;
243 	struct ionic_lif *lif = q->lif;
244 	struct ionic_dev *idev;
245 	struct device *dev;
246 
247 	struct ionic_admin_ctx ctx = {
248 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
249 		.cmd.q_control = {
250 			.opcode = IONIC_CMD_Q_CONTROL,
251 			.lif_index = cpu_to_le16(lif->index),
252 			.type = q->type,
253 			.index = cpu_to_le32(q->index),
254 			.oper = IONIC_Q_DISABLE,
255 		},
256 	};
257 
258 	idev = &lif->ionic->idev;
259 	dev = lif->ionic->dev;
260 
261 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
262 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
263 
264 	if (qcq->flags & IONIC_QCQ_F_INTR) {
265 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
266 				IONIC_INTR_MASK_SET);
267 		synchronize_irq(qcq->intr.vector);
268 		irq_set_affinity_hint(qcq->intr.vector, NULL);
269 		napi_disable(&qcq->napi);
270 	}
271 
272 	return ionic_adminq_post_wait(lif, &ctx);
273 }
274 
275 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
276 {
277 	struct ionic_dev *idev = &lif->ionic->idev;
278 
279 	if (!qcq)
280 		return;
281 
282 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
283 		return;
284 
285 	if (qcq->flags & IONIC_QCQ_F_INTR) {
286 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
287 				IONIC_INTR_MASK_SET);
288 		netif_napi_del(&qcq->napi);
289 	}
290 
291 	qcq->flags &= ~IONIC_QCQ_F_INITED;
292 }
293 
294 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
295 {
296 	struct device *dev = lif->ionic->dev;
297 
298 	if (!qcq)
299 		return;
300 
301 	ionic_debugfs_del_qcq(qcq);
302 
303 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
304 	qcq->base = NULL;
305 	qcq->base_pa = 0;
306 
307 	if (qcq->flags & IONIC_QCQ_F_INTR) {
308 		irq_set_affinity_hint(qcq->intr.vector, NULL);
309 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
310 		qcq->intr.vector = 0;
311 		ionic_intr_free(lif->ionic, qcq->intr.index);
312 	}
313 
314 	devm_kfree(dev, qcq->cq.info);
315 	qcq->cq.info = NULL;
316 	devm_kfree(dev, qcq->q.info);
317 	qcq->q.info = NULL;
318 	devm_kfree(dev, qcq);
319 }
320 
321 static void ionic_qcqs_free(struct ionic_lif *lif)
322 {
323 	struct device *dev = lif->ionic->dev;
324 	unsigned int i;
325 
326 	if (lif->notifyqcq) {
327 		ionic_qcq_free(lif, lif->notifyqcq);
328 		lif->notifyqcq = NULL;
329 	}
330 
331 	if (lif->adminqcq) {
332 		ionic_qcq_free(lif, lif->adminqcq);
333 		lif->adminqcq = NULL;
334 	}
335 
336 	if (lif->rxqcqs) {
337 		for (i = 0; i < lif->nxqs; i++)
338 			if (lif->rxqcqs[i].stats)
339 				devm_kfree(dev, lif->rxqcqs[i].stats);
340 		devm_kfree(dev, lif->rxqcqs);
341 		lif->rxqcqs = NULL;
342 	}
343 
344 	if (lif->txqcqs) {
345 		for (i = 0; i < lif->nxqs; i++)
346 			if (lif->txqcqs[i].stats)
347 				devm_kfree(dev, lif->txqcqs[i].stats);
348 		devm_kfree(dev, lif->txqcqs);
349 		lif->txqcqs = NULL;
350 	}
351 }
352 
353 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
354 				      struct ionic_qcq *n_qcq)
355 {
356 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
357 		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
358 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
359 	}
360 
361 	n_qcq->intr.vector = src_qcq->intr.vector;
362 	n_qcq->intr.index = src_qcq->intr.index;
363 }
364 
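/* All of a qcq's rings live in one contiguous DMA allocation, laid out
 * as [ q descriptors | pad | cq descriptors | pad | sg descriptors ],
 * with each region aligned up to a page boundary; the total_size math
 * below reserves page-sized slack so those ALIGN()s cannot overrun the
 * block.
 */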
365 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
366 			   unsigned int index,
367 			   const char *name, unsigned int flags,
368 			   unsigned int num_descs, unsigned int desc_size,
369 			   unsigned int cq_desc_size,
370 			   unsigned int sg_desc_size,
371 			   unsigned int pid, struct ionic_qcq **qcq)
372 {
373 	struct ionic_dev *idev = &lif->ionic->idev;
374 	u32 q_size, cq_size, sg_size, total_size;
375 	struct device *dev = lif->ionic->dev;
376 	void *q_base, *cq_base, *sg_base;
377 	dma_addr_t cq_base_pa = 0;
378 	dma_addr_t sg_base_pa = 0;
379 	dma_addr_t q_base_pa = 0;
380 	struct ionic_qcq *new;
381 	int err;
382 
383 	*qcq = NULL;
384 
385 	q_size  = num_descs * desc_size;
386 	cq_size = num_descs * cq_desc_size;
387 	sg_size = num_descs * sg_desc_size;
388 
389 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
390 	/* Note: aligning q_size/cq_size alone is not enough: q_base may
391 	 * not itself be page aligned, so the ALIGN() applied to cq_base
392 	 * could push past the space reserved above.  Add a page of slack.
393 	 */
394 	total_size += PAGE_SIZE;
395 	if (flags & IONIC_QCQ_F_SG) {
396 		total_size += ALIGN(sg_size, PAGE_SIZE);
397 		total_size += PAGE_SIZE;
398 	}
399 
400 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
401 	if (!new) {
402 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
403 		err = -ENOMEM;
404 		goto err_out;
405 	}
406 
407 	new->flags = flags;
408 
409 	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
410 				   GFP_KERNEL);
411 	if (!new->q.info) {
412 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
413 		err = -ENOMEM;
414 		goto err_out;
415 	}
416 
417 	new->q.type = type;
418 
419 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
420 			   desc_size, sg_desc_size, pid);
421 	if (err) {
422 		netdev_err(lif->netdev, "Cannot initialize queue\n");
423 		goto err_out;
424 	}
425 
426 	if (flags & IONIC_QCQ_F_INTR) {
427 		err = ionic_intr_alloc(lif, &new->intr);
428 		if (err) {
429 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
430 				    name, err);
431 			goto err_out;
432 		}
433 
434 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
435 		if (err < 0) {
436 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
437 				    name, err);
438 			goto err_out_free_intr;
439 		}
440 		new->intr.vector = err;
441 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
442 				       IONIC_INTR_MASK_SET);
443 
444 		err = ionic_request_irq(lif, new);
445 		if (err) {
446 			netdev_warn(lif->netdev, "irq request failed %d\n", err);
447 			goto err_out_free_intr;
448 		}
449 
450 		new->intr.cpu = cpumask_local_spread(new->intr.index,
451 						     dev_to_node(dev));
452 		if (new->intr.cpu != -1)
453 			cpumask_set_cpu(new->intr.cpu,
454 					&new->intr.affinity_mask);
455 	} else {
456 		new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
457 	}
458 
459 	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
460 				    GFP_KERNEL);
461 	if (!new->cq.info) {
462 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
463 		err = -ENOMEM;
464 		goto err_out_free_irq;
465 	}
466 
467 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
468 	if (err) {
469 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
470 		goto err_out_free_irq;
471 	}
472 
473 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
474 				       GFP_KERNEL);
475 	if (!new->base) {
476 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
477 		err = -ENOMEM;
478 		goto err_out_free_irq;
479 	}
480 
481 	new->total_size = total_size;
482 
483 	q_base = new->base;
484 	q_base_pa = new->base_pa;
485 
486 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
487 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
488 
489 	if (flags & IONIC_QCQ_F_SG) {
490 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
491 					PAGE_SIZE);
492 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
493 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
494 	}
495 
496 	ionic_q_map(&new->q, q_base, q_base_pa);
497 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
498 	ionic_cq_bind(&new->cq, &new->q);
499 
500 	*qcq = new;
501 
502 	return 0;
503 
504 err_out_free_irq:
505 	if (flags & IONIC_QCQ_F_INTR)
506 		devm_free_irq(dev, new->intr.vector, &new->napi);
507 err_out_free_intr:
508 	if (flags & IONIC_QCQ_F_INTR)
509 		ionic_intr_free(lif->ionic, new->intr.index);
510 err_out:
511 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
512 	return err;
513 }
514 
515 static int ionic_qcqs_alloc(struct ionic_lif *lif)
516 {
517 	struct device *dev = lif->ionic->dev;
518 	unsigned int q_list_size;
519 	unsigned int flags;
520 	int err;
521 	int i;
522 
523 	flags = IONIC_QCQ_F_INTR;
524 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
525 			      IONIC_ADMINQ_LENGTH,
526 			      sizeof(struct ionic_admin_cmd),
527 			      sizeof(struct ionic_admin_comp),
528 			      0, lif->kern_pid, &lif->adminqcq);
529 	if (err)
530 		return err;
531 	ionic_debugfs_add_qcq(lif, lif->adminqcq);
532 
533 	if (lif->ionic->nnqs_per_lif) {
534 		flags = IONIC_QCQ_F_NOTIFYQ;
535 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
536 				      flags, IONIC_NOTIFYQ_LENGTH,
537 				      sizeof(struct ionic_notifyq_cmd),
538 				      sizeof(union ionic_notifyq_comp),
539 				      0, lif->kern_pid, &lif->notifyqcq);
540 		if (err)
541 			goto err_out_free_adminqcq;
542 		ionic_debugfs_add_qcq(lif, lif->notifyqcq);
543 
544 		/* Let the notifyq ride on the adminq interrupt */
545 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
546 	}
547 
548 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
549 	err = -ENOMEM;
550 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
551 	if (!lif->txqcqs)
552 		goto err_out_free_notifyqcq;
553 	for (i = 0; i < lif->nxqs; i++) {
554 		lif->txqcqs[i].stats = devm_kzalloc(dev,
555 						    sizeof(struct ionic_q_stats),
556 						    GFP_KERNEL);
557 		if (!lif->txqcqs[i].stats)
558 			goto err_out_free_tx_stats;
559 	}
560 
561 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
562 	if (!lif->rxqcqs)
563 		goto err_out_free_tx_stats;
564 	for (i = 0; i < lif->nxqs; i++) {
565 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
566 						    sizeof(struct ionic_q_stats),
567 						    GFP_KERNEL);
568 		if (!lif->rxqcqs[i].stats)
569 			goto err_out_free_rx_stats;
570 	}
571 
572 	return 0;
573 
574 err_out_free_rx_stats:
575 	for (i = 0; i < lif->nxqs; i++)
576 		if (lif->rxqcqs[i].stats)
577 			devm_kfree(dev, lif->rxqcqs[i].stats);
578 	devm_kfree(dev, lif->rxqcqs);
579 	lif->rxqcqs = NULL;
580 err_out_free_tx_stats:
581 	for (i = 0; i < lif->nxqs; i++)
582 		if (lif->txqcqs[i].stats)
583 			devm_kfree(dev, lif->txqcqs[i].stats);
584 	devm_kfree(dev, lif->txqcqs);
585 	lif->txqcqs = NULL;
586 err_out_free_notifyqcq:
587 	if (lif->notifyqcq) {
588 		ionic_qcq_free(lif, lif->notifyqcq);
589 		lif->notifyqcq = NULL;
590 	}
591 err_out_free_adminqcq:
592 	ionic_qcq_free(lif, lif->adminqcq);
593 	lif->adminqcq = NULL;
594 
595 	return err;
596 }
597 
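/* Tx queues don't get their own interrupt: each txq reports
 * completions through the intr of its partner rxq (the queues are tied
 * together by ionic_link_qcq_interrupts() in ionic_txrx_alloc()), so
 * tx and rx for a given index share one NAPI context.
 */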
598 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
599 {
600 	struct device *dev = lif->ionic->dev;
601 	struct ionic_queue *q = &qcq->q;
602 	struct ionic_cq *cq = &qcq->cq;
603 	struct ionic_admin_ctx ctx = {
604 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
605 		.cmd.q_init = {
606 			.opcode = IONIC_CMD_Q_INIT,
607 			.lif_index = cpu_to_le16(lif->index),
608 			.type = q->type,
609 			.ver = lif->qtype_info[q->type].version,
610 			.index = cpu_to_le32(q->index),
611 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
612 					     IONIC_QINIT_F_SG),
613 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
614 			.pid = cpu_to_le16(q->pid),
615 			.ring_size = ilog2(q->num_descs),
616 			.ring_base = cpu_to_le64(q->base_pa),
617 			.cq_ring_base = cpu_to_le64(cq->base_pa),
618 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
619 		},
620 	};
621 	int err;
622 
623 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
624 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
625 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
626 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
627 	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
628 	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
629 
630 	q->tail = q->info;
631 	q->head = q->tail;
632 	cq->tail = cq->info;
633 
634 	err = ionic_adminq_post_wait(lif, &ctx);
635 	if (err)
636 		return err;
637 
638 	q->hw_type = ctx.comp.q_init.hw_type;
639 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
640 	q->dbval = IONIC_DBELL_QID(q->hw_index);
641 
642 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
643 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
644 
645 	qcq->flags |= IONIC_QCQ_F_INITED;
646 
647 	return 0;
648 }
649 
650 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
651 {
652 	struct device *dev = lif->ionic->dev;
653 	struct ionic_queue *q = &qcq->q;
654 	struct ionic_cq *cq = &qcq->cq;
655 	struct ionic_admin_ctx ctx = {
656 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
657 		.cmd.q_init = {
658 			.opcode = IONIC_CMD_Q_INIT,
659 			.lif_index = cpu_to_le16(lif->index),
660 			.type = q->type,
661 			.ver = lif->qtype_info[q->type].version,
662 			.index = cpu_to_le32(q->index),
663 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
664 					     IONIC_QINIT_F_SG),
665 			.intr_index = cpu_to_le16(cq->bound_intr->index),
666 			.pid = cpu_to_le16(q->pid),
667 			.ring_size = ilog2(q->num_descs),
668 			.ring_base = cpu_to_le64(q->base_pa),
669 			.cq_ring_base = cpu_to_le64(cq->base_pa),
670 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
671 		},
672 	};
673 	int err;
674 
675 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
676 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
677 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
678 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
679 	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
680 	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
681 
682 	q->tail = q->info;
683 	q->head = q->tail;
684 	cq->tail = cq->info;
685 
686 	err = ionic_adminq_post_wait(lif, &ctx);
687 	if (err)
688 		return err;
689 
690 	q->hw_type = ctx.comp.q_init.hw_type;
691 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
692 	q->dbval = IONIC_DBELL_QID(q->hw_index);
693 
694 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
695 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
696 
697 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
698 		       NAPI_POLL_WEIGHT);
699 
700 	qcq->flags |= IONIC_QCQ_F_INITED;
701 
702 	return 0;
703 }
704 
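/* The notifyq is driven entirely by the device, which posts events
 * with a monotonically increasing eid; seeing an eid at or below the
 * last one handled means we've caught up.  The lif pointer is
 * recovered from the cb_arg stashed in the first queue entry when the
 * queue was set up.
 */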
705 static bool ionic_notifyq_service(struct ionic_cq *cq,
706 				  struct ionic_cq_info *cq_info)
707 {
708 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
709 	struct ionic_deferred_work *work;
710 	struct net_device *netdev;
711 	struct ionic_queue *q;
712 	struct ionic_lif *lif;
713 	u64 eid;
714 
715 	q = cq->bound_q;
716 	lif = q->info[0].cb_arg;
717 	netdev = lif->netdev;
718 	eid = le64_to_cpu(comp->event.eid);
719 
720 	/* Have we run out of new completions to process? */
721 	if (eid <= lif->last_eid)
722 		return false;
723 
724 	lif->last_eid = eid;
725 
726 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
727 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
728 			 comp, sizeof(*comp), true);
729 
730 	switch (le16_to_cpu(comp->event.ecode)) {
731 	case IONIC_EVENT_LINK_CHANGE:
732 		ionic_link_status_check_request(lif);
733 		break;
734 	case IONIC_EVENT_RESET:
735 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
736 		if (!work) {
737 			netdev_err(lif->netdev, "%s OOM\n", __func__);
738 		} else {
739 			work->type = IONIC_DW_TYPE_LIF_RESET;
740 			ionic_lif_deferred_enqueue(&lif->deferred, work);
741 		}
742 		break;
743 	default:
744 		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
745 			    le16_to_cpu(comp->event.ecode), eid);
746 		break;
747 	}
748 
749 	return true;
750 }
751 
752 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
753 {
754 	struct ionic_dev *idev = &lif->ionic->idev;
755 	struct ionic_cq *cq = &lif->notifyqcq->cq;
756 	u32 work_done;
757 
758 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
759 				     NULL, NULL);
760 	if (work_done)
761 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
762 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
763 
764 	return work_done;
765 }
766 
767 static bool ionic_adminq_service(struct ionic_cq *cq,
768 				 struct ionic_cq_info *cq_info)
769 {
770 	struct ionic_admin_comp *comp = cq_info->cq_desc;
771 
772 	if (!color_match(comp->color, cq->done_color))
773 		return false;
774 
775 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
776 
777 	return true;
778 }
779 
780 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
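/* Because the notifyq rides on the adminq interrupt, the adminq NAPI
 * poller drains both queues, giving each the full budget; returning
 * the larger count keeps NAPI polling while either still has work.
 */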
781 {
782 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
783 	int n_work = 0;
784 	int a_work = 0;
785 
786 	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
787 		n_work = ionic_notifyq_clean(lif, budget);
788 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
789 
790 	return max(n_work, a_work);
791 }
792 
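/* ndo_get_stats64 is filled entirely from the firmware-maintained lif
 * info block: the counters arrive little-endian and are summed across
 * the unicast/multicast/broadcast variants here.
 */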
793 void ionic_get_stats64(struct net_device *netdev,
794 		       struct rtnl_link_stats64 *ns)
795 {
796 	struct ionic_lif *lif = netdev_priv(netdev);
797 	struct ionic_lif_stats *ls;
798 
799 	memset(ns, 0, sizeof(*ns));
800 	ls = &lif->info->stats;
801 
802 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
803 			 le64_to_cpu(ls->rx_mcast_packets) +
804 			 le64_to_cpu(ls->rx_bcast_packets);
805 
806 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
807 			 le64_to_cpu(ls->tx_mcast_packets) +
808 			 le64_to_cpu(ls->tx_bcast_packets);
809 
810 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
811 		       le64_to_cpu(ls->rx_mcast_bytes) +
812 		       le64_to_cpu(ls->rx_bcast_bytes);
813 
814 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
815 		       le64_to_cpu(ls->tx_mcast_bytes) +
816 		       le64_to_cpu(ls->tx_bcast_bytes);
817 
818 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
819 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
820 			 le64_to_cpu(ls->rx_bcast_drop_packets);
821 
822 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
823 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
824 			 le64_to_cpu(ls->tx_bcast_drop_packets);
825 
826 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
827 
828 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
829 
830 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
831 			       le64_to_cpu(ls->rx_queue_disabled) +
832 			       le64_to_cpu(ls->rx_desc_fetch_error) +
833 			       le64_to_cpu(ls->rx_desc_data_error);
834 
835 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
836 				le64_to_cpu(ls->tx_queue_disabled) +
837 				le64_to_cpu(ls->tx_desc_fetch_error) +
838 				le64_to_cpu(ls->tx_desc_data_error);
839 
840 	ns->rx_errors = ns->rx_over_errors +
841 			ns->rx_missed_errors;
842 
843 	ns->tx_errors = ns->tx_aborted_errors;
844 }
845 
846 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
847 {
848 	struct ionic_admin_ctx ctx = {
849 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
850 		.cmd.rx_filter_add = {
851 			.opcode = IONIC_CMD_RX_FILTER_ADD,
852 			.lif_index = cpu_to_le16(lif->index),
853 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
854 		},
855 	};
856 	struct ionic_rx_filter *f;
857 	int err;
858 
859 	/* don't bother if we already have it */
860 	spin_lock_bh(&lif->rx_filters.lock);
861 	f = ionic_rx_filter_by_addr(lif, addr);
862 	spin_unlock_bh(&lif->rx_filters.lock);
863 	if (f)
864 		return 0;
865 
866 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
867 	err = ionic_adminq_post_wait(lif, &ctx);
868 	if (err && err != -EEXIST)
869 		return err;
870 
871 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
872 		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));
873 
874 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
875 }
876 
877 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
878 {
879 	struct ionic_admin_ctx ctx = {
880 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
881 		.cmd.rx_filter_del = {
882 			.opcode = IONIC_CMD_RX_FILTER_DEL,
883 			.lif_index = cpu_to_le16(lif->index),
884 		},
885 	};
886 	struct ionic_rx_filter *f;
887 	int err;
888 
889 	spin_lock_bh(&lif->rx_filters.lock);
890 	f = ionic_rx_filter_by_addr(lif, addr);
891 	if (!f) {
892 		spin_unlock_bh(&lif->rx_filters.lock);
893 		return -ENOENT;
894 	}
895 
896 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
897 	ionic_rx_filter_free(lif, f);
898 	spin_unlock_bh(&lif->rx_filters.lock);
899 
900 	err = ionic_adminq_post_wait(lif, &ctx);
901 	if (err && err != -EEXIST)
902 		return err;
903 
904 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
905 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
906 
907 	return 0;
908 }
909 
910 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
911 {
912 	struct ionic *ionic = lif->ionic;
913 	struct ionic_deferred_work *work;
914 	unsigned int nmfilters;
915 	unsigned int nufilters;
916 
917 	if (add) {
918 		/* Do we have space for this filter?  We test the counters
919 		 * here before checking the need for deferral so that we
920 		 * can return an overflow error to the stack.
921 		 */
922 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
923 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
924 
925 		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
926 			lif->nmcast++;
927 		else if (!is_multicast_ether_addr(addr) &&
928 			 lif->nucast < nufilters)
929 			lif->nucast++;
930 		else
931 			return -ENOSPC;
932 	} else {
933 		if (is_multicast_ether_addr(addr) && lif->nmcast)
934 			lif->nmcast--;
935 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
936 			lif->nucast--;
937 	}
938 
939 	if (in_interrupt()) {
940 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
941 		if (!work) {
942 			netdev_err(lif->netdev, "%s OOM\n", __func__);
943 			return -ENOMEM;
944 		}
945 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
946 				   IONIC_DW_TYPE_RX_ADDR_DEL;
947 		memcpy(work->addr, addr, ETH_ALEN);
948 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
949 			   add ? "add" : "del", addr);
950 		ionic_lif_deferred_enqueue(&lif->deferred, work);
951 	} else {
952 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
953 			   add ? "add" : "del", addr);
954 		if (add)
955 			return ionic_lif_addr_add(lif, addr);
956 		else
957 			return ionic_lif_addr_del(lif, addr);
958 	}
959 
960 	return 0;
961 }
962 
963 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
964 {
965 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
966 }
967 
968 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
969 {
970 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
971 }
972 
973 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
974 {
975 	struct ionic_admin_ctx ctx = {
976 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
977 		.cmd.rx_mode_set = {
978 			.opcode = IONIC_CMD_RX_MODE_SET,
979 			.lif_index = cpu_to_le16(lif->index),
980 			.rx_mode = cpu_to_le16(rx_mode),
981 		},
982 	};
983 	char buf[128];
984 	int err;
985 	int i;
986 #define REMAIN(__x) (sizeof(buf) - (__x))
987 
988 	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
989 		      lif->rx_mode, rx_mode);
990 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
991 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
992 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
993 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
994 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
995 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
996 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
997 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
998 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
999 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1000 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1001 
1002 	err = ionic_adminq_post_wait(lif, &ctx);
1003 	if (err)
1004 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1005 			    rx_mode, err);
1006 	else
1007 		lif->rx_mode = rx_mode;
1008 }
1009 
1010 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1011 {
1012 	struct ionic_deferred_work *work;
1013 
1014 	if (in_interrupt()) {
1015 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
1016 		if (!work) {
1017 			netdev_err(lif->netdev, "%s OOM\n", __func__);
1018 			return;
1019 		}
1020 		work->type = IONIC_DW_TYPE_RX_MODE;
1021 		work->rx_mode = rx_mode;
1022 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1023 		ionic_lif_deferred_enqueue(&lif->deferred, work);
1024 	} else {
1025 		ionic_lif_rx_mode(lif, rx_mode);
1026 	}
1027 }
1028 
1029 static void ionic_set_rx_mode(struct net_device *netdev)
1030 {
1031 	struct ionic_lif *lif = netdev_priv(netdev);
1032 	struct ionic_identity *ident;
1033 	unsigned int nfilters;
1034 	unsigned int rx_mode;
1035 
1036 	ident = &lif->ionic->ident;
1037 
1038 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1039 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1040 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1041 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1042 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1043 
1044 	/* sync the unicast addresses, then check for an overflow state:
1045 	 * if we overflowed the available filters (the "+ 1" below leaves
1046 	 * room for the port's own MAC filter), note it and enable NIC
1047 	 * PROMISC; else if overflow was set but is no longer needed,
1048 	 * clear our overflow flag and check the netdev flags to see
1049 	 * whether NIC PROMISC can be disabled.
1050 	 */
1051 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1052 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1053 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1054 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1055 		lif->uc_overflow = true;
1056 	} else if (lif->uc_overflow) {
1057 		lif->uc_overflow = false;
1058 		if (!(netdev->flags & IFF_PROMISC))
1059 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1060 	}
1061 
1062 	/* same for multicast */
1063 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1064 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1065 	if (netdev_mc_count(netdev) > nfilters) {
1066 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1067 		lif->mc_overflow = true;
1068 	} else if (lif->mc_overflow) {
1069 		lif->mc_overflow = false;
1070 		if (!(netdev->flags & IFF_ALLMULTI))
1071 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1072 	}
1073 
1074 	if (lif->rx_mode != rx_mode)
1075 		_ionic_lif_rx_mode(lif, rx_mode);
1076 }
1077 
1078 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1079 {
1080 	u64 wanted = 0;
1081 
1082 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1083 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1084 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1085 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1086 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1087 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1088 	if (features & NETIF_F_RXHASH)
1089 		wanted |= IONIC_ETH_HW_RX_HASH;
1090 	if (features & NETIF_F_RXCSUM)
1091 		wanted |= IONIC_ETH_HW_RX_CSUM;
1092 	if (features & NETIF_F_SG)
1093 		wanted |= IONIC_ETH_HW_TX_SG;
1094 	if (features & NETIF_F_HW_CSUM)
1095 		wanted |= IONIC_ETH_HW_TX_CSUM;
1096 	if (features & NETIF_F_TSO)
1097 		wanted |= IONIC_ETH_HW_TSO;
1098 	if (features & NETIF_F_TSO6)
1099 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1100 	if (features & NETIF_F_TSO_ECN)
1101 		wanted |= IONIC_ETH_HW_TSO_ECN;
1102 	if (features & NETIF_F_GSO_GRE)
1103 		wanted |= IONIC_ETH_HW_TSO_GRE;
1104 	if (features & NETIF_F_GSO_GRE_CSUM)
1105 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1106 	if (features & NETIF_F_GSO_IPXIP4)
1107 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1108 	if (features & NETIF_F_GSO_IPXIP6)
1109 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1110 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1111 		wanted |= IONIC_ETH_HW_TSO_UDP;
1112 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1113 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1114 
1115 	return cpu_to_le64(wanted);
1116 }
1117 
1118 static int ionic_set_nic_features(struct ionic_lif *lif,
1119 				  netdev_features_t features)
1120 {
1121 	struct device *dev = lif->ionic->dev;
1122 	struct ionic_admin_ctx ctx = {
1123 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1124 		.cmd.lif_setattr = {
1125 			.opcode = IONIC_CMD_LIF_SETATTR,
1126 			.index = cpu_to_le16(lif->index),
1127 			.attr = IONIC_LIF_ATTR_FEATURES,
1128 		},
1129 	};
1130 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1131 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1132 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1133 	u64 old_hw_features;
1134 	int err;
1135 
1136 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1137 	err = ionic_adminq_post_wait(lif, &ctx);
1138 	if (err)
1139 		return err;
1140 
1141 	old_hw_features = lif->hw_features;
1142 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1143 				       ctx.comp.lif_setattr.features);
1144 
1145 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1146 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1147 
1148 	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
1149 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1150 		dev_info_once(lif->ionic->dev, "NIC does not support VLAN offload, likely in SmartNIC mode\n");
1151 
1152 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1153 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1154 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1155 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1156 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1157 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1158 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1159 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1160 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1161 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1162 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1163 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1164 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1165 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1166 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1167 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1168 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1169 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1170 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1171 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1172 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1173 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1174 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1175 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1176 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1177 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1178 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1179 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1180 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1181 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1182 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1183 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1184 
1185 	return 0;
1186 }
1187 
1188 static int ionic_init_nic_features(struct ionic_lif *lif)
1189 {
1190 	struct net_device *netdev = lif->netdev;
1191 	netdev_features_t features;
1192 	int err;
1193 
1194 	/* set up what we expect to support by default */
1195 	features = NETIF_F_HW_VLAN_CTAG_TX |
1196 		   NETIF_F_HW_VLAN_CTAG_RX |
1197 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1198 		   NETIF_F_RXHASH |
1199 		   NETIF_F_SG |
1200 		   NETIF_F_HW_CSUM |
1201 		   NETIF_F_RXCSUM |
1202 		   NETIF_F_TSO |
1203 		   NETIF_F_TSO6 |
1204 		   NETIF_F_TSO_ECN;
1205 
1206 	err = ionic_set_nic_features(lif, features);
1207 	if (err)
1208 		return err;
1209 
1210 	/* tell the netdev what we actually can support */
1211 	netdev->features |= NETIF_F_HIGHDMA;
1212 
1213 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1214 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1215 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1216 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1217 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1218 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1219 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1220 		netdev->hw_features |= NETIF_F_RXHASH;
1221 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1222 		netdev->hw_features |= NETIF_F_SG;
1223 
1224 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1225 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1226 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1227 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1228 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1229 		netdev->hw_enc_features |= NETIF_F_TSO;
1230 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1231 		netdev->hw_enc_features |= NETIF_F_TSO6;
1232 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1233 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1234 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1235 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1236 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1237 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1238 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1239 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1240 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1241 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1242 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1243 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1244 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1245 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1246 
1247 	netdev->hw_features |= netdev->hw_enc_features;
1248 	netdev->features |= netdev->hw_features;
1249 	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1250 
1251 	netdev->priv_flags |= IFF_UNICAST_FLT |
1252 			      IFF_LIVE_ADDR_CHANGE;
1253 
1254 	return 0;
1255 }
1256 
1257 static int ionic_set_features(struct net_device *netdev,
1258 			      netdev_features_t features)
1259 {
1260 	struct ionic_lif *lif = netdev_priv(netdev);
1261 	int err;
1262 
1263 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1264 		   __func__, (u64)lif->netdev->features, (u64)features);
1265 
1266 	err = ionic_set_nic_features(lif, features);
1267 
1268 	return err;
1269 }
1270 
1271 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1272 {
1273 	struct sockaddr *addr = sa;
1274 	u8 *mac;
1275 	int err;
1276 
1277 	mac = (u8 *)addr->sa_data;
1278 	if (ether_addr_equal(netdev->dev_addr, mac))
1279 		return 0;
1280 
1281 	err = eth_prepare_mac_addr_change(netdev, addr);
1282 	if (err)
1283 		return err;
1284 
1285 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1286 		netdev_info(netdev, "deleting mac addr %pM\n",
1287 			    netdev->dev_addr);
1288 		ionic_addr_del(netdev, netdev->dev_addr);
1289 	}
1290 
1291 	eth_commit_mac_addr_change(netdev, addr);
1292 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1293 
1294 	return ionic_addr_add(netdev, mac);
1295 }
1296 
1297 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1298 {
1299 	struct ionic_lif *lif = netdev_priv(netdev);
1300 	struct ionic_admin_ctx ctx = {
1301 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1302 		.cmd.lif_setattr = {
1303 			.opcode = IONIC_CMD_LIF_SETATTR,
1304 			.index = cpu_to_le16(lif->index),
1305 			.attr = IONIC_LIF_ATTR_MTU,
1306 			.mtu = cpu_to_le32(new_mtu),
1307 		},
1308 	};
1309 	int err;
1310 
1311 	err = ionic_adminq_post_wait(lif, &ctx);
1312 	if (err)
1313 		return err;
1314 
1315 	netdev->mtu = new_mtu;
1316 	err = ionic_reset_queues(lif, NULL, NULL);
1317 
1318 	return err;
1319 }
1320 
1321 static void ionic_tx_timeout_work(struct work_struct *ws)
1322 {
1323 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1324 
1325 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1326 
1327 	rtnl_lock();
1328 	ionic_reset_queues(lif, NULL, NULL);
1329 	rtnl_unlock();
1330 }
1331 
1332 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1333 {
1334 	struct ionic_lif *lif = netdev_priv(netdev);
1335 
1336 	schedule_work(&lif->tx_timeout_work);
1337 }
1338 
1339 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1340 				 u16 vid)
1341 {
1342 	struct ionic_lif *lif = netdev_priv(netdev);
1343 	struct ionic_admin_ctx ctx = {
1344 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1345 		.cmd.rx_filter_add = {
1346 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1347 			.lif_index = cpu_to_le16(lif->index),
1348 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1349 			.vlan.vlan = cpu_to_le16(vid),
1350 		},
1351 	};
1352 	int err;
1353 
1354 	err = ionic_adminq_post_wait(lif, &ctx);
1355 	if (err)
1356 		return err;
1357 
1358 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1359 		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));
1360 
1361 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1362 }
1363 
1364 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1365 				  u16 vid)
1366 {
1367 	struct ionic_lif *lif = netdev_priv(netdev);
1368 	struct ionic_admin_ctx ctx = {
1369 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1370 		.cmd.rx_filter_del = {
1371 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1372 			.lif_index = cpu_to_le16(lif->index),
1373 		},
1374 	};
1375 	struct ionic_rx_filter *f;
1376 
1377 	spin_lock_bh(&lif->rx_filters.lock);
1378 
1379 	f = ionic_rx_filter_by_vlan(lif, vid);
1380 	if (!f) {
1381 		spin_unlock_bh(&lif->rx_filters.lock);
1382 		return -ENOENT;
1383 	}
1384 
1385 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1386 	ionic_rx_filter_free(lif, f);
1387 	spin_unlock_bh(&lif->rx_filters.lock);
1388 
1389 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1390 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1391 
1392 	return ionic_adminq_post_wait(lif, &ctx);
1393 }
1394 
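/* Push RSS settings to the device.  The hash types are honored only
 * while the RX_HASH feature is enabled; key and indir are optional,
 * with NULL meaning "keep the current value".
 */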
1395 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1396 			 const u8 *key, const u32 *indir)
1397 {
1398 	struct ionic_admin_ctx ctx = {
1399 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1400 		.cmd.lif_setattr = {
1401 			.opcode = IONIC_CMD_LIF_SETATTR,
1402 			.attr = IONIC_LIF_ATTR_RSS,
1403 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1404 		},
1405 	};
1406 	unsigned int i, tbl_sz;
1407 
1408 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1409 		lif->rss_types = types;
1410 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1411 	}
1412 
1413 	if (key)
1414 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1415 
1416 	if (indir) {
1417 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1418 		for (i = 0; i < tbl_sz; i++)
1419 			lif->rss_ind_tbl[i] = indir[i];
1420 	}
1421 
1422 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1423 	       IONIC_RSS_HASH_KEY_SIZE);
1424 
1425 	return ionic_adminq_post_wait(lif, &ctx);
1426 }
1427 
1428 static int ionic_lif_rss_init(struct ionic_lif *lif)
1429 {
1430 	unsigned int tbl_sz;
1431 	unsigned int i;
1432 
1433 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1434 			 IONIC_RSS_TYPE_IPV4_TCP |
1435 			 IONIC_RSS_TYPE_IPV4_UDP |
1436 			 IONIC_RSS_TYPE_IPV6     |
1437 			 IONIC_RSS_TYPE_IPV6_TCP |
1438 			 IONIC_RSS_TYPE_IPV6_UDP;
1439 
1440 	/* Fill indirection table with 'default' values */
1441 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1442 	for (i = 0; i < tbl_sz; i++)
1443 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1444 
1445 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1446 }
1447 
1448 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1449 {
1450 	int tbl_sz;
1451 
1452 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1453 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1454 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1455 
1456 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1457 }
1458 
1459 static void ionic_txrx_disable(struct ionic_lif *lif)
1460 {
1461 	unsigned int i;
1462 	int err;
1463 
1464 	if (lif->txqcqs) {
1465 		for (i = 0; i < lif->nxqs; i++) {
1466 			err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1467 			if (err == -ETIMEDOUT)
1468 				break;
1469 		}
1470 	}
1471 
1472 	if (lif->rxqcqs) {
1473 		for (i = 0; i < lif->nxqs; i++) {
1474 			err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1475 			if (err == -ETIMEDOUT)
1476 				break;
1477 		}
1478 	}
1479 }
1480 
1481 static void ionic_txrx_deinit(struct ionic_lif *lif)
1482 {
1483 	unsigned int i;
1484 
1485 	if (lif->txqcqs) {
1486 		for (i = 0; i < lif->nxqs; i++) {
1487 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1488 			ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1489 			ionic_tx_empty(&lif->txqcqs[i].qcq->q);
1490 		}
1491 	}
1492 
1493 	if (lif->rxqcqs) {
1494 		for (i = 0; i < lif->nxqs; i++) {
1495 			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1496 			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1497 			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1498 		}
1499 	}
1500 	lif->rx_mode = 0;
1501 }
1502 
1503 static void ionic_txrx_free(struct ionic_lif *lif)
1504 {
1505 	unsigned int i;
1506 
1507 	if (lif->txqcqs) {
1508 		for (i = 0; i < lif->nxqs; i++) {
1509 			ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1510 			lif->txqcqs[i].qcq = NULL;
1511 		}
1512 	}
1513 
1514 	if (lif->rxqcqs) {
1515 		for (i = 0; i < lif->nxqs; i++) {
1516 			ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1517 			lif->rxqcqs[i].qcq = NULL;
1518 		}
1519 	}
1520 }
1521 
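/* Choose the Tx SG descriptor size based on what the device reported
 * during queue identify: a version 1 txq uses the larger v1 SG
 * descriptor, otherwise we fall back to the base format.
 */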
1522 static int ionic_txrx_alloc(struct ionic_lif *lif)
1523 {
1524 	unsigned int sg_desc_sz;
1525 	unsigned int flags;
1526 	unsigned int i;
1527 	int err = 0;
1528 
1529 	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1530 	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1531 					  sizeof(struct ionic_txq_sg_desc_v1))
1532 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1533 	else
1534 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1535 
1536 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1537 	for (i = 0; i < lif->nxqs; i++) {
1538 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1539 				      lif->ntxq_descs,
1540 				      sizeof(struct ionic_txq_desc),
1541 				      sizeof(struct ionic_txq_comp),
1542 				      sg_desc_sz,
1543 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1544 		if (err)
1545 			goto err_out;
1546 
1547 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1548 		ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
1549 	}
1550 
1551 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1552 	for (i = 0; i < lif->nxqs; i++) {
1553 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1554 				      lif->nrxq_descs,
1555 				      sizeof(struct ionic_rxq_desc),
1556 				      sizeof(struct ionic_rxq_comp),
1557 				      sizeof(struct ionic_rxq_sg_desc),
1558 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1559 		if (err)
1560 			goto err_out;
1561 
1562 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1563 
1564 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1565 				     lif->rxqcqs[i].qcq->intr.index,
1566 				     lif->rx_coalesce_hw);
1567 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1568 					  lif->txqcqs[i].qcq);
1569 		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
1570 	}
1571 
1572 	return 0;
1573 
1574 err_out:
1575 	ionic_txrx_free(lif);
1576 
1577 	return err;
1578 }
1579 
1580 static int ionic_txrx_init(struct ionic_lif *lif)
1581 {
1582 	unsigned int i;
1583 	int err;
1584 
1585 	for (i = 0; i < lif->nxqs; i++) {
1586 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1587 		if (err)
1588 			goto err_out;
1589 
1590 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1591 		if (err) {
1592 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1593 			goto err_out;
1594 		}
1595 	}
1596 
1597 	if (lif->netdev->features & NETIF_F_RXHASH)
1598 		ionic_lif_rss_init(lif);
1599 
1600 	ionic_set_rx_mode(lif->netdev);
1601 
1602 	return 0;
1603 
1604 err_out:
1605 	while (i--) {
1606 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1607 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1608 	}
1609 
1610 	return err;
1611 }
1612 
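/* Bring-up order: each rx ring is filled with buffers and enabled
 * before its partner txq is enabled.  On a txq failure the matching
 * rxq is disabled again, unless the device timed out, in which case
 * posting more commands is pointless.
 */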
1613 static int ionic_txrx_enable(struct ionic_lif *lif)
1614 {
1615 	int i, err;
1616 
1617 	for (i = 0; i < lif->nxqs; i++) {
1618 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1619 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1620 		if (err)
1621 			goto err_out;
1622 
1623 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1624 		if (err) {
1625 			if (err != -ETIMEDOUT)
1626 				ionic_qcq_disable(lif->rxqcqs[i].qcq);
1627 			goto err_out;
1628 		}
1629 	}
1630 
1631 	return 0;
1632 
1633 err_out:
1634 	while (i--) {
1635 		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1636 		if (err == -ETIMEDOUT)
1637 			break;
1638 		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1639 		if (err == -ETIMEDOUT)
1640 			break;
1641 	}
1642 
1643 	return err;
1644 }
1645 
1646 static int ionic_start_queues(struct ionic_lif *lif)
1647 {
1648 	int err;
1649 
1650 	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1651 		return 0;
1652 
1653 	err = ionic_txrx_enable(lif);
1654 	if (err) {
1655 		clear_bit(IONIC_LIF_F_UP, lif->state);
1656 		return err;
1657 	}
1658 	netif_tx_wake_all_queues(lif->netdev);
1659 
1660 	return 0;
1661 }
1662 
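/* ndo_open: allocate and initialize the queue pairs, but don't enable
 * them until link is up; the link check path calls
 * ionic_start_queues() once carrier is seen.
 */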
1663 int ionic_open(struct net_device *netdev)
1664 {
1665 	struct ionic_lif *lif = netdev_priv(netdev);
1666 	int err;
1667 
1668 	err = ionic_txrx_alloc(lif);
1669 	if (err)
1670 		return err;
1671 
1672 	err = ionic_txrx_init(lif);
1673 	if (err)
1674 		goto err_out;
1675 
1676 	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
1677 	if (err)
1678 		goto err_txrx_deinit;
1679 
1680 	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
1681 	if (err)
1682 		goto err_txrx_deinit;
1683 
1684 	/* don't start the queues until we have link */
1685 	if (netif_carrier_ok(netdev)) {
1686 		err = ionic_start_queues(lif);
1687 		if (err)
1688 			goto err_txrx_deinit;
1689 	}
1690 
1691 	return 0;
1692 
1693 err_txrx_deinit:
1694 	ionic_txrx_deinit(lif);
1695 err_out:
1696 	ionic_txrx_free(lif);
1697 	return err;
1698 }
1699 
1700 static void ionic_stop_queues(struct ionic_lif *lif)
1701 {
1702 	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1703 		return;
1704 
1705 	netif_tx_disable(lif->netdev);
1706 	ionic_txrx_disable(lif);
1707 }
1708 
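/* ndo_stop: if the firmware is mid-reset, queue teardown is left to
 * the fw_down handling, so there's nothing to do here.
 */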
1709 int ionic_stop(struct net_device *netdev)
1710 {
1711 	struct ionic_lif *lif = netdev_priv(netdev);
1712 
1713 	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1714 		return 0;
1715 
1716 	ionic_stop_queues(lif);
1717 	ionic_txrx_deinit(lif);
1718 	ionic_txrx_free(lif);
1719 
1720 	return 0;
1721 }
1722 
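/* VF ndo handlers: vf_op_lock is taken for read by the getters and
 * for write by the setters, protecting the vfs[] array from a
 * concurrent SR-IOV enable/disable changing pci_num_vf() under us.
 */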
1723 static int ionic_get_vf_config(struct net_device *netdev,
1724 			       int vf, struct ifla_vf_info *ivf)
1725 {
1726 	struct ionic_lif *lif = netdev_priv(netdev);
1727 	struct ionic *ionic = lif->ionic;
1728 	int ret = 0;
1729 
1730 	if (!netif_device_present(netdev))
1731 		return -EBUSY;
1732 
1733 	down_read(&ionic->vf_op_lock);
1734 
1735 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1736 		ret = -EINVAL;
1737 	} else {
1738 		ivf->vf           = vf;
1739 		ivf->vlan         = ionic->vfs[vf].vlanid;
1740 		ivf->qos	  = 0;
1741 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1742 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1743 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1744 		ivf->trusted      = ionic->vfs[vf].trusted;
1745 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1746 	}
1747 
1748 	up_read(&ionic->vf_op_lock);
1749 	return ret;
1750 }
1751 
1752 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1753 			      struct ifla_vf_stats *vf_stats)
1754 {
1755 	struct ionic_lif *lif = netdev_priv(netdev);
1756 	struct ionic *ionic = lif->ionic;
1757 	struct ionic_lif_stats *vs;
1758 	int ret = 0;
1759 
1760 	if (!netif_device_present(netdev))
1761 		return -EBUSY;
1762 
1763 	down_read(&ionic->vf_op_lock);
1764 
1765 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1766 		ret = -EINVAL;
1767 	} else {
1768 		memset(vf_stats, 0, sizeof(*vf_stats));
1769 		vs = &ionic->vfs[vf].stats;
1770 
1771 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1772 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1773 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1774 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1775 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1776 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1777 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1778 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1779 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1780 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1781 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1782 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1783 	}
1784 
1785 	up_read(&ionic->vf_op_lock);
1786 	return ret;
1787 }
1788 
1789 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1790 {
1791 	struct ionic_lif *lif = netdev_priv(netdev);
1792 	struct ionic *ionic = lif->ionic;
1793 	int ret;
1794 
1795 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1796 		return -EINVAL;
1797 
1798 	if (!netif_device_present(netdev))
1799 		return -EBUSY;
1800 
1801 	down_write(&ionic->vf_op_lock);
1802 
1803 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1804 		ret = -EINVAL;
1805 	} else {
1806 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1807 		if (!ret)
1808 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1809 	}
1810 
1811 	up_write(&ionic->vf_op_lock);
1812 	return ret;
1813 }
1814 
1815 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1816 			     u8 qos, __be16 proto)
1817 {
1818 	struct ionic_lif *lif = netdev_priv(netdev);
1819 	struct ionic *ionic = lif->ionic;
1820 	int ret;
1821 
1822 	/* QoS is not yet supported */
1823 	if (qos)
1824 		return -EINVAL;
1825 
1826 	if (vlan > 4095)
1827 		return -EINVAL;
1828 
1829 	if (proto != htons(ETH_P_8021Q))
1830 		return -EPROTONOSUPPORT;
1831 
1832 	if (!netif_device_present(netdev))
1833 		return -EBUSY;
1834 
1835 	down_write(&ionic->vf_op_lock);
1836 
1837 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1838 		ret = -EINVAL;
1839 	} else {
1840 		ret = ionic_set_vf_config(ionic, vf,
1841 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1842 		if (!ret)
1843 			ionic->vfs[vf].vlanid = vlan;
1844 	}
1845 
1846 	up_write(&ionic->vf_op_lock);
1847 	return ret;
1848 }
1849 
1850 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1851 			     int tx_min, int tx_max)
1852 {
1853 	struct ionic_lif *lif = netdev_priv(netdev);
1854 	struct ionic *ionic = lif->ionic;
1855 	int ret;
1856 
1857 	/* a minimum tx rate is not supported */
1858 	if (tx_min)
1859 		return -EINVAL;
1860 
1861 	if (!netif_device_present(netdev))
1862 		return -EBUSY;
1863 
1864 	down_write(&ionic->vf_op_lock);
1865 
1866 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1867 		ret = -EINVAL;
1868 	} else {
1869 		ret = ionic_set_vf_config(ionic, vf,
1870 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1871 		if (!ret)
1872 			lif->ionic->vfs[vf].maxrate = tx_max;
1873 	}
1874 
1875 	up_write(&ionic->vf_op_lock);
1876 	return ret;
1877 }
1878 
1879 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1880 {
1881 	struct ionic_lif *lif = netdev_priv(netdev);
1882 	struct ionic *ionic = lif->ionic;
1883 	u8 data = set;  /* convert to u8 for config */
1884 	int ret;
1885 
1886 	if (!netif_device_present(netdev))
1887 		return -EBUSY;
1888 
1889 	down_write(&ionic->vf_op_lock);
1890 
1891 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1892 		ret = -EINVAL;
1893 	} else {
1894 		ret = ionic_set_vf_config(ionic, vf,
1895 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1896 		if (!ret)
1897 			ionic->vfs[vf].spoofchk = data;
1898 	}
1899 
1900 	up_write(&ionic->vf_op_lock);
1901 	return ret;
1902 }
1903 
1904 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1905 {
1906 	struct ionic_lif *lif = netdev_priv(netdev);
1907 	struct ionic *ionic = lif->ionic;
1908 	u8 data = set;  /* convert to u8 for config */
1909 	int ret;
1910 
1911 	if (!netif_device_present(netdev))
1912 		return -EBUSY;
1913 
1914 	down_write(&ionic->vf_op_lock);
1915 
1916 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1917 		ret = -EINVAL;
1918 	} else {
1919 		ret = ionic_set_vf_config(ionic, vf,
1920 					  IONIC_VF_ATTR_TRUST, &data);
1921 		if (!ret)
1922 			ionic->vfs[vf].trusted = data;
1923 	}
1924 
1925 	up_write(&ionic->vf_op_lock);
1926 	return ret;
1927 }
1928 
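/* Translate the IFLA_VF_LINK_STATE_* request into the device's
 * IONIC_VF_LINK_STATUS_* encoding before handing it to the firmware.
 */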
1929 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1930 {
1931 	struct ionic_lif *lif = netdev_priv(netdev);
1932 	struct ionic *ionic = lif->ionic;
1933 	u8 data;
1934 	int ret;
1935 
1936 	switch (set) {
1937 	case IFLA_VF_LINK_STATE_ENABLE:
1938 		data = IONIC_VF_LINK_STATUS_UP;
1939 		break;
1940 	case IFLA_VF_LINK_STATE_DISABLE:
1941 		data = IONIC_VF_LINK_STATUS_DOWN;
1942 		break;
1943 	case IFLA_VF_LINK_STATE_AUTO:
1944 		data = IONIC_VF_LINK_STATUS_AUTO;
1945 		break;
1946 	default:
1947 		return -EINVAL;
1948 	}
1949 
1950 	if (!netif_device_present(netdev))
1951 		return -EBUSY;
1952 
1953 	down_write(&ionic->vf_op_lock);
1954 
1955 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1956 		ret = -EINVAL;
1957 	} else {
1958 		ret = ionic_set_vf_config(ionic, vf,
1959 					  IONIC_VF_ATTR_LINKSTATE, &data);
1960 		if (!ret)
1961 			ionic->vfs[vf].linkstate = set;
1962 	}
1963 
1964 	up_write(&ionic->vf_op_lock);
1965 	return ret;
1966 }
1967 
1968 static const struct net_device_ops ionic_netdev_ops = {
1969 	.ndo_open               = ionic_open,
1970 	.ndo_stop               = ionic_stop,
1971 	.ndo_start_xmit		= ionic_start_xmit,
1972 	.ndo_get_stats64	= ionic_get_stats64,
1973 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1974 	.ndo_set_features	= ionic_set_features,
1975 	.ndo_set_mac_address	= ionic_set_mac_address,
1976 	.ndo_validate_addr	= eth_validate_addr,
1977 	.ndo_tx_timeout         = ionic_tx_timeout,
1978 	.ndo_change_mtu         = ionic_change_mtu,
1979 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1980 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1981 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1982 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1983 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1984 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1985 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1986 	.ndo_get_vf_config	= ionic_get_vf_config,
1987 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1988 	.ndo_get_vf_stats       = ionic_get_vf_stats,
1989 };
1990 
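/* Quiesce the data path, run the caller's callback while the queues are
 * down, then restart them.  The IONIC_LIF_F_QUEUE_RESET bit serializes
 * concurrent reset requests.
 */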
1991 int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
1992 {
1993 	bool running;
1994 	int err = 0;
1995 
1996 	err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1997 	if (err)
1998 		return err;
1999 
2000 	running = netif_running(lif->netdev);
2001 	if (running) {
2002 		netif_device_detach(lif->netdev);
2003 		err = ionic_stop(lif->netdev);
2004 		if (err)
2005 			goto reset_out;
2006 	}
2007 
2008 	if (cb)
2009 		cb(lif, arg);
2010 
2011 	if (running) {
2012 		err = ionic_open(lif->netdev);
2013 		netif_device_attach(lif->netdev);
2014 	}
2015 
2016 reset_out:
2017 	clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
2018 
2019 	return err;
2020 }
2021 
2022 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
2023 {
2024 	struct device *dev = ionic->dev;
2025 	struct net_device *netdev;
2026 	struct ionic_lif *lif;
2027 	int tbl_sz;
2028 	int err;
2029 
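	/* tx and rx queue counts are sized identically by ionic_lifs_size(),
	 * so ntxqs_per_lif serves as both subqueue counts here
	 */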
2030 	netdev = alloc_etherdev_mqs(sizeof(*lif),
2031 				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2032 	if (!netdev) {
2033 		dev_err(dev, "Cannot allocate netdev, aborting\n");
2034 		return ERR_PTR(-ENOMEM);
2035 	}
2036 
2037 	SET_NETDEV_DEV(netdev, dev);
2038 
2039 	lif = netdev_priv(netdev);
2040 	lif->netdev = netdev;
2041 	ionic->master_lif = lif;
2042 	netdev->netdev_ops = &ionic_netdev_ops;
2043 	ionic_ethtool_set_ops(netdev);
2044 
2045 	netdev->watchdog_timeo = 2 * HZ;
2046 	netif_carrier_off(netdev);
2047 
2048 	netdev->min_mtu = IONIC_MIN_MTU;
2049 	netdev->max_mtu = IONIC_MAX_MTU;
2050 
2051 	lif->neqs = ionic->neqs_per_lif;
2052 	lif->nxqs = ionic->ntxqs_per_lif;
2053 
2054 	lif->ionic = ionic;
2055 	lif->index = index;
2056 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2057 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2058 
2059 	/* Convert the default coalesce value to actual hw resolution */
2060 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2061 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2062 						    lif->rx_coalesce_usecs);
2063 
2064 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
2065 
2066 	spin_lock_init(&lif->adminq_lock);
2067 
2068 	spin_lock_init(&lif->deferred.lock);
2069 	INIT_LIST_HEAD(&lif->deferred.list);
2070 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2071 
2072 	/* allocate lif info */
2073 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2074 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
2075 				       &lif->info_pa, GFP_KERNEL);
2076 	if (!lif->info) {
2077 		dev_err(dev, "Failed to allocate lif info, aborting\n");
2078 		err = -ENOMEM;
2079 		goto err_out_free_netdev;
2080 	}
2081 
2082 	ionic_debugfs_add_lif(lif);
2083 
2084 	/* allocate queues */
2085 	err = ionic_qcqs_alloc(lif);
2086 	if (err)
2087 		goto err_out_free_lif_info;
2088 
2089 	/* allocate rss indirection table */
2090 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2091 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2092 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2093 					      &lif->rss_ind_tbl_pa,
2094 					      GFP_KERNEL);
2095 
2096 	if (!lif->rss_ind_tbl) {
2097 		err = -ENOMEM;
2098 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2099 		goto err_out_free_qcqs;
2100 	}
2101 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2102 
2103 	list_add_tail(&lif->list, &ionic->lifs);
2104 
2105 	return lif;
2106 
2107 err_out_free_qcqs:
2108 	ionic_qcqs_free(lif);
2109 err_out_free_lif_info:
2110 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2111 	lif->info = NULL;
2112 	lif->info_pa = 0;
2113 err_out_free_netdev:
2114 	free_netdev(lif->netdev);
2115 	lif = NULL;
2116 
2117 	return ERR_PTR(err);
2118 }
2119 
2120 int ionic_lifs_alloc(struct ionic *ionic)
2121 {
2122 	struct ionic_lif *lif;
2123 
2124 	INIT_LIST_HEAD(&ionic->lifs);
2125 
2126 	/* only build the first lif, others are for later features */
2127 	set_bit(0, ionic->lifbits);
2128 
2129 	lif = ionic_lif_alloc(ionic, 0);
2130 	if (IS_ERR_OR_NULL(lif)) {
2131 		clear_bit(0, ionic->lifbits);
2132 		return -ENOMEM;
2133 	}
2134 
2135 	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2136 	ionic_lif_queue_identify(lif);
2137 
2138 	return 0;
2139 }
2140 
2141 static void ionic_lif_reset(struct ionic_lif *lif)
2142 {
2143 	struct ionic_dev *idev = &lif->ionic->idev;
2144 
2145 	mutex_lock(&lif->ionic->dev_cmd_lock);
2146 	ionic_dev_cmd_lif_reset(idev, lif->index);
2147 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2148 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2149 }
2150 
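/* Firmware generation handling: on a surprise FW stop the data path is
 * torn down and the queue resources freed; when the FW returns,
 * ionic_lif_handle_fw_up() rebuilds everything in the reverse order.
 * The IONIC_LIF_F_FW_RESET bit keeps the two transitions from racing or
 * running twice.
 */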
2151 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2152 {
2153 	struct ionic *ionic = lif->ionic;
2154 
2155 	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2156 		return;
2157 
2158 	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2159 
2160 	netif_device_detach(lif->netdev);
2161 
2162 	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2163 		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2164 		ionic_stop_queues(lif);
2165 	}
2166 
2167 	if (netif_running(lif->netdev)) {
2168 		ionic_txrx_deinit(lif);
2169 		ionic_txrx_free(lif);
2170 	}
2171 	ionic_lifs_deinit(ionic);
2172 	ionic_reset(ionic);
2173 	ionic_qcqs_free(lif);
2174 
2175 	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2176 }
2177 
2178 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2179 {
2180 	struct ionic *ionic = lif->ionic;
2181 	int err;
2182 
2183 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2184 		return;
2185 
2186 	dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2187 
2188 	ionic_init_devinfo(ionic);
2189 	ionic_port_init(ionic);
2190 	err = ionic_qcqs_alloc(lif);
2191 	if (err)
2192 		goto err_out;
2193 
2194 	err = ionic_lifs_init(ionic);
2195 	if (err)
2196 		goto err_qcqs_free;
2197 
2198 	if (lif->registered)
2199 		ionic_lif_set_netdev_info(lif);
2200 
2201 	ionic_rx_filter_replay(lif);
2202 
2203 	if (netif_running(lif->netdev)) {
2204 		err = ionic_txrx_alloc(lif);
2205 		if (err)
2206 			goto err_lifs_deinit;
2207 
2208 		err = ionic_txrx_init(lif);
2209 		if (err)
2210 			goto err_txrx_free;
2211 	}
2212 
2213 	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2214 	ionic_link_status_check_request(lif);
2215 	netif_device_attach(lif->netdev);
2216 	dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2217 
2218 	return;
2219 
2220 err_txrx_free:
2221 	ionic_txrx_free(lif);
2222 err_lifs_deinit:
2223 	ionic_lifs_deinit(ionic);
2224 err_qcqs_free:
2225 	ionic_qcqs_free(lif);
2226 err_out:
2227 	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2228 }
2229 
2230 static void ionic_lif_free(struct ionic_lif *lif)
2231 {
2232 	struct device *dev = lif->ionic->dev;
2233 
2234 	/* free rss indirection table */
2235 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2236 			  lif->rss_ind_tbl_pa);
2237 	lif->rss_ind_tbl = NULL;
2238 	lif->rss_ind_tbl_pa = 0;
2239 
2240 	/* free queues */
2241 	ionic_qcqs_free(lif);
2242 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2243 		ionic_lif_reset(lif);
2244 
2245 	/* free lif info */
2246 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2247 	lif->info = NULL;
2248 	lif->info_pa = 0;
2249 
2250 	/* unmap doorbell page */
2251 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2252 	lif->kern_dbpage = NULL;
2253 	kfree(lif->dbid_inuse);
2254 	lif->dbid_inuse = NULL;
2255 
2256 	/* free netdev & lif */
2257 	ionic_debugfs_del_lif(lif);
2258 	list_del(&lif->list);
2259 	free_netdev(lif->netdev);
2260 }
2261 
2262 void ionic_lifs_free(struct ionic *ionic)
2263 {
2264 	struct list_head *cur, *tmp;
2265 	struct ionic_lif *lif;
2266 
2267 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2268 		lif = list_entry(cur, struct ionic_lif, list);
2269 
2270 		ionic_lif_free(lif);
2271 	}
2272 }
2273 
2274 static void ionic_lif_deinit(struct ionic_lif *lif)
2275 {
2276 	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2277 		return;
2278 
2279 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2280 		cancel_work_sync(&lif->deferred.work);
2281 		cancel_work_sync(&lif->tx_timeout_work);
2282 		ionic_rx_filters_deinit(lif);
2283 	}
2284 
2285 	if (lif->netdev->features & NETIF_F_RXHASH)
2286 		ionic_lif_rss_deinit(lif);
2287 
2288 	napi_disable(&lif->adminqcq->napi);
2289 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2290 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2291 
2292 	ionic_lif_reset(lif);
2293 }
2294 
2295 void ionic_lifs_deinit(struct ionic *ionic)
2296 {
2297 	struct list_head *cur, *tmp;
2298 	struct ionic_lif *lif;
2299 
2300 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2301 		lif = list_entry(cur, struct ionic_lif, list);
2302 		ionic_lif_deinit(lif);
2303 	}
2304 }
2305 
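/* The adminq is brought up with direct dev_cmd register writes, since
 * there is no admin queue yet to post requests through.
 */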
2306 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2307 {
2308 	struct device *dev = lif->ionic->dev;
2309 	struct ionic_q_init_comp comp;
2310 	struct ionic_dev *idev;
2311 	struct ionic_qcq *qcq;
2312 	struct ionic_queue *q;
2313 	int err;
2314 
2315 	idev = &lif->ionic->idev;
2316 	qcq = lif->adminqcq;
2317 	q = &qcq->q;
2318 
2319 	mutex_lock(&lif->ionic->dev_cmd_lock);
2320 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2321 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2322 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2323 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2324 	if (err) {
2325 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2326 		return err;
2327 	}
2328 
2329 	q->hw_type = comp.hw_type;
2330 	q->hw_index = le32_to_cpu(comp.hw_index);
2331 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2332 
2333 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2334 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2335 
2336 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2337 		       NAPI_POLL_WEIGHT);
2338 
2339 	napi_enable(&qcq->napi);
2340 
2341 	if (qcq->flags & IONIC_QCQ_F_INTR)
2342 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2343 				IONIC_INTR_MASK_CLEAR);
2344 
2345 	qcq->flags |= IONIC_QCQ_F_INITED;
2346 
2347 	return 0;
2348 }
2349 
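/* The notifyq has no interrupt of its own: intr_index below points at
 * the adminq's vector, and the queue is serviced from the same NAPI
 * poll as the adminq.
 */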
2350 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2351 {
2352 	struct ionic_qcq *qcq = lif->notifyqcq;
2353 	struct device *dev = lif->ionic->dev;
2354 	struct ionic_queue *q = &qcq->q;
2355 	int err;
2356 
2357 	struct ionic_admin_ctx ctx = {
2358 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2359 		.cmd.q_init = {
2360 			.opcode = IONIC_CMD_Q_INIT,
2361 			.lif_index = cpu_to_le16(lif->index),
2362 			.type = q->type,
2363 			.ver = lif->qtype_info[q->type].version,
2364 			.index = cpu_to_le32(q->index),
2365 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2366 					     IONIC_QINIT_F_ENA),
2367 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2368 			.pid = cpu_to_le16(q->pid),
2369 			.ring_size = ilog2(q->num_descs),
2370 			.ring_base = cpu_to_le64(q->base_pa),
2371 		}
2372 	};
2373 
2374 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2375 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2376 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2377 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2378 
2379 	err = ionic_adminq_post_wait(lif, &ctx);
2380 	if (err)
2381 		return err;
2382 
2383 	lif->last_eid = 0;
2384 	q->hw_type = ctx.comp.q_init.hw_type;
2385 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2386 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2387 
2388 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2389 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2390 
2391 	/* preset the callback info */
2392 	q->info[0].cb_arg = lif;
2393 
2394 	qcq->flags |= IONIC_QCQ_F_INITED;
2395 
2396 	return 0;
2397 }
2398 
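/* Fetch the device's station MAC.  If the netdev already has an address
 * (e.g. one set by the user before a fw-upgrade reset), keep it and
 * make sure it is in the filter list; otherwise adopt the device MAC.
 */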
2399 static int ionic_station_set(struct ionic_lif *lif)
2400 {
2401 	struct net_device *netdev = lif->netdev;
2402 	struct ionic_admin_ctx ctx = {
2403 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2404 		.cmd.lif_getattr = {
2405 			.opcode = IONIC_CMD_LIF_GETATTR,
2406 			.index = cpu_to_le16(lif->index),
2407 			.attr = IONIC_LIF_ATTR_MAC,
2408 		},
2409 	};
2410 	struct sockaddr addr;
2411 	int err;
2412 
2413 	err = ionic_adminq_post_wait(lif, &ctx);
2414 	if (err)
2415 		return err;
2416 	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2417 		   ctx.comp.lif_getattr.mac);
2418 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2419 		return 0;
2420 
2421 	if (!is_zero_ether_addr(netdev->dev_addr)) {
2422 		/* If the netdev mac is non-zero and doesn't match the default
2423 		 * device address, it was set by something earlier and we're
2424 		 * likely here again after a fw-upgrade reset.  We need to be
2425 		 * sure the netdev mac is in our filter list.
2426 		 */
2427 		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2428 				      netdev->dev_addr))
2429 			ionic_lif_addr(lif, netdev->dev_addr, true);
2430 	} else {
2431 		/* Update the netdev mac with the device's mac */
2432 		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2433 		addr.sa_family = AF_INET;
2434 		err = eth_prepare_mac_addr_change(netdev, &addr);
2435 		if (err) {
2436 			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2437 				    addr.sa_data, err);
2438 			return 0;
2439 		}
2440 
2441 		eth_commit_mac_addr_change(netdev, &addr);
2442 	}
2443 
2444 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2445 		   netdev->dev_addr);
2446 	ionic_lif_addr(lif, netdev->dev_addr, true);
2447 
2448 	return 0;
2449 }
2450 
2451 static int ionic_lif_init(struct ionic_lif *lif)
2452 {
2453 	struct ionic_dev *idev = &lif->ionic->idev;
2454 	struct device *dev = lif->ionic->dev;
2455 	struct ionic_lif_init_comp comp;
2456 	int dbpage_num;
2457 	int err;
2458 
2459 	mutex_lock(&lif->ionic->dev_cmd_lock);
2460 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2461 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2462 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2463 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2464 	if (err)
2465 		return err;
2466 
2467 	lif->hw_index = le16_to_cpu(comp.hw_index);
2468 
2469 	/* now that we have the hw_index we can figure out our doorbell page */
2470 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2471 	if (!lif->dbid_count) {
2472 		dev_err(dev, "No doorbell pages, aborting\n");
2473 		return -EINVAL;
2474 	}
2475 
2476 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2477 	if (!lif->dbid_inuse) {
2478 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2479 		dev_err(dev, "Failed to allocate doorbell id bitmap, aborting\n");
2480 	}
2481 
2482 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2483 	set_bit(0, lif->dbid_inuse);
2484 	lif->kern_pid = 0;
2485 
2486 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2487 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2488 	if (!lif->kern_dbpage) {
2489 		dev_err(dev, "Cannot map dbpage, aborting\n");
2490 		err = -ENOMEM;
2491 		goto err_out_free_dbid;
2492 	}
2493 
2494 	err = ionic_lif_adminq_init(lif);
2495 	if (err)
2496 		goto err_out_adminq_deinit;
2497 
2498 	if (lif->ionic->nnqs_per_lif) {
2499 		err = ionic_lif_notifyq_init(lif);
2500 		if (err)
2501 			goto err_out_notifyq_deinit;
2502 	}
2503 
2504 	err = ionic_init_nic_features(lif);
2505 	if (err)
2506 		goto err_out_notifyq_deinit;
2507 
2508 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2509 		err = ionic_rx_filters_init(lif);
2510 		if (err)
2511 			goto err_out_notifyq_deinit;
2512 	}
2513 
2514 	err = ionic_station_set(lif);
2515 	if (err)
2516 		goto err_out_notifyq_deinit;
2517 
2518 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2519 
2520 	set_bit(IONIC_LIF_F_INITED, lif->state);
2521 
2522 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2523 
2524 	return 0;
2525 
2526 err_out_notifyq_deinit:
2527 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2528 err_out_adminq_deinit:
2529 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2530 	ionic_lif_reset(lif);
2531 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2532 	lif->kern_dbpage = NULL;
2533 err_out_free_dbid:
2534 	kfree(lif->dbid_inuse);
2535 	lif->dbid_inuse = NULL;
2536 
2537 	return err;
2538 }
2539 
2540 int ionic_lifs_init(struct ionic *ionic)
2541 {
2542 	struct list_head *cur, *tmp;
2543 	struct ionic_lif *lif;
2544 	int err;
2545 
2546 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2547 		lif = list_entry(cur, struct ionic_lif, list);
2548 		err = ionic_lif_init(lif);
2549 		if (err)
2550 			return err;
2551 	}
2552 
2553 	return 0;
2554 }
2555 
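/* Presumably a placeholder: nothing is deferred from the netdev
 * notifier yet, but the work must exist so that cancel_work_sync() in
 * ionic_lifs_unregister() has something to wait on.
 */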
2556 static void ionic_lif_notify_work(struct work_struct *ws)
2557 {
2558 }
2559 
2560 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2561 {
2562 	struct ionic_admin_ctx ctx = {
2563 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2564 		.cmd.lif_setattr = {
2565 			.opcode = IONIC_CMD_LIF_SETATTR,
2566 			.index = cpu_to_le16(lif->index),
2567 			.attr = IONIC_LIF_ATTR_NAME,
2568 		},
2569 	};
2570 
2571 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2572 		sizeof(ctx.cmd.lif_setattr.name));
2573 
2574 	ionic_adminq_post_wait(lif, &ctx);
2575 }
2576 
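/* Recognize our own netdevs by their ndo_start_xmit; only for those is
 * netdev_priv() an ionic_lif.
 */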
2577 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2578 {
2579 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2580 		return NULL;
2581 
2582 	return netdev_priv(netdev);
2583 }
2584 
2585 static int ionic_lif_notify(struct notifier_block *nb,
2586 			    unsigned long event, void *info)
2587 {
2588 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2589 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2590 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2591 
2592 	if (!lif || lif->ionic != ionic)
2593 		return NOTIFY_DONE;
2594 
2595 	switch (event) {
2596 	case NETDEV_CHANGENAME:
2597 		ionic_lif_set_netdev_info(lif);
2598 		break;
2599 	}
2600 
2601 	return NOTIFY_DONE;
2602 }
2603 
2604 int ionic_lifs_register(struct ionic *ionic)
2605 {
2606 	int err;
2607 
2608 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2609 
2610 	ionic->nb.notifier_call = ionic_lif_notify;
2611 
2612 	err = register_netdevice_notifier(&ionic->nb);
2613 	if (err)
2614 		ionic->nb.notifier_call = NULL;
2615 
2616 	/* only register LIF0 for now */
2617 	err = register_netdev(ionic->master_lif->netdev);
2618 	if (err) {
2619 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2620 		return err;
2621 	}
2622 	ionic->master_lif->registered = true;
2623 
2624 	return 0;
2625 }
2626 
2627 void ionic_lifs_unregister(struct ionic *ionic)
2628 {
2629 	if (ionic->nb.notifier_call) {
2630 		unregister_netdevice_notifier(&ionic->nb);
2631 		cancel_work_sync(&ionic->nb_work);
2632 		ionic->nb.notifier_call = NULL;
2633 	}
2634 
2635 	/* There is only one lif ever registered in the
2636 	 * current model, so don't bother searching the
2637 	 * ionic->lifs list for candidates to unregister
2638 	 */
2639 	if (ionic->master_lif &&
2640 	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2641 		unregister_netdev(ionic->master_lif->netdev);
2642 }
2643 
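/* Query the device for the capabilities of each queue type we use.  On
 * older firmware that doesn't implement q_ident, the qtype_info entries
 * are simply left at their zeroed (base version) defaults.
 */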
2644 static void ionic_lif_queue_identify(struct ionic_lif *lif)
2645 {
2646 	struct ionic *ionic = lif->ionic;
2647 	union ionic_q_identity *q_ident;
2648 	struct ionic_dev *idev;
2649 	int qtype;
2650 	int err;
2651 
2652 	idev = &lif->ionic->idev;
2653 	q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2654 
2655 	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2656 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2657 
2658 		/* only handle the queue types we know about */
2659 		switch (qtype) {
2660 		case IONIC_QTYPE_ADMINQ:
2661 		case IONIC_QTYPE_NOTIFYQ:
2662 		case IONIC_QTYPE_RXQ:
2663 		case IONIC_QTYPE_TXQ:
2664 			break;
2665 		default:
2666 			continue;
2667 		}
2668 
2669 		memset(qti, 0, sizeof(*qti));
2670 
2671 		mutex_lock(&ionic->dev_cmd_lock);
2672 		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
2673 					     ionic_qtype_versions[qtype]);
2674 		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2675 		if (!err) {
2676 			qti->version   = q_ident->version;
2677 			qti->supported = q_ident->supported;
2678 			qti->features  = le64_to_cpu(q_ident->features);
2679 			qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
2680 			qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
2681 			qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
2682 			qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
2683 			qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
2684 		}
2685 		mutex_unlock(&ionic->dev_cmd_lock);
2686 
2687 		if (err == -EINVAL) {
2688 			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
2689 			continue;
2690 		} else if (err == -EIO) {
2691 			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
2692 			return;
2693 		} else if (err) {
2694 			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
2695 				qtype, err);
2696 			return;
2697 		}
2698 
2699 		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
2700 			qtype, qti->version);
2701 		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
2702 			qtype, qti->supported);
2703 		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
2704 			qtype, qti->features);
2705 		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
2706 			qtype, qti->desc_sz);
2707 		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
2708 			qtype, qti->comp_sz);
2709 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
2710 			qtype, qti->sg_desc_sz);
2711 		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
2712 			qtype, qti->max_sg_elems);
2713 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
2714 			qtype, qti->sg_desc_stride);
2715 	}
2716 }
2717 
2718 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2719 		       union ionic_lif_identity *lid)
2720 {
2721 	struct ionic_dev *idev = &ionic->idev;
2722 	size_t sz;
2723 	int err;
2724 
2725 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2726 
2727 	mutex_lock(&ionic->dev_cmd_lock);
2728 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2729 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2730 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2731 	mutex_unlock(&ionic->dev_cmd_lock);
2732 	if (err)
2733 		return err;
2734 
2735 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2736 		le64_to_cpu(lid->capabilities));
2737 
2738 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2739 		le32_to_cpu(lid->eth.max_ucast_filters));
2740 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2741 		le32_to_cpu(lid->eth.max_mcast_filters));
2742 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2743 		le64_to_cpu(lid->eth.config.features));
2744 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2745 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2746 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2747 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2748 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2749 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2750 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2751 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2752 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2753 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2754 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2755 		le32_to_cpu(lid->eth.config.mtu));
2756 
2757 	return 0;
2758 }
2759 
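/* Size the lif resources to both the hardware and the host: start from
 * min(device queue counts, online CPUs), then halve the notifyq, EQ and
 * queue-pair counts in turn until the OS grants enough interrupt
 * vectors.
 */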
2760 int ionic_lifs_size(struct ionic *ionic)
2761 {
2762 	struct ionic_identity *ident = &ionic->ident;
2763 	unsigned int nintrs, dev_nintrs;
2764 	union ionic_lif_config *lc;
2765 	unsigned int ntxqs_per_lif;
2766 	unsigned int nrxqs_per_lif;
2767 	unsigned int neqs_per_lif;
2768 	unsigned int nnqs_per_lif;
2769 	unsigned int nxqs, neqs;
2770 	unsigned int min_intrs;
2771 	int err;
2772 
2773 	lc = &ident->lif.eth.config;
2774 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2775 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2776 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2777 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2778 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2779 
2780 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2781 	nxqs = min(nxqs, num_online_cpus());
2782 	neqs = min(neqs_per_lif, num_online_cpus());
2783 
2784 try_again:
2785 	/* interrupt usage:
2786 	 *    1 for master lif adminq/notifyq
2787 	 *    1 per TxRx queue pair on the master lif (at most 1 per CPU)
2788 	 *    whatever's left is for RDMA queues
2789 	 */
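	/* e.g. with 8 online CPUs and a device offering at least 8 tx/rx
	 * queue pairs and 8 RDMA EQs per lif, the first attempt asks for
	 * nintrs = 1 + 8 + 8 = 17 vectors
	 */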
2790 	nintrs = 1 + nxqs + neqs;
2791 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2792 
2793 	if (nintrs > dev_nintrs)
2794 		goto try_fewer;
2795 
2796 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2797 	if (err < 0 && err != -ENOSPC) {
2798 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2799 		return err;
2800 	}
2801 	if (err == -ENOSPC)
2802 		goto try_fewer;
2803 
2804 	if (err != nintrs) {
2805 		ionic_bus_free_irq_vectors(ionic);
2806 		goto try_fewer;
2807 	}
2808 
2809 	ionic->nnqs_per_lif = nnqs_per_lif;
2810 	ionic->neqs_per_lif = neqs;
2811 	ionic->ntxqs_per_lif = nxqs;
2812 	ionic->nrxqs_per_lif = nxqs;
2813 	ionic->nintrs = nintrs;
2814 
2815 	ionic_debugfs_add_sizes(ionic);
2816 
2817 	return 0;
2818 
2819 try_fewer:
2820 	if (nnqs_per_lif > 1) {
2821 		nnqs_per_lif >>= 1;
2822 		goto try_again;
2823 	}
2824 	if (neqs > 1) {
2825 		neqs >>= 1;
2826 		goto try_again;
2827 	}
2828 	if (nxqs > 1) {
2829 		nxqs >>= 1;
2830 		goto try_again;
2831 	}
2832 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2833 	return -ENOSPC;
2834 }
2835