// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

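/* Dynamic interrupt moderation worker: convert the usec value suggested
 * by the net_dim library into the device's coalescing units, reprogram
 * the queue's interrupt coalescing only if it changed, then start the
 * next DIM measurement window.
 */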
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	if (qcq->intr.dim_coal_hw != new_coal) {
		unsigned int qi = qcq->cq.bound_q->index;
		struct ionic_lif *lif = qcq->q.lif;

		qcq->intr.dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[qi]->intr.index,
				     qcq->intr.dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

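/* Deferred-work runner: drain the lif's deferred list one entry at a
 * time, taking the spinlock only to unlink each entry so that the
 * handlers themselves are free to sleep (e.g. on adminq commands).
 */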
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

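/* Reconcile the netdev carrier state with the port status reported by
 * the firmware, starting or stopping the queues as needed.  Runs only
 * when a check has been requested and the lif is not marked broken.
 */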
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

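/* The per-queue interrupt handler only kicks the matching NAPI context;
 * all of the real completion work happens in the napi poll routines.
 */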
static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

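/* Enable/disable a queue-and-completion-queue pair.  Enable unmasks the
 * interrupt and turns on NAPI before telling the device to start the
 * queue; disable reverses that order so no new events land on a queue
 * that is going away.
 */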
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

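/* Free everything a qcq owns: its debugfs entry, DMA rings, interrupt,
 * and the q/cq info arrays.  Safe on a partially built qcq, since each
 * piece is checked before it is released.
 */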
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

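/* Allocate a qcq: the descriptor ring(s), completion ring, optional
 * scatter-gather ring, and interrupt.  Each DMA region is oversized by
 * PAGE_SIZE so the base address handed to the device can be aligned up
 * to a page boundary.
 */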
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}
	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_q_info;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size, cq_size;

		/* q & cq need to be contiguous in case of notifyq */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

		new->q_size = PAGE_SIZE + q_size + cq_size;
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

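/* Reset the software indexes and zero the rings so a re-used qcq starts
 * from a clean state before it is (re)initialized in hardware.
 */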
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

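/* Create the extra "hwstamp" Tx queue used for hardware-timestamped
 * packets.  It sits just past the normal txq range, rides on the adminq
 * interrupt, and is only initialized and enabled if the netdev is
 * already running.
 */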
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_txq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_rxq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, qid, 0, &ctx);
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

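/* Handle one notifyq completion.  Event IDs increase monotonically, so
 * a completion whose EID is not newer than the last one seen marks the
 * end of the new entries.  Reset events are bounced to the deferred
 * work list because this runs in NAPI context and cannot sleep.
 */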
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "Reset event dropped\n");
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    le16_to_cpu(comp->event.ecode), eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

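/* NAPI poll for the adminq interrupt, which is shared with the notifyq
 * and, when present, the hwstamp queues.  The credits returned to the
 * interrupt controller are the sum of all work done on this vector.
 */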
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

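/* Add or delete a MAC filter, keeping the unicast/multicast counts in
 * step with the device's advertised filter limits so that an overflow
 * can be reported back to the stack as -ENOSPC.
 */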
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
		   add ? "add" : "del", addr);
	if (add)
		return ionic_lif_addr_add(lif, addr);
	else
		return ionic_lif_addr_del(lif, addr);
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "rxmode change dropped\n");
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif);
	}
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}

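/* Translate the netdev_features_t bits requested by the stack into the
 * corresponding IONIC_ETH_HW_* feature bits understood by the device.
 */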
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

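/* Stop/start helpers that bracket a queue reconfiguration: queue_lock
 * is taken in the stop half and released in the start half, so the two
 * must always be called as a pair.
 */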
1704 static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1705 {
1706 	/* Stop and clean the queues before reconfiguration */
1707 	mutex_lock(&lif->queue_lock);
1708 	netif_device_detach(lif->netdev);
1709 	ionic_stop_queues(lif);
1710 	ionic_txrx_deinit(lif);
1711 }
1712 
1713 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1714 {
1715 	int err;
1716 
1717 	/* Re-init the queues after reconfiguration */
1718 
1719 	/* The only way txrx_init can fail here is if communication
1720 	 * with FW is suddenly broken.  There's not much we can do
1721 	 * at this point - error messages have already been printed,
1722 	 * so we can continue on and the user can eventually do a
1723 	 * DOWN and UP to try to reset and clear the issue.
1724 	 */
1725 	err = ionic_txrx_init(lif);
1726 	mutex_unlock(&lif->queue_lock);
1727 	ionic_link_status_check_request(lif, CAN_SLEEP);
1728 	netif_device_attach(lif->netdev);
1729 
1730 	return err;
1731 }
1732 
1733 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1734 {
1735 	struct ionic_lif *lif = netdev_priv(netdev);
1736 	struct ionic_admin_ctx ctx = {
1737 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1738 		.cmd.lif_setattr = {
1739 			.opcode = IONIC_CMD_LIF_SETATTR,
1740 			.index = cpu_to_le16(lif->index),
1741 			.attr = IONIC_LIF_ATTR_MTU,
1742 			.mtu = cpu_to_le32(new_mtu),
1743 		},
1744 	};
1745 	int err;
1746 
1747 	err = ionic_adminq_post_wait(lif, &ctx);
1748 	if (err)
1749 		return err;
1750 
1751 	/* if we're not running, nothing more to do */
1752 	if (!netif_running(netdev)) {
1753 		netdev->mtu = new_mtu;
1754 		return 0;
1755 	}
1756 
1757 	ionic_stop_queues_reconfig(lif);
1758 	netdev->mtu = new_mtu;
1759 	return ionic_start_queues_reconfig(lif);
1760 }
1761 
1762 static void ionic_tx_timeout_work(struct work_struct *ws)
1763 {
1764 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1765 
1766 	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1767 		return;
1768 
1769 	/* if we were stopped before this scheduled job was launched,
1770 	 * don't bother the queues as they are already stopped.
1771 	 */
1772 	if (!netif_running(lif->netdev))
1773 		return;
1774 
1775 	ionic_stop_queues_reconfig(lif);
1776 	ionic_start_queues_reconfig(lif);
1777 }
1778 
1779 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1780 {
1781 	struct ionic_lif *lif = netdev_priv(netdev);
1782 
1783 	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1784 	schedule_work(&lif->tx_timeout_work);
1785 }
1786 
1787 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1788 				 u16 vid)
1789 {
1790 	struct ionic_lif *lif = netdev_priv(netdev);
1791 	struct ionic_admin_ctx ctx = {
1792 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1793 		.cmd.rx_filter_add = {
1794 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1795 			.lif_index = cpu_to_le16(lif->index),
1796 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1797 			.vlan.vlan = cpu_to_le16(vid),
1798 		},
1799 	};
1800 	int err;
1801 
1802 	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
1803 	err = ionic_adminq_post_wait(lif, &ctx);
1804 	if (err)
1805 		return err;
1806 
1807 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1808 }
1809 
1810 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1811 				  u16 vid)
1812 {
1813 	struct ionic_lif *lif = netdev_priv(netdev);
1814 	struct ionic_admin_ctx ctx = {
1815 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1816 		.cmd.rx_filter_del = {
1817 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1818 			.lif_index = cpu_to_le16(lif->index),
1819 		},
1820 	};
1821 	struct ionic_rx_filter *f;
1822 
1823 	spin_lock_bh(&lif->rx_filters.lock);
1824 
1825 	f = ionic_rx_filter_by_vlan(lif, vid);
1826 	if (!f) {
1827 		spin_unlock_bh(&lif->rx_filters.lock);
1828 		return -ENOENT;
1829 	}
1830 
1831 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
1832 		   vid, f->filter_id);
1833 
1834 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1835 	ionic_rx_filter_free(lif, f);
1836 	spin_unlock_bh(&lif->rx_filters.lock);
1837 
1838 	return ionic_adminq_post_wait(lif, &ctx);
1839 }
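
/*
 * The add_vid/kill_vid pair keeps a host-side copy of every VLAN
 * filter via ionic_rx_filter_save() and ionic_rx_filter_free().  The
 * saved entries are what ionic_rx_filter_replay() walks to restore
 * hardware state after a firmware reset, which is why a filter is
 * saved even though the device already accepted the RX_FILTER_ADD.
 */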
1840 
1841 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1842 			 const u8 *key, const u32 *indir)
1843 {
1844 	struct ionic_admin_ctx ctx = {
1845 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1846 		.cmd.lif_setattr = {
1847 			.opcode = IONIC_CMD_LIF_SETATTR,
1848 			.attr = IONIC_LIF_ATTR_RSS,
1849 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1850 		},
1851 	};
1852 	unsigned int i, tbl_sz;
1853 
1854 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1855 		lif->rss_types = types;
1856 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1857 	}
1858 
1859 	if (key)
1860 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1861 
1862 	if (indir) {
1863 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1864 		for (i = 0; i < tbl_sz; i++)
1865 			lif->rss_ind_tbl[i] = indir[i];
1866 	}
1867 
1868 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1869 	       IONIC_RSS_HASH_KEY_SIZE);
1870 
1871 	return ionic_adminq_post_wait(lif, &ctx);
1872 }
1873 
1874 static int ionic_lif_rss_init(struct ionic_lif *lif)
1875 {
1876 	unsigned int tbl_sz;
1877 	unsigned int i;
1878 
1879 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1880 			 IONIC_RSS_TYPE_IPV4_TCP |
1881 			 IONIC_RSS_TYPE_IPV4_UDP |
1882 			 IONIC_RSS_TYPE_IPV6     |
1883 			 IONIC_RSS_TYPE_IPV6_TCP |
1884 			 IONIC_RSS_TYPE_IPV6_UDP;
1885 
1886 	/* Fill indirection table with 'default' values */
1887 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1888 	for (i = 0; i < tbl_sz; i++)
1889 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1890 
1891 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1892 }
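
/*
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so the default
 * table built above spreads flows round-robin over the active rx
 * queues; for example, with tbl_sz = 128 and nxqs = 4:
 *
 *	lif->rss_ind_tbl[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 */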
1893 
1894 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1895 {
1896 	int tbl_sz;
1897 
1898 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1899 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1900 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1901 
1902 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1903 }
1904 
1905 static void ionic_lif_quiesce(struct ionic_lif *lif)
1906 {
1907 	struct ionic_admin_ctx ctx = {
1908 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1909 		.cmd.lif_setattr = {
1910 			.opcode = IONIC_CMD_LIF_SETATTR,
1911 			.index = cpu_to_le16(lif->index),
1912 			.attr = IONIC_LIF_ATTR_STATE,
1913 			.state = IONIC_LIF_QUIESCE,
1914 		},
1915 	};
1916 	int err;
1917 
1918 	err = ionic_adminq_post_wait(lif, &ctx);
1919 	if (err)
1920 		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
1921 }
1922 
1923 static void ionic_txrx_disable(struct ionic_lif *lif)
1924 {
1925 	unsigned int i;
1926 	int err = 0;
1927 
1928 	if (lif->txqcqs) {
1929 		for (i = 0; i < lif->nxqs; i++)
1930 			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
1931 	}
1932 
1933 	if (lif->hwstamp_txq)
1934 		err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
1935 
1936 	if (lif->rxqcqs) {
1937 		for (i = 0; i < lif->nxqs; i++)
1938 			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
1939 	}
1940 
1941 	if (lif->hwstamp_rxq)
1942 		err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
1943 
1944 	ionic_lif_quiesce(lif);
1945 }
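
/*
 * Note the (err != -ETIMEDOUT) value threaded through the calls
 * above: the second argument to ionic_qcq_disable() says whether to
 * post a disable command to the device, so once one command times
 * out the remaining queues are torn down host-side only instead of
 * waiting out more timeouts against dead firmware.
 */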
1946 
1947 static void ionic_txrx_deinit(struct ionic_lif *lif)
1948 {
1949 	unsigned int i;
1950 
1951 	if (lif->txqcqs) {
1952 		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1953 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1954 			ionic_tx_flush(&lif->txqcqs[i]->cq);
1955 			ionic_tx_empty(&lif->txqcqs[i]->q);
1956 		}
1957 	}
1958 
1959 	if (lif->rxqcqs) {
1960 		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1961 			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1962 			ionic_rx_empty(&lif->rxqcqs[i]->q);
1963 		}
1964 	}
1965 	lif->rx_mode = 0;
1966 
1967 	if (lif->hwstamp_txq) {
1968 		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
1969 		ionic_tx_flush(&lif->hwstamp_txq->cq);
1970 		ionic_tx_empty(&lif->hwstamp_txq->q);
1971 	}
1972 
1973 	if (lif->hwstamp_rxq) {
1974 		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
1975 		ionic_rx_empty(&lif->hwstamp_rxq->q);
1976 	}
1977 }
1978 
1979 static void ionic_txrx_free(struct ionic_lif *lif)
1980 {
1981 	unsigned int i;
1982 
1983 	if (lif->txqcqs) {
1984 		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
1985 			ionic_qcq_free(lif, lif->txqcqs[i]);
1986 			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
1987 			lif->txqcqs[i] = NULL;
1988 		}
1989 	}
1990 
1991 	if (lif->rxqcqs) {
1992 		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
1993 			ionic_qcq_free(lif, lif->rxqcqs[i]);
1994 			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
1995 			lif->rxqcqs[i] = NULL;
1996 		}
1997 	}
1998 
1999 	if (lif->hwstamp_txq) {
2000 		ionic_qcq_free(lif, lif->hwstamp_txq);
2001 		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
2002 		lif->hwstamp_txq = NULL;
2003 	}
2004 
2005 	if (lif->hwstamp_rxq) {
2006 		ionic_qcq_free(lif, lif->hwstamp_rxq);
2007 		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2008 		lif->hwstamp_rxq = NULL;
2009 	}
2010 }
2011 
2012 static int ionic_txrx_alloc(struct ionic_lif *lif)
2013 {
2014 	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2015 	unsigned int flags, i;
2016 	int err = 0;
2017 
2018 	num_desc = lif->ntxq_descs;
2019 	desc_sz = sizeof(struct ionic_txq_desc);
2020 	comp_sz = sizeof(struct ionic_txq_comp);
2021 
2022 	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2023 	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2024 					  sizeof(struct ionic_txq_sg_desc_v1))
2025 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2026 	else
2027 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2028 
2029 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2030 	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2031 		flags |= IONIC_QCQ_F_INTR;
2032 	for (i = 0; i < lif->nxqs; i++) {
2033 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2034 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
2035 				      lif->kern_pid, &lif->txqcqs[i]);
2036 		if (err)
2037 			goto err_out;
2038 
2039 		if (flags & IONIC_QCQ_F_INTR) {
2040 			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2041 					     lif->txqcqs[i]->intr.index,
2042 					     lif->tx_coalesce_hw);
2043 			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2044 				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2045 		}
2046 
2047 		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2048 	}
2049 
2050 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
2051 
2052 	num_desc = lif->nrxq_descs;
2053 	desc_sz = sizeof(struct ionic_rxq_desc);
2054 	comp_sz = sizeof(struct ionic_rxq_comp);
2055 	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2056 
2057 	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2058 		comp_sz *= 2;
2059 
2060 	for (i = 0; i < lif->nxqs; i++) {
2061 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2062 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
2063 				      lif->kern_pid, &lif->rxqcqs[i]);
2064 		if (err)
2065 			goto err_out;
2066 
2067 		lif->rxqcqs[i]->q.features = lif->rxq_features;
2068 
2069 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2070 				     lif->rxqcqs[i]->intr.index,
2071 				     lif->rx_coalesce_hw);
2072 		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
2073 			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
2074 
2075 		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2076 			ionic_link_qcq_interrupts(lif->rxqcqs[i],
2077 						  lif->txqcqs[i]);
2078 
2079 		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2080 	}
2081 
2082 	return 0;
2083 
2084 err_out:
2085 	ionic_txrx_free(lif);
2086 
2087 	return err;
2088 }
2089 
2090 static int ionic_txrx_init(struct ionic_lif *lif)
2091 {
2092 	unsigned int i;
2093 	int err;
2094 
2095 	for (i = 0; i < lif->nxqs; i++) {
2096 		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
2097 		if (err)
2098 			goto err_out;
2099 
2100 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
2101 		if (err) {
2102 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2103 			goto err_out;
2104 		}
2105 	}
2106 
2107 	if (lif->netdev->features & NETIF_F_RXHASH)
2108 		ionic_lif_rss_init(lif);
2109 
2110 	ionic_set_rx_mode(lif->netdev, CAN_SLEEP);
2111 
2112 	return 0;
2113 
2114 err_out:
2115 	while (i--) {
2116 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2117 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
2118 	}
2119 
2120 	return err;
2121 }
2122 
2123 static int ionic_txrx_enable(struct ionic_lif *lif)
2124 {
2125 	int derr = 0;
2126 	int i, err;
2127 
2128 	for (i = 0; i < lif->nxqs; i++) {
2129 		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
2130 			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
2131 			err = -ENXIO;
2132 			goto err_out;
2133 		}
2134 
2135 		ionic_rx_fill(&lif->rxqcqs[i]->q);
2136 		err = ionic_qcq_enable(lif->rxqcqs[i]);
2137 		if (err)
2138 			goto err_out;
2139 
2140 		err = ionic_qcq_enable(lif->txqcqs[i]);
2141 		if (err) {
2142 			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
2143 			goto err_out;
2144 		}
2145 	}
2146 
2147 	if (lif->hwstamp_rxq) {
2148 		ionic_rx_fill(&lif->hwstamp_rxq->q);
2149 		err = ionic_qcq_enable(lif->hwstamp_rxq);
2150 		if (err)
2151 			goto err_out_hwstamp_rx;
2152 	}
2153 
2154 	if (lif->hwstamp_txq) {
2155 		err = ionic_qcq_enable(lif->hwstamp_txq);
2156 		if (err)
2157 			goto err_out_hwstamp_tx;
2158 	}
2159 
2160 	return 0;
2161 
2162 err_out_hwstamp_tx:
2163 	if (lif->hwstamp_rxq)
2164 		derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
2165 err_out_hwstamp_rx:
2166 	i = lif->nxqs;
2167 err_out:
2168 	while (i--) {
2169 		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
2170 		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
2171 	}
2172 
2173 	return err;
2174 }
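
/*
 * Enable order matters above: each rx queue is given buffers and
 * enabled before its tx partner, and the error paths unwind in
 * reverse (hwstamp tx, hwstamp rx, then the per-queue pairs via the
 * i = lif->nxqs fallthrough), so no tx queue is left enabled without
 * a working rx side.
 */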
2175 
2176 static int ionic_start_queues(struct ionic_lif *lif)
2177 {
2178 	int err;
2179 
2180 	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2181 		return -EIO;
2182 
2183 	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2184 		return -EBUSY;
2185 
2186 	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2187 		return 0;
2188 
2189 	err = ionic_txrx_enable(lif);
2190 	if (err) {
2191 		clear_bit(IONIC_LIF_F_UP, lif->state);
2192 		return err;
2193 	}
2194 	netif_tx_wake_all_queues(lif->netdev);
2195 
2196 	return 0;
2197 }
2198 
2199 static int ionic_open(struct net_device *netdev)
2200 {
2201 	struct ionic_lif *lif = netdev_priv(netdev);
2202 	int err;
2203 
2204 	/* If recovering from a broken state, clear the bit and we'll try again */
2205 	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
2206 		netdev_info(netdev, "clearing broken state\n");
2207 
2208 	err = ionic_txrx_alloc(lif);
2209 	if (err)
2210 		return err;
2211 
2212 	err = ionic_txrx_init(lif);
2213 	if (err)
2214 		goto err_txrx_free;
2215 
2216 	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
2217 	if (err)
2218 		goto err_txrx_deinit;
2219 
2220 	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
2221 	if (err)
2222 		goto err_txrx_deinit;
2223 
2224 	/* don't start the queues until we have link */
2225 	if (netif_carrier_ok(netdev)) {
2226 		err = ionic_start_queues(lif);
2227 		if (err)
2228 			goto err_txrx_deinit;
2229 	}
2230 
2231 	return 0;
2232 
2233 err_txrx_deinit:
2234 	ionic_txrx_deinit(lif);
2235 err_txrx_free:
2236 	ionic_txrx_free(lif);
2237 	return err;
2238 }
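
/*
 * ndo_open deliberately stops short of enabling the queues when
 * there is no link; the link status check path calls
 * ionic_start_queues() once carrier comes up, so the data path is
 * only armed when traffic can actually flow.
 */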
2239 
2240 static void ionic_stop_queues(struct ionic_lif *lif)
2241 {
2242 	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2243 		return;
2244 
2245 	netif_tx_disable(lif->netdev);
2246 	ionic_txrx_disable(lif);
2247 }
2248 
2249 static int ionic_stop(struct net_device *netdev)
2250 {
2251 	struct ionic_lif *lif = netdev_priv(netdev);
2252 
2253 	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2254 		return 0;
2255 
2256 	ionic_stop_queues(lif);
2257 	ionic_txrx_deinit(lif);
2258 	ionic_txrx_free(lif);
2259 
2260 	return 0;
2261 }
2262 
2263 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2264 {
2265 	struct ionic_lif *lif = netdev_priv(netdev);
2266 
2267 	switch (cmd) {
2268 	case SIOCSHWTSTAMP:
2269 		return ionic_lif_hwstamp_set(lif, ifr);
2270 	case SIOCGHWTSTAMP:
2271 		return ionic_lif_hwstamp_get(lif, ifr);
2272 	default:
2273 		return -EOPNOTSUPP;
2274 	}
2275 }
2276 
2277 static int ionic_get_vf_config(struct net_device *netdev,
2278 			       int vf, struct ifla_vf_info *ivf)
2279 {
2280 	struct ionic_lif *lif = netdev_priv(netdev);
2281 	struct ionic *ionic = lif->ionic;
2282 	int ret = 0;
2283 
2284 	if (!netif_device_present(netdev))
2285 		return -EBUSY;
2286 
2287 	down_read(&ionic->vf_op_lock);
2288 
2289 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2290 		ret = -EINVAL;
2291 	} else {
2292 		ivf->vf           = vf;
2293 		ivf->vlan         = le16_to_cpu(ionic->vfs[vf].vlanid);
2294 		ivf->qos          = 0;
2295 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
2296 		ivf->linkstate    = ionic->vfs[vf].linkstate;
2297 		ivf->max_tx_rate  = le32_to_cpu(ionic->vfs[vf].maxrate);
2298 		ivf->trusted      = ionic->vfs[vf].trusted;
2299 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
2300 	}
2301 
2302 	up_read(&ionic->vf_op_lock);
2303 	return ret;
2304 }
2305 
2306 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2307 			      struct ifla_vf_stats *vf_stats)
2308 {
2309 	struct ionic_lif *lif = netdev_priv(netdev);
2310 	struct ionic *ionic = lif->ionic;
2311 	struct ionic_lif_stats *vs;
2312 	int ret = 0;
2313 
2314 	if (!netif_device_present(netdev))
2315 		return -EBUSY;
2316 
2317 	down_read(&ionic->vf_op_lock);
2318 
2319 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2320 		ret = -EINVAL;
2321 	} else {
2322 		memset(vf_stats, 0, sizeof(*vf_stats));
2323 		vs = &ionic->vfs[vf].stats;
2324 
2325 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2326 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2327 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
2328 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
2329 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
2330 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
2331 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2332 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
2333 				       le64_to_cpu(vs->rx_bcast_drop_packets);
2334 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2335 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
2336 				       le64_to_cpu(vs->tx_bcast_drop_packets);
2337 	}
2338 
2339 	up_read(&ionic->vf_op_lock);
2340 	return ret;
2341 }
2342 
2343 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2344 {
2345 	struct ionic_lif *lif = netdev_priv(netdev);
2346 	struct ionic *ionic = lif->ionic;
2347 	int ret;
2348 
2349 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
2350 		return -EINVAL;
2351 
2352 	if (!netif_device_present(netdev))
2353 		return -EBUSY;
2354 
2355 	down_write(&ionic->vf_op_lock);
2356 
2357 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2358 		ret = -EINVAL;
2359 	} else {
2360 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
2361 		if (!ret)
2362 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2363 	}
2364 
2365 	up_write(&ionic->vf_op_lock);
2366 	return ret;
2367 }
2368 
2369 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2370 			     u8 qos, __be16 proto)
2371 {
2372 	struct ionic_lif *lif = netdev_priv(netdev);
2373 	struct ionic *ionic = lif->ionic;
2374 	int ret;
2375 
2376 	/* until someday when we support qos */
2377 	if (qos)
2378 		return -EINVAL;
2379 
2380 	if (vlan > 4095)
2381 		return -EINVAL;
2382 
2383 	if (proto != htons(ETH_P_8021Q))
2384 		return -EPROTONOSUPPORT;
2385 
2386 	if (!netif_device_present(netdev))
2387 		return -EBUSY;
2388 
2389 	down_write(&ionic->vf_op_lock);
2390 
2391 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2392 		ret = -EINVAL;
2393 	} else {
2394 		ret = ionic_set_vf_config(ionic, vf,
2395 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
2396 		if (!ret)
2397 			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
2398 	}
2399 
2400 	up_write(&ionic->vf_op_lock);
2401 	return ret;
2402 }
2403 
2404 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2405 			     int tx_min, int tx_max)
2406 {
2407 	struct ionic_lif *lif = netdev_priv(netdev);
2408 	struct ionic *ionic = lif->ionic;
2409 	int ret;
2410 
2411 	/* a minimum tx rate is not supported */
2412 	if (tx_min)
2413 		return -EINVAL;
2414 
2415 	if (!netif_device_present(netdev))
2416 		return -EBUSY;
2417 
2418 	down_write(&ionic->vf_op_lock);
2419 
2420 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2421 		ret = -EINVAL;
2422 	} else {
2423 		ret = ionic_set_vf_config(ionic, vf,
2424 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
2425 		if (!ret)
2426 			lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2427 	}
2428 
2429 	up_write(&ionic->vf_op_lock);
2430 	return ret;
2431 }
2432 
2433 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2434 {
2435 	struct ionic_lif *lif = netdev_priv(netdev);
2436 	struct ionic *ionic = lif->ionic;
2437 	u8 data = set;  /* convert to u8 for config */
2438 	int ret;
2439 
2440 	if (!netif_device_present(netdev))
2441 		return -EBUSY;
2442 
2443 	down_write(&ionic->vf_op_lock);
2444 
2445 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2446 		ret = -EINVAL;
2447 	} else {
2448 		ret = ionic_set_vf_config(ionic, vf,
2449 					  IONIC_VF_ATTR_SPOOFCHK, &data);
2450 		if (!ret)
2451 			ionic->vfs[vf].spoofchk = data;
2452 	}
2453 
2454 	up_write(&ionic->vf_op_lock);
2455 	return ret;
2456 }
2457 
2458 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2459 {
2460 	struct ionic_lif *lif = netdev_priv(netdev);
2461 	struct ionic *ionic = lif->ionic;
2462 	u8 data = set;  /* convert to u8 for config */
2463 	int ret;
2464 
2465 	if (!netif_device_present(netdev))
2466 		return -EBUSY;
2467 
2468 	down_write(&ionic->vf_op_lock);
2469 
2470 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2471 		ret = -EINVAL;
2472 	} else {
2473 		ret = ionic_set_vf_config(ionic, vf,
2474 					  IONIC_VF_ATTR_TRUST, &data);
2475 		if (!ret)
2476 			ionic->vfs[vf].trusted = data;
2477 	}
2478 
2479 	up_write(&ionic->vf_op_lock);
2480 	return ret;
2481 }
2482 
2483 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2484 {
2485 	struct ionic_lif *lif = netdev_priv(netdev);
2486 	struct ionic *ionic = lif->ionic;
2487 	u8 data;
2488 	int ret;
2489 
2490 	switch (set) {
2491 	case IFLA_VF_LINK_STATE_ENABLE:
2492 		data = IONIC_VF_LINK_STATUS_UP;
2493 		break;
2494 	case IFLA_VF_LINK_STATE_DISABLE:
2495 		data = IONIC_VF_LINK_STATUS_DOWN;
2496 		break;
2497 	case IFLA_VF_LINK_STATE_AUTO:
2498 		data = IONIC_VF_LINK_STATUS_AUTO;
2499 		break;
2500 	default:
2501 		return -EINVAL;
2502 	}
2503 
2504 	if (!netif_device_present(netdev))
2505 		return -EBUSY;
2506 
2507 	down_write(&ionic->vf_op_lock);
2508 
2509 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2510 		ret = -EINVAL;
2511 	} else {
2512 		ret = ionic_set_vf_config(ionic, vf,
2513 					  IONIC_VF_ATTR_LINKSTATE, &data);
2514 		if (!ret)
2515 			ionic->vfs[vf].linkstate = set;
2516 	}
2517 
2518 	up_write(&ionic->vf_op_lock);
2519 	return ret;
2520 }
2521 
2522 static const struct net_device_ops ionic_netdev_ops = {
2523 	.ndo_open               = ionic_open,
2524 	.ndo_stop               = ionic_stop,
2525 	.ndo_eth_ioctl		= ionic_eth_ioctl,
2526 	.ndo_start_xmit		= ionic_start_xmit,
2527 	.ndo_get_stats64	= ionic_get_stats64,
2528 	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
2529 	.ndo_set_features	= ionic_set_features,
2530 	.ndo_set_mac_address	= ionic_set_mac_address,
2531 	.ndo_validate_addr	= eth_validate_addr,
2532 	.ndo_tx_timeout         = ionic_tx_timeout,
2533 	.ndo_change_mtu         = ionic_change_mtu,
2534 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
2535 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
2536 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
2537 	.ndo_set_vf_trust	= ionic_set_vf_trust,
2538 	.ndo_set_vf_mac		= ionic_set_vf_mac,
2539 	.ndo_set_vf_rate	= ionic_set_vf_rate,
2540 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
2541 	.ndo_get_vf_config	= ionic_get_vf_config,
2542 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
2543 	.ndo_get_vf_stats       = ionic_get_vf_stats,
2544 };
2545 
2546 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2547 {
2548 	/* swap only the queue, sg, and cq ring state, not the napi, interrupt, or flags setup */
2549 	swap(a->q.features,   b->q.features);
2550 	swap(a->q.num_descs,  b->q.num_descs);
2551 	swap(a->q.desc_size,  b->q.desc_size);
2552 	swap(a->q.base,       b->q.base);
2553 	swap(a->q.base_pa,    b->q.base_pa);
2554 	swap(a->q.info,       b->q.info);
2555 	swap(a->q_base,       b->q_base);
2556 	swap(a->q_base_pa,    b->q_base_pa);
2557 	swap(a->q_size,       b->q_size);
2558 
2559 	swap(a->q.sg_desc_size, b->q.sg_desc_size);
2560 	swap(a->q.sg_base,    b->q.sg_base);
2561 	swap(a->q.sg_base_pa, b->q.sg_base_pa);
2562 	swap(a->sg_base,      b->sg_base);
2563 	swap(a->sg_base_pa,   b->sg_base_pa);
2564 	swap(a->sg_size,      b->sg_size);
2565 
2566 	swap(a->cq.num_descs, b->cq.num_descs);
2567 	swap(a->cq.desc_size, b->cq.desc_size);
2568 	swap(a->cq.base,      b->cq.base);
2569 	swap(a->cq.base_pa,   b->cq.base_pa);
2570 	swap(a->cq.info,      b->cq.info);
2571 	swap(a->cq_base,      b->cq_base);
2572 	swap(a->cq_base_pa,   b->cq_base_pa);
2573 	swap(a->cq_size,      b->cq_size);
2574 
2575 	ionic_debugfs_del_qcq(a);
2576 	ionic_debugfs_add_qcq(a->q.lif, a);
2577 }
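
/*
 * This swap is the heart of live queue reconfiguration: new rings
 * are allocated off to the side, then only the descriptor memory and
 * sizes are exchanged into the existing qcq shells, leaving napi,
 * interrupt, and netdev queue mappings untouched.  Freeing the
 * temporary qcq afterwards releases the old rings:
 *
 *	ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
 *	...
 *	ionic_qcq_free(lif, tx_qcqs[i]);	(frees the old rings)
 */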
2578 
2579 int ionic_reconfigure_queues(struct ionic_lif *lif,
2580 			     struct ionic_queue_params *qparam)
2581 {
2582 	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2583 	struct ionic_qcq **tx_qcqs = NULL;
2584 	struct ionic_qcq **rx_qcqs = NULL;
2585 	unsigned int flags, i;
2586 	int err = 0;
2587 
2588 	/* allocate temporary qcq arrays to hold new queue structs */
2589 	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2590 		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2591 				       sizeof(struct ionic_qcq *), GFP_KERNEL);
2592 		if (!tx_qcqs) {
2593 			err = -ENOMEM;
2594 			goto err_out;
2595 		}
2596 	}
2597 	if (qparam->nxqs != lif->nxqs ||
2598 	    qparam->nrxq_descs != lif->nrxq_descs ||
2599 	    qparam->rxq_features != lif->rxq_features) {
2600 		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2601 				       sizeof(struct ionic_qcq *), GFP_KERNEL);
2602 		if (!rx_qcqs) {
2603 			err = -ENOMEM;
2604 			goto err_out;
2605 		}
2606 	}
2607 
2608 	/* allocate new desc_info and rings, but leave the interrupt setup
2609 	 * until later so as not to disturb the still-running queues
2610 	 */
2611 	if (tx_qcqs) {
2612 		num_desc = qparam->ntxq_descs;
2613 		desc_sz = sizeof(struct ionic_txq_desc);
2614 		comp_sz = sizeof(struct ionic_txq_comp);
2615 
2616 		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2617 		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2618 		    sizeof(struct ionic_txq_sg_desc_v1))
2619 			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2620 		else
2621 			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2622 
2623 		for (i = 0; i < qparam->nxqs; i++) {
2624 			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2625 			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2626 					      num_desc, desc_sz, comp_sz, sg_desc_sz,
2627 					      lif->kern_pid, &tx_qcqs[i]);
2628 			if (err)
2629 				goto err_out;
2630 		}
2631 	}
2632 
2633 	if (rx_qcqs) {
2634 		num_desc = qparam->nrxq_descs;
2635 		desc_sz = sizeof(struct ionic_rxq_desc);
2636 		comp_sz = sizeof(struct ionic_rxq_comp);
2637 		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2638 
2639 		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2640 			comp_sz *= 2;
2641 
2642 		for (i = 0; i < qparam->nxqs; i++) {
2643 			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2644 			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2645 					      num_desc, desc_sz, comp_sz, sg_desc_sz,
2646 					      lif->kern_pid, &rx_qcqs[i]);
2647 			if (err)
2648 				goto err_out;
2649 
2650 			rx_qcqs[i]->q.features = qparam->rxq_features;
2651 		}
2652 	}
2653 
2654 	/* stop and clean the queues */
2655 	ionic_stop_queues_reconfig(lif);
2656 
2657 	if (qparam->nxqs != lif->nxqs) {
2658 		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2659 		if (err)
2660 			goto err_out_reinit_unlock;
2661 		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2662 		if (err) {
2663 			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2664 			goto err_out_reinit_unlock;
2665 		}
2666 	}
2667 
2668 	/* swap new desc_info and rings, keeping existing interrupt config */
2669 	if (tx_qcqs) {
2670 		lif->ntxq_descs = qparam->ntxq_descs;
2671 		for (i = 0; i < qparam->nxqs; i++)
2672 			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2673 	}
2674 
2675 	if (rx_qcqs) {
2676 		lif->nrxq_descs = qparam->nrxq_descs;
2677 		for (i = 0; i < qparam->nxqs; i++)
2678 			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2679 	}
2680 
2681 	/* if we need to change the interrupt layout, this is the time */
2682 	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2683 	    qparam->nxqs != lif->nxqs) {
2684 		if (qparam->intr_split) {
2685 			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2686 		} else {
2687 			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2688 			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2689 			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2690 		}
2691 
2692 		/* clear existing interrupt assignments */
2693 		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
2694 			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
2695 			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
2696 		}
2697 
2698 		/* re-assign the interrupts */
2699 		for (i = 0; i < qparam->nxqs; i++) {
2700 			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2701 			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
2702 			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2703 					     lif->rxqcqs[i]->intr.index,
2704 					     lif->rx_coalesce_hw);
2705 
2706 			if (qparam->intr_split) {
2707 				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2708 				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
2709 				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2710 						     lif->txqcqs[i]->intr.index,
2711 						     lif->tx_coalesce_hw);
2712 				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2713 					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2714 			} else {
2715 				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2716 				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
2717 			}
2718 		}
2719 	}
2720 
2721 	/* now we can rework the debugfs mappings */
2722 	if (tx_qcqs) {
2723 		for (i = 0; i < qparam->nxqs; i++) {
2724 			ionic_debugfs_del_qcq(lif->txqcqs[i]);
2725 			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2726 		}
2727 	}
2728 
2729 	if (rx_qcqs) {
2730 		for (i = 0; i < qparam->nxqs; i++) {
2731 			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
2732 			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2733 		}
2734 	}
2735 
2736 	swap(lif->nxqs, qparam->nxqs);
2737 	swap(lif->rxq_features, qparam->rxq_features);
2738 
2739 err_out_reinit_unlock:
2740 	/* re-init the queues, but don't lose an error code */
2741 	if (err)
2742 		ionic_start_queues_reconfig(lif);
2743 	else
2744 		err = ionic_start_queues_reconfig(lif);
2745 
2746 err_out:
2747 	/* free old allocs without cleaning intr */
2748 	for (i = 0; i < qparam->nxqs; i++) {
2749 		if (tx_qcqs && tx_qcqs[i]) {
2750 			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2751 			ionic_qcq_free(lif, tx_qcqs[i]);
2752 			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
2753 			tx_qcqs[i] = NULL;
2754 		}
2755 		if (rx_qcqs && rx_qcqs[i]) {
2756 			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2757 			ionic_qcq_free(lif, rx_qcqs[i]);
2758 			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
2759 			rx_qcqs[i] = NULL;
2760 		}
2761 	}
2762 
2763 	/* free q array */
2764 	if (rx_qcqs) {
2765 		devm_kfree(lif->ionic->dev, rx_qcqs);
2766 		rx_qcqs = NULL;
2767 	}
2768 	if (tx_qcqs) {
2769 		devm_kfree(lif->ionic->dev, tx_qcqs);
2770 		tx_qcqs = NULL;
2771 	}
2772 
2773 	/* clean the unused dma and info allocations when new set is smaller
2774 	 * than the full array, but leave the qcq shells in place
2775 	 */
2776 	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
2777 		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2778 		ionic_qcq_free(lif, lif->txqcqs[i]);
2779 
2780 		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2781 		ionic_qcq_free(lif, lif->rxqcqs[i]);
2782 	}
2783 
2784 	if (err)
2785 		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
2786 
2787 	return err;
2788 }
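
/*
 * A sketch of how an ethtool ring-size handler is expected to drive
 * ionic_reconfigure_queues(), assuming a helper that seeds the
 * params struct from the lif's current settings (the helper name and
 * ring fields here are illustrative):
 *
 *	struct ionic_queue_params qparam;
 *
 *	ionic_init_queue_params(lif, &qparam);
 *	qparam.ntxq_descs = ring->tx_pending;
 *	qparam.nrxq_descs = ring->rx_pending;
 *	err = ionic_reconfigure_queues(lif, &qparam);
 */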
2789 
2790 int ionic_lif_alloc(struct ionic *ionic)
2791 {
2792 	struct device *dev = ionic->dev;
2793 	union ionic_lif_identity *lid;
2794 	struct net_device *netdev;
2795 	struct ionic_lif *lif;
2796 	int tbl_sz;
2797 	int err;
2798 
2799 	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
2800 	if (!lid)
2801 		return -ENOMEM;
2802 
2803 	netdev = alloc_etherdev_mqs(sizeof(*lif),
2804 				    ionic->ntxqs_per_lif, ionic->nrxqs_per_lif);
2805 	if (!netdev) {
2806 		dev_err(dev, "Cannot allocate netdev, aborting\n");
2807 		err = -ENOMEM;
2808 		goto err_out_free_lid;
2809 	}
2810 
2811 	SET_NETDEV_DEV(netdev, dev);
2812 
2813 	lif = netdev_priv(netdev);
2814 	lif->netdev = netdev;
2815 	ionic->lif = lif;
2816 	netdev->netdev_ops = &ionic_netdev_ops;
2817 	ionic_ethtool_set_ops(netdev);
2818 
2819 	netdev->watchdog_timeo = 2 * HZ;
2820 	netif_carrier_off(netdev);
2821 
2822 	lif->identity = lid;
2823 	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2824 	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
2825 	if (err) {
2826 		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
2827 			lif->lif_type, err);
2828 		goto err_out_free_netdev;
2829 	}
2830 	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
2831 				     le32_to_cpu(lif->identity->eth.min_frame_size));
2832 	lif->netdev->max_mtu =
2833 		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
2834 
2835 	lif->neqs = ionic->neqs_per_lif;
2836 	lif->nxqs = ionic->ntxqs_per_lif;
2837 
2838 	lif->ionic = ionic;
2839 	lif->index = 0;
2840 
2841 	if (is_kdump_kernel()) {
2842 		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
2843 		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
2844 	} else {
2845 		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2846 		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2847 	}
2848 
2849 	/* Convert the default coalesce value to actual hw resolution */
2850 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2851 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2852 						    lif->rx_coalesce_usecs);
2853 	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2854 	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2855 	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
2856 	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
2857 
2858 	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
2859 
2860 	spin_lock_init(&lif->adminq_lock);
2861 
2862 	spin_lock_init(&lif->deferred.lock);
2863 	INIT_LIST_HEAD(&lif->deferred.list);
2864 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2865 
2866 	/* allocate lif info */
2867 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2868 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
2869 				       &lif->info_pa, GFP_KERNEL);
2870 	if (!lif->info) {
2871 		dev_err(dev, "Failed to allocate lif info, aborting\n");
2872 		err = -ENOMEM;
2873 		goto err_out_free_netdev;
2874 	}
2875 
2876 	ionic_debugfs_add_lif(lif);
2877 
2878 	/* allocate control queues and txrx queue arrays */
2879 	ionic_lif_queue_identify(lif);
2880 	err = ionic_qcqs_alloc(lif);
2881 	if (err)
2882 		goto err_out_free_lif_info;
2883 
2884 	/* allocate rss indirection table */
2885 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2886 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2887 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2888 					      &lif->rss_ind_tbl_pa,
2889 					      GFP_KERNEL);
2890 
2891 	if (!lif->rss_ind_tbl) {
2892 		err = -ENOMEM;
2893 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2894 		goto err_out_free_qcqs;
2895 	}
2896 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2897 
2898 	ionic_lif_alloc_phc(lif);
2899 
2900 	return 0;
2901 
2902 err_out_free_qcqs:
2903 	ionic_qcqs_free(lif);
2904 err_out_free_lif_info:
2905 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2906 	lif->info = NULL;
2907 	lif->info_pa = 0;
2908 err_out_free_netdev:
2909 	free_netdev(lif->netdev);
2910 	lif = NULL;
2911 err_out_free_lid:
2912 	kfree(lid);
2913 
2914 	return err;
2915 }
2916 
2917 static void ionic_lif_reset(struct ionic_lif *lif)
2918 {
2919 	struct ionic_dev *idev = &lif->ionic->idev;
2920 
2921 	mutex_lock(&lif->ionic->dev_cmd_lock);
2922 	ionic_dev_cmd_lif_reset(idev, lif->index);
2923 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2924 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2925 }
2926 
2927 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2928 {
2929 	struct ionic *ionic = lif->ionic;
2930 
2931 	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2932 		return;
2933 
2934 	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2935 
2936 	netif_device_detach(lif->netdev);
2937 
2938 	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2939 		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2940 		mutex_lock(&lif->queue_lock);
2941 		ionic_stop_queues(lif);
2942 		mutex_unlock(&lif->queue_lock);
2943 	}
2944 
2945 	if (netif_running(lif->netdev)) {
2946 		ionic_txrx_deinit(lif);
2947 		ionic_txrx_free(lif);
2948 	}
2949 	ionic_lif_deinit(lif);
2950 	ionic_reset(ionic);
2951 	ionic_qcqs_free(lif);
2952 
2953 	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2954 }
2955 
2956 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2957 {
2958 	struct ionic *ionic = lif->ionic;
2959 	int err;
2960 
2961 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2962 		return;
2963 
2964 	dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2965 
2966 	ionic_init_devinfo(ionic);
2967 	err = ionic_identify(ionic);
2968 	if (err)
2969 		goto err_out;
2970 	err = ionic_port_identify(ionic);
2971 	if (err)
2972 		goto err_out;
2973 	err = ionic_port_init(ionic);
2974 	if (err)
2975 		goto err_out;
2976 	err = ionic_qcqs_alloc(lif);
2977 	if (err)
2978 		goto err_out;
2979 
2980 	err = ionic_lif_init(lif);
2981 	if (err)
2982 		goto err_qcqs_free;
2983 
2984 	if (lif->registered)
2985 		ionic_lif_set_netdev_info(lif);
2986 
2987 	ionic_rx_filter_replay(lif);
2988 
2989 	if (netif_running(lif->netdev)) {
2990 		err = ionic_txrx_alloc(lif);
2991 		if (err)
2992 			goto err_lifs_deinit;
2993 
2994 		err = ionic_txrx_init(lif);
2995 		if (err)
2996 			goto err_txrx_free;
2997 	}
2998 
2999 	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3000 	ionic_link_status_check_request(lif, CAN_SLEEP);
3001 	netif_device_attach(lif->netdev);
3002 	dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3003 
3004 	/* restore the hardware timestamping queues */
3005 	ionic_lif_hwstamp_replay(lif);
3006 
3007 	return;
3008 
3009 err_txrx_free:
3010 	ionic_txrx_free(lif);
3011 err_lifs_deinit:
3012 	ionic_lif_deinit(lif);
3013 err_qcqs_free:
3014 	ionic_qcqs_free(lif);
3015 err_out:
3016 	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3017 }
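
/*
 * The fw_up path mirrors first-time bring-up: re-identify the device
 * and port, re-init the lif, replay the saved rx filters, and only
 * then rebuild the data path if the netdev was running.  The
 * FW_RESET bit is cleared only once that rebuild has succeeded.
 */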
3018 
3019 void ionic_lif_free(struct ionic_lif *lif)
3020 {
3021 	struct device *dev = lif->ionic->dev;
3022 
3023 	ionic_lif_free_phc(lif);
3024 
3025 	/* free rss indirection table */
3026 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3027 			  lif->rss_ind_tbl_pa);
3028 	lif->rss_ind_tbl = NULL;
3029 	lif->rss_ind_tbl_pa = 0;
3030 
3031 	/* free queues */
3032 	ionic_qcqs_free(lif);
3033 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3034 		ionic_lif_reset(lif);
3035 
3036 	/* free lif info */
3037 	kfree(lif->identity);
3038 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3039 	lif->info = NULL;
3040 	lif->info_pa = 0;
3041 
3042 	/* unmap doorbell page */
3043 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3044 	lif->kern_dbpage = NULL;
3045 	kfree(lif->dbid_inuse);
3046 	lif->dbid_inuse = NULL;
3047 
3048 	/* free netdev & lif */
3049 	ionic_debugfs_del_lif(lif);
3050 	free_netdev(lif->netdev);
3051 }
3052 
3053 void ionic_lif_deinit(struct ionic_lif *lif)
3054 {
3055 	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3056 		return;
3057 
3058 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3059 		cancel_work_sync(&lif->deferred.work);
3060 		cancel_work_sync(&lif->tx_timeout_work);
3061 		ionic_rx_filters_deinit(lif);
3062 		if (lif->netdev->features & NETIF_F_RXHASH)
3063 			ionic_lif_rss_deinit(lif);
3064 	}
3065 
3066 	napi_disable(&lif->adminqcq->napi);
3067 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3068 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
3069 
3070 	mutex_destroy(&lif->config_lock);
3071 	mutex_destroy(&lif->queue_lock);
3072 	ionic_lif_reset(lif);
3073 }
3074 
3075 static int ionic_lif_adminq_init(struct ionic_lif *lif)
3076 {
3077 	struct device *dev = lif->ionic->dev;
3078 	struct ionic_q_init_comp comp;
3079 	struct ionic_dev *idev;
3080 	struct ionic_qcq *qcq;
3081 	struct ionic_queue *q;
3082 	int err;
3083 
3084 	idev = &lif->ionic->idev;
3085 	qcq = lif->adminqcq;
3086 	q = &qcq->q;
3087 
3088 	mutex_lock(&lif->ionic->dev_cmd_lock);
3089 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3090 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3091 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3092 	mutex_unlock(&lif->ionic->dev_cmd_lock);
3093 	if (err) {
3094 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
3095 		return err;
3096 	}
3097 
3098 	q->hw_type = comp.hw_type;
3099 	q->hw_index = le32_to_cpu(comp.hw_index);
3100 	q->dbval = IONIC_DBELL_QID(q->hw_index);
3101 
3102 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3103 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3104 
3105 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
3106 		       NAPI_POLL_WEIGHT);
3107 
3108 	napi_enable(&qcq->napi);
3109 
3110 	if (qcq->flags & IONIC_QCQ_F_INTR)
3111 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3112 				IONIC_INTR_MASK_CLEAR);
3113 
3114 	qcq->flags |= IONIC_QCQ_F_INITED;
3115 
3116 	return 0;
3117 }
3118 
3119 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3120 {
3121 	struct ionic_qcq *qcq = lif->notifyqcq;
3122 	struct device *dev = lif->ionic->dev;
3123 	struct ionic_queue *q = &qcq->q;
3124 	int err;
3125 
3126 	struct ionic_admin_ctx ctx = {
3127 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3128 		.cmd.q_init = {
3129 			.opcode = IONIC_CMD_Q_INIT,
3130 			.lif_index = cpu_to_le16(lif->index),
3131 			.type = q->type,
3132 			.ver = lif->qtype_info[q->type].version,
3133 			.index = cpu_to_le32(q->index),
3134 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3135 					     IONIC_QINIT_F_ENA),
3136 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3137 			.pid = cpu_to_le16(q->pid),
3138 			.ring_size = ilog2(q->num_descs),
3139 			.ring_base = cpu_to_le64(q->base_pa),
3140 		}
3141 	};
3142 
3143 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3144 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3145 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3146 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3147 
3148 	err = ionic_adminq_post_wait(lif, &ctx);
3149 	if (err)
3150 		return err;
3151 
3152 	lif->last_eid = 0;
3153 	q->hw_type = ctx.comp.q_init.hw_type;
3154 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3155 	q->dbval = IONIC_DBELL_QID(q->hw_index);
3156 
3157 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3158 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3159 
3160 	/* preset the callback info */
3161 	q->info[0].cb_arg = lif;
3162 
3163 	qcq->flags |= IONIC_QCQ_F_INITED;
3164 
3165 	return 0;
3166 }
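
/*
 * The NotifyQ shares the AdminQ's interrupt (see the intr_index in
 * the q_init command above) and is serviced from the adminq napi.
 * q->info[0].cb_arg is preset to the lif because notifyq events are
 * consumed in place rather than completed through per-descriptor
 * callbacks.
 */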
3167 
3168 static int ionic_station_set(struct ionic_lif *lif)
3169 {
3170 	struct net_device *netdev = lif->netdev;
3171 	struct ionic_admin_ctx ctx = {
3172 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3173 		.cmd.lif_getattr = {
3174 			.opcode = IONIC_CMD_LIF_GETATTR,
3175 			.index = cpu_to_le16(lif->index),
3176 			.attr = IONIC_LIF_ATTR_MAC,
3177 		},
3178 	};
3179 	struct sockaddr addr;
3180 	int err;
3181 
3182 	err = ionic_adminq_post_wait(lif, &ctx);
3183 	if (err)
3184 		return err;
3185 	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3186 		   ctx.comp.lif_getattr.mac);
3187 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
3188 		return 0;
3189 
3190 	if (!is_zero_ether_addr(netdev->dev_addr)) {
3191 		/* If the netdev mac is non-zero and doesn't match the default
3192 		 * device address, it was set by something earlier and we're
3193 		 * likely here again after a fw-upgrade reset.  We need to be
3194 		 * sure the netdev mac is in our filter list.
3195 		 */
3196 		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
3197 				      netdev->dev_addr))
3198 			ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
3199 	} else {
3200 		/* Update the netdev mac with the device's mac */
3201 		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
3202 		addr.sa_family = AF_INET;
3203 		err = eth_prepare_mac_addr_change(netdev, &addr);
3204 		if (err) {
3205 			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3206 				    addr.sa_data, err);
3207 			return 0;
3208 		}
3209 
3210 		eth_commit_mac_addr_change(netdev, &addr);
3211 	}
3212 
3213 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3214 		   netdev->dev_addr);
3215 	ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
3216 
3217 	return 0;
3218 }
3219 
3220 int ionic_lif_init(struct ionic_lif *lif)
3221 {
3222 	struct ionic_dev *idev = &lif->ionic->idev;
3223 	struct device *dev = lif->ionic->dev;
3224 	struct ionic_lif_init_comp comp;
3225 	int dbpage_num;
3226 	int err;
3227 
3228 	mutex_lock(&lif->ionic->dev_cmd_lock);
3229 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3230 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3231 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3232 	mutex_unlock(&lif->ionic->dev_cmd_lock);
3233 	if (err)
3234 		return err;
3235 
3236 	lif->hw_index = le16_to_cpu(comp.hw_index);
3237 	mutex_init(&lif->queue_lock);
3238 	mutex_init(&lif->config_lock);
3239 
3240 	/* now that we have the hw_index we can figure out our doorbell page */
3241 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3242 	if (!lif->dbid_count) {
3243 		dev_err(dev, "No doorbell pages, aborting\n");
3244 		return -EINVAL;
3245 	}
3246 
3247 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
3248 	if (!lif->dbid_inuse) {
3249 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
3250 		return -ENOMEM;
3251 	}
3252 
3253 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
3254 	set_bit(0, lif->dbid_inuse);
3255 	lif->kern_pid = 0;
3256 
3257 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3258 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3259 	if (!lif->kern_dbpage) {
3260 		dev_err(dev, "Cannot map dbpage, aborting\n");
3261 		err = -ENOMEM;
3262 		goto err_out_free_dbid;
3263 	}
3264 
3265 	err = ionic_lif_adminq_init(lif);
3266 	if (err)
3267 		goto err_out_adminq_deinit;
3268 
3269 	if (lif->ionic->nnqs_per_lif) {
3270 		err = ionic_lif_notifyq_init(lif);
3271 		if (err)
3272 			goto err_out_notifyq_deinit;
3273 	}
3274 
3275 	err = ionic_init_nic_features(lif);
3276 	if (err)
3277 		goto err_out_notifyq_deinit;
3278 
3279 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3280 		err = ionic_rx_filters_init(lif);
3281 		if (err)
3282 			goto err_out_notifyq_deinit;
3283 	}
3284 
3285 	err = ionic_station_set(lif);
3286 	if (err)
3287 		goto err_out_notifyq_deinit;
3288 
3289 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3290 
3291 	set_bit(IONIC_LIF_F_INITED, lif->state);
3292 
3293 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
3294 
3295 	return 0;
3296 
3297 err_out_notifyq_deinit:
3298 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3299 err_out_adminq_deinit:
3300 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
3301 	ionic_lif_reset(lif);
3302 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3303 	lif->kern_dbpage = NULL;
3304 err_out_free_dbid:
3305 	kfree(lif->dbid_inuse);
3306 	lif->dbid_inuse = NULL;
3307 
3308 	return err;
3309 }
3310 
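/*
 * ionic_lif_notify_work() is intentionally empty: the netdev
 * notifier below handles everything inline, but the work struct is
 * still initialized in ionic_lif_register() so that
 * ionic_lif_unregister() always has something to cancel_work_sync().
 */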
3311 static void ionic_lif_notify_work(struct work_struct *ws)
3312 {
3313 }
3314 
3315 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
3316 {
3317 	struct ionic_admin_ctx ctx = {
3318 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3319 		.cmd.lif_setattr = {
3320 			.opcode = IONIC_CMD_LIF_SETATTR,
3321 			.index = cpu_to_le16(lif->index),
3322 			.attr = IONIC_LIF_ATTR_NAME,
3323 		},
3324 	};
3325 
3326 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
3327 		sizeof(ctx.cmd.lif_setattr.name));
3328 
3329 	ionic_adminq_post_wait(lif, &ctx);
3330 }
3331 
3332 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3333 {
3334 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3335 		return NULL;
3336 
3337 	return netdev_priv(netdev);
3338 }
3339 
3340 static int ionic_lif_notify(struct notifier_block *nb,
3341 			    unsigned long event, void *info)
3342 {
3343 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
3344 	struct ionic *ionic = container_of(nb, struct ionic, nb);
3345 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
3346 
3347 	if (!lif || lif->ionic != ionic)
3348 		return NOTIFY_DONE;
3349 
3350 	switch (event) {
3351 	case NETDEV_CHANGENAME:
3352 		ionic_lif_set_netdev_info(lif);
3353 		break;
3354 	}
3355 
3356 	return NOTIFY_DONE;
3357 }
3358 
3359 int ionic_lif_register(struct ionic_lif *lif)
3360 {
3361 	int err;
3362 
3363 	ionic_lif_register_phc(lif);
3364 
3365 	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3366 
3367 	lif->ionic->nb.notifier_call = ionic_lif_notify;
3368 
3369 	err = register_netdevice_notifier(&lif->ionic->nb);
3370 	if (err)
3371 		lif->ionic->nb.notifier_call = NULL;
3372 
3373 	/* only register LIF0 for now */
3374 	err = register_netdev(lif->netdev);
3375 	if (err) {
3376 		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3377 		ionic_lif_unregister_phc(lif);
3378 		return err;
3379 	}
3380 
3381 	ionic_link_status_check_request(lif, CAN_SLEEP);
3382 	lif->registered = true;
3383 	ionic_lif_set_netdev_info(lif);
3384 
3385 	return 0;
3386 }
3387 
3388 void ionic_lif_unregister(struct ionic_lif *lif)
3389 {
3390 	if (lif->ionic->nb.notifier_call) {
3391 		unregister_netdevice_notifier(&lif->ionic->nb);
3392 		cancel_work_sync(&lif->ionic->nb_work);
3393 		lif->ionic->nb.notifier_call = NULL;
3394 	}
3395 
3396 	if (lif->netdev->reg_state == NETREG_REGISTERED)
3397 		unregister_netdev(lif->netdev);
3398 
3399 	ionic_lif_unregister_phc(lif);
3400 
3401 	lif->registered = false;
3402 }
3403 
3404 static void ionic_lif_queue_identify(struct ionic_lif *lif)
3405 {
3406 	union ionic_q_identity __iomem *q_ident;
3407 	struct ionic *ionic = lif->ionic;
3408 	struct ionic_dev *idev;
3409 	int qtype;
3410 	int err;
3411 
3412 	idev = &lif->ionic->idev;
3413 	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
3414 
3415 	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
3416 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
3417 
3418 		/* handle only the queue types we know about */
3419 		switch (qtype) {
3420 		case IONIC_QTYPE_ADMINQ:
3421 		case IONIC_QTYPE_NOTIFYQ:
3422 		case IONIC_QTYPE_RXQ:
3423 		case IONIC_QTYPE_TXQ:
3424 			break;
3425 		default:
3426 			continue;
3427 		}
3428 
3429 		memset(qti, 0, sizeof(*qti));
3430 
3431 		mutex_lock(&ionic->dev_cmd_lock);
3432 		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
3433 					     ionic_qtype_versions[qtype]);
3434 		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3435 		if (!err) {
3436 			qti->version   = readb(&q_ident->version);
3437 			qti->supported = readb(&q_ident->supported);
3438 			qti->features  = readq(&q_ident->features);
3439 			qti->desc_sz   = readw(&q_ident->desc_sz);
3440 			qti->comp_sz   = readw(&q_ident->comp_sz);
3441 			qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
3442 			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
3443 			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
3444 		}
3445 		mutex_unlock(&ionic->dev_cmd_lock);
3446 
3447 		if (err == -EINVAL) {
3448 			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3449 			continue;
3450 		} else if (err == -EIO) {
3451 			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3452 			return;
3453 		} else if (err) {
3454 			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3455 				qtype, err);
3456 			return;
3457 		}
3458 
3459 		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3460 			qtype, qti->version);
3461 		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3462 			qtype, qti->supported);
3463 		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3464 			qtype, qti->features);
3465 		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3466 			qtype, qti->desc_sz);
3467 		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3468 			qtype, qti->comp_sz);
3469 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3470 			qtype, qti->sg_desc_sz);
3471 		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3472 			qtype, qti->max_sg_elems);
3473 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3474 			qtype, qti->sg_desc_stride);
3475 	}
3476 }
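
/*
 * Queue identification reads its results straight out of the device
 * command register window, hence the readb/readw/readq accessors on
 * the __iomem q_ident union.  -EINVAL from the device means that one
 * qtype isn't supported and the loop moves on; -EIO means firmware
 * predates the q_ident command entirely, so the remaining qtype info
 * is left at its version-0 defaults.
 */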
3477 
3478 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3479 		       union ionic_lif_identity *lid)
3480 {
3481 	struct ionic_dev *idev = &ionic->idev;
3482 	size_t sz;
3483 	int err;
3484 
3485 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3486 
3487 	mutex_lock(&ionic->dev_cmd_lock);
3488 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3489 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3490 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3491 	mutex_unlock(&ionic->dev_cmd_lock);
3492 	if (err)
3493 		return err;
3494 
3495 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3496 		le64_to_cpu(lid->capabilities));
3497 
3498 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3499 		le32_to_cpu(lid->eth.max_ucast_filters));
3500 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3501 		le32_to_cpu(lid->eth.max_mcast_filters));
3502 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3503 		le64_to_cpu(lid->eth.config.features));
3504 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3505 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3506 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3507 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3508 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3509 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3510 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3511 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3512 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3513 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3514 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3515 		le32_to_cpu(lid->eth.config.mtu));
3516 
3517 	return 0;
3518 }
3519 
3520 int ionic_lif_size(struct ionic *ionic)
3521 {
3522 	struct ionic_identity *ident = &ionic->ident;
3523 	unsigned int nintrs, dev_nintrs;
3524 	union ionic_lif_config *lc;
3525 	unsigned int ntxqs_per_lif;
3526 	unsigned int nrxqs_per_lif;
3527 	unsigned int neqs_per_lif;
3528 	unsigned int nnqs_per_lif;
3529 	unsigned int nxqs, neqs;
3530 	unsigned int min_intrs;
3531 	int err;
3532 
3533 	/* retrieve basic values from FW */
3534 	lc = &ident->lif.eth.config;
3535 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
3536 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
3537 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
3538 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
3539 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
3540 
3541 	/* limit values to play nice with kdump */
3542 	if (is_kdump_kernel()) {
3543 		dev_nintrs = 2;
3544 		neqs_per_lif = 0;
3545 		nnqs_per_lif = 0;
3546 		ntxqs_per_lif = 1;
3547 		nrxqs_per_lif = 1;
3548 	}
3549 
3550 	/* reserve last queue id for hardware timestamping */
3551 	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
3552 		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
3553 			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
3554 		} else {
3555 			ntxqs_per_lif -= 1;
3556 			nrxqs_per_lif -= 1;
3557 		}
3558 	}
3559 
3560 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
3561 	nxqs = min(nxqs, num_online_cpus());
3562 	neqs = min(neqs_per_lif, num_online_cpus());
3563 
3564 try_again:
3565 	/* interrupt usage:
3566 	 *    1 for master lif adminq/notifyq
3567 	 *    1 for each CPU for master lif TxRx queue pairs
3568 	 *    whatever's left is for RDMA queues
3569 	 */
3570 	nintrs = 1 + nxqs + neqs;
3571 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
3572 
3573 	if (nintrs > dev_nintrs)
3574 		goto try_fewer;
3575 
3576 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
3577 	if (err < 0 && err != -ENOSPC) {
3578 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
3579 		return err;
3580 	}
3581 	if (err == -ENOSPC)
3582 		goto try_fewer;
3583 
3584 	if (err != nintrs) {
3585 		ionic_bus_free_irq_vectors(ionic);
3586 		goto try_fewer;
3587 	}
3588 
3589 	ionic->nnqs_per_lif = nnqs_per_lif;
3590 	ionic->neqs_per_lif = neqs;
3591 	ionic->ntxqs_per_lif = nxqs;
3592 	ionic->nrxqs_per_lif = nxqs;
3593 	ionic->nintrs = nintrs;
3594 
3595 	ionic_debugfs_add_sizes(ionic);
3596 
3597 	return 0;
3598 
3599 try_fewer:
3600 	if (nnqs_per_lif > 1) {
3601 		nnqs_per_lif >>= 1;
3602 		goto try_again;
3603 	}
3604 	if (neqs > 1) {
3605 		neqs >>= 1;
3606 		goto try_again;
3607 	}
3608 	if (nxqs > 1) {
3609 		nxqs >>= 1;
3610 		goto try_again;
3611 	}
3612 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
3613 	return -ENOSPC;
3614 }
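
/*
 * Worked example of the sizing loop: with dev_nintrs = 10, 16 online
 * CPUs, nnqs_per_lif already 1, and no RDMA EQs, the first pass asks
 * for nintrs = 1 + 16 + 0 = 17 and overshoots; try_fewer halves nxqs
 * to 8, nintrs = 9 fits, and the lif ends up with 8 TxRx queue pairs
 * plus the one adminq/notifyq interrupt.
 */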
3615