xref: /linux/drivers/net/ethernet/pensando/ionic/ionic_lif.c (revision 9d106c6dd81bb26ad7fc3ee89cb1d62557c8e2c9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
21 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
22 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
23 static void ionic_link_status_check(struct ionic_lif *lif);
24 
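/* Deferred-work runner: pull one item off the lif's deferred list,
 * dispatch it by type, and reschedule ourselves in case there is
 * more to do.  Handling one item per pass keeps the spinlock hold
 * times short.
 */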
25 static void ionic_lif_deferred_work(struct work_struct *work)
26 {
27 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
28 	struct ionic_deferred *def = &lif->deferred;
29 	struct ionic_deferred_work *w = NULL;
30 
31 	spin_lock_bh(&def->lock);
32 	if (!list_empty(&def->list)) {
33 		w = list_first_entry(&def->list,
34 				     struct ionic_deferred_work, list);
35 		list_del(&w->list);
36 	}
37 	spin_unlock_bh(&def->lock);
38 
39 	if (w) {
40 		switch (w->type) {
41 		case IONIC_DW_TYPE_RX_MODE:
42 			ionic_lif_rx_mode(lif, w->rx_mode);
43 			break;
44 		case IONIC_DW_TYPE_RX_ADDR_ADD:
45 			ionic_lif_addr_add(lif, w->addr);
46 			break;
47 		case IONIC_DW_TYPE_RX_ADDR_DEL:
48 			ionic_lif_addr_del(lif, w->addr);
49 			break;
50 		case IONIC_DW_TYPE_LINK_STATUS:
51 			ionic_link_status_check(lif);
52 			break;
53 		default:
54 			break;
55 		}
56 		kfree(w);
57 		schedule_work(&def->work);
58 	}
59 }
60 
61 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
62 				       struct ionic_deferred_work *work)
63 {
64 	spin_lock_bh(&def->lock);
65 	list_add_tail(&work->list, &def->list);
66 	spin_unlock_bh(&def->lock);
67 	schedule_work(&def->work);
68 }
69 
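/* Reconcile the netdev carrier state with the port status reported
 * by the device, waking or stopping the tx queues to match.
 */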
70 static void ionic_link_status_check(struct ionic_lif *lif)
71 {
72 	struct net_device *netdev = lif->netdev;
73 	u16 link_status;
74 	bool link_up;
75 
76 	link_status = le16_to_cpu(lif->info->status.link_status);
77 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
78 
79 	/* filter out the no-change cases */
80 	if (link_up == netif_carrier_ok(netdev))
81 		goto link_out;
82 
83 	if (link_up) {
84 		netdev_info(netdev, "Link up - %d Gbps\n",
85 			    le32_to_cpu(lif->info->status.link_speed) / 1000);
86 
87 		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
88 			netif_tx_wake_all_queues(lif->netdev);
89 			netif_carrier_on(netdev);
90 		}
91 	} else {
92 		netdev_info(netdev, "Link down\n");
93 
94 		/* carrier off first to avoid watchdog timeout */
95 		netif_carrier_off(netdev);
96 		if (test_bit(IONIC_LIF_F_UP, lif->state))
97 			netif_tx_stop_all_queues(netdev);
98 	}
99 
100 link_out:
101 	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
102 }
103 
104 static void ionic_link_status_check_request(struct ionic_lif *lif)
105 {
106 	struct ionic_deferred_work *work;
107 
108 	/* we only need one request outstanding at a time */
109 	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
110 		return;
111 
112 	if (in_interrupt()) {
113 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
114 		if (!work)
115 			return;
116 
117 		work->type = IONIC_DW_TYPE_LINK_STATUS;
118 		ionic_lif_deferred_enqueue(&lif->deferred, work);
119 	} else {
120 		ionic_link_status_check(lif);
121 	}
122 }
123 
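/* All of our queue interrupts are handled the same way: kick NAPI */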
124 static irqreturn_t ionic_isr(int irq, void *data)
125 {
126 	struct napi_struct *napi = data;
127 
128 	napi_schedule_irqoff(napi);
129 
130 	return IRQ_HANDLED;
131 }
132 
133 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
134 {
135 	struct ionic_intr_info *intr = &qcq->intr;
136 	struct device *dev = lif->ionic->dev;
137 	struct ionic_queue *q = &qcq->q;
138 	const char *name;
139 
140 	if (lif->registered)
141 		name = lif->netdev->name;
142 	else
143 		name = dev_name(dev);
144 
145 	snprintf(intr->name, sizeof(intr->name),
146 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
147 
148 	return devm_request_irq(dev, intr->vector, ionic_isr,
149 				0, intr->name, &qcq->napi);
150 }
151 
152 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
153 {
154 	struct ionic *ionic = lif->ionic;
155 	int index;
156 
157 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
158 	if (index == ionic->nintrs) {
159 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
160 			    __func__, index, ionic->nintrs);
161 		return -ENOSPC;
162 	}
163 
164 	set_bit(index, ionic->intrs);
165 	ionic_intr_init(&ionic->idev, intr, index);
166 
167 	return 0;
168 }
169 
170 static void ionic_intr_free(struct ionic_lif *lif, int index)
171 {
172 	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
173 		clear_bit(index, lif->ionic->intrs);
174 }
175 
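/* Bring a queue/cq pair to life: set the irq affinity hint, enable
 * NAPI, unmask the interrupt, then ask the device to enable the
 * queue.  ionic_qcq_disable() undoes this in roughly reverse order.
 */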
176 static int ionic_qcq_enable(struct ionic_qcq *qcq)
177 {
178 	struct ionic_queue *q = &qcq->q;
179 	struct ionic_lif *lif = q->lif;
180 	struct ionic_dev *idev;
181 	struct device *dev;
182 
183 	struct ionic_admin_ctx ctx = {
184 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
185 		.cmd.q_control = {
186 			.opcode = IONIC_CMD_Q_CONTROL,
187 			.lif_index = cpu_to_le16(lif->index),
188 			.type = q->type,
189 			.index = cpu_to_le32(q->index),
190 			.oper = IONIC_Q_ENABLE,
191 		},
192 	};
193 
194 	idev = &lif->ionic->idev;
195 	dev = lif->ionic->dev;
196 
197 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
198 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
199 
200 	if (qcq->flags & IONIC_QCQ_F_INTR) {
201 		irq_set_affinity_hint(qcq->intr.vector,
202 				      &qcq->intr.affinity_mask);
203 		napi_enable(&qcq->napi);
204 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
205 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
206 				IONIC_INTR_MASK_CLEAR);
207 	}
208 
209 	return ionic_adminq_post_wait(lif, &ctx);
210 }
211 
212 static int ionic_qcq_disable(struct ionic_qcq *qcq)
213 {
214 	struct ionic_queue *q = &qcq->q;
215 	struct ionic_lif *lif = q->lif;
216 	struct ionic_dev *idev;
217 	struct device *dev;
218 
219 	struct ionic_admin_ctx ctx = {
220 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
221 		.cmd.q_control = {
222 			.opcode = IONIC_CMD_Q_CONTROL,
223 			.lif_index = cpu_to_le16(lif->index),
224 			.type = q->type,
225 			.index = cpu_to_le32(q->index),
226 			.oper = IONIC_Q_DISABLE,
227 		},
228 	};
229 
230 	idev = &lif->ionic->idev;
231 	dev = lif->ionic->dev;
232 
233 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
234 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
235 
236 	if (qcq->flags & IONIC_QCQ_F_INTR) {
237 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
238 				IONIC_INTR_MASK_SET);
239 		synchronize_irq(qcq->intr.vector);
240 		irq_set_affinity_hint(qcq->intr.vector, NULL);
241 		napi_disable(&qcq->napi);
242 	}
243 
244 	return ionic_adminq_post_wait(lif, &ctx);
245 }
246 
247 static void ionic_lif_quiesce(struct ionic_lif *lif)
248 {
249 	struct ionic_admin_ctx ctx = {
250 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
251 		.cmd.lif_setattr = {
252 			.opcode = IONIC_CMD_LIF_SETATTR,
253 			.attr = IONIC_LIF_ATTR_STATE,
254 			.index = cpu_to_le16(lif->index),
255 			.state = IONIC_LIF_DISABLE,
256 		},
257 	};
258 
259 	ionic_adminq_post_wait(lif, &ctx);
260 }
261 
262 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
263 {
264 	struct ionic_dev *idev = &lif->ionic->idev;
265 	struct device *dev = lif->ionic->dev;
266 
267 	if (!qcq)
268 		return;
269 
270 	ionic_debugfs_del_qcq(qcq);
271 
272 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
273 		return;
274 
275 	if (qcq->flags & IONIC_QCQ_F_INTR) {
276 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
277 				IONIC_INTR_MASK_SET);
278 		irq_set_affinity_hint(qcq->intr.vector, NULL);
279 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
280 		netif_napi_del(&qcq->napi);
281 		qcq->intr.vector = 0;
282 	}
283 
284 	qcq->flags &= ~IONIC_QCQ_F_INITED;
285 }
286 
287 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
288 {
289 	struct device *dev = lif->ionic->dev;
290 
291 	if (!qcq)
292 		return;
293 
294 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
295 	qcq->base = NULL;
296 	qcq->base_pa = 0;
297 
298 	if (qcq->flags & IONIC_QCQ_F_INTR)
299 		ionic_intr_free(lif, qcq->intr.index);
300 
301 	devm_kfree(dev, qcq->cq.info);
302 	qcq->cq.info = NULL;
303 	devm_kfree(dev, qcq->q.info);
304 	qcq->q.info = NULL;
305 	devm_kfree(dev, qcq);
306 }
307 
308 static void ionic_qcqs_free(struct ionic_lif *lif)
309 {
310 	struct device *dev = lif->ionic->dev;
311 	unsigned int i;
312 
313 	if (lif->notifyqcq) {
314 		ionic_qcq_free(lif, lif->notifyqcq);
315 		lif->notifyqcq = NULL;
316 	}
317 
318 	if (lif->adminqcq) {
319 		ionic_qcq_free(lif, lif->adminqcq);
320 		lif->adminqcq = NULL;
321 	}
322 
323 	if (lif->rxqcqs) {
324 		for (i = 0; i < lif->nxqs; i++)
325 			if (lif->rxqcqs[i].stats)
326 				devm_kfree(dev, lif->rxqcqs[i].stats);
327 		devm_kfree(dev, lif->rxqcqs);
328 		lif->rxqcqs = NULL;
329 	}
330 
331 	if (lif->txqcqs) {
332 		for (i = 0; i < lif->nxqs; i++)
333 			if (lif->txqcqs[i].stats)
334 				devm_kfree(dev, lif->txqcqs[i].stats);
335 		devm_kfree(dev, lif->txqcqs);
336 		lif->txqcqs = NULL;
337 	}
338 }
339 
340 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
341 				      struct ionic_qcq *n_qcq)
342 {
343 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
344 		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
345 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
346 	}
347 
348 	n_qcq->intr.vector = src_qcq->intr.vector;
349 	n_qcq->intr.index = src_qcq->intr.index;
350 }
351 
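/* Allocate a qcq: the descriptor ring, completion ring, and optional
 * SG ring share one coherent DMA block, each ring rounded up to a
 * page boundary within it.  A rough sketch of the layout computed
 * below (not an exact byte map):
 *
 *   base -> [ q descs ][pad][ cq descs ][pad][ sg descs ]
 */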
352 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
353 			   unsigned int index,
354 			   const char *name, unsigned int flags,
355 			   unsigned int num_descs, unsigned int desc_size,
356 			   unsigned int cq_desc_size,
357 			   unsigned int sg_desc_size,
358 			   unsigned int pid, struct ionic_qcq **qcq)
359 {
360 	struct ionic_dev *idev = &lif->ionic->idev;
361 	u32 q_size, cq_size, sg_size, total_size;
362 	struct device *dev = lif->ionic->dev;
363 	void *q_base, *cq_base, *sg_base;
364 	dma_addr_t cq_base_pa = 0;
365 	dma_addr_t sg_base_pa = 0;
366 	dma_addr_t q_base_pa = 0;
367 	struct ionic_qcq *new;
368 	int err;
369 
370 	*qcq = NULL;
371 
372 	q_size  = num_descs * desc_size;
373 	cq_size = num_descs * cq_desc_size;
374 	sg_size = num_descs * sg_desc_size;
375 
376 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
377 	/* Note: aligning q_size and cq_size is not enough, because the
378 	 * DMA block may not start page-aligned and cq_base must itself
379 	 * be page-aligned within it; add a page of slack for the offset.
380 	 */
381 	total_size += PAGE_SIZE;
382 	if (flags & IONIC_QCQ_F_SG) {
383 		total_size += ALIGN(sg_size, PAGE_SIZE);
384 		total_size += PAGE_SIZE;
385 	}
386 
387 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
388 	if (!new) {
389 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
390 		err = -ENOMEM;
391 		goto err_out;
392 	}
393 
394 	new->flags = flags;
395 
396 	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
397 				   GFP_KERNEL);
398 	if (!new->q.info) {
399 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
400 		err = -ENOMEM;
401 		goto err_out;
402 	}
403 
404 	new->q.type = type;
405 
406 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
407 			   desc_size, sg_desc_size, pid);
408 	if (err) {
409 		netdev_err(lif->netdev, "Cannot initialize queue\n");
410 		goto err_out;
411 	}
412 
413 	if (flags & IONIC_QCQ_F_INTR) {
414 		err = ionic_intr_alloc(lif, &new->intr);
415 		if (err) {
416 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
417 				    name, err);
418 			goto err_out;
419 		}
420 
421 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
422 		if (err < 0) {
423 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
424 				    name, err);
425 			goto err_out_free_intr;
426 		}
427 		new->intr.vector = err;
428 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
429 				       IONIC_INTR_MASK_SET);
430 
431 		new->intr.cpu = cpumask_local_spread(new->intr.index,
432 						     dev_to_node(dev));
433 		if (new->intr.cpu != -1)
434 			cpumask_set_cpu(new->intr.cpu,
435 					&new->intr.affinity_mask);
436 	} else {
437 		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
438 	}
439 
440 	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
441 				    GFP_KERNEL);
442 	if (!new->cq.info) {
443 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
444 		err = -ENOMEM;
445 		goto err_out_free_intr;
446 	}
447 
448 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
449 	if (err) {
450 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
451 		goto err_out_free_intr;
452 	}
453 
454 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
455 				       GFP_KERNEL);
456 	if (!new->base) {
457 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
458 		err = -ENOMEM;
459 		goto err_out_free_intr;
460 	}
461 
462 	new->total_size = total_size;
463 
464 	q_base = new->base;
465 	q_base_pa = new->base_pa;
466 
467 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
468 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
469 
470 	if (flags & IONIC_QCQ_F_SG) {
471 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
472 					PAGE_SIZE);
473 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
474 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
475 	}
476 
477 	ionic_q_map(&new->q, q_base, q_base_pa);
478 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
479 	ionic_cq_bind(&new->cq, &new->q);
480 
481 	*qcq = new;
482 
483 	return 0;
484 
485 err_out_free_intr:
486 	ionic_intr_free(lif, new->intr.index);
487 err_out:
488 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
489 	return err;
490 }
491 
492 static int ionic_qcqs_alloc(struct ionic_lif *lif)
493 {
494 	struct device *dev = lif->ionic->dev;
495 	unsigned int q_list_size;
496 	unsigned int flags;
497 	int err;
498 	int i;
499 
500 	flags = IONIC_QCQ_F_INTR;
501 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
502 			      IONIC_ADMINQ_LENGTH,
503 			      sizeof(struct ionic_admin_cmd),
504 			      sizeof(struct ionic_admin_comp),
505 			      0, lif->kern_pid, &lif->adminqcq);
506 	if (err)
507 		return err;
508 
509 	if (lif->ionic->nnqs_per_lif) {
510 		flags = IONIC_QCQ_F_NOTIFYQ;
511 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
512 				      flags, IONIC_NOTIFYQ_LENGTH,
513 				      sizeof(struct ionic_notifyq_cmd),
514 				      sizeof(union ionic_notifyq_comp),
515 				      0, lif->kern_pid, &lif->notifyqcq);
516 		if (err)
517 			goto err_out_free_adminqcq;
518 
519 		/* Let the notifyq ride on the adminq interrupt */
520 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
521 	}
522 
523 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
524 	err = -ENOMEM;
525 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
526 	if (!lif->txqcqs)
527 		goto err_out_free_notifyqcq;
528 	for (i = 0; i < lif->nxqs; i++) {
529 		lif->txqcqs[i].stats = devm_kzalloc(dev,
530 						    sizeof(struct ionic_q_stats),
531 						    GFP_KERNEL);
532 		if (!lif->txqcqs[i].stats)
533 			goto err_out_free_tx_stats;
534 	}
535 
536 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
537 	if (!lif->rxqcqs)
538 		goto err_out_free_tx_stats;
539 	for (i = 0; i < lif->nxqs; i++) {
540 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
541 						    sizeof(struct ionic_q_stats),
542 						    GFP_KERNEL);
543 		if (!lif->rxqcqs[i].stats)
544 			goto err_out_free_rx_stats;
545 	}
546 
547 	return 0;
548 
549 err_out_free_rx_stats:
550 	for (i = 0; i < lif->nxqs; i++)
551 		if (lif->rxqcqs[i].stats)
552 			devm_kfree(dev, lif->rxqcqs[i].stats);
553 	devm_kfree(dev, lif->rxqcqs);
554 	lif->rxqcqs = NULL;
555 err_out_free_tx_stats:
556 	for (i = 0; i < lif->nxqs; i++)
557 		if (lif->txqcqs[i].stats)
558 			devm_kfree(dev, lif->txqcqs[i].stats);
559 	devm_kfree(dev, lif->txqcqs);
560 	lif->txqcqs = NULL;
561 err_out_free_notifyqcq:
562 	if (lif->notifyqcq) {
563 		ionic_qcq_free(lif, lif->notifyqcq);
564 		lif->notifyqcq = NULL;
565 	}
566 err_out_free_adminqcq:
567 	ionic_qcq_free(lif, lif->adminqcq);
568 	lif->adminqcq = NULL;
569 
570 	return err;
571 }
572 
573 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
574 {
575 	struct device *dev = lif->ionic->dev;
576 	struct ionic_queue *q = &qcq->q;
577 	struct ionic_cq *cq = &qcq->cq;
578 	struct ionic_admin_ctx ctx = {
579 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
580 		.cmd.q_init = {
581 			.opcode = IONIC_CMD_Q_INIT,
582 			.lif_index = cpu_to_le16(lif->index),
583 			.type = q->type,
584 			.index = cpu_to_le32(q->index),
585 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
586 					     IONIC_QINIT_F_SG),
587 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
588 			.pid = cpu_to_le16(q->pid),
589 			.ring_size = ilog2(q->num_descs),
590 			.ring_base = cpu_to_le64(q->base_pa),
591 			.cq_ring_base = cpu_to_le64(cq->base_pa),
592 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
593 		},
594 	};
595 	int err;
596 
597 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
598 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
599 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
600 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
601 
602 	err = ionic_adminq_post_wait(lif, &ctx);
603 	if (err)
604 		return err;
605 
606 	q->hw_type = ctx.comp.q_init.hw_type;
607 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
608 	q->dbval = IONIC_DBELL_QID(q->hw_index);
609 
610 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
611 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
612 
613 	qcq->flags |= IONIC_QCQ_F_INITED;
614 
615 	ionic_debugfs_add_qcq(lif, qcq);
616 
617 	return 0;
618 }
619 
620 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
621 {
622 	struct device *dev = lif->ionic->dev;
623 	struct ionic_queue *q = &qcq->q;
624 	struct ionic_cq *cq = &qcq->cq;
625 	struct ionic_admin_ctx ctx = {
626 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
627 		.cmd.q_init = {
628 			.opcode = IONIC_CMD_Q_INIT,
629 			.lif_index = cpu_to_le16(lif->index),
630 			.type = q->type,
631 			.index = cpu_to_le32(q->index),
632 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
633 					     IONIC_QINIT_F_SG),
634 			.intr_index = cpu_to_le16(cq->bound_intr->index),
635 			.pid = cpu_to_le16(q->pid),
636 			.ring_size = ilog2(q->num_descs),
637 			.ring_base = cpu_to_le64(q->base_pa),
638 			.cq_ring_base = cpu_to_le64(cq->base_pa),
639 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
640 		},
641 	};
642 	int err;
643 
644 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
645 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
646 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
647 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
648 
649 	err = ionic_adminq_post_wait(lif, &ctx);
650 	if (err)
651 		return err;
652 
653 	q->hw_type = ctx.comp.q_init.hw_type;
654 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
655 	q->dbval = IONIC_DBELL_QID(q->hw_index);
656 
657 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
658 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
659 
660 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
661 		       NAPI_POLL_WEIGHT);
662 
663 	err = ionic_request_irq(lif, qcq);
664 	if (err) {
665 		netif_napi_del(&qcq->napi);
666 		return err;
667 	}
668 
669 	qcq->flags |= IONIC_QCQ_F_INITED;
670 
671 	ionic_debugfs_add_qcq(lif, qcq);
672 
673 	return 0;
674 }
675 
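/* NotifyQ completions carry a monotonically increasing event id (eid)
 * rather than a color bit; anything at or below the last eid we've
 * seen has already been handled.
 */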
676 static bool ionic_notifyq_service(struct ionic_cq *cq,
677 				  struct ionic_cq_info *cq_info)
678 {
679 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
680 	struct net_device *netdev;
681 	struct ionic_queue *q;
682 	struct ionic_lif *lif;
683 	u64 eid;
684 
685 	q = cq->bound_q;
686 	lif = q->info[0].cb_arg;
687 	netdev = lif->netdev;
688 	eid = le64_to_cpu(comp->event.eid);
689 
690 	/* Have we run out of new completions to process? */
691 	if (eid <= lif->last_eid)
692 		return false;
693 
694 	lif->last_eid = eid;
695 
696 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
697 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
698 			 comp, sizeof(*comp), true);
699 
700 	switch (le16_to_cpu(comp->event.ecode)) {
701 	case IONIC_EVENT_LINK_CHANGE:
702 		ionic_link_status_check_request(lif);
703 		break;
704 	case IONIC_EVENT_RESET:
705 		netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
706 			    eid);
707 		netdev_info(netdev, "  reset_code=%d state=%d\n",
708 			    comp->reset.reset_code,
709 			    comp->reset.state);
710 		break;
711 	default:
712 		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
713 			    le16_to_cpu(comp->event.ecode), eid);
714 		break;
715 	}
716 
717 	return true;
718 }
719 
720 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
721 {
722 	struct ionic_dev *idev = &lif->ionic->idev;
723 	struct ionic_cq *cq = &lif->notifyqcq->cq;
724 	u32 work_done;
725 
726 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
727 				     NULL, NULL);
728 	if (work_done)
729 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
730 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
731 
732 	return work_done;
733 }
734 
735 static bool ionic_adminq_service(struct ionic_cq *cq,
736 				 struct ionic_cq_info *cq_info)
737 {
738 	struct ionic_admin_comp *comp = cq_info->cq_desc;
739 
740 	if (!color_match(comp->color, cq->done_color))
741 		return false;
742 
743 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
744 
745 	return true;
746 }
747 
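/* The notifyq rides on the adminq interrupt (see ionic_qcqs_alloc()),
 * so the adminq NAPI poll also drains any pending notifyq events.
 */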
748 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
749 {
750 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
751 	int n_work = 0;
752 	int a_work = 0;
753 
754 	if (likely(lif->notifyqcq && (lif->notifyqcq->flags & IONIC_QCQ_F_INITED)))
755 		n_work = ionic_notifyq_clean(lif, budget);
756 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
757 
758 	return max(n_work, a_work);
759 }
760 
761 static void ionic_get_stats64(struct net_device *netdev,
762 			      struct rtnl_link_stats64 *ns)
763 {
764 	struct ionic_lif *lif = netdev_priv(netdev);
765 	struct ionic_lif_stats *ls;
766 
767 	memset(ns, 0, sizeof(*ns));
768 	ls = &lif->info->stats;
769 
770 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
771 			 le64_to_cpu(ls->rx_mcast_packets) +
772 			 le64_to_cpu(ls->rx_bcast_packets);
773 
774 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
775 			 le64_to_cpu(ls->tx_mcast_packets) +
776 			 le64_to_cpu(ls->tx_bcast_packets);
777 
778 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
779 		       le64_to_cpu(ls->rx_mcast_bytes) +
780 		       le64_to_cpu(ls->rx_bcast_bytes);
781 
782 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
783 		       le64_to_cpu(ls->tx_mcast_bytes) +
784 		       le64_to_cpu(ls->tx_bcast_bytes);
785 
786 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
787 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
788 			 le64_to_cpu(ls->rx_bcast_drop_packets);
789 
790 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
791 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
792 			 le64_to_cpu(ls->tx_bcast_drop_packets);
793 
794 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
795 
796 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
797 
798 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
799 			       le64_to_cpu(ls->rx_queue_disabled) +
800 			       le64_to_cpu(ls->rx_desc_fetch_error) +
801 			       le64_to_cpu(ls->rx_desc_data_error);
802 
803 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
804 				le64_to_cpu(ls->tx_queue_disabled) +
805 				le64_to_cpu(ls->tx_desc_fetch_error) +
806 				le64_to_cpu(ls->tx_desc_data_error);
807 
808 	ns->rx_errors = ns->rx_over_errors +
809 			ns->rx_missed_errors;
810 
811 	ns->tx_errors = ns->tx_aborted_errors;
812 }
813 
814 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
815 {
816 	struct ionic_admin_ctx ctx = {
817 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
818 		.cmd.rx_filter_add = {
819 			.opcode = IONIC_CMD_RX_FILTER_ADD,
820 			.lif_index = cpu_to_le16(lif->index),
821 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
822 		},
823 	};
824 	struct ionic_rx_filter *f;
825 	int err;
826 
827 	/* don't bother if we already have it */
828 	spin_lock_bh(&lif->rx_filters.lock);
829 	f = ionic_rx_filter_by_addr(lif, addr);
830 	spin_unlock_bh(&lif->rx_filters.lock);
831 	if (f)
832 		return 0;
833 
834 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
835 	err = ionic_adminq_post_wait(lif, &ctx);
836 	if (err && err != -EEXIST)
837 		return err;
838 
839 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
840 		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));
841 
842 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
843 }
844 
845 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
846 {
847 	struct ionic_admin_ctx ctx = {
848 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
849 		.cmd.rx_filter_del = {
850 			.opcode = IONIC_CMD_RX_FILTER_DEL,
851 			.lif_index = cpu_to_le16(lif->index),
852 		},
853 	};
854 	struct ionic_rx_filter *f;
855 	int err;
856 
857 	spin_lock_bh(&lif->rx_filters.lock);
858 	f = ionic_rx_filter_by_addr(lif, addr);
859 	if (!f) {
860 		spin_unlock_bh(&lif->rx_filters.lock);
861 		return -ENOENT;
862 	}
863 
864 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
865 	ionic_rx_filter_free(lif, f);
866 	spin_unlock_bh(&lif->rx_filters.lock);
867 
868 	err = ionic_adminq_post_wait(lif, &ctx);
869 	if (err && err != -EEXIST)
870 		return err;
871 
872 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
873 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
874 
875 	return 0;
876 }
877 
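/* Add or delete a MAC filter, enforcing the device's filter-count
 * limits up front and deferring the adminq work to process context
 * when we're called from atomic context.
 */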
878 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
879 {
880 	struct ionic *ionic = lif->ionic;
881 	struct ionic_deferred_work *work;
882 	unsigned int nmfilters;
883 	unsigned int nufilters;
884 
885 	if (add) {
886 		/* Do we have space for this filter?  We test the counters
887 		 * here before checking the need for deferral so that we
888 		 * can return an overflow error to the stack.
889 		 */
890 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
891 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
892 
893 		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
894 			lif->nmcast++;
895 		else if (!is_multicast_ether_addr(addr) &&
896 			 lif->nucast < nufilters)
897 			lif->nucast++;
898 		else
899 			return -ENOSPC;
900 	} else {
901 		if (is_multicast_ether_addr(addr) && lif->nmcast)
902 			lif->nmcast--;
903 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
904 			lif->nucast--;
905 	}
906 
907 	if (in_interrupt()) {
908 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
909 		if (!work) {
910 			netdev_err(lif->netdev, "%s OOM\n", __func__);
911 			return -ENOMEM;
912 		}
913 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
914 				   IONIC_DW_TYPE_RX_ADDR_DEL;
915 		memcpy(work->addr, addr, ETH_ALEN);
916 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
917 			   add ? "add" : "del", addr);
918 		ionic_lif_deferred_enqueue(&lif->deferred, work);
919 	} else {
920 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
921 			   add ? "add" : "del", addr);
922 		if (add)
923 			return ionic_lif_addr_add(lif, addr);
924 		else
925 			return ionic_lif_addr_del(lif, addr);
926 	}
927 
928 	return 0;
929 }
930 
931 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
932 {
933 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
934 }
935 
936 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
937 {
938 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
939 }
940 
941 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
942 {
943 	struct ionic_admin_ctx ctx = {
944 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
945 		.cmd.rx_mode_set = {
946 			.opcode = IONIC_CMD_RX_MODE_SET,
947 			.lif_index = cpu_to_le16(lif->index),
948 			.rx_mode = cpu_to_le16(rx_mode),
949 		},
950 	};
951 	char buf[128];
952 	int err;
953 	int i;
954 #define REMAIN(__x) (sizeof(buf) - (__x))
955 
956 	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
957 		      lif->rx_mode, rx_mode);
958 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
959 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
960 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
961 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
962 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
963 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
964 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
965 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
966 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
967 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
968 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
969 
970 	err = ionic_adminq_post_wait(lif, &ctx);
971 	if (err)
972 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
973 			    rx_mode, err);
974 	else
975 		lif->rx_mode = rx_mode;
976 }
977 
978 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
979 {
980 	struct ionic_deferred_work *work;
981 
982 	if (in_interrupt()) {
983 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
984 		if (!work) {
985 			netdev_err(lif->netdev, "%s OOM\n", __func__);
986 			return;
987 		}
988 		work->type = IONIC_DW_TYPE_RX_MODE;
989 		work->rx_mode = rx_mode;
990 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
991 		ionic_lif_deferred_enqueue(&lif->deferred, work);
992 	} else {
993 		ionic_lif_rx_mode(lif, rx_mode);
994 	}
995 }
996 
997 static void ionic_set_rx_mode(struct net_device *netdev)
998 {
999 	struct ionic_lif *lif = netdev_priv(netdev);
1000 	struct ionic_identity *ident;
1001 	unsigned int nfilters;
1002 	unsigned int rx_mode;
1003 
1004 	ident = &lif->ionic->ident;
1005 
1006 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1007 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1008 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1009 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1010 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1011 
1012 	/* sync the unicast addresses, then check for an overflow state:
1013 	 * if the filter table cannot hold them all, remember that we
1014 	 * overflowed and enable PROMISC in the NIC;
1015 	 * else, if a previous overflow has since cleared, drop our
1016 	 * overflow flag and check the netdev flags to see whether
1017 	 * PROMISC can now be disabled.
1018 	 */
1019 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1020 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1021 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1022 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1023 		lif->uc_overflow = true;
1024 	} else if (lif->uc_overflow) {
1025 		lif->uc_overflow = false;
1026 		if (!(netdev->flags & IFF_PROMISC))
1027 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1028 	}
1029 
1030 	/* same for multicast */
1031 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1032 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1033 	if (netdev_mc_count(netdev) > nfilters) {
1034 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1035 		lif->mc_overflow = true;
1036 	} else if (lif->mc_overflow) {
1037 		lif->mc_overflow = false;
1038 		if (!(netdev->flags & IFF_ALLMULTI))
1039 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1040 	}
1041 
1042 	if (lif->rx_mode != rx_mode)
1043 		_ionic_lif_rx_mode(lif, rx_mode);
1044 }
1045 
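/* Translate netdev feature bits into the device's feature-bit
 * namespace for the LIF_SETATTR features command.
 */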
1046 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1047 {
1048 	u64 wanted = 0;
1049 
1050 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1051 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1052 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1053 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1054 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1055 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1056 	if (features & NETIF_F_RXHASH)
1057 		wanted |= IONIC_ETH_HW_RX_HASH;
1058 	if (features & NETIF_F_RXCSUM)
1059 		wanted |= IONIC_ETH_HW_RX_CSUM;
1060 	if (features & NETIF_F_SG)
1061 		wanted |= IONIC_ETH_HW_TX_SG;
1062 	if (features & NETIF_F_HW_CSUM)
1063 		wanted |= IONIC_ETH_HW_TX_CSUM;
1064 	if (features & NETIF_F_TSO)
1065 		wanted |= IONIC_ETH_HW_TSO;
1066 	if (features & NETIF_F_TSO6)
1067 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1068 	if (features & NETIF_F_TSO_ECN)
1069 		wanted |= IONIC_ETH_HW_TSO_ECN;
1070 	if (features & NETIF_F_GSO_GRE)
1071 		wanted |= IONIC_ETH_HW_TSO_GRE;
1072 	if (features & NETIF_F_GSO_GRE_CSUM)
1073 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1074 	if (features & NETIF_F_GSO_IPXIP4)
1075 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1076 	if (features & NETIF_F_GSO_IPXIP6)
1077 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1078 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1079 		wanted |= IONIC_ETH_HW_TSO_UDP;
1080 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1081 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1082 
1083 	return cpu_to_le64(wanted);
1084 }
1085 
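/* Feature negotiation: we request a feature set and the device
 * answers with what it can actually do, so lif->hw_features ends up
 * as the intersection of the two.
 */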
1086 static int ionic_set_nic_features(struct ionic_lif *lif,
1087 				  netdev_features_t features)
1088 {
1089 	struct device *dev = lif->ionic->dev;
1090 	struct ionic_admin_ctx ctx = {
1091 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1092 		.cmd.lif_setattr = {
1093 			.opcode = IONIC_CMD_LIF_SETATTR,
1094 			.index = cpu_to_le16(lif->index),
1095 			.attr = IONIC_LIF_ATTR_FEATURES,
1096 		},
1097 	};
1098 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1099 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1100 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1101 	u64 old_hw_features;
1102 	int err;
1103 
1104 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1105 	err = ionic_adminq_post_wait(lif, &ctx);
1106 	if (err)
1107 		return err;
1108 
1109 	old_hw_features = lif->hw_features;
1110 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1111 				       ctx.comp.lif_setattr.features);
1112 
1113 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1114 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1115 
1116 	if ((vlan_flags & features) &&
1117 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1118 		dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");
1119 
1120 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1121 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1122 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1123 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1124 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1125 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1126 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1127 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1128 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1129 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1130 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1131 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1132 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1133 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1134 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1135 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1136 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1137 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1138 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1139 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1140 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1141 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1142 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1143 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1144 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1145 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1146 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1147 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1148 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1149 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1150 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1151 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1152 
1153 	return 0;
1154 }
1155 
1156 static int ionic_init_nic_features(struct ionic_lif *lif)
1157 {
1158 	struct net_device *netdev = lif->netdev;
1159 	netdev_features_t features;
1160 	int err;
1161 
1162 	/* no netdev features on the management device */
1163 	if (lif->ionic->is_mgmt_nic)
1164 		return 0;
1165 
1166 	/* set up what we expect to support by default */
1167 	features = NETIF_F_HW_VLAN_CTAG_TX |
1168 		   NETIF_F_HW_VLAN_CTAG_RX |
1169 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1170 		   NETIF_F_RXHASH |
1171 		   NETIF_F_SG |
1172 		   NETIF_F_HW_CSUM |
1173 		   NETIF_F_RXCSUM |
1174 		   NETIF_F_TSO |
1175 		   NETIF_F_TSO6 |
1176 		   NETIF_F_TSO_ECN;
1177 
1178 	err = ionic_set_nic_features(lif, features);
1179 	if (err)
1180 		return err;
1181 
1182 	/* tell the netdev what we actually can support */
1183 	netdev->features |= NETIF_F_HIGHDMA;
1184 
1185 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1186 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1187 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1188 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1189 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1190 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1191 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1192 		netdev->hw_features |= NETIF_F_RXHASH;
1193 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1194 		netdev->hw_features |= NETIF_F_SG;
1195 
1196 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1197 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1198 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1199 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1200 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1201 		netdev->hw_enc_features |= NETIF_F_TSO;
1202 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1203 		netdev->hw_enc_features |= NETIF_F_TSO6;
1204 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1205 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1206 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1207 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1208 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1209 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1210 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1211 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1212 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1213 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1214 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1215 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1216 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1217 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1218 
1219 	netdev->hw_features |= netdev->hw_enc_features;
1220 	netdev->features |= netdev->hw_features;
1221 
1222 	netdev->priv_flags |= IFF_UNICAST_FLT;
1223 
1224 	return 0;
1225 }
1226 
1227 static int ionic_set_features(struct net_device *netdev,
1228 			      netdev_features_t features)
1229 {
1230 	struct ionic_lif *lif = netdev_priv(netdev);
1231 	int err;
1232 
1233 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1234 		   __func__, (u64)lif->netdev->features, (u64)features);
1235 
1236 	err = ionic_set_nic_features(lif, features);
1237 
1238 	return err;
1239 }
1240 
1241 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1242 {
1243 	struct sockaddr *addr = sa;
1244 	u8 *mac;
1245 	int err;
1246 
1247 	mac = (u8 *)addr->sa_data;
1248 	if (ether_addr_equal(netdev->dev_addr, mac))
1249 		return 0;
1250 
1251 	err = eth_prepare_mac_addr_change(netdev, addr);
1252 	if (err)
1253 		return err;
1254 
1255 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1256 		netdev_info(netdev, "deleting mac addr %pM\n",
1257 			    netdev->dev_addr);
1258 		ionic_addr_del(netdev, netdev->dev_addr);
1259 	}
1260 
1261 	eth_commit_mac_addr_change(netdev, addr);
1262 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1263 
1264 	return ionic_addr_add(netdev, mac);
1265 }
1266 
1267 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1268 {
1269 	struct ionic_lif *lif = netdev_priv(netdev);
1270 	struct ionic_admin_ctx ctx = {
1271 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1272 		.cmd.lif_setattr = {
1273 			.opcode = IONIC_CMD_LIF_SETATTR,
1274 			.index = cpu_to_le16(lif->index),
1275 			.attr = IONIC_LIF_ATTR_MTU,
1276 			.mtu = cpu_to_le32(new_mtu),
1277 		},
1278 	};
1279 	int err;
1280 
1281 	err = ionic_adminq_post_wait(lif, &ctx);
1282 	if (err)
1283 		return err;
1284 
1285 	netdev->mtu = new_mtu;
1286 	err = ionic_reset_queues(lif);
1287 
1288 	return err;
1289 }
1290 
1291 static void ionic_tx_timeout_work(struct work_struct *ws)
1292 {
1293 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1294 
1295 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1296 
1297 	rtnl_lock();
1298 	ionic_reset_queues(lif);
1299 	rtnl_unlock();
1300 }
1301 
1302 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1303 {
1304 	struct ionic_lif *lif = netdev_priv(netdev);
1305 
1306 	schedule_work(&lif->tx_timeout_work);
1307 }
1308 
1309 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1310 				 u16 vid)
1311 {
1312 	struct ionic_lif *lif = netdev_priv(netdev);
1313 	struct ionic_admin_ctx ctx = {
1314 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1315 		.cmd.rx_filter_add = {
1316 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1317 			.lif_index = cpu_to_le16(lif->index),
1318 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1319 			.vlan.vlan = cpu_to_le16(vid),
1320 		},
1321 	};
1322 	int err;
1323 
1324 	err = ionic_adminq_post_wait(lif, &ctx);
1325 	if (err)
1326 		return err;
1327 
1328 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1329 		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));
1330 
1331 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1332 }
1333 
1334 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1335 				  u16 vid)
1336 {
1337 	struct ionic_lif *lif = netdev_priv(netdev);
1338 	struct ionic_admin_ctx ctx = {
1339 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1340 		.cmd.rx_filter_del = {
1341 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1342 			.lif_index = cpu_to_le16(lif->index),
1343 		},
1344 	};
1345 	struct ionic_rx_filter *f;
1346 
1347 	spin_lock_bh(&lif->rx_filters.lock);
1348 
1349 	f = ionic_rx_filter_by_vlan(lif, vid);
1350 	if (!f) {
1351 		spin_unlock_bh(&lif->rx_filters.lock);
1352 		return -ENOENT;
1353 	}
1354 
1355 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1356 
1357 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1358 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1359 	ionic_rx_filter_free(lif, f);
1360 	spin_unlock_bh(&lif->rx_filters.lock);
1361 
1362 	return ionic_adminq_post_wait(lif, &ctx);
1363 }
1364 
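/* Push the current RSS state (hash types, hash key, indirection
 * table) to the device.  A NULL key or indir leaves that part of the
 * cached state as it was.
 */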
1365 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1366 			 const u8 *key, const u32 *indir)
1367 {
1368 	struct ionic_admin_ctx ctx = {
1369 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1370 		.cmd.lif_setattr = {
1371 			.opcode = IONIC_CMD_LIF_SETATTR,
1372 			.attr = IONIC_LIF_ATTR_RSS,
1373 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1374 		},
1375 	};
1376 	unsigned int i, tbl_sz;
1377 
1378 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1379 		lif->rss_types = types;
1380 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1381 	}
1382 
1383 	if (key)
1384 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1385 
1386 	if (indir) {
1387 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1388 		for (i = 0; i < tbl_sz; i++)
1389 			lif->rss_ind_tbl[i] = indir[i];
1390 	}
1391 
1392 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1393 	       IONIC_RSS_HASH_KEY_SIZE);
1394 
1395 	return ionic_adminq_post_wait(lif, &ctx);
1396 }
1397 
1398 static int ionic_lif_rss_init(struct ionic_lif *lif)
1399 {
1400 	unsigned int tbl_sz;
1401 	unsigned int i;
1402 
1403 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1404 			 IONIC_RSS_TYPE_IPV4_TCP |
1405 			 IONIC_RSS_TYPE_IPV4_UDP |
1406 			 IONIC_RSS_TYPE_IPV6     |
1407 			 IONIC_RSS_TYPE_IPV6_TCP |
1408 			 IONIC_RSS_TYPE_IPV6_UDP;
1409 
1410 	/* Fill indirection table with 'default' values */
1411 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1412 	for (i = 0; i < tbl_sz; i++)
1413 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1414 
1415 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1416 }
1417 
1418 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1419 {
1420 	int tbl_sz;
1421 
1422 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1423 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1424 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1425 
1426 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1427 }
1428 
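/* Disable all the queue pairs.  A -ETIMEDOUT from the adminq most
 * likely means the device has stopped responding, so don't bother
 * posting the remaining disable commands.
 */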
1429 static void ionic_txrx_disable(struct ionic_lif *lif)
1430 {
1431 	unsigned int i;
1432 	int err;
1433 
1434 	for (i = 0; i < lif->nxqs; i++) {
1435 		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1436 		if (err == -ETIMEDOUT)
1437 			break;
1438 		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1439 		if (err == -ETIMEDOUT)
1440 			break;
1441 	}
1442 }
1443 
1444 static void ionic_txrx_deinit(struct ionic_lif *lif)
1445 {
1446 	unsigned int i;
1447 
1448 	for (i = 0; i < lif->nxqs; i++) {
1449 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1450 		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1451 
1452 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1453 		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1454 		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1455 	}
1456 }
1457 
1458 static void ionic_txrx_free(struct ionic_lif *lif)
1459 {
1460 	unsigned int i;
1461 
1462 	for (i = 0; i < lif->nxqs; i++) {
1463 		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1464 		lif->txqcqs[i].qcq = NULL;
1465 
1466 		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1467 		lif->rxqcqs[i].qcq = NULL;
1468 	}
1469 }
1470 
1471 static int ionic_txrx_alloc(struct ionic_lif *lif)
1472 {
1473 	unsigned int flags;
1474 	unsigned int i;
1475 	int err = 0;
1476 
1477 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1478 	for (i = 0; i < lif->nxqs; i++) {
1479 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1480 				      lif->ntxq_descs,
1481 				      sizeof(struct ionic_txq_desc),
1482 				      sizeof(struct ionic_txq_comp),
1483 				      sizeof(struct ionic_txq_sg_desc),
1484 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1485 		if (err)
1486 			goto err_out;
1487 
1488 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1489 	}
1490 
1491 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1492 	for (i = 0; i < lif->nxqs; i++) {
1493 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1494 				      lif->nrxq_descs,
1495 				      sizeof(struct ionic_rxq_desc),
1496 				      sizeof(struct ionic_rxq_comp),
1497 				      sizeof(struct ionic_rxq_sg_desc),
1498 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1499 		if (err)
1500 			goto err_out;
1501 
1502 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1503 
1504 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1505 				     lif->rxqcqs[i].qcq->intr.index,
1506 				     lif->rx_coalesce_hw);
1507 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1508 					  lif->txqcqs[i].qcq);
1509 	}
1510 
1511 	return 0;
1512 
1513 err_out:
1514 	ionic_txrx_free(lif);
1515 
1516 	return err;
1517 }
1518 
1519 static int ionic_txrx_init(struct ionic_lif *lif)
1520 {
1521 	unsigned int i;
1522 	int err;
1523 
1524 	for (i = 0; i < lif->nxqs; i++) {
1525 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1526 		if (err)
1527 			goto err_out;
1528 
1529 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1530 		if (err) {
1531 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1532 			goto err_out;
1533 		}
1534 	}
1535 
1536 	if (lif->netdev->features & NETIF_F_RXHASH)
1537 		ionic_lif_rss_init(lif);
1538 
1539 	ionic_set_rx_mode(lif->netdev);
1540 
1541 	return 0;
1542 
1543 err_out:
1544 	while (i--) {
1545 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1546 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1547 	}
1548 
1549 	return err;
1550 }
1551 
1552 static int ionic_txrx_enable(struct ionic_lif *lif)
1553 {
1554 	int i, err, derr;
1555 
1556 	for (i = 0; i < lif->nxqs; i++) {
1557 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1558 		if (err)
1559 			goto err_out;
1560 
1561 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1562 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1563 		if (err) {
1564 			if (err != -ETIMEDOUT)
1565 				ionic_qcq_disable(lif->txqcqs[i].qcq);
1566 			goto err_out;
1567 		}
1568 	}
1569 
1570 	return 0;
1571 
1572 err_out:
1573 	while (i--) {
1574 		derr = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1575 		if (derr == -ETIMEDOUT)
1576 			break;
1577 		derr = ionic_qcq_disable(lif->txqcqs[i].qcq);
1578 		if (derr == -ETIMEDOUT)
1579 			break;
1580 	}
1581 
1582 	return err;
1583 }
1584 
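/* ndo_open: allocate, init, and enable the tx/rx queues, then let
 * the link status check decide whether traffic can start flowing.
 */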
1585 int ionic_open(struct net_device *netdev)
1586 {
1587 	struct ionic_lif *lif = netdev_priv(netdev);
1588 	int err;
1589 
1590 	netif_carrier_off(netdev);
1591 
1592 	err = ionic_txrx_alloc(lif);
1593 	if (err)
1594 		return err;
1595 
1596 	err = ionic_txrx_init(lif);
1597 	if (err)
1598 		goto err_txrx_free;
1599 
1600 	err = ionic_txrx_enable(lif);
1601 	if (err)
1602 		goto err_txrx_deinit;
1603 
1604 	netif_set_real_num_tx_queues(netdev, lif->nxqs);
1605 	netif_set_real_num_rx_queues(netdev, lif->nxqs);
1606 
1607 	set_bit(IONIC_LIF_F_UP, lif->state);
1608 
1609 	ionic_link_status_check_request(lif);
1610 	if (netif_carrier_ok(netdev))
1611 		netif_tx_wake_all_queues(netdev);
1612 
1613 	return 0;
1614 
1615 err_txrx_deinit:
1616 	ionic_txrx_deinit(lif);
1617 err_txrx_free:
1618 	ionic_txrx_free(lif);
1619 	return err;
1620 }
1621 
1622 int ionic_stop(struct net_device *netdev)
1623 {
1624 	struct ionic_lif *lif = netdev_priv(netdev);
1625 	int err = 0;
1626 
1627 	if (!test_bit(IONIC_LIF_F_UP, lif->state)) {
1628 		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
1629 			__func__, lif->name);
1630 		return 0;
1631 	}
1632 	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
1633 	clear_bit(IONIC_LIF_F_UP, lif->state);
1634 
1635 	/* carrier off before disabling queues to avoid watchdog timeout */
1636 	netif_carrier_off(netdev);
1637 	netif_tx_stop_all_queues(netdev);
1638 	netif_tx_disable(netdev);
1639 
1640 	ionic_txrx_disable(lif);
1641 	ionic_lif_quiesce(lif);
1642 	ionic_txrx_deinit(lif);
1643 	ionic_txrx_free(lif);
1644 
1645 	return err;
1646 }
1647 
1648 static int ionic_get_vf_config(struct net_device *netdev,
1649 			       int vf, struct ifla_vf_info *ivf)
1650 {
1651 	struct ionic_lif *lif = netdev_priv(netdev);
1652 	struct ionic *ionic = lif->ionic;
1653 	int ret = 0;
1654 
1655 	down_read(&ionic->vf_op_lock);
1656 
1657 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1658 		ret = -EINVAL;
1659 	} else {
1660 		ivf->vf           = vf;
1661 		ivf->vlan         = ionic->vfs[vf].vlanid;
1662 		ivf->qos	  = 0;
1663 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1664 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1665 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1666 		ivf->trusted      = ionic->vfs[vf].trusted;
1667 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1668 	}
1669 
1670 	up_read(&ionic->vf_op_lock);
1671 	return ret;
1672 }
1673 
1674 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1675 			      struct ifla_vf_stats *vf_stats)
1676 {
1677 	struct ionic_lif *lif = netdev_priv(netdev);
1678 	struct ionic *ionic = lif->ionic;
1679 	struct ionic_lif_stats *vs;
1680 	int ret = 0;
1681 
1682 	down_read(&ionic->vf_op_lock);
1683 
1684 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1685 		ret = -EINVAL;
1686 	} else {
1687 		memset(vf_stats, 0, sizeof(*vf_stats));
1688 		vs = &ionic->vfs[vf].stats;
1689 
1690 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1691 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1692 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1693 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1694 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1695 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1696 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1697 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1698 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1699 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1700 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1701 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1702 	}
1703 
1704 	up_read(&ionic->vf_op_lock);
1705 	return ret;
1706 }
1707 
1708 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1709 {
1710 	struct ionic_lif *lif = netdev_priv(netdev);
1711 	struct ionic *ionic = lif->ionic;
1712 	int ret;
1713 
1714 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1715 		return -EINVAL;
1716 
1717 	down_write(&ionic->vf_op_lock);
1718 
1719 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1720 		ret = -EINVAL;
1721 	} else {
1722 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1723 		if (!ret)
1724 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1725 	}
1726 
1727 	up_write(&ionic->vf_op_lock);
1728 	return ret;
1729 }
1730 
1731 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1732 			     u8 qos, __be16 proto)
1733 {
1734 	struct ionic_lif *lif = netdev_priv(netdev);
1735 	struct ionic *ionic = lif->ionic;
1736 	int ret;
1737 
1738 	/* until someday when we support qos */
1739 	if (qos)
1740 		return -EINVAL;
1741 
1742 	if (vlan > 4095)
1743 		return -EINVAL;
1744 
1745 	if (proto != htons(ETH_P_8021Q))
1746 		return -EPROTONOSUPPORT;
1747 
1748 	down_write(&ionic->vf_op_lock);
1749 
1750 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1751 		ret = -EINVAL;
1752 	} else {
1753 		ret = ionic_set_vf_config(ionic, vf,
1754 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1755 		if (!ret)
1756 			ionic->vfs[vf].vlanid = vlan;
1757 	}
1758 
1759 	up_write(&ionic->vf_op_lock);
1760 	return ret;
1761 }
1762 
1763 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1764 			     int tx_min, int tx_max)
1765 {
1766 	struct ionic_lif *lif = netdev_priv(netdev);
1767 	struct ionic *ionic = lif->ionic;
1768 	int ret;
1769 
1770 	/* a minimum tx rate is not supported */
1771 	if (tx_min)
1772 		return -EINVAL;
1773 
1774 	down_write(&ionic->vf_op_lock);
1775 
1776 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1777 		ret = -EINVAL;
1778 	} else {
1779 		ret = ionic_set_vf_config(ionic, vf,
1780 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1781 		if (!ret)
1782 			lif->ionic->vfs[vf].maxrate = tx_max;
1783 	}
1784 
1785 	up_write(&ionic->vf_op_lock);
1786 	return ret;
1787 }
1788 
1789 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1790 {
1791 	struct ionic_lif *lif = netdev_priv(netdev);
1792 	struct ionic *ionic = lif->ionic;
1793 	u8 data = set;  /* convert to u8 for config */
1794 	int ret;
1795 
1796 	down_write(&ionic->vf_op_lock);
1797 
1798 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1799 		ret = -EINVAL;
1800 	} else {
1801 		ret = ionic_set_vf_config(ionic, vf,
1802 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1803 		if (!ret)
1804 			ionic->vfs[vf].spoofchk = data;
1805 	}
1806 
1807 	up_write(&ionic->vf_op_lock);
1808 	return ret;
1809 }
1810 
1811 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1812 {
1813 	struct ionic_lif *lif = netdev_priv(netdev);
1814 	struct ionic *ionic = lif->ionic;
1815 	u8 data = set;  /* convert to u8 for config */
1816 	int ret;
1817 
1818 	down_write(&ionic->vf_op_lock);
1819 
1820 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1821 		ret = -EINVAL;
1822 	} else {
1823 		ret = ionic_set_vf_config(ionic, vf,
1824 					  IONIC_VF_ATTR_TRUST, &data);
1825 		if (!ret)
1826 			ionic->vfs[vf].trusted = data;
1827 	}
1828 
1829 	up_write(&ionic->vf_op_lock);
1830 	return ret;
1831 }
1832 
1833 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1834 {
1835 	struct ionic_lif *lif = netdev_priv(netdev);
1836 	struct ionic *ionic = lif->ionic;
1837 	u8 data;
1838 	int ret;
1839 
1840 	switch (set) {
1841 	case IFLA_VF_LINK_STATE_ENABLE:
1842 		data = IONIC_VF_LINK_STATUS_UP;
1843 		break;
1844 	case IFLA_VF_LINK_STATE_DISABLE:
1845 		data = IONIC_VF_LINK_STATUS_DOWN;
1846 		break;
1847 	case IFLA_VF_LINK_STATE_AUTO:
1848 		data = IONIC_VF_LINK_STATUS_AUTO;
1849 		break;
1850 	default:
1851 		return -EINVAL;
1852 	}
1853 
1854 	down_write(&ionic->vf_op_lock);
1855 
1856 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1857 		ret = -EINVAL;
1858 	} else {
1859 		ret = ionic_set_vf_config(ionic, vf,
1860 					  IONIC_VF_ATTR_LINKSTATE, &data);
1861 		if (!ret)
1862 			ionic->vfs[vf].linkstate = set;
1863 	}
1864 
1865 	up_write(&ionic->vf_op_lock);
1866 	return ret;
1867 }
1868 
1869 static const struct net_device_ops ionic_netdev_ops = {
1870 	.ndo_open		= ionic_open,
1871 	.ndo_stop		= ionic_stop,
1872 	.ndo_start_xmit		= ionic_start_xmit,
1873 	.ndo_get_stats64	= ionic_get_stats64,
1874 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1875 	.ndo_set_features	= ionic_set_features,
1876 	.ndo_set_mac_address	= ionic_set_mac_address,
1877 	.ndo_validate_addr	= eth_validate_addr,
1878 	.ndo_tx_timeout		= ionic_tx_timeout,
1879 	.ndo_change_mtu		= ionic_change_mtu,
1880 	.ndo_vlan_rx_add_vid	= ionic_vlan_rx_add_vid,
1881 	.ndo_vlan_rx_kill_vid	= ionic_vlan_rx_kill_vid,
1882 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1883 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1884 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1885 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1886 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1887 	.ndo_get_vf_config	= ionic_get_vf_config,
1888 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1889 	.ndo_get_vf_stats	= ionic_get_vf_stats,
1890 };
1891 
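/* Bounce the queues through a stop/open cycle while holding the
 * QUEUE_RESET bit so that concurrent resets can't interleave; used,
 * e.g., when queue parameters change.
 */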
1892 int ionic_reset_queues(struct ionic_lif *lif)
1893 {
1894 	bool running;
1895 	int err = 0;
1896 
1897 	/* Put off the next watchdog timeout */
1898 	netif_trans_update(lif->netdev);
1899 
1900 	err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1901 	if (err)
1902 		return err;
1903 
1904 	running = netif_running(lif->netdev);
1905 	if (running)
1906 		err = ionic_stop(lif->netdev);
1907 	if (!err && running)
1908 		err = ionic_open(lif->netdev);
1909 
1910 	clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
1911 
1912 	return err;
1913 }
1914 
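/* Allocate the netdev with the lif as its private data, along with the
 * DMA-coherent lif info block, the queue/completion pair structures,
 * and the rss indirection table.
 */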
1915 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
1916 {
1917 	struct device *dev = ionic->dev;
1918 	struct net_device *netdev;
1919 	struct ionic_lif *lif;
1920 	int tbl_sz;
1921 	int err;
1922 
1923 	netdev = alloc_etherdev_mqs(sizeof(*lif),
1924 				    ionic->ntxqs_per_lif, ionic->nrxqs_per_lif);
1925 	if (!netdev) {
1926 		dev_err(dev, "Cannot allocate netdev, aborting\n");
1927 		return ERR_PTR(-ENOMEM);
1928 	}
1929 
1930 	SET_NETDEV_DEV(netdev, dev);
1931 
1932 	lif = netdev_priv(netdev);
1933 	lif->netdev = netdev;
1934 	ionic->master_lif = lif;
1935 	netdev->netdev_ops = &ionic_netdev_ops;
1936 	ionic_ethtool_set_ops(netdev);
1937 
1938 	netdev->watchdog_timeo = 2 * HZ;
1939 	netdev->min_mtu = IONIC_MIN_MTU;
1940 	netdev->max_mtu = IONIC_MAX_MTU;
1941 
1942 	lif->neqs = ionic->neqs_per_lif;
1943 	lif->nxqs = ionic->ntxqs_per_lif;
1944 
1945 	lif->ionic = ionic;
1946 	lif->index = index;
1947 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
1948 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1949 
1950 	/* Convert the default coalesce value to actual hw resolution */
1951 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
1952 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
1953 						    lif->rx_coalesce_usecs);
1954 
1955 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
1956 
1957 	spin_lock_init(&lif->adminq_lock);
1958 
1959 	spin_lock_init(&lif->deferred.lock);
1960 	INIT_LIST_HEAD(&lif->deferred.list);
1961 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
1962 
1963 	/* allocate lif info */
1964 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
1965 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
1966 				       &lif->info_pa, GFP_KERNEL);
1967 	if (!lif->info) {
1968 		dev_err(dev, "Failed to allocate lif info, aborting\n");
1969 		err = -ENOMEM;
1970 		goto err_out_free_netdev;
1971 	}
1972 
1973 	/* allocate queues */
1974 	err = ionic_qcqs_alloc(lif);
1975 	if (err)
1976 		goto err_out_free_lif_info;
1977 
1978 	/* allocate rss indirection table */
1979 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1980 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
1981 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
1982 					      &lif->rss_ind_tbl_pa,
1983 					      GFP_KERNEL);
1984 
1985 	if (!lif->rss_ind_tbl) {
1986 		err = -ENOMEM;
1987 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
1988 		goto err_out_free_qcqs;
1989 	}
1990 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
1991 
1992 	list_add_tail(&lif->list, &ionic->lifs);
1993 
1994 	return lif;
1995 
1996 err_out_free_qcqs:
1997 	ionic_qcqs_free(lif);
1998 err_out_free_lif_info:
1999 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2000 	lif->info = NULL;
2001 	lif->info_pa = 0;
2002 err_out_free_netdev:
2003 	free_netdev(lif->netdev);
2004 	lif = NULL;
2005 
2006 	return ERR_PTR(err);
2007 }
2008 
2009 int ionic_lifs_alloc(struct ionic *ionic)
2010 {
2011 	struct ionic_lif *lif;
2012 
2013 	INIT_LIST_HEAD(&ionic->lifs);
2014 
2015 	/* only build the first lif, others are for later features */
2016 	set_bit(0, ionic->lifbits);
2017 	lif = ionic_lif_alloc(ionic, 0);
2018 
2019 	return PTR_ERR_OR_ZERO(lif);
2020 }
2021 
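/* Use the dev_cmd interface to ask the firmware to reset this lif. */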
2022 static void ionic_lif_reset(struct ionic_lif *lif)
2023 {
2024 	struct ionic_dev *idev = &lif->ionic->idev;
2025 
2026 	mutex_lock(&lif->ionic->dev_cmd_lock);
2027 	ionic_dev_cmd_lif_reset(idev, lif->index);
2028 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2029 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2030 }
2031 
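/* Release everything ionic_lif_alloc() set up, with a firmware lif
 * reset before the info block and doorbell page are released.
 */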
2032 static void ionic_lif_free(struct ionic_lif *lif)
2033 {
2034 	struct device *dev = lif->ionic->dev;
2035 
2036 	/* free rss indirection table */
2037 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2038 			  lif->rss_ind_tbl_pa);
2039 	lif->rss_ind_tbl = NULL;
2040 	lif->rss_ind_tbl_pa = 0;
2041 
2042 	/* free queues */
2043 	ionic_qcqs_free(lif);
2044 	ionic_lif_reset(lif);
2045 
2046 	/* free lif info */
2047 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2048 	lif->info = NULL;
2049 	lif->info_pa = 0;
2050 
2051 	/* unmap doorbell page */
2052 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2053 	lif->kern_dbpage = NULL;
2054 	kfree(lif->dbid_inuse);
2055 	lif->dbid_inuse = NULL;
2056 
2057 	/* free netdev & lif */
2058 	ionic_debugfs_del_lif(lif);
2059 	list_del(&lif->list);
2060 	free_netdev(lif->netdev);
2061 }
2062 
2063 void ionic_lifs_free(struct ionic *ionic)
2064 {
2065 	struct list_head *cur, *tmp;
2066 	struct ionic_lif *lif;
2067 
2068 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2069 		lif = list_entry(cur, struct ionic_lif, list);
2070 
2071 		ionic_lif_free(lif);
2072 	}
2073 }
2074 
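/* Undo ionic_lif_init().  The INITED bit makes this safe to call on a
 * lif that never finished (or never started) initializing.
 */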
2075 static void ionic_lif_deinit(struct ionic_lif *lif)
2076 {
2077 	if (!test_bit(IONIC_LIF_F_INITED, lif->state))
2078 		return;
2079 
2080 	clear_bit(IONIC_LIF_F_INITED, lif->state);
2081 
2082 	ionic_rx_filters_deinit(lif);
2083 	if (lif->netdev->features & NETIF_F_RXHASH)
2084 		ionic_lif_rss_deinit(lif);
2085 
2086 	napi_disable(&lif->adminqcq->napi);
2087 	netif_napi_del(&lif->adminqcq->napi);
2088 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2089 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2090 
2091 	ionic_lif_reset(lif);
2092 }
2093 
2094 void ionic_lifs_deinit(struct ionic *ionic)
2095 {
2096 	struct list_head *cur, *tmp;
2097 	struct ionic_lif *lif;
2098 
2099 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2100 		lif = list_entry(cur, struct ionic_lif, list);
2101 		ionic_lif_deinit(lif);
2102 	}
2103 }
2104 
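/* The adminq has to be brought up through the dev_cmd register
 * interface, since there is no adminq to post to yet; every other
 * queue is initialized through the adminq once this succeeds.
 */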
2105 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2106 {
2107 	struct device *dev = lif->ionic->dev;
2108 	struct ionic_q_init_comp comp;
2109 	struct ionic_dev *idev;
2110 	struct ionic_qcq *qcq;
2111 	struct ionic_queue *q;
2112 	int err;
2113 
2114 	idev = &lif->ionic->idev;
2115 	qcq = lif->adminqcq;
2116 	q = &qcq->q;
2117 
2118 	mutex_lock(&lif->ionic->dev_cmd_lock);
2119 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2120 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2121 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2122 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2123 	if (err) {
2124 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2125 		return err;
2126 	}
2127 
2128 	q->hw_type = comp.hw_type;
2129 	q->hw_index = le32_to_cpu(comp.hw_index);
2130 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2131 
2132 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2133 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2134 
2135 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2136 		       NAPI_POLL_WEIGHT);
2137 
2138 	err = ionic_request_irq(lif, qcq);
2139 	if (err) {
2140 		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
2141 		netif_napi_del(&qcq->napi);
2142 		return err;
2143 	}
2144 
2145 	napi_enable(&qcq->napi);
2146 
2147 	if (qcq->flags & IONIC_QCQ_F_INTR)
2148 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2149 				IONIC_INTR_MASK_CLEAR);
2150 
2151 	qcq->flags |= IONIC_QCQ_F_INITED;
2152 
2153 	ionic_debugfs_add_qcq(lif, qcq);
2154 
2155 	return 0;
2156 }
2157 
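/* The notifyq carries async event notifications from the NIC and
 * shares the adminq's interrupt (note the intr_index below).
 */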
2158 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2159 {
2160 	struct ionic_qcq *qcq = lif->notifyqcq;
2161 	struct device *dev = lif->ionic->dev;
2162 	struct ionic_queue *q = &qcq->q;
2163 	int err;
2164 
2165 	struct ionic_admin_ctx ctx = {
2166 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2167 		.cmd.q_init = {
2168 			.opcode = IONIC_CMD_Q_INIT,
2169 			.lif_index = cpu_to_le16(lif->index),
2170 			.type = q->type,
2171 			.index = cpu_to_le32(q->index),
2172 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2173 					     IONIC_QINIT_F_ENA),
2174 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2175 			.pid = cpu_to_le16(q->pid),
2176 			.ring_size = ilog2(q->num_descs),
2177 			.ring_base = cpu_to_le64(q->base_pa),
2178 		}
2179 	};
2180 
2181 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2182 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2183 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2184 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2185 
2186 	err = ionic_adminq_post_wait(lif, &ctx);
2187 	if (err)
2188 		return err;
2189 
2190 	q->hw_type = ctx.comp.q_init.hw_type;
2191 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2192 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2193 
2194 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2195 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2196 
2197 	/* preset the callback info */
2198 	q->info[0].cb_arg = lif;
2199 
2200 	qcq->flags |= IONIC_QCQ_F_INITED;
2201 
2202 	ionic_debugfs_add_qcq(lif, qcq);
2203 
2204 	return 0;
2205 }
2206 
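/* Ask the firmware for the lif's assigned MAC and adopt it, moving the
 * rx filter from the old address to the new one.  A zero MAC from the
 * device means none was assigned, so the current address is kept.
 */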
2207 static int ionic_station_set(struct ionic_lif *lif)
2208 {
2209 	struct net_device *netdev = lif->netdev;
2210 	struct ionic_admin_ctx ctx = {
2211 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2212 		.cmd.lif_getattr = {
2213 			.opcode = IONIC_CMD_LIF_GETATTR,
2214 			.index = cpu_to_le16(lif->index),
2215 			.attr = IONIC_LIF_ATTR_MAC,
2216 		},
2217 	};
2218 	struct sockaddr addr;
2219 	int err;
2220 
2221 	err = ionic_adminq_post_wait(lif, &ctx);
2222 	if (err)
2223 		return err;
2224 
2225 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2226 		return 0;
2227 
2228 	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
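	/* sa_family is not examined by the eth MAC address change helpers */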
2229 	addr.sa_family = AF_INET;
2230 	err = eth_prepare_mac_addr_change(netdev, &addr);
2231 	if (err) {
2232 		netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
2233 			    addr.sa_data);
2234 		return 0;
2235 	}
2236 
2237 	netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
2238 		   netdev->dev_addr);
2239 	ionic_lif_addr(lif, netdev->dev_addr, false);
2240 
2241 	eth_commit_mac_addr_change(netdev, &addr);
2242 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2243 		   netdev->dev_addr);
2244 	ionic_lif_addr(lif, netdev->dev_addr, true);
2245 
2246 	return 0;
2247 }
2248 
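/* Bring up the basic lif plumbing: register the info block with the
 * firmware, map the kernel doorbell page, then initialize the adminq,
 * notifyq, nic features, rx filters, and station address.
 */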
2249 static int ionic_lif_init(struct ionic_lif *lif)
2250 {
2251 	struct ionic_dev *idev = &lif->ionic->idev;
2252 	struct device *dev = lif->ionic->dev;
2253 	struct ionic_lif_init_comp comp;
2254 	int dbpage_num;
2255 	int err;
2256 
2257 	ionic_debugfs_add_lif(lif);
2258 
2259 	mutex_lock(&lif->ionic->dev_cmd_lock);
2260 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2261 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2262 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2263 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2264 	if (err)
2265 		return err;
2266 
2267 	lif->hw_index = le16_to_cpu(comp.hw_index);
2268 
2269 	/* now that we have the hw_index we can figure out our doorbell page */
2270 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2271 	if (!lif->dbid_count) {
2272 		dev_err(dev, "No doorbell pages, aborting\n");
2273 		return -EINVAL;
2274 	}
2275 
2276 	lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
2277 	if (!lif->dbid_inuse) {
2278 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2279 		return -ENOMEM;
2280 	}
2281 
2282 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2283 	set_bit(0, lif->dbid_inuse);
2284 	lif->kern_pid = 0;
2285 
2286 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2287 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2288 	if (!lif->kern_dbpage) {
2289 		dev_err(dev, "Cannot map dbpage, aborting\n");
2290 		err = -ENOMEM;
2291 		goto err_out_free_dbid;
2292 	}
2293 
2294 	err = ionic_lif_adminq_init(lif);
2295 	if (err)
2296 		goto err_out_adminq_deinit;
2297 
2298 	if (lif->ionic->nnqs_per_lif) {
2299 		err = ionic_lif_notifyq_init(lif);
2300 		if (err)
2301 			goto err_out_notifyq_deinit;
2302 	}
2303 
2304 	err = ionic_init_nic_features(lif);
2305 	if (err)
2306 		goto err_out_notifyq_deinit;
2307 
2308 	err = ionic_rx_filters_init(lif);
2309 	if (err)
2310 		goto err_out_notifyq_deinit;
2311 
2312 	err = ionic_station_set(lif);
2313 	if (err)
2314 		goto err_out_notifyq_deinit;
2315 
2316 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2317 
2318 	set_bit(IONIC_LIF_F_INITED, lif->state);
2319 
2320 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2321 
2322 	return 0;
2323 
2324 err_out_notifyq_deinit:
2325 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2326 err_out_adminq_deinit:
2327 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2328 	ionic_lif_reset(lif);
2329 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2330 	lif->kern_dbpage = NULL;
2331 err_out_free_dbid:
2332 	kfree(lif->dbid_inuse);
2333 	lif->dbid_inuse = NULL;
2334 
2335 	return err;
2336 }
2337 
2338 int ionic_lifs_init(struct ionic *ionic)
2339 {
2340 	struct list_head *cur, *tmp;
2341 	struct ionic_lif *lif;
2342 	int err;
2343 
2344 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2345 		lif = list_entry(cur, struct ionic_lif, list);
2346 		err = ionic_lif_init(lif);
2347 		if (err)
2348 			return err;
2349 	}
2350 
2351 	return 0;
2352 }
2353 
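/* Placeholder: there is no deferred notifier work yet, but the handler
 * lets nb_work be INIT_WORK'd and later cancel_work_sync'd.
 */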
2354 static void ionic_lif_notify_work(struct work_struct *ws)
2355 {
2356 }
2357 
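/* Keep the firmware's copy of the interface name (IONIC_LIF_ATTR_NAME)
 * in sync with the netdev name.
 */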
2358 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2359 {
2360 	struct ionic_admin_ctx ctx = {
2361 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2362 		.cmd.lif_setattr = {
2363 			.opcode = IONIC_CMD_LIF_SETATTR,
2364 			.index = cpu_to_le16(lif->index),
2365 			.attr = IONIC_LIF_ATTR_NAME,
2366 		},
2367 	};
2368 
2369 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2370 		sizeof(ctx.cmd.lif_setattr.name));
2371 
2372 	ionic_adminq_post_wait(lif, &ctx);
2373 }
2374 
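/* Identify a netdev as one of ours by its ndo_start_xmit pointer;
 * return its lif, or NULL for any foreign device.
 */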
2375 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2376 {
2377 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2378 		return NULL;
2379 
2380 	return netdev_priv(netdev);
2381 }
2382 
2383 static int ionic_lif_notify(struct notifier_block *nb,
2384 			    unsigned long event, void *info)
2385 {
2386 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2387 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2388 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2389 
2390 	if (!lif || lif->ionic != ionic)
2391 		return NOTIFY_DONE;
2392 
2393 	switch (event) {
2394 	case NETDEV_CHANGENAME:
2395 		ionic_lif_set_netdev_info(lif);
2396 		break;
2397 	}
2398 
2399 	return NOTIFY_DONE;
2400 }
2401 
2402 int ionic_lifs_register(struct ionic *ionic)
2403 {
2404 	int err;
2405 
2406 	/* the netdev is not registered on the management device, it is
2407 	 * only used as a vehicle for napi operations on the adminq
2408 	 */
2409 	if (ionic->is_mgmt_nic)
2410 		return 0;
2411 
2412 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2413 
2414 	ionic->nb.notifier_call = ionic_lif_notify;
2415 
2416 	err = register_netdevice_notifier(&ionic->nb);
2417 	if (err)
2418 		ionic->nb.notifier_call = NULL;
2419 
2420 	/* only register LIF0 for now */
2421 	err = register_netdev(ionic->master_lif->netdev);
2422 	if (err) {
2423 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2424 		goto err_out_unregister_notifier;
2425 	}
2426 
2427 	ionic_link_status_check_request(ionic->master_lif);
2428 	ionic->master_lif->registered = true;
2429 
2430 	return 0;

err_out_unregister_notifier:
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		ionic->nb.notifier_call = NULL;
	}
	return err;
2431 }
2432 
2433 void ionic_lifs_unregister(struct ionic *ionic)
2434 {
2435 	if (ionic->nb.notifier_call) {
2436 		unregister_netdevice_notifier(&ionic->nb);
2437 		cancel_work_sync(&ionic->nb_work);
2438 		ionic->nb.notifier_call = NULL;
2439 	}
2440 
2441 	/* There is only one lif ever registered in the
2442 	 * current model, so don't bother searching the
2443 	 * ionic->lifs list for candidates to unregister
2444 	 */
2445 	if (!ionic->master_lif)
2446 		return;
2447 
2448 	cancel_work_sync(&ionic->master_lif->deferred.work);
2449 	cancel_work_sync(&ionic->master_lif->tx_timeout_work);
2450 	if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2451 		unregister_netdev(ionic->master_lif->netdev);
2452 }
2453 
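/* Fetch the lif identity data from the firmware through the dev_cmd
 * data window, copying no more than the window can hold.
 */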
2454 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2455 		       union ionic_lif_identity *lid)
2456 {
2457 	struct ionic_dev *idev = &ionic->idev;
2458 	size_t sz;
2459 	int err;
2460 
2461 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2462 
2463 	mutex_lock(&ionic->dev_cmd_lock);
2464 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2465 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2466 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2467 	mutex_unlock(&ionic->dev_cmd_lock);
2468 	if (err)
2469 		return err;
2470 
2471 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2472 		le64_to_cpu(lid->capabilities));
2473 
2474 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2475 		le32_to_cpu(lid->eth.max_ucast_filters));
2476 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2477 		le32_to_cpu(lid->eth.max_mcast_filters));
2478 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2479 		le64_to_cpu(lid->eth.config.features));
2480 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2481 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2482 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2483 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2484 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2485 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2486 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2487 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2488 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2489 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2490 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2491 		le32_to_cpu(lid->eth.config.mtu));
2492 
2493 	return 0;
2494 }
2495 
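/* Size interrupt and queue resources against what the device and host
 * offer: ideally one TxRx queue pair per online CPU plus one vector
 * for the adminq, halving the requests until the OS grants the
 * vectors.  For example (assuming no RDMA EQs), with 8 CPUs but only
 * 6 MSI-X vectors available, the 1 + 8 request fails and we settle
 * for nxqs = 4.
 */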
2496 int ionic_lifs_size(struct ionic *ionic)
2497 {
2498 	struct ionic_identity *ident = &ionic->ident;
2499 	unsigned int nintrs, dev_nintrs;
2500 	union ionic_lif_config *lc;
2501 	unsigned int ntxqs_per_lif;
2502 	unsigned int nrxqs_per_lif;
2503 	unsigned int neqs_per_lif;
2504 	unsigned int nnqs_per_lif;
2505 	unsigned int nxqs, neqs;
2506 	unsigned int min_intrs;
2507 	int err;
2508 
2509 	lc = &ident->lif.eth.config;
2510 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2511 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2512 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2513 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2514 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2515 
2516 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2517 	nxqs = min(nxqs, num_online_cpus());
2518 	neqs = min(neqs_per_lif, num_online_cpus());
2519 
2520 try_again:
2521 	/* interrupt usage:
2522 	 *    1 for master lif adminq/notifyq
2523 	 *    1 for each CPU for master lif TxRx queue pairs
2524 	 *    whatever's left is for RDMA queues
2525 	 */
2526 	nintrs = 1 + nxqs + neqs;
2527 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2528 
2529 	if (nintrs > dev_nintrs)
2530 		goto try_fewer;
2531 
2532 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2533 	if (err < 0 && err != -ENOSPC) {
2534 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2535 		return err;
2536 	}
2537 	if (err == -ENOSPC)
2538 		goto try_fewer;
2539 
2540 	if (err != nintrs) {
2541 		ionic_bus_free_irq_vectors(ionic);
2542 		goto try_fewer;
2543 	}
2544 
2545 	ionic->nnqs_per_lif = nnqs_per_lif;
2546 	ionic->neqs_per_lif = neqs;
2547 	ionic->ntxqs_per_lif = nxqs;
2548 	ionic->nrxqs_per_lif = nxqs;
2549 	ionic->nintrs = nintrs;
2550 
2551 	ionic_debugfs_add_sizes(ionic);
2552 
2553 	return 0;
2554 
2555 try_fewer:
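	/* the notifyq shares the adminq vector (see notifyq_init), so
	 * reducing nnqs_per_lif doesn't lower nintrs; only neqs and
	 * nxqs do
	 */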
2556 	if (nnqs_per_lif > 1) {
2557 		nnqs_per_lif >>= 1;
2558 		goto try_again;
2559 	}
2560 	if (neqs > 1) {
2561 		neqs >>= 1;
2562 		goto try_again;
2563 	}
2564 	if (nxqs > 1) {
2565 		nxqs >>= 1;
2566 		goto try_again;
2567 	}
2568 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2569 	return -ENOSPC;
2570 }
2571