xref: /linux/drivers/net/ethernet/intel/idpf/idpf_lib.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_virtchnl.h"
6 
7 static const struct net_device_ops idpf_netdev_ops_splitq;
8 static const struct net_device_ops idpf_netdev_ops_singleq;
9 
10 /**
11  * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
12  * @adapter: private data struct
13  *
14  * Return 0 on success, error on failure
15  */
16 static int idpf_init_vector_stack(struct idpf_adapter *adapter)
17 {
18 	struct idpf_vector_lifo *stack;
19 	u16 min_vec;
20 	u32 i;
21 
22 	mutex_lock(&adapter->vector_lock);
23 	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
24 	stack = &adapter->vector_stack;
25 	stack->size = adapter->num_msix_entries;
26 	/* set the base and top to point at the start of the 'free pool' from
27 	 * which the unused vectors are distributed on an on-demand basis
28 	 */
29 	stack->base = min_vec;
30 	stack->top = min_vec;
31 
32 	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
33 	if (!stack->vec_idx) {
34 		mutex_unlock(&adapter->vector_lock);
35 
36 		return -ENOMEM;
37 	}
38 
39 	for (i = 0; i < stack->size; i++)
40 		stack->vec_idx[i] = i;
41 
42 	mutex_unlock(&adapter->vector_lock);
43 
44 	return 0;
45 }
46 
47 /**
48  * idpf_deinit_vector_stack - free the MSIX vector stack
49  * @adapter: private data struct
50  */
51 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
52 {
53 	struct idpf_vector_lifo *stack;
54 
55 	mutex_lock(&adapter->vector_lock);
56 	stack = &adapter->vector_stack;
57 	kfree(stack->vec_idx);
58 	stack->vec_idx = NULL;
59 	mutex_unlock(&adapter->vector_lock);
60 }
61 
62 /**
63  * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
64  * @adapter: adapter structure
65  *
66  * This will also disable interrupt mode and queue up the mailbox task. The
67  * mailbox task will reschedule itself if not in interrupt mode.
68  */
69 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
70 {
71 	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
72 	free_irq(adapter->msix_entries[0].vector, adapter);
73 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
74 }
75 
76 /**
77  * idpf_intr_rel - Release interrupt capabilities and free memory
78  * @adapter: adapter to disable interrupts on
79  */
80 void idpf_intr_rel(struct idpf_adapter *adapter)
81 {
82 	if (!adapter->msix_entries)
83 		return;
84 
85 	idpf_mb_intr_rel_irq(adapter);
86 	pci_free_irq_vectors(adapter->pdev);
87 	idpf_send_dealloc_vectors_msg(adapter);
88 	idpf_deinit_vector_stack(adapter);
89 	kfree(adapter->msix_entries);
90 	adapter->msix_entries = NULL;
91 }
92 
93 /**
94  * idpf_mb_intr_clean - Interrupt handler for the mailbox
95  * @irq: interrupt number
96  * @data: pointer to the adapter structure
97  */
98 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
99 {
100 	struct idpf_adapter *adapter = (struct idpf_adapter *)data;
101 
102 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
103 
104 	return IRQ_HANDLED;
105 }
106 
107 /**
108  * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
109  * @adapter: adapter to get the hardware address for register write
110  */
111 static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
112 {
113 	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
114 	u32 val;
115 
116 	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
117 	writel(val, intr->dyn_ctl);
118 	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
119 }
120 
121 /**
122  * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
123  * @adapter: adapter structure to pass to the mailbox irq handler
124  */
125 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
126 {
127 	struct idpf_q_vector *mb_vector = &adapter->mb_vector;
128 	int irq_num, mb_vidx = 0, err;
129 
130 	irq_num = adapter->msix_entries[mb_vidx].vector;
131 	mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
132 				    dev_driver_string(&adapter->pdev->dev),
133 				    "Mailbox", mb_vidx);
134 	err = request_irq(irq_num, adapter->irq_mb_handler, 0,
135 			  mb_vector->name, adapter);
136 	if (err) {
137 		dev_err(&adapter->pdev->dev,
138 			"IRQ request for mailbox failed, error: %d\n", err);
139 
140 		return err;
141 	}
142 
143 	set_bit(IDPF_MB_INTR_MODE, adapter->flags);
144 
145 	return 0;
146 }
147 
148 /**
149  * idpf_set_mb_vec_id - Set vector index for mailbox
150  * @adapter: adapter structure to access the vector chunks
151  *
152  * The first vector id in the requested vector chunks from the CP is for
153  * the mailbox
154  */
155 static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
156 {
157 	if (adapter->req_vec_chunks)
158 		adapter->mb_vector.v_idx =
159 			le16_to_cpu(adapter->caps.mailbox_vector_id);
160 	else
161 		adapter->mb_vector.v_idx = 0;
162 }
163 
164 /**
165  * idpf_mb_intr_init - Initialize the mailbox interrupt
166  * @adapter: adapter structure to store the mailbox vector
167  */
168 static int idpf_mb_intr_init(struct idpf_adapter *adapter)
169 {
170 	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
171 	adapter->irq_mb_handler = idpf_mb_intr_clean;
172 
173 	return idpf_mb_intr_req_irq(adapter);
174 }
175 
176 /**
177  * idpf_vector_lifo_push - push MSIX vector index onto stack
178  * @adapter: private data struct
179  * @vec_idx: vector index to store
180  */
181 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
182 {
183 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
184 
185 	lockdep_assert_held(&adapter->vector_lock);
186 
187 	if (stack->top == stack->base) {
188 		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
189 			stack->top);
190 		return -EINVAL;
191 	}
192 
193 	stack->vec_idx[--stack->top] = vec_idx;
194 
195 	return 0;
196 }
197 
198 /**
199  * idpf_vector_lifo_pop - pop MSIX vector index from stack
200  * @adapter: private data struct
201  */
202 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
203 {
204 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
205 
206 	lockdep_assert_held(&adapter->vector_lock);
207 
208 	if (stack->top == stack->size) {
209 		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
210 
211 		return -EINVAL;
212 	}
213 
214 	return stack->vec_idx[stack->top++];
215 }
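
/* Illustrative walk-through of the LIFO above, with made-up sizes: if
 * stack->size is 16 and base == top == 5, entries 0-4 form the reserved
 * default pool and entries 5-15 are the free pool. A pop hands out
 * vec_idx[5] and advances top to 6; pushing that index back decrements top
 * to 5 and stores it there again. Pop fails once top reaches size, and push
 * fails once top reaches base.
 */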
216 
217 /**
218  * idpf_vector_stash - Store the vector indexes onto the stack
219  * @adapter: private data struct
220  * @q_vector_idxs: vector index array
221  * @vec_info: info related to the number of vectors
222  *
223  * This function is a no-op if there are no vector indexes to be stashed.
224  */
225 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
226 			      struct idpf_vector_info *vec_info)
227 {
228 	int i, base = 0;
229 	u16 vec_idx;
230 
231 	lockdep_assert_held(&adapter->vector_lock);
232 
233 	if (!vec_info->num_curr_vecs)
234 		return;
235 
236 	/* For default vports, no need to stash vector allocated from the
237 	 * default pool onto the stack
238 	 */
239 	if (vec_info->default_vport)
240 		base = IDPF_MIN_Q_VEC;
241 
242 	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
243 		vec_idx = q_vector_idxs[i];
244 		idpf_vector_lifo_push(adapter, vec_idx);
245 		adapter->num_avail_msix++;
246 	}
247 }
248 
249 /**
250  * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
251  * @adapter: driver specific private structure
252  * @q_vector_idxs: vector index array
253  * @vec_info: info related to the number of vectors
254  *
255  * This is the core function to distribute the MSIX vectors acquired from the
256  * OS. It expects the caller to pass the number of vectors required as well
257  * as the number previously allocated. First, it stashes the previously
258  * allocated vector indexes onto the stack and then figures out if it can
259  * allocate the requested vectors. It can wait on acquiring the mutex lock.
260  * If the caller passes 0 as the number of requested vectors, this function
261  * just stashes the already allocated vectors and returns 0.
262  *
263  * Returns the actual number of vectors allocated on success, error value on
264  * failure. A return of 0 implies the stack has no vectors to allocate, which
265  * the caller should also treat as a failure.
266  */
267 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
268 				u16 *q_vector_idxs,
269 				struct idpf_vector_info *vec_info)
270 {
271 	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
272 	struct idpf_vector_lifo *stack;
273 	int i, j, vecid;
274 
275 	mutex_lock(&adapter->vector_lock);
276 	stack = &adapter->vector_stack;
277 	num_req_vecs = vec_info->num_req_vecs;
278 
279 	/* Stash interrupt vector indexes onto the stack if required */
280 	idpf_vector_stash(adapter, q_vector_idxs, vec_info);
281 
282 	if (!num_req_vecs)
283 		goto rel_lock;
284 
285 	if (vec_info->default_vport) {
286 		/* As IDPF_MIN_Q_VEC vectors per default vport are set aside in
287 		 * the default pool of the stack, use those for default vports
288 		 */
289 		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
290 		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
291 			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
292 			num_req_vecs--;
293 		}
294 	}
295 
296 	/* Check if the stack has enough vectors to allocate */
297 	max_vecs = min(adapter->num_avail_msix, num_req_vecs);
298 
299 	for (j = 0; j < max_vecs; j++) {
300 		vecid = idpf_vector_lifo_pop(adapter);
301 		q_vector_idxs[num_alloc_vecs++] = vecid;
302 	}
303 	adapter->num_avail_msix -= max_vecs;
304 
305 rel_lock:
306 	mutex_unlock(&adapter->vector_lock);
307 
308 	return num_alloc_vecs;
309 }
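
/* Usage sketch for idpf_req_rel_vector_indexes(). The release pattern below
 * mirrors the call made from idpf_vport_rel() later in this file; the request
 * pattern and the num_wanted/num_granted names are illustrative only.
 *
 *	struct idpf_vector_info vec_info = { };
 *
 *	Release: stash a vport's current vectors back onto the stack
 *	vec_info.num_req_vecs = 0;
 *	vec_info.num_curr_vecs = vport->num_q_vectors;
 *	vec_info.default_vport = vport->default_vport;
 *	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
 *
 *	Request: ask for vectors; the return value is how many were granted
 *	vec_info.num_req_vecs = num_wanted;
 *	num_granted = idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs,
 *						  &vec_info);
 */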
310 
311 /**
312  * idpf_intr_req - Request interrupt capabilities
313  * @adapter: adapter to enable interrupts on
314  *
315  * Returns 0 on success, negative on failure
316  */
317 int idpf_intr_req(struct idpf_adapter *adapter)
318 {
319 	u16 default_vports = idpf_get_default_vports(adapter);
320 	int num_q_vecs, total_vecs, num_vec_ids;
321 	int min_vectors, v_actual, err;
322 	unsigned int vector;
323 	u16 *vecids;
324 
325 	total_vecs = idpf_get_reserved_vecs(adapter);
326 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
327 
328 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
329 	if (err) {
330 		dev_err(&adapter->pdev->dev,
331 			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
332 
333 		return -EAGAIN;
334 	}
335 
336 	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
337 	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
338 					 total_vecs, PCI_IRQ_MSIX);
339 	if (v_actual < min_vectors) {
340 		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
341 			v_actual);
342 		err = -EAGAIN;
343 		goto send_dealloc_vecs;
344 	}
345 
346 	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
347 					GFP_KERNEL);
348 
349 	if (!adapter->msix_entries) {
350 		err = -ENOMEM;
351 		goto free_irq;
352 	}
353 
354 	idpf_set_mb_vec_id(adapter);
355 
356 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
357 	if (!vecids) {
358 		err = -ENOMEM;
359 		goto free_msix;
360 	}
361 
362 	if (adapter->req_vec_chunks) {
363 		struct virtchnl2_vector_chunks *vchunks;
364 		struct virtchnl2_alloc_vectors *ac;
365 
366 		ac = adapter->req_vec_chunks;
367 		vchunks = &ac->vchunks;
368 
369 		num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
370 					       vchunks);
371 		if (num_vec_ids < v_actual) {
372 			err = -EINVAL;
373 			goto free_vecids;
374 		}
375 	} else {
376 		int i;
377 
378 		for (i = 0; i < v_actual; i++)
379 			vecids[i] = i;
380 	}
381 
382 	for (vector = 0; vector < v_actual; vector++) {
383 		adapter->msix_entries[vector].entry = vecids[vector];
384 		adapter->msix_entries[vector].vector =
385 			pci_irq_vector(adapter->pdev, vector);
386 	}
387 
388 	adapter->num_req_msix = total_vecs;
389 	adapter->num_msix_entries = v_actual;
390 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
391 	 * after considering the minimum vectors required for each default
392 	 * vport
393 	 */
394 	adapter->num_avail_msix = v_actual - min_vectors;
395 
396 	/* Fill MSIX vector lifo stack with vector indexes */
397 	err = idpf_init_vector_stack(adapter);
398 	if (err)
399 		goto free_vecids;
400 
401 	err = idpf_mb_intr_init(adapter);
402 	if (err)
403 		goto deinit_vec_stack;
404 	idpf_mb_irq_enable(adapter);
405 	kfree(vecids);
406 
407 	return 0;
408 
409 deinit_vec_stack:
410 	idpf_deinit_vector_stack(adapter);
411 free_vecids:
412 	kfree(vecids);
413 free_msix:
414 	kfree(adapter->msix_entries);
415 	adapter->msix_entries = NULL;
416 free_irq:
417 	pci_free_irq_vectors(adapter->pdev);
418 send_dealloc_vecs:
419 	idpf_send_dealloc_vectors_msg(adapter);
420 
421 	return err;
422 }
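
/* Worked example of the accounting above, with made-up numbers and assuming
 * IDPF_MBX_Q_VEC and IDPF_MIN_Q_VEC are both 1: with 2 default vports and 16
 * reserved vectors, num_q_vecs = 15 is requested from the device and
 * min_vectors = 1 + 1 * 2 = 3, so pci_alloc_irq_vectors() may return any
 * v_actual in [3, 16]. If it returns 10, num_avail_msix = 10 - 3 = 7 vectors
 * are left for on-demand distribution via the vector stack.
 */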
423 
424 /**
425  * idpf_find_mac_filter - Search filter list for specific mac filter
426  * @vconfig: Vport config structure
427  * @macaddr: The MAC address
428  *
429  * Returns ptr to the filter object or NULL. Must be called while holding the
430  * mac_filter_list_lock.
431  **/
432 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
433 						    const u8 *macaddr)
434 {
435 	struct idpf_mac_filter *f;
436 
437 	if (!macaddr)
438 		return NULL;
439 
440 	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
441 		if (ether_addr_equal(macaddr, f->macaddr))
442 			return f;
443 	}
444 
445 	return NULL;
446 }
447 
448 /**
449  * __idpf_del_mac_filter - Delete a MAC filter from the filter list
450  * @vport_config: Vport config structure
451  * @macaddr: The MAC address
452  *
453  * Returns 0 on success, error value on failure
454  **/
455 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
456 				 const u8 *macaddr)
457 {
458 	struct idpf_mac_filter *f;
459 
460 	spin_lock_bh(&vport_config->mac_filter_list_lock);
461 	f = idpf_find_mac_filter(vport_config, macaddr);
462 	if (f) {
463 		list_del(&f->list);
464 		kfree(f);
465 	}
466 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
467 
468 	return 0;
469 }
470 
471 /**
472  * idpf_del_mac_filter - Delete a MAC filter from the filter list
473  * @vport: Main vport structure
474  * @np: Netdev private structure
475  * @macaddr: The MAC address
476  * @async: Don't wait for return message
477  *
478  * Removes filter from list and if interface is up, tells hardware about the
479  * removed filter.
480  **/
481 static int idpf_del_mac_filter(struct idpf_vport *vport,
482 			       struct idpf_netdev_priv *np,
483 			       const u8 *macaddr, bool async)
484 {
485 	struct idpf_vport_config *vport_config;
486 	struct idpf_mac_filter *f;
487 
488 	vport_config = np->adapter->vport_config[np->vport_idx];
489 
490 	spin_lock_bh(&vport_config->mac_filter_list_lock);
491 	f = idpf_find_mac_filter(vport_config, macaddr);
492 	if (f) {
493 		f->remove = true;
494 	} else {
495 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
496 
497 		return -EINVAL;
498 	}
499 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
500 
501 	if (np->state == __IDPF_VPORT_UP) {
502 		int err;
503 
504 		err = idpf_add_del_mac_filters(vport, np, false, async);
505 		if (err)
506 			return err;
507 	}
508 
509 	return __idpf_del_mac_filter(vport_config, macaddr);
510 }
511 
512 /**
513  * __idpf_add_mac_filter - Add mac filter helper function
514  * @vport_config: Vport config structure
515  * @macaddr: Address to add
516  *
517  * Takes mac_filter_list_lock spinlock to add new filter to list.
518  */
519 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
520 				 const u8 *macaddr)
521 {
522 	struct idpf_mac_filter *f;
523 
524 	spin_lock_bh(&vport_config->mac_filter_list_lock);
525 
526 	f = idpf_find_mac_filter(vport_config, macaddr);
527 	if (f) {
528 		f->remove = false;
529 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
530 
531 		return 0;
532 	}
533 
534 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
535 	if (!f) {
536 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
537 
538 		return -ENOMEM;
539 	}
540 
541 	ether_addr_copy(f->macaddr, macaddr);
542 	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
543 	f->add = true;
544 
545 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
546 
547 	return 0;
548 }
549 
550 /**
551  * idpf_add_mac_filter - Add a mac filter to the filter list
552  * @vport: Main vport structure
553  * @np: Netdev private structure
554  * @macaddr: The MAC address
555  * @async: Don't wait for return message
556  *
557  * Returns 0 on success or error on failure. If interface is up, we'll also
558  * send the virtchnl message to tell hardware about the filter.
559  **/
560 static int idpf_add_mac_filter(struct idpf_vport *vport,
561 			       struct idpf_netdev_priv *np,
562 			       const u8 *macaddr, bool async)
563 {
564 	struct idpf_vport_config *vport_config;
565 	int err;
566 
567 	vport_config = np->adapter->vport_config[np->vport_idx];
568 	err = __idpf_add_mac_filter(vport_config, macaddr);
569 	if (err)
570 		return err;
571 
572 	if (np->state == __IDPF_VPORT_UP)
573 		err = idpf_add_del_mac_filters(vport, np, true, async);
574 
575 	return err;
576 }
577 
578 /**
579  * idpf_del_all_mac_filters - Delete all MAC filters in list
580  * @vport: main vport struct
581  *
582  * Takes mac_filter_list_lock spinlock.  Deletes all filters
583  */
584 static void idpf_del_all_mac_filters(struct idpf_vport *vport)
585 {
586 	struct idpf_vport_config *vport_config;
587 	struct idpf_mac_filter *f, *ftmp;
588 
589 	vport_config = vport->adapter->vport_config[vport->idx];
590 	spin_lock_bh(&vport_config->mac_filter_list_lock);
591 
592 	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
593 				 list) {
594 		list_del(&f->list);
595 		kfree(f);
596 	}
597 
598 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
599 }
600 
601 /**
602  * idpf_restore_mac_filters - Re-add all MAC filters in list
603  * @vport: main vport struct
604  *
605  * Takes mac_filter_list_lock spinlock.  Sets the add field to true on all
606  * filters to resync them back to HW.
607  */
608 static void idpf_restore_mac_filters(struct idpf_vport *vport)
609 {
610 	struct idpf_vport_config *vport_config;
611 	struct idpf_mac_filter *f;
612 
613 	vport_config = vport->adapter->vport_config[vport->idx];
614 	spin_lock_bh(&vport_config->mac_filter_list_lock);
615 
616 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
617 		f->add = true;
618 
619 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
620 
621 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
622 				 true, false);
623 }
624 
625 /**
626  * idpf_remove_mac_filters - Remove all MAC filters in list
627  * @vport: main vport struct
628  *
629  * Takes mac_filter_list_lock spinlock. Sets the remove field to true on all
630  * filters so that they are removed in HW.
631  */
632 static void idpf_remove_mac_filters(struct idpf_vport *vport)
633 {
634 	struct idpf_vport_config *vport_config;
635 	struct idpf_mac_filter *f;
636 
637 	vport_config = vport->adapter->vport_config[vport->idx];
638 	spin_lock_bh(&vport_config->mac_filter_list_lock);
639 
640 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
641 		f->remove = true;
642 
643 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
644 
645 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
646 				 false, false);
647 }
648 
649 /**
650  * idpf_deinit_mac_addr - deinitialize mac address for vport
651  * @vport: main vport structure
652  */
653 static void idpf_deinit_mac_addr(struct idpf_vport *vport)
654 {
655 	struct idpf_vport_config *vport_config;
656 	struct idpf_mac_filter *f;
657 
658 	vport_config = vport->adapter->vport_config[vport->idx];
659 
660 	spin_lock_bh(&vport_config->mac_filter_list_lock);
661 
662 	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
663 	if (f) {
664 		list_del(&f->list);
665 		kfree(f);
666 	}
667 
668 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
669 }
670 
671 /**
672  * idpf_init_mac_addr - initialize mac address for vport
673  * @vport: main vport structure
674  * @netdev: pointer to netdev struct associated with this vport
675  */
676 static int idpf_init_mac_addr(struct idpf_vport *vport,
677 			      struct net_device *netdev)
678 {
679 	struct idpf_netdev_priv *np = netdev_priv(netdev);
680 	struct idpf_adapter *adapter = vport->adapter;
681 	int err;
682 
683 	if (is_valid_ether_addr(vport->default_mac_addr)) {
684 		eth_hw_addr_set(netdev, vport->default_mac_addr);
685 		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
686 
687 		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
688 					   false);
689 	}
690 
691 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
692 			     VIRTCHNL2_CAP_MACFILTER)) {
693 		dev_err(&adapter->pdev->dev,
694 			"MAC address is not provided and capability is not set\n");
695 
696 		return -EINVAL;
697 	}
698 
699 	eth_hw_addr_random(netdev);
700 	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
701 	if (err)
702 		return err;
703 
704 	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
705 		 vport->default_mac_addr, netdev->dev_addr);
706 	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
707 
708 	return 0;
709 }
710 
711 /**
712  * idpf_cfg_netdev - Allocate and configure a netdev for a vport
713  * @vport: main vport structure
714  *
715  * Returns 0 on success, negative value on failure.
716  */
717 static int idpf_cfg_netdev(struct idpf_vport *vport)
718 {
719 	struct idpf_adapter *adapter = vport->adapter;
720 	struct idpf_vport_config *vport_config;
721 	netdev_features_t dflt_features;
722 	netdev_features_t offloads = 0;
723 	struct idpf_netdev_priv *np;
724 	struct net_device *netdev;
725 	u16 idx = vport->idx;
726 	int err;
727 
728 	vport_config = adapter->vport_config[idx];
729 
730 	/* It's possible we already have a netdev allocated and registered for
731 	 * this vport
732 	 */
733 	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
734 		netdev = adapter->netdevs[idx];
735 		np = netdev_priv(netdev);
736 		np->vport = vport;
737 		np->vport_idx = vport->idx;
738 		np->vport_id = vport->vport_id;
739 		vport->netdev = netdev;
740 
741 		return idpf_init_mac_addr(vport, netdev);
742 	}
743 
744 	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
745 				    vport_config->max_q.max_txq,
746 				    vport_config->max_q.max_rxq);
747 	if (!netdev)
748 		return -ENOMEM;
749 
750 	vport->netdev = netdev;
751 	np = netdev_priv(netdev);
752 	np->vport = vport;
753 	np->adapter = adapter;
754 	np->vport_idx = vport->idx;
755 	np->vport_id = vport->vport_id;
756 
757 	spin_lock_init(&np->stats_lock);
758 
759 	err = idpf_init_mac_addr(vport, netdev);
760 	if (err) {
761 		free_netdev(vport->netdev);
762 		vport->netdev = NULL;
763 
764 		return err;
765 	}
766 
767 	/* assign netdev_ops */
768 	if (idpf_is_queue_model_split(vport->txq_model))
769 		netdev->netdev_ops = &idpf_netdev_ops_splitq;
770 	else
771 		netdev->netdev_ops = &idpf_netdev_ops_singleq;
772 
773 	/* set up the watchdog timeout value to be 5 seconds */
774 	netdev->watchdog_timeo = 5 * HZ;
775 
776 	netdev->dev_port = idx;
777 
778 	/* configure the MTU range */
779 	netdev->min_mtu = ETH_MIN_MTU;
780 	netdev->max_mtu = vport->max_mtu;
781 
782 	dflt_features = NETIF_F_SG	|
783 			NETIF_F_HIGHDMA;
784 
785 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
786 		dflt_features |= NETIF_F_RXHASH;
787 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
788 		dflt_features |= NETIF_F_IP_CSUM;
789 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
790 		dflt_features |= NETIF_F_IPV6_CSUM;
791 	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
792 		dflt_features |= NETIF_F_RXCSUM;
793 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
794 		dflt_features |= NETIF_F_SCTP_CRC;
795 
796 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
797 		dflt_features |= NETIF_F_TSO;
798 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
799 		dflt_features |= NETIF_F_TSO6;
800 	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
801 				VIRTCHNL2_CAP_SEG_IPV4_UDP |
802 				VIRTCHNL2_CAP_SEG_IPV6_UDP))
803 		dflt_features |= NETIF_F_GSO_UDP_L4;
804 	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
805 		offloads |= NETIF_F_GRO_HW;
806 	/* advertise to stack only if offloads for encapsulated packets are
807 	 * supported
808 	 */
809 	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
810 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
811 		offloads |= NETIF_F_GSO_UDP_TUNNEL	|
812 			    NETIF_F_GSO_GRE		|
813 			    NETIF_F_GSO_GRE_CSUM	|
814 			    NETIF_F_GSO_PARTIAL		|
815 			    NETIF_F_GSO_UDP_TUNNEL_CSUM	|
816 			    NETIF_F_GSO_IPXIP4		|
817 			    NETIF_F_GSO_IPXIP6		|
818 			    0;
819 
820 		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
821 					 IDPF_CAP_TUNNEL_TX_CSUM))
822 			netdev->gso_partial_features |=
823 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
824 
825 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
826 		offloads |= NETIF_F_TSO_MANGLEID;
827 	}
828 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
829 		offloads |= NETIF_F_LOOPBACK;
830 
831 	netdev->features |= dflt_features;
832 	netdev->hw_features |= dflt_features | offloads;
833 	netdev->hw_enc_features |= dflt_features | offloads;
834 	idpf_set_ethtool_ops(netdev);
835 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
836 
837 	/* carrier off on init to avoid Tx hangs */
838 	netif_carrier_off(netdev);
839 
840 	/* make sure transmit queues start off as stopped */
841 	netif_tx_stop_all_queues(netdev);
842 
843 	/* The vport can be arbitrarily released so we need to also track
844 	 * netdevs in the adapter struct
845 	 */
846 	adapter->netdevs[idx] = netdev;
847 
848 	return 0;
849 }
850 
851 /**
852  * idpf_get_free_slot - get the index of the next free (NULL) vport slot
853  * @adapter: adapter in which to look for a free vport slot
854  */
855 static int idpf_get_free_slot(struct idpf_adapter *adapter)
856 {
857 	unsigned int i;
858 
859 	for (i = 0; i < adapter->max_vports; i++) {
860 		if (!adapter->vports[i])
861 			return i;
862 	}
863 
864 	return IDPF_NO_FREE_SLOT;
865 }
866 
867 /**
868  * idpf_remove_features - Turn off feature configs
869  * @vport: virtual port structure
870  */
871 static void idpf_remove_features(struct idpf_vport *vport)
872 {
873 	struct idpf_adapter *adapter = vport->adapter;
874 
875 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
876 		idpf_remove_mac_filters(vport);
877 }
878 
879 /**
880  * idpf_vport_stop - Disable a vport
881  * @vport: vport to disable
882  */
883 static void idpf_vport_stop(struct idpf_vport *vport)
884 {
885 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
886 
887 	if (np->state <= __IDPF_VPORT_DOWN)
888 		return;
889 
890 	netif_carrier_off(vport->netdev);
891 	netif_tx_disable(vport->netdev);
892 
893 	idpf_send_disable_vport_msg(vport);
894 	idpf_send_disable_queues_msg(vport);
895 	idpf_send_map_unmap_queue_vector_msg(vport, false);
896 	/* Normally we ask for queues in create_vport, but if the number of
897 	 * initially requested queues has changed, for example via ethtool
898 	 * set channels, we delete the queues and then add them back
899 	 * instead of deleting and reallocating the vport.
900 	 */
901 	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
902 		idpf_send_delete_queues_msg(vport);
903 
904 	idpf_remove_features(vport);
905 
906 	vport->link_up = false;
907 	idpf_vport_intr_deinit(vport);
908 	idpf_vport_intr_rel(vport);
909 	idpf_vport_queues_rel(vport);
910 	np->state = __IDPF_VPORT_DOWN;
911 }
912 
913 /**
914  * idpf_stop - Disables a network interface
915  * @netdev: network interface device structure
916  *
917  * The stop entry point is called when an interface is de-activated by the OS,
918  * and the netdevice enters the DOWN state.  The hardware is still under the
919  * driver's control, but the netdev interface is disabled.
920  *
921  * Returns success only - not allowed to fail
922  */
923 static int idpf_stop(struct net_device *netdev)
924 {
925 	struct idpf_netdev_priv *np = netdev_priv(netdev);
926 	struct idpf_vport *vport;
927 
928 	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
929 		return 0;
930 
931 	idpf_vport_ctrl_lock(netdev);
932 	vport = idpf_netdev_to_vport(netdev);
933 
934 	idpf_vport_stop(vport);
935 
936 	idpf_vport_ctrl_unlock(netdev);
937 
938 	return 0;
939 }
940 
941 /**
942  * idpf_decfg_netdev - Unregister the netdev
943  * @vport: vport whose netdev is to be unregistered
944  */
945 static void idpf_decfg_netdev(struct idpf_vport *vport)
946 {
947 	struct idpf_adapter *adapter = vport->adapter;
948 
949 	unregister_netdev(vport->netdev);
950 	free_netdev(vport->netdev);
951 	vport->netdev = NULL;
952 
953 	adapter->netdevs[vport->idx] = NULL;
954 }
955 
956 /**
957  * idpf_vport_rel - Delete a vport and free its resources
958  * @vport: the vport being removed
959  */
960 static void idpf_vport_rel(struct idpf_vport *vport)
961 {
962 	struct idpf_adapter *adapter = vport->adapter;
963 	struct idpf_vport_config *vport_config;
964 	struct idpf_vector_info vec_info;
965 	struct idpf_rss_data *rss_data;
966 	struct idpf_vport_max_q max_q;
967 	u16 idx = vport->idx;
968 
969 	vport_config = adapter->vport_config[vport->idx];
970 	idpf_deinit_rss(vport);
971 	rss_data = &vport_config->user_config.rss_data;
972 	kfree(rss_data->rss_key);
973 	rss_data->rss_key = NULL;
974 
975 	idpf_send_destroy_vport_msg(vport);
976 
977 	/* Release all max queues back to the adapter's pool */
978 	max_q.max_rxq = vport_config->max_q.max_rxq;
979 	max_q.max_txq = vport_config->max_q.max_txq;
980 	max_q.max_bufq = vport_config->max_q.max_bufq;
981 	max_q.max_complq = vport_config->max_q.max_complq;
982 	idpf_vport_dealloc_max_qs(adapter, &max_q);
983 
984 	/* Release all the allocated vectors on the stack */
985 	vec_info.num_req_vecs = 0;
986 	vec_info.num_curr_vecs = vport->num_q_vectors;
987 	vec_info.default_vport = vport->default_vport;
988 
989 	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
990 
991 	kfree(vport->q_vector_idxs);
992 	vport->q_vector_idxs = NULL;
993 
994 	kfree(adapter->vport_params_recvd[idx]);
995 	adapter->vport_params_recvd[idx] = NULL;
996 	kfree(adapter->vport_params_reqd[idx]);
997 	adapter->vport_params_reqd[idx] = NULL;
998 	if (adapter->vport_config[idx]) {
999 		kfree(adapter->vport_config[idx]->req_qs_chunks);
1000 		adapter->vport_config[idx]->req_qs_chunks = NULL;
1001 	}
1002 	kfree(vport);
1003 	adapter->num_alloc_vports--;
1004 }
1005 
1006 /**
1007  * idpf_vport_dealloc - cleanup and release a given vport
1008  * @vport: pointer to idpf vport structure
1009  *
1010  * returns nothing
1011  */
1012 static void idpf_vport_dealloc(struct idpf_vport *vport)
1013 {
1014 	struct idpf_adapter *adapter = vport->adapter;
1015 	unsigned int i = vport->idx;
1016 
1017 	idpf_deinit_mac_addr(vport);
1018 	idpf_vport_stop(vport);
1019 
1020 	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1021 		idpf_decfg_netdev(vport);
1022 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1023 		idpf_del_all_mac_filters(vport);
1024 
1025 	if (adapter->netdevs[i]) {
1026 		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
1027 
1028 		np->vport = NULL;
1029 	}
1030 
1031 	idpf_vport_rel(vport);
1032 
1033 	adapter->vports[i] = NULL;
1034 	adapter->next_vport = idpf_get_free_slot(adapter);
1035 }
1036 
1037 /**
1038  * idpf_is_hsplit_supported - check whether the header split is supported
1039  * @vport: virtual port to check the capability for
1040  *
1041  * Return: true if it's supported by the HW/FW, false if not.
1042  */
1043 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
1044 {
1045 	return idpf_is_queue_model_split(vport->rxq_model) &&
1046 	       idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
1047 				   IDPF_CAP_HSPLIT);
1048 }
1049 
1050 /**
1051  * idpf_vport_get_hsplit - get the current header split feature state
1052  * @vport: virtual port to query the state for
1053  *
1054  * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
1055  *         ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
1056  *         ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
1057  */
1058 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
1059 {
1060 	const struct idpf_vport_user_config_data *config;
1061 
1062 	if (!idpf_is_hsplit_supported(vport))
1063 		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1064 
1065 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1066 
1067 	return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
1068 	       ETHTOOL_TCP_DATA_SPLIT_ENABLED :
1069 	       ETHTOOL_TCP_DATA_SPLIT_DISABLED;
1070 }
1071 
1072 /**
1073  * idpf_vport_set_hsplit - enable or disable header split on a given vport
1074  * @vport: virtual port to configure
1075  * @val: Ethtool flag controlling the header split state
1076  *
1077  * Return: true on success, false if not supported by the HW.
1078  */
1079 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
1080 {
1081 	struct idpf_vport_user_config_data *config;
1082 
1083 	if (!idpf_is_hsplit_supported(vport))
1084 		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1085 
1086 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1087 
1088 	switch (val) {
1089 	case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
1090 		/* Default is to enable */
1091 	case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
1092 		__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1093 		return true;
1094 	case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
1095 		__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1096 		return true;
1097 	default:
1098 		return false;
1099 	}
1100 }
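
/* Caller sketch for the two hsplit helpers above. This is hypothetical and
 * not taken from the driver's ethtool code; 'kring' stands in for a
 * struct kernel_ethtool_ringparam in a ringparam handler.
 *
 *	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
 *
 *	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split))
 *		return -EOPNOTSUPP;
 */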
1101 
1102 /**
1103  * idpf_vport_alloc - Allocates the next available struct vport in the adapter
1104  * @adapter: board private structure
1105  * @max_q: vport max queue info
1106  *
1107  * returns a pointer to a vport on success, NULL on failure.
1108  */
1109 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
1110 					   struct idpf_vport_max_q *max_q)
1111 {
1112 	struct idpf_rss_data *rss_data;
1113 	u16 idx = adapter->next_vport;
1114 	struct idpf_vport *vport;
1115 	u16 num_max_q;
1116 
1117 	if (idx == IDPF_NO_FREE_SLOT)
1118 		return NULL;
1119 
1120 	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1121 	if (!vport)
1122 		return vport;
1123 
1124 	if (!adapter->vport_config[idx]) {
1125 		struct idpf_vport_config *vport_config;
1126 
1127 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
1128 		if (!vport_config) {
1129 			kfree(vport);
1130 
1131 			return NULL;
1132 		}
1133 
1134 		adapter->vport_config[idx] = vport_config;
1135 	}
1136 
1137 	vport->idx = idx;
1138 	vport->adapter = adapter;
1139 	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
1140 	vport->default_vport = adapter->num_alloc_vports <
1141 			       idpf_get_default_vports(adapter);
1142 
1143 	num_max_q = max(max_q->max_txq, max_q->max_rxq);
1144 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
1145 	if (!vport->q_vector_idxs) {
1146 		kfree(vport);
1147 
1148 		return NULL;
1149 	}
1150 	idpf_vport_init(vport, max_q);
1151 
1152 	/* This alloc is done separately from the LUT because it's not strictly
1153 	 * dependent on how many queues we have. If we change the number of queues
1154 	 * and soft reset we'll need a new LUT but the key can remain the same
1155 	 * for as long as the vport exists.
1156 	 */
1157 	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
1158 	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
1159 	if (!rss_data->rss_key) {
		/* also free q_vector_idxs allocated above to avoid leaking it */
		kfree(vport->q_vector_idxs);
1160 		kfree(vport);
1161 
1162 		return NULL;
1163 	}
1164 	/* Initialize default rss key */
1165 	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
1166 
1167 	/* fill vport slot in the adapter struct */
1168 	adapter->vports[idx] = vport;
1169 	adapter->vport_ids[idx] = idpf_get_vport_id(vport);
1170 
1171 	adapter->num_alloc_vports++;
1172 	/* prepare adapter->next_vport for next use */
1173 	adapter->next_vport = idpf_get_free_slot(adapter);
1174 
1175 	return vport;
1176 }
1177 
1178 /**
1179  * idpf_get_stats64 - get statistics for network device structure
1180  * @netdev: network interface device structure
1181  * @stats: main device statistics structure
1182  */
1183 static void idpf_get_stats64(struct net_device *netdev,
1184 			     struct rtnl_link_stats64 *stats)
1185 {
1186 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1187 
1188 	spin_lock_bh(&np->stats_lock);
1189 	*stats = np->netstats;
1190 	spin_unlock_bh(&np->stats_lock);
1191 }
1192 
1193 /**
1194  * idpf_statistics_task - Delayed task to get statistics over mailbox
1195  * @work: work_struct handle to our data
1196  */
1197 void idpf_statistics_task(struct work_struct *work)
1198 {
1199 	struct idpf_adapter *adapter;
1200 	int i;
1201 
1202 	adapter = container_of(work, struct idpf_adapter, stats_task.work);
1203 
1204 	for (i = 0; i < adapter->max_vports; i++) {
1205 		struct idpf_vport *vport = adapter->vports[i];
1206 
1207 		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1208 			idpf_send_get_stats_msg(vport);
1209 	}
1210 
1211 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1212 			   msecs_to_jiffies(10000));
1213 }
1214 
1215 /**
1216  * idpf_mbx_task - Delayed task to handle mailbox responses
1217  * @work: work_struct handle
1218  */
1219 void idpf_mbx_task(struct work_struct *work)
1220 {
1221 	struct idpf_adapter *adapter;
1222 
1223 	adapter = container_of(work, struct idpf_adapter, mbx_task.work);
1224 
1225 	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
1226 		idpf_mb_irq_enable(adapter);
1227 	else
1228 		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
1229 				   msecs_to_jiffies(300));
1230 
1231 	idpf_recv_mb_msg(adapter);
1232 }
1233 
1234 /**
1235  * idpf_service_task - Delayed task to detect hardware resets
1236  * @work: work_struct handle to our data
1237  *
1238  */
1239 void idpf_service_task(struct work_struct *work)
1240 {
1241 	struct idpf_adapter *adapter;
1242 
1243 	adapter = container_of(work, struct idpf_adapter, serv_task.work);
1244 
1245 	if (idpf_is_reset_detected(adapter) &&
1246 	    !idpf_is_reset_in_prog(adapter) &&
1247 	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
1248 		dev_info(&adapter->pdev->dev, "HW reset detected\n");
1249 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
1250 		queue_delayed_work(adapter->vc_event_wq,
1251 				   &adapter->vc_event_task,
1252 				   msecs_to_jiffies(10));
1253 	}
1254 
1255 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
1256 			   msecs_to_jiffies(300));
1257 }
1258 
1259 /**
1260  * idpf_restore_features - Restore feature configs
1261  * @vport: virtual port structure
1262  */
1263 static void idpf_restore_features(struct idpf_vport *vport)
1264 {
1265 	struct idpf_adapter *adapter = vport->adapter;
1266 
1267 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
1268 		idpf_restore_mac_filters(vport);
1269 }
1270 
1271 /**
1272  * idpf_set_real_num_queues - set number of queues for netdev
1273  * @vport: virtual port structure
1274  *
1275  * Returns 0 on success, negative on failure.
1276  */
1277 static int idpf_set_real_num_queues(struct idpf_vport *vport)
1278 {
1279 	int err;
1280 
1281 	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
1282 	if (err)
1283 		return err;
1284 
1285 	return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
1286 }
1287 
1288 /**
1289  * idpf_up_complete - Complete interface up sequence
1290  * @vport: virtual port structure
1291  *
1292  * Returns 0 on success, negative on failure.
1293  */
1294 static int idpf_up_complete(struct idpf_vport *vport)
1295 {
1296 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1297 
1298 	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
1299 		netif_carrier_on(vport->netdev);
1300 		netif_tx_start_all_queues(vport->netdev);
1301 	}
1302 
1303 	np->state = __IDPF_VPORT_UP;
1304 
1305 	return 0;
1306 }
1307 
1308 /**
1309  * idpf_rx_init_buf_tail - Write initial buffer ring tail value
1310  * @vport: virtual port struct
1311  */
1312 static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
1313 {
1314 	int i, j;
1315 
1316 	for (i = 0; i < vport->num_rxq_grp; i++) {
1317 		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
1318 
1319 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1320 			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1321 				struct idpf_queue *q =
1322 					&grp->splitq.bufq_sets[j].bufq;
1323 
1324 				writel(q->next_to_alloc, q->tail);
1325 			}
1326 		} else {
1327 			for (j = 0; j < grp->singleq.num_rxq; j++) {
1328 				struct idpf_queue *q =
1329 					grp->singleq.rxqs[j];
1330 
1331 				writel(q->next_to_alloc, q->tail);
1332 			}
1333 		}
1334 	}
1335 }
1336 
1337 /**
1338  * idpf_vport_open - Bring up a vport
1339  * @vport: vport to bring up
1340  * @alloc_res: allocate queue resources
1341  */
1342 static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
1343 {
1344 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1345 	struct idpf_adapter *adapter = vport->adapter;
1346 	struct idpf_vport_config *vport_config;
1347 	int err;
1348 
1349 	if (np->state != __IDPF_VPORT_DOWN)
1350 		return -EBUSY;
1351 
1352 	/* we do not allow interface up just yet */
1353 	netif_carrier_off(vport->netdev);
1354 
1355 	if (alloc_res) {
1356 		err = idpf_vport_queues_alloc(vport);
1357 		if (err)
1358 			return err;
1359 	}
1360 
1361 	err = idpf_vport_intr_alloc(vport);
1362 	if (err) {
1363 		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
1364 			vport->vport_id, err);
1365 		goto queues_rel;
1366 	}
1367 
1368 	err = idpf_vport_queue_ids_init(vport);
1369 	if (err) {
1370 		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
1371 			vport->vport_id, err);
1372 		goto intr_rel;
1373 	}
1374 
1375 	err = idpf_vport_intr_init(vport);
1376 	if (err) {
1377 		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
1378 			vport->vport_id, err);
1379 		goto intr_rel;
1380 	}
1381 
1382 	err = idpf_rx_bufs_init_all(vport);
1383 	if (err) {
1384 		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
1385 			vport->vport_id, err);
1386 		goto intr_rel;
1387 	}
1388 
1389 	err = idpf_queue_reg_init(vport);
1390 	if (err) {
1391 		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
1392 			vport->vport_id, err);
1393 		goto intr_rel;
1394 	}
1395 
1396 	idpf_rx_init_buf_tail(vport);
1397 
1398 	err = idpf_send_config_queues_msg(vport);
1399 	if (err) {
1400 		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
1401 			vport->vport_id, err);
1402 		goto intr_deinit;
1403 	}
1404 
1405 	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
1406 	if (err) {
1407 		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
1408 			vport->vport_id, err);
1409 		goto intr_deinit;
1410 	}
1411 
1412 	err = idpf_send_enable_queues_msg(vport);
1413 	if (err) {
1414 		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
1415 			vport->vport_id, err);
1416 		goto unmap_queue_vectors;
1417 	}
1418 
1419 	err = idpf_send_enable_vport_msg(vport);
1420 	if (err) {
1421 		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
1422 			vport->vport_id, err);
1423 		err = -EAGAIN;
1424 		goto disable_queues;
1425 	}
1426 
1427 	idpf_restore_features(vport);
1428 
1429 	vport_config = adapter->vport_config[vport->idx];
1430 	if (vport_config->user_config.rss_data.rss_lut)
1431 		err = idpf_config_rss(vport);
1432 	else
1433 		err = idpf_init_rss(vport);
1434 	if (err) {
1435 		dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
1436 			vport->vport_id, err);
1437 		goto disable_vport;
1438 	}
1439 
1440 	err = idpf_up_complete(vport);
1441 	if (err) {
1442 		dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
1443 			vport->vport_id, err);
1444 		goto deinit_rss;
1445 	}
1446 
1447 	return 0;
1448 
1449 deinit_rss:
1450 	idpf_deinit_rss(vport);
1451 disable_vport:
1452 	idpf_send_disable_vport_msg(vport);
1453 disable_queues:
1454 	idpf_send_disable_queues_msg(vport);
1455 unmap_queue_vectors:
1456 	idpf_send_map_unmap_queue_vector_msg(vport, false);
1457 intr_deinit:
1458 	idpf_vport_intr_deinit(vport);
1459 intr_rel:
1460 	idpf_vport_intr_rel(vport);
1461 queues_rel:
1462 	idpf_vport_queues_rel(vport);
1463 
1464 	return err;
1465 }
1466 
1467 /**
1468  * idpf_init_task - Delayed initialization task
1469  * @work: work_struct handle to our data
1470  *
1471  * Init task finishes up pending work started in probe. Due to the asynchronous
1472  * nature in which the device communicates with hardware, we may have to wait
1473  * several milliseconds to get a response.  Instead of busy polling in probe,
1474  * pulling it out into a delayed work task prevents us from bogging down the
1475  * whole system waiting for a response from hardware.
1476  */
1477 void idpf_init_task(struct work_struct *work)
1478 {
1479 	struct idpf_vport_config *vport_config;
1480 	struct idpf_vport_max_q max_q;
1481 	struct idpf_adapter *adapter;
1482 	struct idpf_netdev_priv *np;
1483 	struct idpf_vport *vport;
1484 	u16 num_default_vports;
1485 	struct pci_dev *pdev;
1486 	bool default_vport;
1487 	int index, err;
1488 
1489 	adapter = container_of(work, struct idpf_adapter, init_task.work);
1490 
1491 	num_default_vports = idpf_get_default_vports(adapter);
1492 	if (adapter->num_alloc_vports < num_default_vports)
1493 		default_vport = true;
1494 	else
1495 		default_vport = false;
1496 
1497 	err = idpf_vport_alloc_max_qs(adapter, &max_q);
1498 	if (err)
1499 		goto unwind_vports;
1500 
1501 	err = idpf_send_create_vport_msg(adapter, &max_q);
1502 	if (err) {
1503 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1504 		goto unwind_vports;
1505 	}
1506 
1507 	pdev = adapter->pdev;
1508 	vport = idpf_vport_alloc(adapter, &max_q);
1509 	if (!vport) {
1510 		err = -EFAULT;
1511 		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
1512 			err);
1513 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1514 		goto unwind_vports;
1515 	}
1516 
1517 	index = vport->idx;
1518 	vport_config = adapter->vport_config[index];
1519 
1520 	init_waitqueue_head(&vport->sw_marker_wq);
1521 
1522 	spin_lock_init(&vport_config->mac_filter_list_lock);
1523 
1524 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
1525 
1526 	err = idpf_check_supported_desc_ids(vport);
1527 	if (err) {
1528 		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
1529 		goto cfg_netdev_err;
1530 	}
1531 
1532 	if (idpf_cfg_netdev(vport))
1533 		goto cfg_netdev_err;
1534 
1535 	err = idpf_send_get_rx_ptype_msg(vport);
1536 	if (err)
1537 		goto handle_err;
1538 
1539 	/* Once state is put into DOWN, driver is ready for dev_open */
1540 	np = netdev_priv(vport->netdev);
1541 	np->state = __IDPF_VPORT_DOWN;
1542 	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
1543 		idpf_vport_open(vport, true);
1544 
1545 	/* Requeue the 'idpf_init_task' work and return until all the
1546 	 * default vports are created
1547 	 */
1548 	if (adapter->num_alloc_vports < num_default_vports) {
1549 		queue_delayed_work(adapter->init_wq, &adapter->init_task,
1550 				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
1551 
1552 		return;
1553 	}
1554 
1555 	for (index = 0; index < adapter->max_vports; index++) {
1556 		if (adapter->netdevs[index] &&
1557 		    !test_bit(IDPF_VPORT_REG_NETDEV,
1558 			      adapter->vport_config[index]->flags)) {
1559 			register_netdev(adapter->netdevs[index]);
1560 			set_bit(IDPF_VPORT_REG_NETDEV,
1561 				adapter->vport_config[index]->flags);
1562 		}
1563 	}
1564 
1565 	/* As all the required vports are created, clear the reset flag
1566 	 * unconditionally here in case we were in reset and the link was down.
1567 	 */
1568 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1569 	/* Start the statistics task now */
1570 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1571 			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
1572 
1573 	return;
1574 
1575 handle_err:
1576 	idpf_decfg_netdev(vport);
1577 cfg_netdev_err:
1578 	idpf_vport_rel(vport);
1579 	adapter->vports[index] = NULL;
1580 unwind_vports:
1581 	if (default_vport) {
1582 		for (index = 0; index < adapter->max_vports; index++) {
1583 			if (adapter->vports[index])
1584 				idpf_vport_dealloc(adapter->vports[index]);
1585 		}
1586 	}
1587 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1588 }
1589 
1590 /**
1591  * idpf_sriov_ena - Enable or change number of VFs
1592  * @adapter: private data struct
1593  * @num_vfs: number of VFs to allocate
1594  */
1595 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
1596 {
1597 	struct device *dev = &adapter->pdev->dev;
1598 	int err;
1599 
1600 	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
1601 	if (err) {
1602 		dev_err(dev, "Failed to allocate VFs: %d\n", err);
1603 
1604 		return err;
1605 	}
1606 
1607 	err = pci_enable_sriov(adapter->pdev, num_vfs);
1608 	if (err) {
1609 		idpf_send_set_sriov_vfs_msg(adapter, 0);
1610 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1611 
1612 		return err;
1613 	}
1614 
1615 	adapter->num_vfs = num_vfs;
1616 
1617 	return num_vfs;
1618 }
1619 
1620 /**
1621  * idpf_sriov_configure - Configure the requested VFs
1622  * @pdev: pointer to a pci_dev structure
1623  * @num_vfs: number of vfs to allocate
1624  *
1625  * Enable or change the number of VFs. Called when the user updates the number
1626  * of VFs in sysfs.
1627  **/
1628 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
1629 {
1630 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
1631 
1632 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
1633 		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
1634 
1635 		return -EOPNOTSUPP;
1636 	}
1637 
1638 	if (num_vfs)
1639 		return idpf_sriov_ena(adapter, num_vfs);
1640 
1641 	if (pci_vfs_assigned(pdev)) {
1642 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
1643 
1644 		return -EBUSY;
1645 	}
1646 
1647 	pci_disable_sriov(adapter->pdev);
1648 	idpf_send_set_sriov_vfs_msg(adapter, 0);
1649 	adapter->num_vfs = 0;
1650 
1651 	return 0;
1652 }
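
/* For reference, this callback is reached through the standard PCI sysfs
 * interface mentioned in the kdoc above, e.g. (device path abbreviated):
 *
 *	echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs	(enable 4 VFs)
 *	echo 0 > /sys/bus/pci/devices/<bdf>/sriov_numvfs	(free the VFs)
 */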
1653 
1654 /**
1655  * idpf_deinit_task - Device deinit routine
1656  * @adapter: Driver specific private structure
1657  *
1658  * Extended remove logic which will be used for
1659  * hard reset as well
1660  */
1661 void idpf_deinit_task(struct idpf_adapter *adapter)
1662 {
1663 	unsigned int i;
1664 
1665 	/* Wait until the init_task is done else this thread might release
1666 	 * the resources first and the other thread might end up in a bad state
1667 	 */
1668 	cancel_delayed_work_sync(&adapter->init_task);
1669 
1670 	if (!adapter->vports)
1671 		return;
1672 
1673 	cancel_delayed_work_sync(&adapter->stats_task);
1674 
1675 	for (i = 0; i < adapter->max_vports; i++) {
1676 		if (adapter->vports[i])
1677 			idpf_vport_dealloc(adapter->vports[i]);
1678 	}
1679 }
1680 
1681 /**
1682  * idpf_check_reset_complete - check that reset is complete
1683  * @hw: pointer to hw struct
1684  * @reset_reg: struct with reset registers
1685  *
1686  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
1687  **/
1688 static int idpf_check_reset_complete(struct idpf_hw *hw,
1689 				     struct idpf_reset_reg *reset_reg)
1690 {
1691 	struct idpf_adapter *adapter = hw->back;
1692 	int i;
1693 
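	/* Poll for roughly 10 to 20 seconds: 2000 iterations of 5-10 ms sleeps */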
1694 	for (i = 0; i < 2000; i++) {
1695 		u32 reg_val = readl(reset_reg->rstat);
1696 
1697 		/* 0xFFFFFFFF might be read if other side hasn't cleared the
1698 		 * register for us yet and 0xFFFFFFFF is not a valid value for
1699 		 * the register, so treat that as invalid.
1700 		 */
1701 		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
1702 			return 0;
1703 
1704 		usleep_range(5000, 10000);
1705 	}
1706 
1707 	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
1708 	/* Clear the reset flag unconditionally here since the reset
1709 	 * technically isn't in progress anymore from the driver's perspective
1710 	 */
1711 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1712 
1713 	return -EBUSY;
1714 }
1715 
1716 /**
1717  * idpf_set_vport_state - Save the vport states to be restored after the reset
1718  * @adapter: Driver specific private structure
1719  */
1720 static void idpf_set_vport_state(struct idpf_adapter *adapter)
1721 {
1722 	u16 i;
1723 
1724 	for (i = 0; i < adapter->max_vports; i++) {
1725 		struct idpf_netdev_priv *np;
1726 
1727 		if (!adapter->netdevs[i])
1728 			continue;
1729 
1730 		np = netdev_priv(adapter->netdevs[i]);
1731 		if (np->state == __IDPF_VPORT_UP)
1732 			set_bit(IDPF_VPORT_UP_REQUESTED,
1733 				adapter->vport_config[i]->flags);
1734 	}
1735 }
1736 
1737 /**
1738  * idpf_init_hard_reset - Initiate a hardware reset
1739  * @adapter: Driver specific private structure
1740  *
1741  * Deallocate the vports and all the resources associated with them and
1742  * reallocate. Also reinitialize the mailbox. Return 0 on success,
1743  * negative on failure.
1744  */
1745 static int idpf_init_hard_reset(struct idpf_adapter *adapter)
1746 {
1747 	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
1748 	struct device *dev = &adapter->pdev->dev;
1749 	struct net_device *netdev;
1750 	int err;
1751 	u16 i;
1752 
1753 	mutex_lock(&adapter->vport_ctrl_lock);
1754 
1755 	dev_info(dev, "Device HW Reset initiated\n");
1756 
1757 	/* Avoid TX hangs on reset */
1758 	for (i = 0; i < adapter->max_vports; i++) {
1759 		netdev = adapter->netdevs[i];
1760 		if (!netdev)
1761 			continue;
1762 
1763 		netif_carrier_off(netdev);
1764 		netif_tx_disable(netdev);
1765 	}
1766 
1767 	/* Prepare for reset */
1768 	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1769 		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
1770 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
1771 		bool is_reset = idpf_is_reset_detected(adapter);
1772 
1773 		idpf_set_vport_state(adapter);
1774 		idpf_vc_core_deinit(adapter);
1775 		if (!is_reset)
1776 			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
1777 		idpf_deinit_dflt_mbx(adapter);
1778 	} else {
1779 		dev_err(dev, "Unhandled hard reset cause\n");
1780 		err = -EBADRQC;
1781 		goto unlock_mutex;
1782 	}
1783 
1784 	/* Wait for reset to complete */
1785 	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
1786 	if (err) {
1787 		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
1788 			adapter->state);
1789 		goto unlock_mutex;
1790 	}
1791 
1792 	/* Reset is complete and so start building the driver resources again */
1793 	err = idpf_init_dflt_mbx(adapter);
1794 	if (err) {
1795 		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
1796 		goto unlock_mutex;
1797 	}
1798 
1799 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
1800 
1801 	/* Initialize the state machine, also allocate memory and request
1802 	 * resources
1803 	 */
1804 	err = idpf_vc_core_init(adapter);
1805 	if (err) {
1806 		idpf_deinit_dflt_mbx(adapter);
1807 		goto unlock_mutex;
1808 	}
1809 
1810 	/* Wait till all the vports are initialized to release the reset lock,
1811 	 * else user space callbacks may access uninitialized vports
1812 	 */
1813 	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1814 		msleep(100);
1815 
1816 unlock_mutex:
1817 	mutex_unlock(&adapter->vport_ctrl_lock);
1818 
1819 	return err;
1820 }
1821 
1822 /**
1823  * idpf_vc_event_task - Handle virtchannel event logic
1824  * @work: work queue struct
1825  */
1826 void idpf_vc_event_task(struct work_struct *work)
1827 {
1828 	struct idpf_adapter *adapter;
1829 
1830 	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
1831 
1832 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1833 		return;
1834 
1835 	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
1836 	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1837 		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1838 		idpf_init_hard_reset(adapter);
1839 	}
1840 }
1841 
1842 /**
1843  * idpf_initiate_soft_reset - Initiate a software reset
1844  * @vport: virtual port data struct
1845  * @reset_cause: reason for the soft reset
1846  *
 * A soft reset only reallocates vport queue resources. Returns 0 on success,
1848  * negative on failure.
1849  */
1850 int idpf_initiate_soft_reset(struct idpf_vport *vport,
1851 			     enum idpf_vport_reset_cause reset_cause)
1852 {
1853 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1854 	enum idpf_vport_state current_state = np->state;
1855 	struct idpf_adapter *adapter = vport->adapter;
1856 	struct idpf_vport *new_vport;
1857 	int err, i;
1858 
	/* If the system is low on memory, we can end up in a bad state if we
	 * free all the memory for queue resources and then try to allocate
	 * them again. Instead, pre-allocate the new resources before doing
	 * anything and bail if the allocation fails.
	 *
	 * Make a clone of the existing vport to mimic its current
	 * configuration, then modify the new structure with any requested
	 * changes. Once the allocation of the new resources is done, stop the
	 * existing vport and copy the configuration to the main vport. If an
	 * error occurs, the existing vport is left untouched.
	 */
1871 	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1872 	if (!new_vport)
1873 		return -ENOMEM;
1874 
	/* This purposely avoids copying the end of the struct because it
	 * contains wait_queues, mutexes, and other state we don't want to
	 * duplicate. Nothing below should use those members from new_vport;
	 * always refer to them through vport instead.
	 */
1880 	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
1881 
1882 	/* Adjust resource parameters prior to reallocating resources */
1883 	switch (reset_cause) {
1884 	case IDPF_SR_Q_CHANGE:
1885 		err = idpf_vport_adjust_qs(new_vport);
1886 		if (err)
1887 			goto free_vport;
1888 		break;
1889 	case IDPF_SR_Q_DESC_CHANGE:
1890 		/* Update queue parameters before allocating resources */
1891 		idpf_vport_calc_num_q_desc(new_vport);
1892 		break;
1893 	case IDPF_SR_MTU_CHANGE:
1894 	case IDPF_SR_RSC_CHANGE:
1895 		break;
1896 	default:
1897 		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
1898 		err = -EINVAL;
1899 		goto free_vport;
1900 	}
1901 
1902 	err = idpf_vport_queues_alloc(new_vport);
1903 	if (err)
1904 		goto free_vport;
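
	/* If the vport is already down there is no stop path left to delete
	 * the old queues, so delete them explicitly; otherwise flag them for
	 * deletion and let idpf_vport_stop() handle it.
	 */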
1905 	if (current_state <= __IDPF_VPORT_DOWN) {
1906 		idpf_send_delete_queues_msg(vport);
1907 	} else {
1908 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
1909 		idpf_vport_stop(vport);
1910 	}
1911 
1912 	idpf_deinit_rss(vport);
	/* We're passing in vport here because we need its wait_queue to send
	 * the message, and it should be getting all the vport config data out
	 * of the adapter. Be careful not to add code to add_queues that
	 * changes the vport config within vport itself, as it will be wiped
	 * by the memcpy below.
	 */
1919 	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
1920 				       new_vport->num_complq,
1921 				       new_vport->num_rxq,
1922 				       new_vport->num_bufq);
1923 	if (err)
1924 		goto err_reset;
1925 
1926 	/* Same comment as above regarding avoiding copying the wait_queues and
1927 	 * mutexes applies here. We do not want to mess with those if possible.
1928 	 */
1929 	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
1930 
	/* Since idpf_vport_queues_alloc was called with new_vport, the queue
	 * back pointers are currently pointing to the local new_vport. Reset
	 * the back pointers to the original vport here.
	 */
1935 	for (i = 0; i < vport->num_txq_grp; i++) {
1936 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1937 		int j;
1938 
1939 		tx_qgrp->vport = vport;
1940 		for (j = 0; j < tx_qgrp->num_txq; j++)
1941 			tx_qgrp->txqs[j]->vport = vport;
1942 
1943 		if (idpf_is_queue_model_split(vport->txq_model))
1944 			tx_qgrp->complq->vport = vport;
1945 	}
1946 
1947 	for (i = 0; i < vport->num_rxq_grp; i++) {
1948 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1949 		struct idpf_queue *q;
1950 		u16 num_rxq;
1951 		int j;
1952 
1953 		rx_qgrp->vport = vport;
1954 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
1955 			rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
1956 
1957 		if (idpf_is_queue_model_split(vport->rxq_model))
1958 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1959 		else
1960 			num_rxq = rx_qgrp->singleq.num_rxq;
1961 
1962 		for (j = 0; j < num_rxq; j++) {
1963 			if (idpf_is_queue_model_split(vport->rxq_model))
1964 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1965 			else
1966 				q = rx_qgrp->singleq.rxqs[j];
1967 			q->vport = vport;
1968 		}
1969 	}
1970 
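	/* A queue count change can alter how many vectors the vport needs,
	 * so re-derive its vector index assignments for the new layout.
	 */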
1971 	if (reset_cause == IDPF_SR_Q_CHANGE)
1972 		idpf_vport_alloc_vec_indexes(vport);
1973 
1974 	err = idpf_set_real_num_queues(vport);
1975 	if (err)
1976 		goto err_reset;
1977 
1978 	if (current_state == __IDPF_VPORT_UP)
1979 		err = idpf_vport_open(vport, false);
1980 
1981 	kfree(new_vport);
1982 
1983 	return err;
1984 
1985 err_reset:
1986 	idpf_vport_queues_rel(new_vport);
1987 free_vport:
1988 	kfree(new_vport);
1989 
1990 	return err;
1991 }
1992 
1993 /**
1994  * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1995  * @netdev: the netdevice
1996  * @addr: address to add
1997  *
1998  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1999  * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
2000  * meaning we cannot sleep in this context. Due to this, we have to add the
2001  * filter and send the virtchnl message asynchronously without waiting for the
2002  * response from the other side. We won't know whether or not the operation
2003  * actually succeeded until we get the message back.  Returns 0 on success,
2004  * negative on failure.
2005  */
2006 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
2007 {
2008 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2009 
2010 	return idpf_add_mac_filter(np->vport, np, addr, true);
2011 }
2012 
2013 /**
2014  * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2015  * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this we have to delete the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
2023  * actually succeeded until we get the message back. Returns 0 on success,
2024  * negative on failure.
2025  */
2026 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
2027 {
2028 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2029 
2030 	/* Under some circumstances, we might receive a request to delete
2031 	 * our own device address from our uc list. Because we store the
2032 	 * device address in the VSI's MAC filter list, we need to ignore
2033 	 * such requests and not delete our device address from this list.
2034 	 */
2035 	if (ether_addr_equal(addr, netdev->dev_addr))
2036 		return 0;
2037 
2038 	idpf_del_mac_filter(np->vport, np, addr, true);
2039 
2040 	return 0;
2041 }
2042 
2043 /**
2044  * idpf_set_rx_mode - NDO callback to set the netdev filters
2045  * @netdev: network interface device structure
2046  *
 * The stack takes the addr_list_lock spinlock before calling our
 * .set_rx_mode, so we cannot sleep in this context.
2049  */
2050 static void idpf_set_rx_mode(struct net_device *netdev)
2051 {
2052 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2053 	struct idpf_vport_user_config_data *config_data;
2054 	struct idpf_adapter *adapter;
2055 	bool changed = false;
2056 	struct device *dev;
2057 	int err;
2058 
2059 	adapter = np->adapter;
2060 	dev = &adapter->pdev->dev;
2061 
2062 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
2063 		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2064 		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2065 	}
2066 
2067 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
2068 		return;
2069 
2070 	config_data = &adapter->vport_config[np->vport_idx]->user_config;
2071 	/* IFF_PROMISC enables both unicast and multicast promiscuous,
2072 	 * while IFF_ALLMULTI only enables multicast such that:
2073 	 *
2074 	 * promisc  + allmulti		= unicast | multicast
2075 	 * promisc  + !allmulti		= unicast | multicast
2076 	 * !promisc + allmulti		= multicast
2077 	 */
2078 	if ((netdev->flags & IFF_PROMISC) &&
2079 	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2080 		changed = true;
		dev_info(dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
			dev_info(dev, "Entering multicast promiscuous mode\n");
2084 	}
2085 
2086 	if (!(netdev->flags & IFF_PROMISC) &&
2087 	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2088 		changed = true;
2089 		dev_info(dev, "Leaving promiscuous mode\n");
2090 	}
2091 
	if ((netdev->flags & IFF_ALLMULTI) &&
2093 	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2094 		changed = true;
2095 		dev_info(dev, "Entering multicast promiscuous mode\n");
2096 	}
2097 
2098 	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
2099 	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2100 		changed = true;
2101 		dev_info(dev, "Leaving multicast promiscuous mode\n");
2102 	}
2103 
2104 	if (!changed)
2105 		return;
2106 
2107 	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
2108 	if (err)
2109 		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
2110 }
2111 
2112 /**
2113  * idpf_vport_manage_rss_lut - disable/enable RSS
2114  * @vport: the vport being changed
2115  *
 * On a request to disable RSS, zero out the RSS LUT; on a request to enable
 * RSS, restore the cached (default or user-configured) LUT. Returns 0 on
 * success, negative on failure.
2119  */
2120 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
2121 {
2122 	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
2123 	struct idpf_rss_data *rss_data;
2124 	u16 idx = vport->idx;
2125 	int lut_size;
2126 
2127 	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
2128 	lut_size = rss_data->rss_lut_size * sizeof(u32);
2129 
2130 	if (ena) {
2131 		/* This will contain the default or user configured LUT */
2132 		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
2133 	} else {
2134 		/* Save a copy of the current LUT to be restored later if
2135 		 * requested.
2136 		 */
2137 		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
2138 
2139 		/* Zero out the current LUT to disable */
2140 		memset(rss_data->rss_lut, 0, lut_size);
2141 	}
2142 
2143 	return idpf_config_rss(vport);
2144 }
2145 
2146 /**
2147  * idpf_set_features - set the netdev feature flags
2148  * @netdev: ptr to the netdev being adjusted
2149  * @features: the feature set that the stack is suggesting
2150  */
2151 static int idpf_set_features(struct net_device *netdev,
2152 			     netdev_features_t features)
2153 {
2154 	netdev_features_t changed = netdev->features ^ features;
2155 	struct idpf_adapter *adapter;
2156 	struct idpf_vport *vport;
2157 	int err = 0;
2158 
2159 	idpf_vport_ctrl_lock(netdev);
2160 	vport = idpf_netdev_to_vport(netdev);
2161 
2162 	adapter = vport->adapter;
2163 
2164 	if (idpf_is_reset_in_prog(adapter)) {
		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features is temporarily unavailable.\n");
2166 		err = -EBUSY;
2167 		goto unlock_mutex;
2168 	}
2169 
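	/* Toggle each feature bit before acting on it so helpers that check
	 * netdev->features (e.g. idpf_vport_manage_rss_lut()) see the new
	 * setting.
	 */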
2170 	if (changed & NETIF_F_RXHASH) {
2171 		netdev->features ^= NETIF_F_RXHASH;
2172 		err = idpf_vport_manage_rss_lut(vport);
2173 		if (err)
2174 			goto unlock_mutex;
2175 	}
2176 
2177 	if (changed & NETIF_F_GRO_HW) {
2178 		netdev->features ^= NETIF_F_GRO_HW;
2179 		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
2180 		if (err)
2181 			goto unlock_mutex;
2182 	}
2183 
2184 	if (changed & NETIF_F_LOOPBACK) {
2185 		netdev->features ^= NETIF_F_LOOPBACK;
2186 		err = idpf_send_ena_dis_loopback_msg(vport);
2187 	}
2188 
2189 unlock_mutex:
2190 	idpf_vport_ctrl_unlock(netdev);
2191 
2192 	return err;
2193 }
2194 
2195 /**
2196  * idpf_open - Called when a network interface becomes active
2197  * @netdev: network interface device structure
2198  *
2199  * The open entry point is called when a network interface is made
2200  * active by the system (IFF_UP).  At this point all resources needed
2201  * for transmit and receive operations are allocated, the interrupt
2202  * handler is registered with the OS, the netdev watchdog is enabled,
2203  * and the stack is notified that the interface is ready.
2204  *
2205  * Returns 0 on success, negative value on failure
2206  */
2207 static int idpf_open(struct net_device *netdev)
2208 {
2209 	struct idpf_vport *vport;
2210 	int err;
2211 
2212 	idpf_vport_ctrl_lock(netdev);
2213 	vport = idpf_netdev_to_vport(netdev);
2214 
2215 	err = idpf_vport_open(vport, true);
2216 
2217 	idpf_vport_ctrl_unlock(netdev);
2218 
2219 	return err;
2220 }
2221 
2222 /**
2223  * idpf_change_mtu - NDO callback to change the MTU
2224  * @netdev: network interface device structure
2225  * @new_mtu: new value for maximum frame size
2226  *
2227  * Returns 0 on success, negative on failure
2228  */
2229 static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
2230 {
2231 	struct idpf_vport *vport;
2232 	int err;
2233 
2234 	idpf_vport_ctrl_lock(netdev);
2235 	vport = idpf_netdev_to_vport(netdev);
2236 
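	/* netdev->mtu can be read locklessly, hence WRITE_ONCE; the soft
	 * reset below then reallocates the vport queue resources to account
	 * for the new MTU.
	 */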
2237 	WRITE_ONCE(netdev->mtu, new_mtu);
2238 
2239 	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
2240 
2241 	idpf_vport_ctrl_unlock(netdev);
2242 
2243 	return err;
2244 }
2245 
2246 /**
2247  * idpf_features_check - Validate packet conforms to limits
2248  * @skb: skb buffer
2249  * @netdev: This port's netdev
2250  * @features: Offload features that the stack believes apply
2251  */
2252 static netdev_features_t idpf_features_check(struct sk_buff *skb,
2253 					     struct net_device *netdev,
2254 					     netdev_features_t features)
2255 {
2256 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2257 	struct idpf_adapter *adapter = vport->adapter;
2258 	size_t len;
2259 
	/* No point in doing any of this if neither checksum nor GSO is
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
2264 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2265 		return features;
2266 
	/* We cannot support GSO if the MSS is less than 88 bytes
	 * (IDPF_TX_TSO_MIN_MSS); if it is, drop GSO for this frame.
	 */
2270 	if (skb_is_gso(skb) &&
2271 	    (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
2272 		features &= ~NETIF_F_GSO_MASK;
2273 
2274 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
2275 	len = skb_network_offset(skb);
2276 	if (unlikely(len & ~(126)))
2277 		goto unsupported;
2278 
2279 	len = skb_network_header_len(skb);
2280 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2281 		goto unsupported;
2282 
2283 	if (!skb->encapsulation)
2284 		return features;
2285 
	/* L4TUNLEN can support at most 127 words (254 bytes), even sizes only */
2287 	len = skb_inner_network_header(skb) - skb_transport_header(skb);
2288 	if (unlikely(len & ~(127 * 2)))
2289 		goto unsupported;
2290 
2291 	/* IPLEN can support at most 127 dwords */
2292 	len = skb_inner_network_header_len(skb);
2293 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2294 		goto unsupported;
2295 
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value, and we support all possible values supported
	 * by TCP, which is at most 15 dwords.
	 */
2300 
2301 	return features;
2302 
2303 unsupported:
2304 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2305 }
2306 
2307 /**
2308  * idpf_set_mac - NDO callback to set port mac address
2309  * @netdev: network interface device structure
2310  * @p: pointer to an address structure
2311  *
2312  * Returns 0 on success, negative on failure
 */
2314 static int idpf_set_mac(struct net_device *netdev, void *p)
2315 {
2316 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2317 	struct idpf_vport_config *vport_config;
2318 	struct sockaddr *addr = p;
2319 	struct idpf_vport *vport;
2320 	int err = 0;
2321 
2322 	idpf_vport_ctrl_lock(netdev);
2323 	vport = idpf_netdev_to_vport(netdev);
2324 
2325 	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
2326 			     VIRTCHNL2_CAP_MACFILTER)) {
2327 		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
2328 		err = -EOPNOTSUPP;
2329 		goto unlock_mutex;
2330 	}
2331 
2332 	if (!is_valid_ether_addr(addr->sa_data)) {
2333 		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
2334 			 addr->sa_data);
2335 		err = -EADDRNOTAVAIL;
2336 		goto unlock_mutex;
2337 	}
2338 
2339 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
2340 		goto unlock_mutex;
2341 
2342 	vport_config = vport->adapter->vport_config[vport->idx];
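	/* Add a filter for the new address; if that fails, make sure any
	 * filter entry created for it is removed again before bailing.
	 */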
2343 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
2344 	if (err) {
2345 		__idpf_del_mac_filter(vport_config, addr->sa_data);
2346 		goto unlock_mutex;
2347 	}
2348 
2349 	if (is_valid_ether_addr(vport->default_mac_addr))
2350 		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
2351 
2352 	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
2353 	eth_hw_addr_set(netdev, addr->sa_data);
2354 
2355 unlock_mutex:
2356 	idpf_vport_ctrl_unlock(netdev);
2357 
2358 	return err;
2359 }
2360 
2361 /**
2362  * idpf_alloc_dma_mem - Allocate dma memory
2363  * @hw: pointer to hw struct
2364  * @mem: pointer to dma_mem struct
2365  * @size: size of the memory to allocate
2366  */
2367 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
2368 {
2369 	struct idpf_adapter *adapter = hw->back;
2370 	size_t sz = ALIGN(size, 4096);
2371 
2372 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
2373 				     &mem->pa, GFP_KERNEL);
2374 	mem->size = sz;
2375 
2376 	return mem->va;
2377 }
2378 
2379 /**
2380  * idpf_free_dma_mem - Free the allocated dma memory
2381  * @hw: pointer to hw struct
2382  * @mem: pointer to dma_mem struct
2383  */
2384 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2385 {
2386 	struct idpf_adapter *adapter = hw->back;
2387 
2388 	dma_free_coherent(&adapter->pdev->dev, mem->size,
2389 			  mem->va, mem->pa);
2390 	mem->size = 0;
2391 	mem->va = NULL;
2392 	mem->pa = 0;
2393 }
2394 
2395 static const struct net_device_ops idpf_netdev_ops_splitq = {
2396 	.ndo_open = idpf_open,
2397 	.ndo_stop = idpf_stop,
2398 	.ndo_start_xmit = idpf_tx_splitq_start,
2399 	.ndo_features_check = idpf_features_check,
2400 	.ndo_set_rx_mode = idpf_set_rx_mode,
2401 	.ndo_validate_addr = eth_validate_addr,
2402 	.ndo_set_mac_address = idpf_set_mac,
2403 	.ndo_change_mtu = idpf_change_mtu,
2404 	.ndo_get_stats64 = idpf_get_stats64,
2405 	.ndo_set_features = idpf_set_features,
2406 	.ndo_tx_timeout = idpf_tx_timeout,
2407 };
2408 
2409 static const struct net_device_ops idpf_netdev_ops_singleq = {
2410 	.ndo_open = idpf_open,
2411 	.ndo_stop = idpf_stop,
2412 	.ndo_start_xmit = idpf_tx_singleq_start,
2413 	.ndo_features_check = idpf_features_check,
2414 	.ndo_set_rx_mode = idpf_set_rx_mode,
2415 	.ndo_validate_addr = eth_validate_addr,
2416 	.ndo_set_mac_address = idpf_set_mac,
2417 	.ndo_change_mtu = idpf_change_mtu,
2418 	.ndo_get_stats64 = idpf_get_stats64,
2419 	.ndo_set_features = idpf_set_features,
2420 	.ndo_tx_timeout = idpf_tx_timeout,
2421 };
2422