xref: /linux/drivers/net/ethernet/intel/idpf/idpf_lib.c (revision ccde82e909467abdf098a8ee6f63e1ecf9a47ce5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_virtchnl.h"
6 #include "idpf_ptp.h"
7 #include "xdp.h"
8 
9 static const struct net_device_ops idpf_netdev_ops;
10 
11 /**
12  * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
13  * @adapter: private data struct
14  *
15  * Return 0 on success, error on failure
16  */
17 static int idpf_init_vector_stack(struct idpf_adapter *adapter)
18 {
19 	struct idpf_vector_lifo *stack;
20 	u16 min_vec;
21 	u32 i;
22 
23 	mutex_lock(&adapter->vector_lock);
24 	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
25 	stack = &adapter->vector_stack;
26 	stack->size = adapter->num_msix_entries;
27 	/* Set the base and top to point at the start of the 'free pool' so
28 	 * that the unused vectors can be distributed on demand
29 	 */
30 	stack->base = min_vec;
31 	stack->top = min_vec;
32 
33 	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
34 	if (!stack->vec_idx) {
35 		mutex_unlock(&adapter->vector_lock);
36 
37 		return -ENOMEM;
38 	}
39 
40 	for (i = 0; i < stack->size; i++)
41 		stack->vec_idx[i] = i;
42 
43 	mutex_unlock(&adapter->vector_lock);
44 
45 	return 0;
46 }
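
/* Example (editorial sketch): with num_msix_entries == 16 and
 * num_avail_msix == 10, min_vec is 6. vec_idx[] is filled with 0..15;
 * indexes 0..5 form the reserved 'default pool', and base == top == 6,
 * so only entries 6..15 are handed out on demand by the LIFO push/pop
 * helpers below.
 */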
47 
48 /**
49  * idpf_deinit_vector_stack - zero out the MSIX vector stack
50  * @adapter: private data struct
51  */
52 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
53 {
54 	struct idpf_vector_lifo *stack;
55 
56 	mutex_lock(&adapter->vector_lock);
57 	stack = &adapter->vector_stack;
58 	kfree(stack->vec_idx);
59 	stack->vec_idx = NULL;
60 	mutex_unlock(&adapter->vector_lock);
61 }
62 
63 /**
64  * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
65  * @adapter: adapter structure
66  *
67  * This will also disable interrupt mode and queue up the mailbox task. The
68  * mailbox task will reschedule itself if not in interrupt mode.
69  */
70 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
71 {
72 	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
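	/* free_irq() returns the devname passed to request_irq(); here that
	 * is the kasprintf()'d name from idpf_mb_intr_req_irq(), so it must
	 * be freed
	 */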
73 	kfree(free_irq(adapter->msix_entries[0].vector, adapter));
74 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
75 }
76 
77 /**
78  * idpf_intr_rel - Release interrupt capabilities and free memory
79  * @adapter: adapter to disable interrupts on
80  */
81 void idpf_intr_rel(struct idpf_adapter *adapter)
82 {
83 	if (!adapter->msix_entries)
84 		return;
85 
86 	idpf_mb_intr_rel_irq(adapter);
87 	pci_free_irq_vectors(adapter->pdev);
88 	idpf_send_dealloc_vectors_msg(adapter);
89 	idpf_deinit_vector_stack(adapter);
90 	kfree(adapter->msix_entries);
91 	adapter->msix_entries = NULL;
92 	kfree(adapter->rdma_msix_entries);
93 	adapter->rdma_msix_entries = NULL;
94 }
95 
96 /**
97  * idpf_mb_intr_clean - Interrupt handler for the mailbox
98  * @irq: interrupt number
99  * @data: pointer to the adapter structure
100  */
101 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
102 {
103 	struct idpf_adapter *adapter = (struct idpf_adapter *)data;
104 
105 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
106 
107 	return IRQ_HANDLED;
108 }
109 
110 /**
111  * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
112  * @adapter: adapter to get the hardware address for register write
113  */
114 static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
115 {
116 	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
117 	u32 val;
118 
119 	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
120 	writel(val, intr->dyn_ctl);
121 	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
122 }
123 
124 /**
125  * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
126  * @adapter: adapter structure to pass to the mailbox irq handler
127  */
128 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
129 {
130 	int irq_num, mb_vidx = 0, err;
131 	char *name;
132 
133 	irq_num = adapter->msix_entries[mb_vidx].vector;
134 	name = kasprintf(GFP_KERNEL, "%s-%s-%d",
135 			 dev_driver_string(&adapter->pdev->dev),
136 			 "Mailbox", mb_vidx);
137 	err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
138 	if (err) {
139 		dev_err(&adapter->pdev->dev,
140 			"IRQ request for mailbox failed, error: %d\n", err);
141 
142 		return err;
143 	}
144 
145 	set_bit(IDPF_MB_INTR_MODE, adapter->flags);
146 
147 	return 0;
148 }
149 
150 /**
151  * idpf_mb_intr_init - Initialize the mailbox interrupt
152  * @adapter: adapter structure to store the mailbox vector
153  */
154 static int idpf_mb_intr_init(struct idpf_adapter *adapter)
155 {
156 	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
157 	adapter->irq_mb_handler = idpf_mb_intr_clean;
158 
159 	return idpf_mb_intr_req_irq(adapter);
160 }
161 
162 /**
163  * idpf_vector_lifo_push - push MSIX vector index onto stack
164  * @adapter: private data struct
165  * @vec_idx: vector index to store
166  */
167 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
168 {
169 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
170 
171 	lockdep_assert_held(&adapter->vector_lock);
172 
173 	if (stack->top == stack->base) {
174 		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
175 			stack->top);
176 		return -EINVAL;
177 	}
178 
179 	stack->vec_idx[--stack->top] = vec_idx;
180 
181 	return 0;
182 }
183 
184 /**
185  * idpf_vector_lifo_pop - pop MSIX vector index from stack
186  * @adapter: private data struct
187  */
188 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
189 {
190 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
191 
192 	lockdep_assert_held(&adapter->vector_lock);
193 
194 	if (stack->top == stack->size) {
195 		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
196 
197 		return -EINVAL;
198 	}
199 
200 	return stack->vec_idx[stack->top++];
201 }
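
/* Example (editorial sketch): continuing the layout from
 * idpf_init_vector_stack(), the first pop moves top from 6 to 7 and
 * returns vec_idx[6]; pushing that index back stores it at
 * vec_idx[--top]. top == base means the stack is full (nothing more can
 * be pushed), while top == size means it is empty (nothing left to pop).
 */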
202 
203 /**
204  * idpf_vector_stash - Store the vector indexes onto the stack
205  * @adapter: private data struct
206  * @q_vector_idxs: vector index array
207  * @vec_info: info related to the number of vectors
208  *
209  * This function is a no-op if there are no vector indexes to be stashed
210  */
211 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
212 			      struct idpf_vector_info *vec_info)
213 {
214 	int i, base = 0;
215 	u16 vec_idx;
216 
217 	lockdep_assert_held(&adapter->vector_lock);
218 
219 	if (!vec_info->num_curr_vecs)
220 		return;
221 
222 	/* For default vports, there is no need to stash vectors allocated from
223 	 * the default pool onto the stack
224 	 */
225 	if (vec_info->default_vport)
226 		base = IDPF_MIN_Q_VEC;
227 
228 	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
229 		vec_idx = q_vector_idxs[i];
230 		idpf_vector_lifo_push(adapter, vec_idx);
231 		adapter->num_avail_msix++;
232 	}
233 }
234 
235 /**
236  * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
237  * @adapter: driver specific private structure
238  * @q_vector_idxs: vector index array
239  * @vec_info: info related to the number of vectors
240  *
241  * This is the core function to distribute the MSIX vectors acquired from the
242  * OS. It expects the caller to pass the number of vectors required as well
243  * as the number previously allocated. First, it stashes the previously
244  * allocated vector indexes onto the stack and then figures out if it can
245  * allocate the requested vectors. It can sleep while acquiring the mutex
246  * lock. If the caller passes 0 as the requested vector count, this function
247  * just stashes the already allocated vectors and returns 0.
248  *
249  * Returns the actual number of vectors allocated on success, error value on
250  * failure. A return of 0 implies the stack has no vectors left to allocate,
251  * which is also a failure case for the caller.
252  */
253 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
254 				u16 *q_vector_idxs,
255 				struct idpf_vector_info *vec_info)
256 {
257 	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
258 	struct idpf_vector_lifo *stack;
259 	int i, j, vecid;
260 
261 	mutex_lock(&adapter->vector_lock);
262 	stack = &adapter->vector_stack;
263 	num_req_vecs = vec_info->num_req_vecs;
264 
265 	/* Stash interrupt vector indexes onto the stack if required */
266 	idpf_vector_stash(adapter, q_vector_idxs, vec_info);
267 
268 	if (!num_req_vecs)
269 		goto rel_lock;
270 
271 	if (vec_info->default_vport) {
272 		/* Since IDPF_MIN_Q_VEC vectors per default vport are set aside
273 		 * in the default pool of the stack, use them for default vports
274 		 */
275 		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
276 		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
277 			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
278 			num_req_vecs--;
279 		}
280 	}
281 
282 	/* Find out if the stack has enough vectors to allocate */
283 	max_vecs = min(adapter->num_avail_msix, num_req_vecs);
284 
285 	for (j = 0; j < max_vecs; j++) {
286 		vecid = idpf_vector_lifo_pop(adapter);
287 		q_vector_idxs[num_alloc_vecs++] = vecid;
288 	}
289 	adapter->num_avail_msix -= max_vecs;
290 
291 rel_lock:
292 	mutex_unlock(&adapter->vector_lock);
293 
294 	return num_alloc_vecs;
295 }
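
/* Example (hypothetical caller sketch; the field names come from
 * struct idpf_vector_info as used above): a vport trading its current
 * vectors for a new count, e.g. on an ethtool set-channels request:
 *
 *	struct idpf_vector_info vec_info = {
 *		.num_req_vecs = new_num_q_vecs,
 *		.num_curr_vecs = vport->num_q_vectors,
 *		.index = vport->idx,
 *		.default_vport = vport->default_vport,
 *	};
 *	int got;
 *
 *	got = idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs,
 *					  &vec_info);
 *
 * A result smaller than new_num_q_vecs (possibly 0) means the caller
 * must make do with fewer vectors than it asked for.
 */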
296 
297 /**
298  * idpf_intr_req - Request interrupt capabilities
299  * @adapter: adapter to enable interrupts on
300  *
301  * Returns 0 on success, negative on failure
302  */
303 int idpf_intr_req(struct idpf_adapter *adapter)
304 {
305 	u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
306 	u16 default_vports = idpf_get_default_vports(adapter);
307 	int num_q_vecs, total_vecs, num_vec_ids;
308 	int min_vectors, actual_vecs, err;
309 	unsigned int vector;
310 	u16 *vecids;
311 	int i;
312 
313 	total_vecs = idpf_get_reserved_vecs(adapter);
314 	num_lan_vecs = total_vecs;
315 	if (idpf_is_rdma_cap_ena(adapter)) {
316 		num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
317 		min_rdma_vecs = IDPF_MIN_RDMA_VEC;
318 
319 		if (!num_rdma_vecs) {
320 			/* If idpf_get_reserved_rdma_vecs is 0, vectors are
321 			 * pulled from the LAN pool.
322 			 */
323 			num_rdma_vecs = min_rdma_vecs;
324 		} else if (num_rdma_vecs < min_rdma_vecs) {
325 			dev_err(&adapter->pdev->dev,
326 				"Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
327 				min_rdma_vecs, num_rdma_vecs);
328 			return -EINVAL;
329 		}
330 	}
331 
332 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
333 
334 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
335 	if (err) {
336 		dev_err(&adapter->pdev->dev,
337 			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
338 
339 		return -EAGAIN;
340 	}
341 
342 	min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
343 	min_vectors = min_lan_vecs + min_rdma_vecs;
344 	actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
345 					    total_vecs, PCI_IRQ_MSIX);
346 	if (actual_vecs < 0) {
347 		dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
348 			min_vectors);
349 		err = actual_vecs;
350 		goto send_dealloc_vecs;
351 	}
352 
353 	if (idpf_is_rdma_cap_ena(adapter)) {
354 		if (actual_vecs < total_vecs) {
355 			dev_warn(&adapter->pdev->dev,
356 				 "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
357 				 total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
358 			num_rdma_vecs = IDPF_MIN_RDMA_VEC;
359 		}
360 
361 		adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
362 						     sizeof(struct msix_entry),
363 						     GFP_KERNEL);
364 		if (!adapter->rdma_msix_entries) {
365 			err = -ENOMEM;
366 			goto free_irq;
367 		}
368 	}
369 
370 	num_lan_vecs = actual_vecs - num_rdma_vecs;
371 	adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
372 					GFP_KERNEL);
373 	if (!adapter->msix_entries) {
374 		err = -ENOMEM;
375 		goto free_rdma_msix;
376 	}
377 
378 	adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
379 
380 	vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
381 	if (!vecids) {
382 		err = -ENOMEM;
383 		goto free_msix;
384 	}
385 
386 	num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
387 				       &adapter->req_vec_chunks->vchunks);
388 	if (num_vec_ids < actual_vecs) {
389 		err = -EINVAL;
390 		goto free_vecids;
391 	}
392 
393 	for (vector = 0; vector < num_lan_vecs; vector++) {
394 		adapter->msix_entries[vector].entry = vecids[vector];
395 		adapter->msix_entries[vector].vector =
396 			pci_irq_vector(adapter->pdev, vector);
397 	}
398 	for (i = 0; i < num_rdma_vecs; vector++, i++) {
399 		adapter->rdma_msix_entries[i].entry = vecids[vector];
400 		adapter->rdma_msix_entries[i].vector =
401 			pci_irq_vector(adapter->pdev, vector);
402 	}
403 
404 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
405 	 * after accounting for the minimum vectors required for each default
406 	 * vport
407 	 */
408 	adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
409 	adapter->num_msix_entries = num_lan_vecs;
410 	if (idpf_is_rdma_cap_ena(adapter))
411 		adapter->num_rdma_msix_entries = num_rdma_vecs;
412 
413 	/* Fill MSIX vector lifo stack with vector indexes */
414 	err = idpf_init_vector_stack(adapter);
415 	if (err)
416 		goto free_vecids;
417 
418 	err = idpf_mb_intr_init(adapter);
419 	if (err)
420 		goto deinit_vec_stack;
421 	idpf_mb_irq_enable(adapter);
422 	kfree(vecids);
423 
424 	return 0;
425 
426 deinit_vec_stack:
427 	idpf_deinit_vector_stack(adapter);
428 free_vecids:
429 	kfree(vecids);
430 free_msix:
431 	kfree(adapter->msix_entries);
432 	adapter->msix_entries = NULL;
433 free_rdma_msix:
434 	kfree(adapter->rdma_msix_entries);
435 	adapter->rdma_msix_entries = NULL;
436 free_irq:
437 	pci_free_irq_vectors(adapter->pdev);
438 send_dealloc_vecs:
439 	idpf_send_dealloc_vectors_msg(adapter);
440 
441 	return err;
442 }
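
/* Example (editorial sketch; assumes IDPF_MBX_Q_VEC and IDPF_MIN_Q_VEC
 * are both 1): with two default vports and no RDMA, min_vectors ==
 * IDPF_MBX_Q_VEC + 2 * IDPF_MIN_Q_VEC == 3, so pci_alloc_irq_vectors()
 * may return anywhere from 3 up to total_vecs entries. Everything above
 * min_lan_vecs lands in num_avail_msix for on-demand distribution to
 * the vports.
 */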
443 
444 /**
445  * idpf_find_mac_filter - Search filter list for specific mac filter
446  * @vconfig: Vport config structure
447  * @macaddr: The MAC address
448  *
449  * Returns ptr to the filter object or NULL. Must be called while holding the
450  * mac_filter_list_lock.
451  **/
452 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
453 						    const u8 *macaddr)
454 {
455 	struct idpf_mac_filter *f;
456 
457 	if (!macaddr)
458 		return NULL;
459 
460 	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
461 		if (ether_addr_equal(macaddr, f->macaddr))
462 			return f;
463 	}
464 
465 	return NULL;
466 }
467 
468 /**
469  * __idpf_del_mac_filter - Delete a MAC filter from the filter list
470  * @vport_config: Vport config structure
471  * @macaddr: The MAC address
472  *
473  * Returns 0 on success, error value on failure
474  **/
475 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
476 				 const u8 *macaddr)
477 {
478 	struct idpf_mac_filter *f;
479 
480 	spin_lock_bh(&vport_config->mac_filter_list_lock);
481 	f = idpf_find_mac_filter(vport_config, macaddr);
482 	if (f) {
483 		list_del(&f->list);
484 		kfree(f);
485 	}
486 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
487 
488 	return 0;
489 }
490 
491 /**
492  * idpf_del_mac_filter - Delete a MAC filter from the filter list
493  * @vport: Main vport structure
494  * @np: Netdev private structure
495  * @macaddr: The MAC address
496  * @async: Don't wait for return message
497  *
498  * Removes filter from list and if interface is up, tells hardware about the
499  * removed filter.
500  **/
501 static int idpf_del_mac_filter(struct idpf_vport *vport,
502 			       struct idpf_netdev_priv *np,
503 			       const u8 *macaddr, bool async)
504 {
505 	struct idpf_vport_config *vport_config;
506 	struct idpf_mac_filter *f;
507 
508 	vport_config = np->adapter->vport_config[np->vport_idx];
509 
510 	spin_lock_bh(&vport_config->mac_filter_list_lock);
511 	f = idpf_find_mac_filter(vport_config, macaddr);
512 	if (f) {
513 		f->remove = true;
514 	} else {
515 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
516 
517 		return -EINVAL;
518 	}
519 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
520 
521 	if (np->state == __IDPF_VPORT_UP) {
522 		int err;
523 
524 		err = idpf_add_del_mac_filters(vport, np, false, async);
525 		if (err)
526 			return err;
527 	}
528 
529 	return __idpf_del_mac_filter(vport_config, macaddr);
530 }
531 
532 /**
533  * __idpf_add_mac_filter - Add mac filter helper function
534  * @vport_config: Vport config structure
535  * @macaddr: Address to add
536  *
537  * Takes mac_filter_list_lock spinlock to add new filter to list.
538  */
539 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
540 				 const u8 *macaddr)
541 {
542 	struct idpf_mac_filter *f;
543 
544 	spin_lock_bh(&vport_config->mac_filter_list_lock);
545 
546 	f = idpf_find_mac_filter(vport_config, macaddr);
547 	if (f) {
548 		f->remove = false;
549 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
550 
551 		return 0;
552 	}
553 
554 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
555 	if (!f) {
556 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
557 
558 		return -ENOMEM;
559 	}
560 
561 	ether_addr_copy(f->macaddr, macaddr);
562 	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
563 	f->add = true;
564 
565 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
566 
567 	return 0;
568 }
569 
570 /**
571  * idpf_add_mac_filter - Add a mac filter to the filter list
572  * @vport: Main vport structure
573  * @np: Netdev private structure
574  * @macaddr: The MAC address
575  * @async: Don't wait for return message
576  *
577  * Returns 0 on success or error on failure. If interface is up, we'll also
578  * send the virtchnl message to tell hardware about the filter.
579  **/
580 static int idpf_add_mac_filter(struct idpf_vport *vport,
581 			       struct idpf_netdev_priv *np,
582 			       const u8 *macaddr, bool async)
583 {
584 	struct idpf_vport_config *vport_config;
585 	int err;
586 
587 	vport_config = np->adapter->vport_config[np->vport_idx];
588 	err = __idpf_add_mac_filter(vport_config, macaddr);
589 	if (err)
590 		return err;
591 
592 	if (np->state == __IDPF_VPORT_UP)
593 		err = idpf_add_del_mac_filters(vport, np, true, async);
594 
595 	return err;
596 }
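
/* Example (editorial sketch of the flag protocol used above): filters
 * are never programmed into HW while the spinlock is held. Instead,
 * f->add or f->remove is set on the list entry, and
 * idpf_add_del_mac_filters() later walks the list and sends the
 * virtchnl message outside the lock:
 *
 *	__idpf_add_mac_filter(vport_config, mac);	marks f->add
 *	idpf_add_del_mac_filters(vport, np, true, async);	syncs to HW
 */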
597 
598 /**
599  * idpf_del_all_mac_filters - Delete all MAC filters in list
600  * @vport: main vport struct
601  *
602  * Takes the mac_filter_list_lock spinlock. Deletes all filters.
603  */
604 static void idpf_del_all_mac_filters(struct idpf_vport *vport)
605 {
606 	struct idpf_vport_config *vport_config;
607 	struct idpf_mac_filter *f, *ftmp;
608 
609 	vport_config = vport->adapter->vport_config[vport->idx];
610 	spin_lock_bh(&vport_config->mac_filter_list_lock);
611 
612 	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
613 				 list) {
614 		list_del(&f->list);
615 		kfree(f);
616 	}
617 
618 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
619 }
620 
621 /**
622  * idpf_restore_mac_filters - Re-add all MAC filters in list
623  * @vport: main vport struct
624  *
625  * Takes the mac_filter_list_lock spinlock. Sets the add field to true so
626  * that the filters are resynced back to HW.
627  */
628 static void idpf_restore_mac_filters(struct idpf_vport *vport)
629 {
630 	struct idpf_vport_config *vport_config;
631 	struct idpf_mac_filter *f;
632 
633 	vport_config = vport->adapter->vport_config[vport->idx];
634 	spin_lock_bh(&vport_config->mac_filter_list_lock);
635 
636 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
637 		f->add = true;
638 
639 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
640 
641 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
642 				 true, false);
643 }
644 
645 /**
646  * idpf_remove_mac_filters - Remove all MAC filters in list
647  * @vport: main vport struct
648  *
649  * Takes the mac_filter_list_lock spinlock. Sets the remove field to true so
650  * that the filters are removed in HW.
651  */
652 static void idpf_remove_mac_filters(struct idpf_vport *vport)
653 {
654 	struct idpf_vport_config *vport_config;
655 	struct idpf_mac_filter *f;
656 
657 	vport_config = vport->adapter->vport_config[vport->idx];
658 	spin_lock_bh(&vport_config->mac_filter_list_lock);
659 
660 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
661 		f->remove = true;
662 
663 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
664 
665 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
666 				 false, false);
667 }
668 
669 /**
670  * idpf_deinit_mac_addr - deinitialize mac address for vport
671  * @vport: main vport structure
672  */
673 static void idpf_deinit_mac_addr(struct idpf_vport *vport)
674 {
675 	struct idpf_vport_config *vport_config;
676 	struct idpf_mac_filter *f;
677 
678 	vport_config = vport->adapter->vport_config[vport->idx];
679 
680 	spin_lock_bh(&vport_config->mac_filter_list_lock);
681 
682 	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
683 	if (f) {
684 		list_del(&f->list);
685 		kfree(f);
686 	}
687 
688 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
689 }
690 
691 /**
692  * idpf_init_mac_addr - initialize mac address for vport
693  * @vport: main vport structure
694  * @netdev: pointer to netdev struct associated with this vport
695  */
696 static int idpf_init_mac_addr(struct idpf_vport *vport,
697 			      struct net_device *netdev)
698 {
699 	struct idpf_netdev_priv *np = netdev_priv(netdev);
700 	struct idpf_adapter *adapter = vport->adapter;
701 	int err;
702 
703 	if (is_valid_ether_addr(vport->default_mac_addr)) {
704 		eth_hw_addr_set(netdev, vport->default_mac_addr);
705 		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
706 
707 		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
708 					   false);
709 	}
710 
711 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
712 			     VIRTCHNL2_CAP_MACFILTER)) {
713 		dev_err(&adapter->pdev->dev,
714 			"MAC address is not provided and capability is not set\n");
715 
716 		return -EINVAL;
717 	}
718 
719 	eth_hw_addr_random(netdev);
720 	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
721 	if (err)
722 		return err;
723 
724 	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
725 		 vport->default_mac_addr, netdev->dev_addr);
726 	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
727 
728 	return 0;
729 }
730 
731 /**
732  * idpf_cfg_netdev - Allocate, configure and register a netdev
733  * @vport: main vport structure
734  *
735  * Returns 0 on success, negative value on failure.
736  */
737 static int idpf_cfg_netdev(struct idpf_vport *vport)
738 {
739 	struct idpf_adapter *adapter = vport->adapter;
740 	struct idpf_vport_config *vport_config;
741 	netdev_features_t other_offloads = 0;
742 	netdev_features_t csum_offloads = 0;
743 	netdev_features_t tso_offloads = 0;
744 	netdev_features_t dflt_features;
745 	struct idpf_netdev_priv *np;
746 	struct net_device *netdev;
747 	u16 idx = vport->idx;
748 	int err;
749 
750 	vport_config = adapter->vport_config[idx];
751 
752 	/* It's possible we already have a netdev allocated and registered for
753 	 * this vport
754 	 */
755 	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
756 		netdev = adapter->netdevs[idx];
757 		np = netdev_priv(netdev);
758 		np->vport = vport;
759 		np->vport_idx = vport->idx;
760 		np->vport_id = vport->vport_id;
761 		np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
762 		vport->netdev = netdev;
763 
764 		return idpf_init_mac_addr(vport, netdev);
765 	}
766 
767 	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
768 				    vport_config->max_q.max_txq,
769 				    vport_config->max_q.max_rxq);
770 	if (!netdev)
771 		return -ENOMEM;
772 
773 	vport->netdev = netdev;
774 	np = netdev_priv(netdev);
775 	np->vport = vport;
776 	np->adapter = adapter;
777 	np->vport_idx = vport->idx;
778 	np->vport_id = vport->vport_id;
779 	np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
780 	np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
781 
782 	spin_lock_init(&np->stats_lock);
783 
784 	err = idpf_init_mac_addr(vport, netdev);
785 	if (err) {
786 		free_netdev(vport->netdev);
787 		vport->netdev = NULL;
788 
789 		return err;
790 	}
791 
792 	/* assign netdev_ops */
793 	netdev->netdev_ops = &idpf_netdev_ops;
794 
795 	/* set the watchdog timeout value to 5 seconds */
796 	netdev->watchdog_timeo = 5 * HZ;
797 
798 	netdev->dev_port = idx;
799 
800 	/* configure default MTU size */
801 	netdev->min_mtu = ETH_MIN_MTU;
802 	netdev->max_mtu = vport->max_mtu;
803 
804 	dflt_features = NETIF_F_SG	|
805 			NETIF_F_HIGHDMA;
806 
807 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
808 		dflt_features |= NETIF_F_RXHASH;
809 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
810 			    VIRTCHNL2_CAP_FLOW_STEER) &&
811 	    idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
812 		dflt_features |= NETIF_F_NTUPLE;
813 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
814 		csum_offloads |= NETIF_F_IP_CSUM;
815 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
816 		csum_offloads |= NETIF_F_IPV6_CSUM;
817 	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
818 		csum_offloads |= NETIF_F_RXCSUM;
819 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
820 		csum_offloads |= NETIF_F_SCTP_CRC;
821 
822 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
823 		tso_offloads |= NETIF_F_TSO;
824 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
825 		tso_offloads |= NETIF_F_TSO6;
826 	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
827 				VIRTCHNL2_CAP_SEG_IPV4_UDP |
828 				VIRTCHNL2_CAP_SEG_IPV6_UDP))
829 		tso_offloads |= NETIF_F_GSO_UDP_L4;
830 	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
831 		other_offloads |= NETIF_F_GRO_HW;
832 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
833 		other_offloads |= NETIF_F_LOOPBACK;
834 
835 	netdev->features |= dflt_features | csum_offloads | tso_offloads;
836 	netdev->hw_features |= netdev->features | other_offloads;
837 	netdev->vlan_features |= netdev->features | other_offloads;
838 	netdev->hw_enc_features |= dflt_features | other_offloads;
839 	idpf_xdp_set_features(vport);
840 
841 	idpf_set_ethtool_ops(netdev);
842 	netif_set_affinity_auto(netdev);
843 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
844 
845 	/* carrier off on init to avoid Tx hangs */
846 	netif_carrier_off(netdev);
847 
848 	/* make sure transmit queues start off as stopped */
849 	netif_tx_stop_all_queues(netdev);
850 
851 	/* The vport can be arbitrarily released so we need to also track
852 	 * netdevs in the adapter struct
853 	 */
854 	adapter->netdevs[idx] = netdev;
855 
856 	return 0;
857 }
858 
859 /**
860  * idpf_get_free_slot - get the next free (NULL) vport slot index in the array
861  * @adapter: adapter in which to look for a free vport slot
862  */
863 static int idpf_get_free_slot(struct idpf_adapter *adapter)
864 {
865 	unsigned int i;
866 
867 	for (i = 0; i < adapter->max_vports; i++) {
868 		if (!adapter->vports[i])
869 			return i;
870 	}
871 
872 	return IDPF_NO_FREE_SLOT;
873 }
874 
875 /**
876  * idpf_remove_features - Turn off feature configs
877  * @vport: virtual port structure
878  */
879 static void idpf_remove_features(struct idpf_vport *vport)
880 {
881 	struct idpf_adapter *adapter = vport->adapter;
882 
883 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
884 		idpf_remove_mac_filters(vport);
885 }
886 
887 /**
888  * idpf_vport_stop - Disable a vport
889  * @vport: vport to disable
890  * @rtnl: whether to take RTNL lock
891  */
892 static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
893 {
894 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
895 
896 	if (np->state <= __IDPF_VPORT_DOWN)
897 		return;
898 
899 	if (rtnl)
900 		rtnl_lock();
901 
902 	netif_carrier_off(vport->netdev);
903 	netif_tx_disable(vport->netdev);
904 
905 	idpf_send_disable_vport_msg(vport);
906 	idpf_send_disable_queues_msg(vport);
907 	idpf_send_map_unmap_queue_vector_msg(vport, false);
908 	/* Normally we ask for queues in create_vport, but if the number of
909 	 * initially requested queues has changed, for example via ethtool
910 	 * set channels, we delete the queues and then add them back instead
911 	 * of deleting and reallocating the vport.
912 	 */
913 	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
914 		idpf_send_delete_queues_msg(vport);
915 
916 	idpf_remove_features(vport);
917 
918 	vport->link_up = false;
919 	idpf_vport_intr_deinit(vport);
920 	idpf_xdp_rxq_info_deinit_all(vport);
921 	idpf_vport_queues_rel(vport);
922 	idpf_vport_intr_rel(vport);
923 	np->state = __IDPF_VPORT_DOWN;
924 
925 	if (rtnl)
926 		rtnl_unlock();
927 }
928 
929 /**
930  * idpf_stop - Disables a network interface
931  * @netdev: network interface device structure
932  *
933  * The stop entry point is called when an interface is de-activated by the OS,
934  * and the netdevice enters the DOWN state.  The hardware is still under the
935  * driver's control, but the netdev interface is disabled.
936  *
937  * Returns success only - not allowed to fail
938  */
939 static int idpf_stop(struct net_device *netdev)
940 {
941 	struct idpf_netdev_priv *np = netdev_priv(netdev);
942 	struct idpf_vport *vport;
943 
944 	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
945 		return 0;
946 
947 	idpf_vport_ctrl_lock(netdev);
948 	vport = idpf_netdev_to_vport(netdev);
949 
950 	idpf_vport_stop(vport, false);
951 
952 	idpf_vport_ctrl_unlock(netdev);
953 
954 	return 0;
955 }
956 
957 /**
958  * idpf_decfg_netdev - Unregister the netdev
959  * @vport: vport for which netdev to be unregistered
960  */
961 static void idpf_decfg_netdev(struct idpf_vport *vport)
962 {
963 	struct idpf_adapter *adapter = vport->adapter;
964 	u16 idx = vport->idx;
965 
966 	kfree(vport->rx_ptype_lkup);
967 	vport->rx_ptype_lkup = NULL;
968 
969 	if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
970 			       adapter->vport_config[idx]->flags)) {
971 		unregister_netdev(vport->netdev);
972 		free_netdev(vport->netdev);
973 	}
974 	vport->netdev = NULL;
975 
976 	adapter->netdevs[idx] = NULL;
977 }
978 
979 /**
980  * idpf_vport_rel - Delete a vport and free its resources
981  * @vport: the vport being removed
982  */
983 static void idpf_vport_rel(struct idpf_vport *vport)
984 {
985 	struct idpf_adapter *adapter = vport->adapter;
986 	struct idpf_vport_config *vport_config;
987 	struct idpf_vector_info vec_info;
988 	struct idpf_rss_data *rss_data;
989 	struct idpf_vport_max_q max_q;
990 	u16 idx = vport->idx;
991 
992 	vport_config = adapter->vport_config[vport->idx];
993 	idpf_deinit_rss(vport);
994 	rss_data = &vport_config->user_config.rss_data;
995 	kfree(rss_data->rss_key);
996 	rss_data->rss_key = NULL;
997 
998 	idpf_send_destroy_vport_msg(vport);
999 
1000 	/* Release all max queues allocated to the adapter's pool */
1001 	max_q.max_rxq = vport_config->max_q.max_rxq;
1002 	max_q.max_txq = vport_config->max_q.max_txq;
1003 	max_q.max_bufq = vport_config->max_q.max_bufq;
1004 	max_q.max_complq = vport_config->max_q.max_complq;
1005 	idpf_vport_dealloc_max_qs(adapter, &max_q);
1006 
1007 	/* Release all the allocated vectors on the stack */
1008 	vec_info.num_req_vecs = 0;
1009 	vec_info.num_curr_vecs = vport->num_q_vectors;
1010 	vec_info.default_vport = vport->default_vport;
1011 
1012 	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
1013 
1014 	kfree(vport->q_vector_idxs);
1015 	vport->q_vector_idxs = NULL;
1016 
1017 	kfree(adapter->vport_params_recvd[idx]);
1018 	adapter->vport_params_recvd[idx] = NULL;
1019 	kfree(adapter->vport_params_reqd[idx]);
1020 	adapter->vport_params_reqd[idx] = NULL;
1021 	if (adapter->vport_config[idx]) {
1022 		kfree(adapter->vport_config[idx]->req_qs_chunks);
1023 		adapter->vport_config[idx]->req_qs_chunks = NULL;
1024 	}
1025 	kfree(vport);
1026 	adapter->num_alloc_vports--;
1027 }
1028 
1029 /**
1030  * idpf_vport_dealloc - cleanup and release a given vport
1031  * @vport: pointer to idpf vport structure
1032  *
1033  * returns nothing
1034  */
1035 static void idpf_vport_dealloc(struct idpf_vport *vport)
1036 {
1037 	struct idpf_adapter *adapter = vport->adapter;
1038 	unsigned int i = vport->idx;
1039 
1040 	idpf_idc_deinit_vport_aux_device(vport->vdev_info);
1041 
1042 	idpf_deinit_mac_addr(vport);
1043 	idpf_vport_stop(vport, true);
1044 
1045 	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1046 		idpf_decfg_netdev(vport);
1047 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1048 		idpf_del_all_mac_filters(vport);
1049 
1050 	if (adapter->netdevs[i]) {
1051 		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
1052 
1053 		np->vport = NULL;
1054 	}
1055 
1056 	idpf_vport_rel(vport);
1057 
1058 	adapter->vports[i] = NULL;
1059 	adapter->next_vport = idpf_get_free_slot(adapter);
1060 }
1061 
1062 /**
1063  * idpf_is_hsplit_supported - check whether the header split is supported
1064  * @vport: virtual port to check the capability for
1065  *
1066  * Return: true if it's supported by the HW/FW, false if not.
1067  */
1068 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
1069 {
1070 	return idpf_is_queue_model_split(vport->rxq_model) &&
1071 	       idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
1072 				   IDPF_CAP_HSPLIT);
1073 }
1074 
1075 /**
1076  * idpf_vport_get_hsplit - get the current header split feature state
1077  * @vport: virtual port to query the state for
1078  *
1079  * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
1080  *         ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
1081  *         ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
1082  */
1083 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
1084 {
1085 	const struct idpf_vport_user_config_data *config;
1086 
1087 	if (!idpf_is_hsplit_supported(vport))
1088 		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1089 
1090 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1091 
1092 	return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
1093 	       ETHTOOL_TCP_DATA_SPLIT_ENABLED :
1094 	       ETHTOOL_TCP_DATA_SPLIT_DISABLED;
1095 }
1096 
1097 /**
1098  * idpf_vport_set_hsplit - enable or disable header split on a given vport
1099  * @vport: virtual port to configure
1100  * @val: Ethtool flag controlling the header split state
1101  *
1102  * Return: true on success, false if not supported by the HW.
1103  */
1104 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
1105 {
1106 	struct idpf_vport_user_config_data *config;
1107 
1108 	if (!idpf_is_hsplit_supported(vport))
1109 		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1110 
1111 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1112 
1113 	switch (val) {
1114 	case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
1115 		/* Default is to enable */
1116 	case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
1117 		__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1118 		return true;
1119 	case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
1120 		__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1121 		return true;
1122 	default:
1123 		return false;
1124 	}
1125 }
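
/* Example (user space view, assuming a current ethtool): header split
 * is exposed as the ring parameter 'tcp-data-split', so
 *
 *	ethtool -G eth0 tcp-data-split on
 *
 * reaches idpf_vport_set_hsplit() with ETHTOOL_TCP_DATA_SPLIT_ENABLED,
 * and 'ethtool -g eth0' reports the state via idpf_vport_get_hsplit().
 */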
1126 
1127 /**
1128  * idpf_vport_alloc - Allocates the next available struct vport in the adapter
1129  * @adapter: board private structure
1130  * @max_q: vport max queue info
1131  *
1132  * returns a pointer to a vport on success, NULL on failure.
1133  */
1134 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
1135 					   struct idpf_vport_max_q *max_q)
1136 {
1137 	struct idpf_rss_data *rss_data;
1138 	u16 idx = adapter->next_vport;
1139 	struct idpf_vport *vport;
1140 	u16 num_max_q;
1141 
1142 	if (idx == IDPF_NO_FREE_SLOT)
1143 		return NULL;
1144 
1145 	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1146 	if (!vport)
1147 		return vport;
1148 
1149 	num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
1150 	if (!adapter->vport_config[idx]) {
1151 		struct idpf_vport_config *vport_config;
1152 		struct idpf_q_coalesce *q_coal;
1153 
1154 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
1155 		if (!vport_config) {
1156 			kfree(vport);
1157 
1158 			return NULL;
1159 		}
1160 
1161 		q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
1162 		if (!q_coal) {
1163 			kfree(vport_config);
1164 			kfree(vport);
1165 
1166 			return NULL;
1167 		}
1168 		for (int i = 0; i < num_max_q; i++) {
1169 			q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
1170 			q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
1171 			q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
1172 			q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
1173 		}
1174 		vport_config->user_config.q_coalesce = q_coal;
1175 
1176 		adapter->vport_config[idx] = vport_config;
1177 	}
1178 
1179 	vport->idx = idx;
1180 	vport->adapter = adapter;
1181 	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
1182 	vport->default_vport = adapter->num_alloc_vports <
1183 			       idpf_get_default_vports(adapter);
1184 
1185 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
1186 	if (!vport->q_vector_idxs)
1187 		goto free_vport;
1188 
1189 	idpf_vport_init(vport, max_q);
1190 
1191 	/* This alloc is done separately from the LUT because it's not strictly
1192 	 * dependent on how many queues we have. If we change the number of
1193 	 * queues and soft reset, we'll need a new LUT, but the key can remain
1194 	 * the same for as long as the vport exists.
1195 	 */
1196 	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
1197 	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
1198 	if (!rss_data->rss_key)
1199 		goto free_vector_idxs;
1200 
1201 	/* Initialize default rss key */
1202 	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
1203 
1204 	/* fill vport slot in the adapter struct */
1205 	adapter->vports[idx] = vport;
1206 	adapter->vport_ids[idx] = idpf_get_vport_id(vport);
1207 
1208 	adapter->num_alloc_vports++;
1209 	/* prepare adapter->next_vport for next use */
1210 	adapter->next_vport = idpf_get_free_slot(adapter);
1211 
1212 	return vport;
1213 
1214 free_vector_idxs:
1215 	kfree(vport->q_vector_idxs);
1216 free_vport:
1217 	kfree(vport);
1218 
1219 	return NULL;
1220 }
1221 
1222 /**
1223  * idpf_get_stats64 - get statistics for network device structure
1224  * @netdev: network interface device structure
1225  * @stats: main device statistics structure
1226  */
1227 static void idpf_get_stats64(struct net_device *netdev,
1228 			     struct rtnl_link_stats64 *stats)
1229 {
1230 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1231 
1232 	spin_lock_bh(&np->stats_lock);
1233 	*stats = np->netstats;
1234 	spin_unlock_bh(&np->stats_lock);
1235 }
1236 
1237 /**
1238  * idpf_statistics_task - Delayed task to get statistics over mailbox
1239  * @work: work_struct handle to our data
1240  */
1241 void idpf_statistics_task(struct work_struct *work)
1242 {
1243 	struct idpf_adapter *adapter;
1244 	int i;
1245 
1246 	adapter = container_of(work, struct idpf_adapter, stats_task.work);
1247 
1248 	for (i = 0; i < adapter->max_vports; i++) {
1249 		struct idpf_vport *vport = adapter->vports[i];
1250 
1251 		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1252 			idpf_send_get_stats_msg(vport);
1253 	}
1254 
1255 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1256 			   msecs_to_jiffies(10000));
1257 }
1258 
1259 /**
1260  * idpf_mbx_task - Delayed task to handle mailbox responses
1261  * @work: work_struct handle
1262  */
1263 void idpf_mbx_task(struct work_struct *work)
1264 {
1265 	struct idpf_adapter *adapter;
1266 
1267 	adapter = container_of(work, struct idpf_adapter, mbx_task.work);
1268 
1269 	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
1270 		idpf_mb_irq_enable(adapter);
1271 	else
1272 		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
1273 				   msecs_to_jiffies(300));
1274 
1275 	idpf_recv_mb_msg(adapter);
1276 }
1277 
1278 /**
1279  * idpf_service_task - Delayed task to detect HW reset events
1280  * @work: work_struct handle to our data
1281  *
1282  */
1283 void idpf_service_task(struct work_struct *work)
1284 {
1285 	struct idpf_adapter *adapter;
1286 
1287 	adapter = container_of(work, struct idpf_adapter, serv_task.work);
1288 
1289 	if (idpf_is_reset_detected(adapter) &&
1290 	    !idpf_is_reset_in_prog(adapter) &&
1291 	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
1292 		dev_info(&adapter->pdev->dev, "HW reset detected\n");
1293 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
1294 		queue_delayed_work(adapter->vc_event_wq,
1295 				   &adapter->vc_event_task,
1296 				   msecs_to_jiffies(10));
1297 	}
1298 
1299 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
1300 			   msecs_to_jiffies(300));
1301 }
1302 
1303 /**
1304  * idpf_restore_features - Restore feature configs
1305  * @vport: virtual port structure
1306  */
1307 static void idpf_restore_features(struct idpf_vport *vport)
1308 {
1309 	struct idpf_adapter *adapter = vport->adapter;
1310 
1311 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
1312 		idpf_restore_mac_filters(vport);
1313 }
1314 
1315 /**
1316  * idpf_set_real_num_queues - set number of queues for netdev
1317  * @vport: virtual port structure
1318  *
1319  * Returns 0 on success, negative on failure.
1320  */
1321 static int idpf_set_real_num_queues(struct idpf_vport *vport)
1322 {
1323 	int err, txq = vport->num_txq - vport->num_xdp_txq;
1324 
1325 	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
1326 	if (err)
1327 		return err;
1328 
1329 	return netif_set_real_num_tx_queues(vport->netdev, txq);
1330 }
1331 
1332 /**
1333  * idpf_up_complete - Complete interface up sequence
1334  * @vport: virtual port structure
1335  *
1336  * Returns 0 on success, negative on failure.
1337  */
1338 static int idpf_up_complete(struct idpf_vport *vport)
1339 {
1340 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1341 
1342 	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
1343 		netif_carrier_on(vport->netdev);
1344 		netif_tx_start_all_queues(vport->netdev);
1345 	}
1346 
1347 	np->state = __IDPF_VPORT_UP;
1348 
1349 	return 0;
1350 }
1351 
1352 /**
1353  * idpf_rx_init_buf_tail - Write initial buffer ring tail value
1354  * @vport: virtual port struct
1355  */
1356 static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
1357 {
1358 	int i, j;
1359 
1360 	for (i = 0; i < vport->num_rxq_grp; i++) {
1361 		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
1362 
1363 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1364 			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1365 				const struct idpf_buf_queue *q =
1366 					&grp->splitq.bufq_sets[j].bufq;
1367 
1368 				writel(q->next_to_alloc, q->tail);
1369 			}
1370 		} else {
1371 			for (j = 0; j < grp->singleq.num_rxq; j++) {
1372 				const struct idpf_rx_queue *q =
1373 					grp->singleq.rxqs[j];
1374 
1375 				writel(q->next_to_alloc, q->tail);
1376 			}
1377 		}
1378 	}
1379 }
1380 
1381 /**
1382  * idpf_vport_open - Bring up a vport
1383  * @vport: vport to bring up
1384  * @rtnl: whether to take RTNL lock
1385  */
1386 static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
1387 {
1388 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1389 	struct idpf_adapter *adapter = vport->adapter;
1390 	struct idpf_vport_config *vport_config;
1391 	int err;
1392 
1393 	if (np->state != __IDPF_VPORT_DOWN)
1394 		return -EBUSY;
1395 
1396 	if (rtnl)
1397 		rtnl_lock();
1398 
1399 	/* we do not allow interface up just yet */
1400 	netif_carrier_off(vport->netdev);
1401 
1402 	err = idpf_vport_intr_alloc(vport);
1403 	if (err) {
1404 		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
1405 			vport->vport_id, err);
1406 		goto err_rtnl_unlock;
1407 	}
1408 
1409 	err = idpf_vport_queues_alloc(vport);
1410 	if (err)
1411 		goto intr_rel;
1412 
1413 	err = idpf_vport_queue_ids_init(vport);
1414 	if (err) {
1415 		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
1416 			vport->vport_id, err);
1417 		goto queues_rel;
1418 	}
1419 
1420 	err = idpf_vport_intr_init(vport);
1421 	if (err) {
1422 		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
1423 			vport->vport_id, err);
1424 		goto queues_rel;
1425 	}
1426 
1427 	err = idpf_rx_bufs_init_all(vport);
1428 	if (err) {
1429 		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
1430 			vport->vport_id, err);
1431 		goto queues_rel;
1432 	}
1433 
1434 	err = idpf_queue_reg_init(vport);
1435 	if (err) {
1436 		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
1437 			vport->vport_id, err);
1438 		goto queues_rel;
1439 	}
1440 
1441 	idpf_rx_init_buf_tail(vport);
1442 
1443 	err = idpf_xdp_rxq_info_init_all(vport);
1444 	if (err) {
1445 		netdev_err(vport->netdev,
1446 			   "Failed to initialize XDP RxQ info for vport %u: %pe\n",
1447 			   vport->vport_id, ERR_PTR(err));
1448 		goto intr_deinit;
1449 	}
1450 
1451 	idpf_vport_intr_ena(vport);
1452 
1453 	err = idpf_send_config_queues_msg(vport);
1454 	if (err) {
1455 		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
1456 			vport->vport_id, err);
1457 		goto rxq_deinit;
1458 	}
1459 
1460 	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
1461 	if (err) {
1462 		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
1463 			vport->vport_id, err);
1464 		goto rxq_deinit;
1465 	}
1466 
1467 	err = idpf_send_enable_queues_msg(vport);
1468 	if (err) {
1469 		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
1470 			vport->vport_id, err);
1471 		goto unmap_queue_vectors;
1472 	}
1473 
1474 	err = idpf_send_enable_vport_msg(vport);
1475 	if (err) {
1476 		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
1477 			vport->vport_id, err);
1478 		err = -EAGAIN;
1479 		goto disable_queues;
1480 	}
1481 
1482 	idpf_restore_features(vport);
1483 
1484 	vport_config = adapter->vport_config[vport->idx];
1485 	if (vport_config->user_config.rss_data.rss_lut)
1486 		err = idpf_config_rss(vport);
1487 	else
1488 		err = idpf_init_rss(vport);
1489 	if (err) {
1490 		dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
1491 			vport->vport_id, err);
1492 		goto disable_vport;
1493 	}
1494 
1495 	err = idpf_up_complete(vport);
1496 	if (err) {
1497 		dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
1498 			vport->vport_id, err);
1499 		goto deinit_rss;
1500 	}
1501 
1502 	if (rtnl)
1503 		rtnl_unlock();
1504 
1505 	return 0;
1506 
1507 deinit_rss:
1508 	idpf_deinit_rss(vport);
1509 disable_vport:
1510 	idpf_send_disable_vport_msg(vport);
1511 disable_queues:
1512 	idpf_send_disable_queues_msg(vport);
1513 unmap_queue_vectors:
1514 	idpf_send_map_unmap_queue_vector_msg(vport, false);
1515 rxq_deinit:
1516 	idpf_xdp_rxq_info_deinit_all(vport);
1517 intr_deinit:
1518 	idpf_vport_intr_deinit(vport);
1519 queues_rel:
1520 	idpf_vport_queues_rel(vport);
1521 intr_rel:
1522 	idpf_vport_intr_rel(vport);
1523 
1524 err_rtnl_unlock:
1525 	if (rtnl)
1526 		rtnl_unlock();
1527 
1528 	return err;
1529 }
1530 
1531 /**
1532  * idpf_init_task - Delayed initialization task
1533  * @work: work_struct handle to our data
1534  *
1535  * Init task finishes up pending work started in probe. Due to the asynchronous
1536  * nature in which the device communicates with hardware, we may have to wait
1537  * several milliseconds to get a response.  Instead of busy polling in probe,
1538  * pulling it out into a delayed work task prevents us from bogging down the
1539  * whole system waiting for a response from hardware.
1540  */
1541 void idpf_init_task(struct work_struct *work)
1542 {
1543 	struct idpf_vport_config *vport_config;
1544 	struct idpf_vport_max_q max_q;
1545 	struct idpf_adapter *adapter;
1546 	struct idpf_netdev_priv *np;
1547 	struct idpf_vport *vport;
1548 	u16 num_default_vports;
1549 	struct pci_dev *pdev;
1550 	bool default_vport;
1551 	int index, err;
1552 
1553 	adapter = container_of(work, struct idpf_adapter, init_task.work);
1554 
1555 	num_default_vports = idpf_get_default_vports(adapter);
1556 	if (adapter->num_alloc_vports < num_default_vports)
1557 		default_vport = true;
1558 	else
1559 		default_vport = false;
1560 
1561 	err = idpf_vport_alloc_max_qs(adapter, &max_q);
1562 	if (err)
1563 		goto unwind_vports;
1564 
1565 	err = idpf_send_create_vport_msg(adapter, &max_q);
1566 	if (err) {
1567 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1568 		goto unwind_vports;
1569 	}
1570 
1571 	pdev = adapter->pdev;
1572 	vport = idpf_vport_alloc(adapter, &max_q);
1573 	if (!vport) {
1574 		err = -EFAULT;
1575 		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
1576 			err);
1577 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1578 		goto unwind_vports;
1579 	}
1580 
1581 	index = vport->idx;
1582 	vport_config = adapter->vport_config[index];
1583 
1584 	spin_lock_init(&vport_config->mac_filter_list_lock);
1585 
1586 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
1587 	INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
1588 
1589 	err = idpf_check_supported_desc_ids(vport);
1590 	if (err) {
1591 		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
1592 		goto cfg_netdev_err;
1593 	}
1594 
1595 	if (idpf_cfg_netdev(vport))
1596 		goto cfg_netdev_err;
1597 
1598 	err = idpf_send_get_rx_ptype_msg(vport);
1599 	if (err)
1600 		goto handle_err;
1601 
1602 	/* Once state is put into DOWN, driver is ready for dev_open */
1603 	np = netdev_priv(vport->netdev);
1604 	np->state = __IDPF_VPORT_DOWN;
1605 	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
1606 		idpf_vport_open(vport, true);
1607 
1608 	/* Re-queue the 'idpf_init_task' work and return until all the
1609 	 * default vports are created
1610 	 */
1611 	if (adapter->num_alloc_vports < num_default_vports) {
1612 		queue_delayed_work(adapter->init_wq, &adapter->init_task,
1613 				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
1614 
1615 		return;
1616 	}
1617 
1618 	for (index = 0; index < adapter->max_vports; index++) {
1619 		struct net_device *netdev = adapter->netdevs[index];
1620 		struct idpf_vport_config *vport_config;
1621 
1622 		vport_config = adapter->vport_config[index];
1623 
1624 		if (!netdev ||
1625 		    test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
1626 			continue;
1627 
1628 		err = register_netdev(netdev);
1629 		if (err) {
1630 			dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
1631 				index, ERR_PTR(err));
1632 			continue;
1633 		}
1634 		set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
1635 	}
1636 
1637 	/* Now that all the required vports are created, clear the reset flag
1638 	 * unconditionally here in case we were in reset and the link was down.
1639 	 */
1640 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1641 	/* Start the statistics task now */
1642 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1643 			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
1644 
1645 	return;
1646 
1647 handle_err:
1648 	idpf_decfg_netdev(vport);
1649 cfg_netdev_err:
1650 	idpf_vport_rel(vport);
1651 	adapter->vports[index] = NULL;
1652 unwind_vports:
1653 	if (default_vport) {
1654 		for (index = 0; index < adapter->max_vports; index++) {
1655 			if (adapter->vports[index])
1656 				idpf_vport_dealloc(adapter->vports[index]);
1657 		}
1658 	}
1659 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1660 }
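
/* Editorial note: the (devfn & 0x07) factor above staggers the
 * re-queued init work (5 ms steps) and the stats work (10 ms steps)
 * across up to eight PCI functions so they do not hit the mailbox at
 * the same time.
 */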
1661 
1662 /**
1663  * idpf_sriov_ena - Enable or change number of VFs
1664  * @adapter: private data struct
1665  * @num_vfs: number of VFs to allocate
1666  */
1667 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
1668 {
1669 	struct device *dev = &adapter->pdev->dev;
1670 	int err;
1671 
1672 	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
1673 	if (err) {
1674 		dev_err(dev, "Failed to allocate VFs: %d\n", err);
1675 
1676 		return err;
1677 	}
1678 
1679 	err = pci_enable_sriov(adapter->pdev, num_vfs);
1680 	if (err) {
1681 		idpf_send_set_sriov_vfs_msg(adapter, 0);
1682 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1683 
1684 		return err;
1685 	}
1686 
1687 	adapter->num_vfs = num_vfs;
1688 
1689 	return num_vfs;
1690 }
1691 
1692 /**
1693  * idpf_sriov_configure - Configure the requested VFs
1694  * @pdev: pointer to a pci_dev structure
1695  * @num_vfs: number of vfs to allocate
1696  *
1697  * Enable or change the number of VFs. Called when the user updates the number
1698  * of VFs in sysfs.
1699  **/
1700 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
1701 {
1702 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
1703 
1704 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
1705 		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
1706 
1707 		return -EOPNOTSUPP;
1708 	}
1709 
1710 	if (num_vfs)
1711 		return idpf_sriov_ena(adapter, num_vfs);
1712 
1713 	if (pci_vfs_assigned(pdev)) {
1714 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
1715 
1716 		return -EBUSY;
1717 	}
1718 
1719 	pci_disable_sriov(adapter->pdev);
1720 	idpf_send_set_sriov_vfs_msg(adapter, 0);
1721 	adapter->num_vfs = 0;
1722 
1723 	return 0;
1724 }
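
/* Example (user space view): this callback is wired to the standard
 * PCI sysfs knob, e.g.
 *
 *	echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *
 * (device address hypothetical); writing 0 releases the VFs again,
 * which fails with -EBUSY while any VF is still assigned to a VM.
 */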
1725 
1726 /**
1727  * idpf_deinit_task - Device deinit routine
1728  * @adapter: Driver specific private structure
1729  *
1730  * Extended remove logic which will be used for
1731  * hard reset as well
1732  */
1733 void idpf_deinit_task(struct idpf_adapter *adapter)
1734 {
1735 	unsigned int i;
1736 
1737 	/* Wait until the init_task is done, else this thread might release
1738 	 * the resources first and the other thread might end up in a bad state
1739 	 */
1740 	cancel_delayed_work_sync(&adapter->init_task);
1741 
1742 	if (!adapter->vports)
1743 		return;
1744 
1745 	cancel_delayed_work_sync(&adapter->stats_task);
1746 
1747 	for (i = 0; i < adapter->max_vports; i++) {
1748 		if (adapter->vports[i])
1749 			idpf_vport_dealloc(adapter->vports[i]);
1750 	}
1751 }
1752 
1753 /**
1754  * idpf_check_reset_complete - check that reset is complete
1755  * @hw: pointer to hw struct
1756  * @reset_reg: struct with reset registers
1757  *
1758  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
1759  **/
1760 static int idpf_check_reset_complete(struct idpf_hw *hw,
1761 				     struct idpf_reset_reg *reset_reg)
1762 {
1763 	struct idpf_adapter *adapter = hw->back;
1764 	int i;
1765 
1766 	for (i = 0; i < 2000; i++) {
1767 		u32 reg_val = readl(reset_reg->rstat);
1768 
1769 		/* 0xFFFFFFFF might be read if other side hasn't cleared the
1770 		 * register for us yet and 0xFFFFFFFF is not a valid value for
1771 		 * the register, so treat that as invalid.
1772 		 */
1773 		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
1774 			return 0;
1775 
1776 		usleep_range(5000, 10000);
1777 	}
1778 
1779 	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
1780 	/* Clear the reset flag unconditionally here since the reset
1781 	 * technically isn't in progress anymore from the driver's perspective
1782 	 */
1783 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1784 
1785 	return -EBUSY;
1786 }
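
/* Editorial note: the loop above polls up to 2000 times with a 5-10 ms
 * sleep per iteration, i.e. the reset timeout is roughly 10-20 seconds
 * before giving up with -EBUSY.
 */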
1787 
1788 /**
1789  * idpf_set_vport_state - Set the vport state to be after the reset
1790  * @adapter: Driver specific private structure
1791  */
1792 static void idpf_set_vport_state(struct idpf_adapter *adapter)
1793 {
1794 	u16 i;
1795 
1796 	for (i = 0; i < adapter->max_vports; i++) {
1797 		struct idpf_netdev_priv *np;
1798 
1799 		if (!adapter->netdevs[i])
1800 			continue;
1801 
1802 		np = netdev_priv(adapter->netdevs[i]);
1803 		if (np->state == __IDPF_VPORT_UP)
1804 			set_bit(IDPF_VPORT_UP_REQUESTED,
1805 				adapter->vport_config[i]->flags);
1806 	}
1807 }
1808 
1809 /**
1810  * idpf_init_hard_reset - Initiate a hardware reset
1811  * @adapter: Driver specific private structure
1812  *
1813  * Deallocate the vports and all the resources associated with them and
1814  * reallocate. Also reinitialize the mailbox. Return 0 on success,
1815  * negative on failure.
1816  */
1817 static int idpf_init_hard_reset(struct idpf_adapter *adapter)
1818 {
1819 	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
1820 	struct device *dev = &adapter->pdev->dev;
1821 	struct net_device *netdev;
1822 	int err;
1823 	u16 i;
1824 
1825 	mutex_lock(&adapter->vport_ctrl_lock);
1826 
1827 	dev_info(dev, "Device HW Reset initiated\n");
1828 
1829 	/* Avoid TX hangs on reset */
1830 	for (i = 0; i < adapter->max_vports; i++) {
1831 		netdev = adapter->netdevs[i];
1832 		if (!netdev)
1833 			continue;
1834 
1835 		netif_carrier_off(netdev);
1836 		netif_tx_disable(netdev);
1837 	}
1838 
1839 	/* Prepare for reset */
1840 	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1841 		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
1842 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
1843 		bool is_reset = idpf_is_reset_detected(adapter);
1844 
1845 		idpf_idc_issue_reset_event(adapter->cdev_info);
1846 
1847 		idpf_set_vport_state(adapter);
1848 		idpf_vc_core_deinit(adapter);
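		/* Only trigger the reset ourselves if HW hasn't already
		 * started one
		 */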
1849 		if (!is_reset)
1850 			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
1851 		idpf_deinit_dflt_mbx(adapter);
1852 	} else {
1853 		dev_err(dev, "Unhandled hard reset cause\n");
1854 		err = -EBADRQC;
1855 		goto unlock_mutex;
1856 	}
1857 
1858 	/* Wait for reset to complete */
1859 	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
1860 	if (err) {
		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state: 0x%x\n",
1862 			adapter->state);
1863 		goto unlock_mutex;
1864 	}
1865 
1866 	/* Reset is complete and so start building the driver resources again */
1867 	err = idpf_init_dflt_mbx(adapter);
1868 	if (err) {
1869 		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
1870 		goto unlock_mutex;
1871 	}
1872 
1873 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
1874 
1875 	/* Initialize the state machine, also allocate memory and request
1876 	 * resources
1877 	 */
1878 	err = idpf_vc_core_init(adapter);
1879 	if (err) {
1880 		cancel_delayed_work_sync(&adapter->mbx_task);
1881 		idpf_deinit_dflt_mbx(adapter);
1882 		goto unlock_mutex;
1883 	}
1884 
	/* Wait until all the vports are initialized to release the reset
	 * lock, else user space callbacks may access uninitialized vports
	 */
1888 	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1889 		msleep(100);
1890 
1891 unlock_mutex:
1892 	mutex_unlock(&adapter->vport_ctrl_lock);
1893 
1894 	/* Wait until all vports are created to init RDMA CORE AUX */
1895 	if (!err)
1896 		err = idpf_idc_init(adapter);
1897 
1898 	return err;
1899 }
1900 
1901 /**
1902  * idpf_vc_event_task - Handle virtchannel event logic
1903  * @work: work queue struct
1904  */
1905 void idpf_vc_event_task(struct work_struct *work)
1906 {
1907 	struct idpf_adapter *adapter;
1908 
1909 	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
1910 
1911 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1912 		return;
1913 
1914 	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
1915 		goto func_reset;
1916 
1917 	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
1918 		goto drv_load;
1919 
1920 	return;
1921 
1922 func_reset:
1923 	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
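	/* Execution intentionally continues into the drv_load path; both
	 * causes end in a hard reset.
	 */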
1924 drv_load:
1925 	set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1926 	idpf_init_hard_reset(adapter);
1927 }
1928 
1929 /**
1930  * idpf_initiate_soft_reset - Initiate a software reset
1931  * @vport: virtual port data struct
1932  * @reset_cause: reason for the soft reset
1933  *
1934  * Soft reset only reallocs vport queue resources. Returns 0 on success,
1935  * negative on failure.
1936  */
1937 int idpf_initiate_soft_reset(struct idpf_vport *vport,
1938 			     enum idpf_vport_reset_cause reset_cause)
1939 {
1940 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1941 	enum idpf_vport_state current_state = np->state;
1942 	struct idpf_adapter *adapter = vport->adapter;
1943 	struct idpf_vport *new_vport;
1944 	int err;
1945 
	/* If the system is low on memory, we can end up in a bad state if we
	 * free all the memory for queue resources and then try to allocate
	 * them again. Instead, pre-allocate the new resources before doing
	 * anything else and bail if the allocation fails.
	 *
	 * Make a clone of the existing vport to mimic its current
	 * configuration, then modify the new structure with any requested
	 * changes. Once the allocation of the new resources is done, stop the
	 * existing vport and copy the configuration to the main vport. If an
	 * error occurred, the existing vport will be untouched.
	 */
1958 	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1959 	if (!new_vport)
1960 		return -ENOMEM;
1961 
1962 	/* This purposely avoids copying the end of the struct because it
1963 	 * contains wait_queues and mutexes and other stuff we don't want to
1964 	 * mess with. Nothing below should use those variables from new_vport
1965 	 * and should instead always refer to them in vport if they need to.
1966 	 */
1967 	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
1968 
1969 	/* Adjust resource parameters prior to reallocating resources */
1970 	switch (reset_cause) {
1971 	case IDPF_SR_Q_CHANGE:
1972 		err = idpf_vport_adjust_qs(new_vport);
1973 		if (err)
1974 			goto free_vport;
1975 		break;
1976 	case IDPF_SR_Q_DESC_CHANGE:
1977 		/* Update queue parameters before allocating resources */
1978 		idpf_vport_calc_num_q_desc(new_vport);
1979 		break;
1980 	case IDPF_SR_MTU_CHANGE:
1981 		idpf_idc_vdev_mtu_event(vport->vdev_info,
1982 					IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
1983 		break;
1984 	case IDPF_SR_RSC_CHANGE:
1985 		break;
1986 	default:
1987 		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
1988 		err = -EINVAL;
1989 		goto free_vport;
1990 	}
1991 
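	/* If the vport is already down, just delete its queues; otherwise
	 * flag the stop path to delete the queues for us
	 */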
1992 	if (current_state <= __IDPF_VPORT_DOWN) {
1993 		idpf_send_delete_queues_msg(vport);
1994 	} else {
1995 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
1996 		idpf_vport_stop(vport, false);
1997 	}
1998 
1999 	idpf_deinit_rss(vport);
	/* We pass in vport here because we need its wait_queue to send the
	 * message, and it should be getting all of its vport config data out
	 * of the adapter. Be careful not to have add_queues change the vport
	 * config within vport itself, as that will be wiped by a memcpy
	 * later.
	 */
2006 	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
2007 				       new_vport->num_complq,
2008 				       new_vport->num_rxq,
2009 				       new_vport->num_bufq);
2010 	if (err)
2011 		goto err_reset;
2012 
	/* The same comment as above about avoiding copying the wait_queues
	 * and mutexes applies here; we do not want to mess with those if
	 * possible.
2015 	 */
2016 	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
2017 
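	/* A queue count change can also change how many vectors the vport
	 * needs
	 */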
2018 	if (reset_cause == IDPF_SR_Q_CHANGE)
2019 		idpf_vport_alloc_vec_indexes(vport);
2020 
2021 	err = idpf_set_real_num_queues(vport);
2022 	if (err)
2023 		goto err_open;
2024 
2025 	if (current_state == __IDPF_VPORT_UP)
2026 		err = idpf_vport_open(vport, false);
2027 
2028 	goto free_vport;
2029 
2030 err_reset:
2031 	idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
2032 				 vport->num_rxq, vport->num_bufq);
2033 
2034 err_open:
2035 	if (current_state == __IDPF_VPORT_UP)
2036 		idpf_vport_open(vport, false);
2037 
2038 free_vport:
2039 	kfree(new_vport);
2040 
2041 	if (reset_cause == IDPF_SR_MTU_CHANGE)
2042 		idpf_idc_vdev_mtu_event(vport->vdev_info,
2043 					IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
2044 
2045 	return err;
2046 }
2047 
2048 /**
2049  * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
2050  * @netdev: the netdevice
2051  * @addr: address to add
2052  *
2053  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
2054  * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
2055  * meaning we cannot sleep in this context. Due to this, we have to add the
2056  * filter and send the virtchnl message asynchronously without waiting for the
2057  * response from the other side. We won't know whether or not the operation
2058  * actually succeeded until we get the message back.  Returns 0 on success,
2059  * negative on failure.
2060  */
2061 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
2062 {
2063 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2064 
2065 	return idpf_add_mac_filter(np->vport, np, addr, true);
2066 }
2067 
2068 /**
2069  * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2070  * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
 * meaning we cannot sleep in this context. Due to this we have to delete the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
 * negative on failure.
2080  */
2081 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
2082 {
2083 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2084 
2085 	/* Under some circumstances, we might receive a request to delete
2086 	 * our own device address from our uc list. Because we store the
2087 	 * device address in the VSI's MAC filter list, we need to ignore
2088 	 * such requests and not delete our device address from this list.
2089 	 */
2090 	if (ether_addr_equal(addr, netdev->dev_addr))
2091 		return 0;
2092 
2093 	idpf_del_mac_filter(np->vport, np, addr, true);
2094 
2095 	return 0;
2096 }
2097 
2098 /**
2099  * idpf_set_rx_mode - NDO callback to set the netdev filters
2100  * @netdev: network interface device structure
2101  *
2102  * Stack takes addr_list_lock spinlock before calling our .set_rx_mode.  We
2103  * cannot sleep in this context.
2104  */
2105 static void idpf_set_rx_mode(struct net_device *netdev)
2106 {
2107 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2108 	struct idpf_vport_user_config_data *config_data;
2109 	struct idpf_adapter *adapter;
2110 	bool changed = false;
2111 	struct device *dev;
2112 	int err;
2113 
2114 	adapter = np->adapter;
2115 	dev = &adapter->pdev->dev;
2116 
2117 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
2118 		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2119 		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2120 	}
2121 
2122 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
2123 		return;
2124 
2125 	config_data = &adapter->vport_config[np->vport_idx]->user_config;
2126 	/* IFF_PROMISC enables both unicast and multicast promiscuous,
2127 	 * while IFF_ALLMULTI only enables multicast such that:
2128 	 *
2129 	 * promisc  + allmulti		= unicast | multicast
2130 	 * promisc  + !allmulti		= unicast | multicast
2131 	 * !promisc + allmulti		= multicast
2132 	 */
	if ((netdev->flags & IFF_PROMISC) &&
	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
		changed = true;
		dev_info(dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC,
				      config_data->user_flags))
			dev_info(dev, "Entering multicast promiscuous mode\n");
	}
2140 
2141 	if (!(netdev->flags & IFF_PROMISC) &&
2142 	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2143 		changed = true;
2144 		dev_info(dev, "Leaving promiscuous mode\n");
2145 	}
2146 
2147 	if (netdev->flags & IFF_ALLMULTI &&
2148 	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2149 		changed = true;
2150 		dev_info(dev, "Entering multicast promiscuous mode\n");
2151 	}
2152 
2153 	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
2154 	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2155 		changed = true;
2156 		dev_info(dev, "Leaving multicast promiscuous mode\n");
2157 	}
2158 
2159 	if (!changed)
2160 		return;
2161 
2162 	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
2163 	if (err)
2164 		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
2165 }
2166 
2167 /**
2168  * idpf_vport_manage_rss_lut - disable/enable RSS
2169  * @vport: the vport being changed
2170  *
 * In the event of a disable request for RSS, this function will zero out the
 * RSS LUT, while in the event of an enable request for RSS, it will
 * reconfigure the RSS LUT with the cached default or user-configured LUT.
2174  */
2175 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
2176 {
2177 	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
2178 	struct idpf_rss_data *rss_data;
2179 	u16 idx = vport->idx;
2180 	int lut_size;
2181 
2182 	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
2183 	lut_size = rss_data->rss_lut_size * sizeof(u32);
2184 
2185 	if (ena) {
2186 		/* This will contain the default or user configured LUT */
2187 		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
2188 	} else {
2189 		/* Save a copy of the current LUT to be restored later if
2190 		 * requested.
2191 		 */
2192 		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
2193 
2194 		/* Zero out the current LUT to disable */
2195 		memset(rss_data->rss_lut, 0, lut_size);
2196 	}
2197 
2198 	return idpf_config_rss(vport);
2199 }
2200 
2201 /**
2202  * idpf_set_features - set the netdev feature flags
2203  * @netdev: ptr to the netdev being adjusted
2204  * @features: the feature set that the stack is suggesting
2205  */
2206 static int idpf_set_features(struct net_device *netdev,
2207 			     netdev_features_t features)
2208 {
2209 	netdev_features_t changed = netdev->features ^ features;
2210 	struct idpf_adapter *adapter;
2211 	struct idpf_vport *vport;
2212 	int err = 0;
2213 
2214 	idpf_vport_ctrl_lock(netdev);
2215 	vport = idpf_netdev_to_vport(netdev);
2216 
2217 	adapter = vport->adapter;
2218 
2219 	if (idpf_is_reset_in_prog(adapter)) {
		dev_err(&adapter->pdev->dev, "Device is resetting; changing netdev features is temporarily unavailable.\n");
2221 		err = -EBUSY;
2222 		goto unlock_mutex;
2223 	}
2224 
2225 	if (changed & NETIF_F_RXHASH) {
2226 		netdev->features ^= NETIF_F_RXHASH;
2227 		err = idpf_vport_manage_rss_lut(vport);
2228 		if (err)
2229 			goto unlock_mutex;
2230 	}
2231 
2232 	if (changed & NETIF_F_GRO_HW) {
2233 		netdev->features ^= NETIF_F_GRO_HW;
2234 		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
2235 		if (err)
2236 			goto unlock_mutex;
2237 	}
2238 
2239 	if (changed & NETIF_F_LOOPBACK) {
2240 		netdev->features ^= NETIF_F_LOOPBACK;
2241 		err = idpf_send_ena_dis_loopback_msg(vport);
2242 	}
2243 
2244 unlock_mutex:
2245 	idpf_vport_ctrl_unlock(netdev);
2246 
2247 	return err;
2248 }
2249 
2250 /**
2251  * idpf_open - Called when a network interface becomes active
2252  * @netdev: network interface device structure
2253  *
2254  * The open entry point is called when a network interface is made
2255  * active by the system (IFF_UP).  At this point all resources needed
2256  * for transmit and receive operations are allocated, the interrupt
2257  * handler is registered with the OS, the netdev watchdog is enabled,
2258  * and the stack is notified that the interface is ready.
2259  *
2260  * Returns 0 on success, negative value on failure
2261  */
2262 static int idpf_open(struct net_device *netdev)
2263 {
2264 	struct idpf_vport *vport;
2265 	int err;
2266 
2267 	idpf_vport_ctrl_lock(netdev);
2268 	vport = idpf_netdev_to_vport(netdev);
2269 
2270 	err = idpf_set_real_num_queues(vport);
2271 	if (err)
2272 		goto unlock;
2273 
2274 	err = idpf_vport_open(vport, false);
2275 
2276 unlock:
2277 	idpf_vport_ctrl_unlock(netdev);
2278 
2279 	return err;
2280 }
2281 
2282 /**
2283  * idpf_change_mtu - NDO callback to change the MTU
2284  * @netdev: network interface device structure
2285  * @new_mtu: new value for maximum frame size
2286  *
2287  * Returns 0 on success, negative on failure
2288  */
2289 static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
2290 {
2291 	struct idpf_vport *vport;
2292 	int err;
2293 
2294 	idpf_vport_ctrl_lock(netdev);
2295 	vport = idpf_netdev_to_vport(netdev);
2296 
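	/* netdev->mtu is read locklessly elsewhere, hence WRITE_ONCE() */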
2297 	WRITE_ONCE(netdev->mtu, new_mtu);
2298 
2299 	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
2300 
2301 	idpf_vport_ctrl_unlock(netdev);
2302 
2303 	return err;
2304 }
2305 
2306 /**
2307  * idpf_chk_tso_segment - Check skb is not using too many buffers
2308  * @skb: send buffer
2309  * @max_bufs: maximum number of buffers
2310  *
2311  * For TSO we need to count the TSO header and segment payload separately.  As
2312  * such we need to check cases where we have max_bufs-1 fragments or more as we
2313  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
 * for the segment payload in the first descriptor, and another max_bufs-1 for
2315  * the fragments.
2316  *
2317  * Returns true if the packet needs to be software segmented by core stack.
2318  */
2319 static bool idpf_chk_tso_segment(const struct sk_buff *skb,
2320 				 unsigned int max_bufs)
2321 {
2322 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2323 	const skb_frag_t *frag, *stale;
2324 	int nr_frags, sum;
2325 
2326 	/* no need to check if number of frags is less than max_bufs - 1 */
2327 	nr_frags = shinfo->nr_frags;
2328 	if (nr_frags < (max_bufs - 1))
2329 		return false;
2330 
2331 	/* We need to walk through the list and validate that each group
2332 	 * of max_bufs-2 fragments totals at least gso_size.
2333 	 */
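	/* For example, with max_bufs == 8, every window of 6 consecutive
	 * fragments must provide at least gso_size bytes.
	 */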
2334 	nr_frags -= max_bufs - 2;
2335 	frag = &shinfo->frags[0];
2336 
	/* Initialize sum to the negative value of gso_size minus 1, i.e.
	 * 1 - gso_size. We use this as the worst case scenario in which the
	 * frag ahead of us only provides one byte, which is why we are
	 * limited to max_bufs-2 descriptors for a single transmit, as the
	 * header and previous fragment are already consuming 2 descriptors.
	 */
2343 	sum = 1 - shinfo->gso_size;
2344 
2345 	/* Add size of frags 0 through 4 to create our initial sum */
2346 	sum += skb_frag_size(frag++);
2347 	sum += skb_frag_size(frag++);
2348 	sum += skb_frag_size(frag++);
2349 	sum += skb_frag_size(frag++);
2350 	sum += skb_frag_size(frag++);
2351 
2352 	/* Walk through fragments adding latest fragment, testing it, and
2353 	 * then removing stale fragments from the sum.
2354 	 */
2355 	for (stale = &shinfo->frags[0];; stale++) {
2356 		int stale_size = skb_frag_size(stale);
2357 
2358 		sum += skb_frag_size(frag++);
2359 
2360 		/* The stale fragment may present us with a smaller
2361 		 * descriptor than the actual fragment size. To account
2362 		 * for that we need to remove all the data on the front and
2363 		 * figure out what the remainder would be in the last
2364 		 * descriptor associated with the fragment.
2365 		 */
2366 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
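			/* align_pad is the distance from the frag offset up
			 * to the next IDPF_TX_MAX_READ_REQ_SIZE boundary; the
			 * negate-and-mask trick assumes the size is a power
			 * of two.
			 */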
2367 			int align_pad = -(skb_frag_off(stale)) &
2368 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2369 
2370 			sum -= align_pad;
2371 			stale_size -= align_pad;
2372 
2373 			do {
2374 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2375 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2376 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2377 		}
2378 
2379 		/* if sum is negative we failed to make sufficient progress */
2380 		if (sum < 0)
2381 			return true;
2382 
2383 		if (!nr_frags--)
2384 			break;
2385 
2386 		sum -= stale_size;
2387 	}
2388 
2389 	return false;
2390 }
2391 
2392 /**
2393  * idpf_features_check - Validate packet conforms to limits
2394  * @skb: skb buffer
2395  * @netdev: This port's netdev
2396  * @features: Offload features that the stack believes apply
2397  */
2398 static netdev_features_t idpf_features_check(struct sk_buff *skb,
2399 					     struct net_device *netdev,
2400 					     netdev_features_t features)
2401 {
2402 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2403 	u16 max_tx_hdr_size = np->max_tx_hdr_size;
2404 	size_t len;
2405 
	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
2410 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2411 		return features;
2412 
2413 	if (skb_is_gso(skb)) {
		/* We cannot support GSO if the MSS is going to be less than
		 * IDPF_TX_TSO_MIN_MSS (88 bytes); if it is, drop GSO support.
		 */
2417 		if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
2418 			features &= ~NETIF_F_GSO_MASK;
2419 		else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
2420 			features &= ~NETIF_F_GSO_MASK;
2421 	}
2422 
2423 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
2424 	len = skb_network_offset(skb);
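	/* len & ~126 is nonzero when len is odd or greater than 126 */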
2425 	if (unlikely(len & ~(126)))
2426 		goto unsupported;
2427 
2428 	len = skb_network_header_len(skb);
2429 	if (unlikely(len > max_tx_hdr_size))
2430 		goto unsupported;
2431 
2432 	if (!skb->encapsulation)
2433 		return features;
2434 
2435 	/* L4TUNLEN can support 127 words */
2436 	len = skb_inner_network_header(skb) - skb_transport_header(skb);
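	/* len & ~254 is nonzero when len is odd or greater than 254 */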
2437 	if (unlikely(len & ~(127 * 2)))
2438 		goto unsupported;
2439 
2440 	/* IPLEN can support at most 127 dwords */
2441 	len = skb_inner_network_header_len(skb);
2442 	if (unlikely(len > max_tx_hdr_size))
2443 		goto unsupported;
2444 
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value, and we support all possible values supported by
	 * TCP, which is at most 15 dwords
	 */
2449 
2450 	return features;
2451 
2452 unsupported:
2453 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2454 }
2455 
2456 /**
2457  * idpf_set_mac - NDO callback to set port mac address
2458  * @netdev: network interface device structure
2459  * @p: pointer to an address structure
2460  *
2461  * Returns 0 on success, negative on failure
2462  **/
2463 static int idpf_set_mac(struct net_device *netdev, void *p)
2464 {
2465 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2466 	struct idpf_vport_config *vport_config;
2467 	struct sockaddr *addr = p;
2468 	u8 old_mac_addr[ETH_ALEN];
2469 	struct idpf_vport *vport;
2470 	int err = 0;
2471 
2472 	idpf_vport_ctrl_lock(netdev);
2473 	vport = idpf_netdev_to_vport(netdev);
2474 
2475 	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
2476 			     VIRTCHNL2_CAP_MACFILTER)) {
2477 		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
2478 		err = -EOPNOTSUPP;
2479 		goto unlock_mutex;
2480 	}
2481 
2482 	if (!is_valid_ether_addr(addr->sa_data)) {
2483 		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
2484 			 addr->sa_data);
2485 		err = -EADDRNOTAVAIL;
2486 		goto unlock_mutex;
2487 	}
2488 
2489 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
2490 		goto unlock_mutex;
2491 
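	/* Remember the previous address so its filter can be removed once
	 * the new filter has been successfully added
	 */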
2492 	ether_addr_copy(old_mac_addr, vport->default_mac_addr);
2493 	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
2494 	vport_config = vport->adapter->vport_config[vport->idx];
2495 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
2496 	if (err) {
2497 		__idpf_del_mac_filter(vport_config, addr->sa_data);
2498 		ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
2499 		goto unlock_mutex;
2500 	}
2501 
2502 	if (is_valid_ether_addr(old_mac_addr))
2503 		__idpf_del_mac_filter(vport_config, old_mac_addr);
2504 
2505 	eth_hw_addr_set(netdev, addr->sa_data);
2506 
2507 unlock_mutex:
2508 	idpf_vport_ctrl_unlock(netdev);
2509 
2510 	return err;
2511 }
2512 
2513 /**
2514  * idpf_alloc_dma_mem - Allocate dma memory
2515  * @hw: pointer to hw struct
2516  * @mem: pointer to dma_mem struct
2517  * @size: size of the memory to allocate
2518  */
2519 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
2520 {
2521 	struct idpf_adapter *adapter = hw->back;
2522 	size_t sz = ALIGN(size, 4096);
2523 
	/* The control queue resources are freed under a spinlock; contiguous
	 * pages avoid IOMMU remapping and the use of vmap (and vunmap in the
	 * dma_free_*() path).
	 */
2528 	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
2529 				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
2530 	mem->size = sz;
2531 
2532 	return mem->va;
2533 }
2534 
2535 /**
2536  * idpf_free_dma_mem - Free the allocated dma memory
2537  * @hw: pointer to hw struct
2538  * @mem: pointer to dma_mem struct
2539  */
2540 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2541 {
2542 	struct idpf_adapter *adapter = hw->back;
2543 
2544 	dma_free_attrs(&adapter->pdev->dev, mem->size,
2545 		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
2546 	mem->size = 0;
2547 	mem->va = NULL;
2548 	mem->pa = 0;
2549 }
2550 
2551 static int idpf_hwtstamp_set(struct net_device *netdev,
2552 			     struct kernel_hwtstamp_config *config,
2553 			     struct netlink_ext_ack *extack)
2554 {
2555 	struct idpf_vport *vport;
2556 	int err;
2557 
2558 	idpf_vport_ctrl_lock(netdev);
2559 	vport = idpf_netdev_to_vport(netdev);
2560 
2561 	if (!vport->link_up) {
2562 		idpf_vport_ctrl_unlock(netdev);
2563 		return -EPERM;
2564 	}
2565 
2566 	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
2567 	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
2568 		idpf_vport_ctrl_unlock(netdev);
2569 		return -EOPNOTSUPP;
2570 	}
2571 
2572 	err = idpf_ptp_set_timestamp_mode(vport, config);
2573 
2574 	idpf_vport_ctrl_unlock(netdev);
2575 
2576 	return err;
2577 }
2578 
2579 static int idpf_hwtstamp_get(struct net_device *netdev,
2580 			     struct kernel_hwtstamp_config *config)
2581 {
2582 	struct idpf_vport *vport;
2583 
2584 	idpf_vport_ctrl_lock(netdev);
2585 	vport = idpf_netdev_to_vport(netdev);
2586 
2587 	if (!vport->link_up) {
2588 		idpf_vport_ctrl_unlock(netdev);
2589 		return -EPERM;
2590 	}
2591 
2592 	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
2593 	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
2594 		idpf_vport_ctrl_unlock(netdev);
2595 		return 0;
2596 	}
2597 
2598 	*config = vport->tstamp_config;
2599 
2600 	idpf_vport_ctrl_unlock(netdev);
2601 
2602 	return 0;
2603 }
2604 
2605 static const struct net_device_ops idpf_netdev_ops = {
2606 	.ndo_open = idpf_open,
2607 	.ndo_stop = idpf_stop,
2608 	.ndo_start_xmit = idpf_tx_start,
2609 	.ndo_features_check = idpf_features_check,
2610 	.ndo_set_rx_mode = idpf_set_rx_mode,
2611 	.ndo_validate_addr = eth_validate_addr,
2612 	.ndo_set_mac_address = idpf_set_mac,
2613 	.ndo_change_mtu = idpf_change_mtu,
2614 	.ndo_get_stats64 = idpf_get_stats64,
2615 	.ndo_set_features = idpf_set_features,
2616 	.ndo_tx_timeout = idpf_tx_timeout,
2617 	.ndo_hwtstamp_get = idpf_hwtstamp_get,
2618 	.ndo_hwtstamp_set = idpf_hwtstamp_set,
2619 	.ndo_bpf = idpf_xdp,
2620 	.ndo_xdp_xmit = idpf_xdp_xmit,
2621 };
2622