// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"
#include "xdp.h"
#include "xsk.h"

static const struct net_device_ops idpf_netdev_ops;

/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
 * @adapter: private data struct
 *
 * Return 0 on success, error on failure
 */
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;
	u16 min_vec;
	u32 i;

	mutex_lock(&adapter->vector_lock);
	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
	stack = &adapter->vector_stack;
	stack->size = adapter->num_msix_entries;
	/* Set the base and top to point at the start of the 'free pool' so
	 * that the unused vectors can be distributed on demand.
	 */
	stack->base = min_vec;
	stack->top = min_vec;

	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
	if (!stack->vec_idx) {
		mutex_unlock(&adapter->vector_lock);

		return -ENOMEM;
	}

	for (i = 0; i < stack->size; i++)
		stack->vec_idx[i] = i;

	mutex_unlock(&adapter->vector_lock);

	return 0;
}
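
/* Illustrative sketch (not part of the driver): after initialization, the
 * stack is a plain array of vector indexes. Everything below 'base' is
 * permanently reserved for the mailbox and the default vports, while
 * [top, size) forms the free pool that is handed out on demand:
 *
 *	vec_idx: [ 0 ... min_vec - 1 ][ min_vec ......... size - 1 ]
 *	             reserved pool     ^base/^top        free pool
 *
 * idpf_vector_lifo_pop() advances 'top' toward 'size';
 * idpf_vector_lifo_push() moves it back toward 'base', so a reserved
 * index can never leak into the free pool.
 */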

/**
 * idpf_deinit_vector_stack - free the MSIX vector stack
 * @adapter: private data struct
 */
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack;

	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	kfree(stack->vec_idx);
	stack->vec_idx = NULL;
	mutex_unlock(&adapter->vector_lock);
}

/**
 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
 * @adapter: adapter structure
 *
 * This will also disable interrupt mode and queue up the mailbox task. The
 * mailbox task will reschedule itself if it is not in interrupt mode.
 */
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
	/* free_irq() returns the devname passed to request_irq(), which was
	 * allocated with kasprintf() and must be freed here.
	 */
	kfree(free_irq(adapter->msix_entries[0].vector, adapter));
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}

/**
 * idpf_intr_rel - Release interrupt capabilities and free memory
 * @adapter: adapter to disable interrupts on
 */
void idpf_intr_rel(struct idpf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	idpf_mb_intr_rel_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);
	idpf_send_dealloc_vectors_msg(adapter);
	idpf_deinit_vector_stack(adapter);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	kfree(adapter->rdma_msix_entries);
	adapter->rdma_msix_entries = NULL;
}

/**
 * idpf_mb_intr_clean - Interrupt handler for the mailbox
 * @irq: interrupt number
 * @data: pointer to the adapter structure
 */
static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
{
	struct idpf_adapter *adapter = data;

	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);

	return IRQ_HANDLED;
}

/**
 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
 * @adapter: adapter to get the hardware address for register write
 */
static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
{
	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
	u32 val;

	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
	writel(val, intr->dyn_ctl);
	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
}

/**
 * idpf_mb_intr_req_irq - Request IRQ for the mailbox interrupt
 * @adapter: adapter structure to pass to the mailbox irq handler
 */
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
	int irq_num, mb_vidx = 0, err;
	char *name;

	irq_num = adapter->msix_entries[mb_vidx].vector;
	name = kasprintf(GFP_KERNEL, "%s-%s-%d",
			 dev_driver_string(&adapter->pdev->dev),
			 "Mailbox", mb_vidx);
	if (!name)
		return -ENOMEM;

	err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"IRQ request for mailbox failed, error: %d\n", err);
		kfree(name);

		return err;
	}

	set_bit(IDPF_MB_INTR_MODE, adapter->flags);

	return 0;
}

/**
 * idpf_mb_intr_init - Initialize the mailbox interrupt
 * @adapter: adapter structure to store the mailbox vector
 */
static int idpf_mb_intr_init(struct idpf_adapter *adapter)
{
	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
	adapter->irq_mb_handler = idpf_mb_intr_clean;

	return idpf_mb_intr_req_irq(adapter);
}

/**
 * idpf_vector_lifo_push - push MSIX vector index onto stack
 * @adapter: private data struct
 * @vec_idx: vector index to store
 */
static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	if (stack->top == stack->base) {
		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
			stack->top);
		return -EINVAL;
	}

	stack->vec_idx[--stack->top] = vec_idx;

	return 0;
}

/**
 * idpf_vector_lifo_pop - pop MSIX vector index from stack
 * @adapter: private data struct
 */
static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
{
	struct idpf_vector_lifo *stack = &adapter->vector_stack;

	lockdep_assert_held(&adapter->vector_lock);

	if (stack->top == stack->size) {
		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");

		return -EINVAL;
	}

	return stack->vec_idx[stack->top++];
}
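
/* Minimal usage sketch for the two helpers above; both assert via lockdep
 * that the caller holds vector_lock, so a hypothetical caller would look
 * like:
 *
 *	mutex_lock(&adapter->vector_lock);
 *	vecid = idpf_vector_lifo_pop(adapter);
 *	if (vecid >= 0)
 *		idpf_vector_lifo_push(adapter, vecid);	// hand it back
 *	mutex_unlock(&adapter->vector_lock);
 *
 * Pop returns -EINVAL when the free pool is exhausted; push returns
 * -EINVAL when pushing would spill into the reserved pool below 'base'.
 */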

/**
 * idpf_vector_stash - Store the vector indexes onto the stack
 * @adapter: private data struct
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This function is a no-op if there are no vector indexes to be stashed
 */
static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
			      struct idpf_vector_info *vec_info)
{
	int i, base = 0;
	u16 vec_idx;

	lockdep_assert_held(&adapter->vector_lock);

	if (!vec_info->num_curr_vecs)
		return;

	/* For default vports, no need to stash vectors allocated from the
	 * default pool onto the stack
	 */
	if (vec_info->default_vport)
		base = IDPF_MIN_Q_VEC;

	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
		vec_idx = q_vector_idxs[i];
		idpf_vector_lifo_push(adapter, vec_idx);
		adapter->num_avail_msix++;
	}
}

/**
 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
 * @adapter: driver specific private structure
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This is the core function to distribute the MSIX vectors acquired from the
 * OS. It expects the caller to pass the number of vectors required as well
 * as the number previously allocated. First, it stashes the previously
 * allocated vector indexes onto the stack and then figures out if it can
 * allocate the requested vectors. It may sleep while acquiring the mutex
 * lock. If the caller passes 0 as the number of requested vectors, then this
 * function just stashes the already allocated vectors and returns 0.
 *
 * Returns the actual number of vectors allocated on success, error value on
 * failure. A return of 0 means the stack had no vectors to allocate, which
 * is also a failure case for the caller.
 */
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info)
{
	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
	struct idpf_vector_lifo *stack;
	int i, j, vecid;

	mutex_lock(&adapter->vector_lock);
	stack = &adapter->vector_stack;
	num_req_vecs = vec_info->num_req_vecs;

	/* Stash interrupt vector indexes onto the stack if required */
	idpf_vector_stash(adapter, q_vector_idxs, vec_info);

	if (!num_req_vecs)
		goto rel_lock;

	if (vec_info->default_vport) {
		/* As IDPF_MIN_Q_VEC per default vport is put aside in the
		 * default pool of the stack, use them for default vports
		 */
		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
			num_req_vecs--;
		}
	}

	/* Check if the stack has enough vectors to allocate */
	max_vecs = min(adapter->num_avail_msix, num_req_vecs);

	for (j = 0; j < max_vecs; j++) {
		vecid = idpf_vector_lifo_pop(adapter);
		q_vector_idxs[num_alloc_vecs++] = vecid;
	}
	adapter->num_avail_msix -= max_vecs;

rel_lock:
	mutex_unlock(&adapter->vector_lock);

	return num_alloc_vecs;
}
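
/* Hedged example of a caller of idpf_req_rel_vector_indexes(); the
 * vec_info fields mirror the ones consumed above and the q_vector_idxs
 * array is assumed to be sized by the caller for the request:
 *
 *	struct idpf_vector_info vec_info = {
 *		.num_req_vecs	= 4,
 *		.num_curr_vecs	= 2,
 *		.index		= vport->idx,
 *		.default_vport	= vport->default_vport,
 *	};
 *	int got;
 *
 *	got = idpf_req_rel_vector_indexes(adapter, q_vector_idxs, &vec_info);
 *	// 'got' may be anything from 0 (failure) up to num_req_vecs; the
 *	// caller decides whether a partial allocation is usable.
 */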

/**
 * idpf_intr_req - Request interrupt capabilities
 * @adapter: adapter to enable interrupts on
 *
 * Returns 0 on success, negative on failure
 */
int idpf_intr_req(struct idpf_adapter *adapter)
{
	u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
	u16 default_vports = idpf_get_default_vports(adapter);
	int num_q_vecs, total_vecs, num_vec_ids;
	int min_vectors, actual_vecs, err;
	unsigned int vector;
	u16 *vecids;
	int i;

	total_vecs = idpf_get_reserved_vecs(adapter);
	num_lan_vecs = total_vecs;
	if (idpf_is_rdma_cap_ena(adapter)) {
		num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
		min_rdma_vecs = IDPF_MIN_RDMA_VEC;

		if (!num_rdma_vecs) {
			/* If idpf_get_reserved_rdma_vecs() returns 0, vectors
			 * are pulled from the LAN pool.
			 */
			num_rdma_vecs = min_rdma_vecs;
		} else if (num_rdma_vecs < min_rdma_vecs) {
			dev_err(&adapter->pdev->dev,
				"Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
				min_rdma_vecs, num_rdma_vecs);
			return -EINVAL;
		}
	}

	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;

	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);

		return -EAGAIN;
	}

	min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
	min_vectors = min_lan_vecs + min_rdma_vecs;
	actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
					    total_vecs, PCI_IRQ_MSIX);
	if (actual_vecs < 0) {
		dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
			min_vectors);
		err = actual_vecs;
		goto send_dealloc_vecs;
	}

	if (idpf_is_rdma_cap_ena(adapter)) {
		if (actual_vecs < total_vecs) {
			dev_warn(&adapter->pdev->dev,
				 "%d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
				 total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
			num_rdma_vecs = IDPF_MIN_RDMA_VEC;
		}

		adapter->rdma_msix_entries = kzalloc_objs(struct msix_entry,
							  num_rdma_vecs);
		if (!adapter->rdma_msix_entries) {
			err = -ENOMEM;
			goto free_irq;
		}
	}

	num_lan_vecs = actual_vecs - num_rdma_vecs;
	adapter->msix_entries = kzalloc_objs(struct msix_entry, num_lan_vecs);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto free_rdma_msix;
	}

	adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);

	vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids) {
		err = -ENOMEM;
		goto free_msix;
	}

	num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
				       &adapter->req_vec_chunks->vchunks);
	if (num_vec_ids < actual_vecs) {
		err = -EINVAL;
		goto free_vecids;
	}

	for (vector = 0; vector < num_lan_vecs; vector++) {
		adapter->msix_entries[vector].entry = vecids[vector];
		adapter->msix_entries[vector].vector =
			pci_irq_vector(adapter->pdev, vector);
	}
	for (i = 0; i < num_rdma_vecs; vector++, i++) {
		adapter->rdma_msix_entries[i].entry = vecids[vector];
		adapter->rdma_msix_entries[i].vector =
			pci_irq_vector(adapter->pdev, vector);
	}

	/* 'num_avail_msix' is used to distribute excess vectors to the vports
	 * after accounting for the minimum vectors required per default
	 * vport
	 */
	adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
	adapter->num_msix_entries = num_lan_vecs;
	if (idpf_is_rdma_cap_ena(adapter))
		adapter->num_rdma_msix_entries = num_rdma_vecs;

	/* Fill MSIX vector lifo stack with vector indexes */
	err = idpf_init_vector_stack(adapter);
	if (err)
		goto free_vecids;

	err = idpf_mb_intr_init(adapter);
	if (err)
		goto deinit_vec_stack;
	idpf_mb_irq_enable(adapter);
	kfree(vecids);

	return 0;

deinit_vec_stack:
	idpf_deinit_vector_stack(adapter);
free_vecids:
	kfree(vecids);
free_msix:
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
free_rdma_msix:
	kfree(adapter->rdma_msix_entries);
	adapter->rdma_msix_entries = NULL;
free_irq:
	pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
	idpf_send_dealloc_vectors_msg(adapter);

	return err;
}
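
/* The fallback logic above leans on the min/max contract of
 * pci_alloc_irq_vectors(): the call succeeds with any count in
 * [min_vectors, total_vecs] and the driver then splits whatever it got
 * between LAN and RDMA. A standalone sketch of the pattern (helper name
 * hypothetical):
 *
 *	int n = pci_alloc_irq_vectors(pdev, min_needed, max_wanted,
 *				      PCI_IRQ_MSIX);
 *	if (n < 0)
 *		return n;			// not even the minimum
 *	if (n < max_wanted)
 *		scale_down_resources(n);	// run with fewer vectors
 */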

/**
 * idpf_del_all_flow_steer_filters - Delete all flow steer filters in list
 * @vport: main vport struct
 *
 * Takes the flow_steer_list_lock spinlock and deletes all filters.
 */
static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_fsteer_fltr *f, *ftmp;

	vport_config = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&vport_config->flow_steer_list_lock);
	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
				 list) {
		list_del(&f->list);
		kfree(f);
	}
	vport_config->user_config.num_fsteer_fltrs = 0;
	spin_unlock_bh(&vport_config->flow_steer_list_lock);
}

/**
 * idpf_find_mac_filter - Search filter list for specific mac filter
 * @vconfig: Vport config structure
 * @macaddr: The MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_filter_list_lock.
 **/
static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
						    const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}

	return NULL;
}

/**
 * __idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport_config: Vport config structure
 * @macaddr: The MAC address
 *
 * Returns 0 on success, error value on failure
 **/
static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	spin_lock_bh(&vport_config->mac_filter_list_lock);
	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		list_del(&f->list);
		kfree(f);
	}
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return 0;
}

/**
 * idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Removes the filter from the list and, if the interface is up, tells
 * hardware about the removed filter.
 **/
static int idpf_del_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = np->adapter->vport_config[np->vport_idx];

	spin_lock_bh(&vport_config->mac_filter_list_lock);
	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		f->remove = true;
	} else {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -EINVAL;
	}
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	if (test_bit(IDPF_VPORT_UP, np->state)) {
		int err;

		err = idpf_add_del_mac_filters(np->adapter, vport_config,
					       vport->default_mac_addr,
					       np->vport_id, false, async);
		if (err)
			return err;
	}

	return __idpf_del_mac_filter(vport_config, macaddr);
}

/**
 * __idpf_add_mac_filter - Add mac filter helper function
 * @vport_config: Vport config structure
 * @macaddr: Address to add
 *
 * Takes the mac_filter_list_lock spinlock to add a new filter to the list.
 */
static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
				 const u8 *macaddr)
{
	struct idpf_mac_filter *f;

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	f = idpf_find_mac_filter(vport_config, macaddr);
	if (f) {
		f->remove = false;
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

	f = kzalloc_obj(*f, GFP_ATOMIC);
	if (!f) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	ether_addr_copy(f->macaddr, macaddr);
	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
	f->add = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	return 0;
}
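
/* The MAC filter list follows a mark-then-sync protocol: 'add' and
 * 'remove' are sticky hints that idpf_add_del_mac_filters() is assumed to
 * consume later when it builds the virtchnl message. A rough lifecycle
 * sketch:
 *
 *	__idpf_add_mac_filter()      f->add = true    (inserted in list)
 *	idpf_add_del_mac_filters()   filter programmed into HW
 *	idpf_del_mac_filter()        f->remove = true (marked under lock)
 *	idpf_add_del_mac_filters()   HW delete, then list removal
 */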

/**
 * idpf_add_mac_filter - Add a mac filter to the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Returns 0 on success or error on failure. If the interface is up, we'll
 * also send the virtchnl message to tell hardware about the filter.
 **/
static int idpf_add_mac_filter(struct idpf_vport *vport,
			       struct idpf_netdev_priv *np,
			       const u8 *macaddr, bool async)
{
	struct idpf_vport_config *vport_config;
	int err;

	vport_config = np->adapter->vport_config[np->vport_idx];
	err = __idpf_add_mac_filter(vport_config, macaddr);
	if (err)
		return err;

	if (test_bit(IDPF_VPORT_UP, np->state))
		err = idpf_add_del_mac_filters(np->adapter, vport_config,
					       vport->default_mac_addr,
					       np->vport_id, true, async);

	return err;
}

/**
 * idpf_del_all_mac_filters - Delete all MAC filters in list
 * @vport: main vport struct
 *
 * Takes the mac_filter_list_lock spinlock and deletes all filters.
 */
static void idpf_del_all_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f, *ftmp;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
				 list) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);
}

/**
 * idpf_restore_mac_filters - Re-add all MAC filters in list
 * @vport: main vport struct
 *
 * Takes the mac_filter_list_lock spinlock. Sets the add field to true so
 * the filters are resynced back to HW.
 */
static void idpf_restore_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
		f->add = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	idpf_add_del_mac_filters(vport->adapter, vport_config,
				 vport->default_mac_addr, vport->vport_id,
				 true, false);
}

/**
 * idpf_remove_mac_filters - Remove all MAC filters in list
 * @vport: main vport struct
 *
 * Takes the mac_filter_list_lock spinlock. Sets the remove field to true so
 * the filters are removed from HW.
 */
static void idpf_remove_mac_filters(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
		f->remove = true;

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	idpf_add_del_mac_filters(vport->adapter, vport_config,
				 vport->default_mac_addr, vport->vport_id,
				 false, false);
}

/**
 * idpf_deinit_mac_addr - deinitialize MAC address for vport
 * @vport: main vport structure
 */
static void idpf_deinit_mac_addr(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config;
	struct idpf_mac_filter *f;

	vport_config = vport->adapter->vport_config[vport->idx];

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
	if (f) {
		list_del(&f->list);
		kfree(f);
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);
}

/**
 * idpf_init_mac_addr - initialize MAC address for vport
 * @vport: main vport structure
 * @netdev: pointer to netdev struct associated with this vport
 */
static int idpf_init_mac_addr(struct idpf_vport *vport,
			      struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_adapter *adapter = vport->adapter;
	int err;

	if (is_valid_ether_addr(vport->default_mac_addr)) {
		eth_hw_addr_set(netdev, vport->default_mac_addr);
		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);

		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
					   false);
	}

	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
			     VIRTCHNL2_CAP_MACFILTER)) {
		dev_err(&adapter->pdev->dev,
			"MAC address is not provided and capability is not set\n");

		return -EINVAL;
	}

	eth_hw_addr_random(netdev);
	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
	if (err)
		return err;

	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
		 vport->default_mac_addr, netdev->dev_addr);
	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);

	return 0;
}
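
/* Sketch of the address-selection order implemented above (pseudocode,
 * not additional driver logic): a valid firmware-provided address wins;
 * otherwise a random locally administered address is generated, but only
 * when the device can actually program MAC filters:
 *
 *	if (is_valid_ether_addr(fw_addr))
 *		use fw_addr
 *	else if (cap(VIRTCHNL2_CAP_MACFILTER))
 *		eth_hw_addr_random(netdev)	// random LAA
 *	else
 *		fail with -EINVAL
 */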

static void idpf_detach_and_close(struct idpf_adapter *adapter)
{
	int max_vports = adapter->max_vports;

	for (int i = 0; i < max_vports; i++) {
		struct net_device *netdev = adapter->netdevs[i];

		/* If the interface is in detached state, that means the
		 * previous reset was not handled successfully for this
		 * vport.
		 */
		if (!netif_device_present(netdev))
			continue;

		/* Hold RTNL to protect racing with callbacks */
		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev)) {
			set_bit(IDPF_VPORT_UP_REQUESTED,
				adapter->vport_config[i]->flags);
			dev_close(netdev);
		}
		rtnl_unlock();
	}
}

static void idpf_attach_and_open(struct idpf_adapter *adapter)
{
	int max_vports = adapter->max_vports;

	for (int i = 0; i < max_vports; i++) {
		struct idpf_vport *vport = adapter->vports[i];
		struct idpf_vport_config *vport_config;
		struct net_device *netdev;

		/* In case of a critical error in the init task, the vport
		 * will be freed. Only continue to restore the netdevs
		 * if the vport is allocated.
		 */
		if (!vport)
			continue;

		/* No need for RTNL on attach as this function is called
		 * following detach and dev_close(). We do take RTNL for
		 * dev_open() below as it can race with external callbacks
		 * following the call to netif_device_attach().
		 */
		netdev = adapter->netdevs[i];
		netif_device_attach(netdev);
		vport_config = adapter->vport_config[vport->idx];
		if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
				       vport_config->flags)) {
			rtnl_lock();
			dev_open(netdev, NULL);
			rtnl_unlock();
		}
	}
}

/**
 * idpf_cfg_netdev - Allocate, configure and register a netdev
 * @vport: main vport structure
 *
 * Returns 0 on success, negative value on failure.
 */
static int idpf_cfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	netdev_features_t other_offloads = 0;
	netdev_features_t csum_offloads = 0;
	netdev_features_t tso_offloads = 0;
	netdev_features_t dflt_features;
	struct idpf_netdev_priv *np;
	struct net_device *netdev;
	u16 idx = vport->idx;
	int err;

	vport_config = adapter->vport_config[idx];

	/* It's possible we already have a netdev allocated and registered for
	 * this vport
	 */
	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
		netdev = adapter->netdevs[idx];
		np = netdev_priv(netdev);
		np->vport = vport;
		np->vport_idx = vport->idx;
		np->vport_id = vport->vport_id;
		np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
		vport->netdev = netdev;

		return idpf_init_mac_addr(vport, netdev);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
				    vport_config->max_q.max_txq,
				    vport_config->max_q.max_rxq);
	if (!netdev)
		return -ENOMEM;

	vport->netdev = netdev;
	np = netdev_priv(netdev);
	np->vport = vport;
	np->adapter = adapter;
	np->vport_idx = vport->idx;
	np->vport_id = vport->vport_id;
	np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
	np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);

	spin_lock_init(&np->stats_lock);

	err = idpf_init_mac_addr(vport, netdev);
	if (err) {
		free_netdev(vport->netdev);
		vport->netdev = NULL;

		return err;
	}

	/* assign netdev_ops */
	netdev->netdev_ops = &idpf_netdev_ops;

	/* set the watchdog timeout to 5 seconds */
	netdev->watchdog_timeo = 5 * HZ;

	netdev->dev_port = idx;

	/* configure the MTU limits */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = vport->max_mtu;

	dflt_features = NETIF_F_SG	|
			NETIF_F_HIGHDMA;

	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		dflt_features |= NETIF_F_RXHASH;
	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
			    VIRTCHNL2_CAP_FLOW_STEER) &&
	    idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
		dflt_features |= NETIF_F_NTUPLE;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
		csum_offloads |= NETIF_F_IP_CSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
		csum_offloads |= NETIF_F_IPV6_CSUM;
	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
		csum_offloads |= NETIF_F_RXCSUM;
	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
		csum_offloads |= NETIF_F_SCTP_CRC;

	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
		tso_offloads |= NETIF_F_TSO;
	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
		tso_offloads |= NETIF_F_TSO6;
	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
				VIRTCHNL2_CAP_SEG_IPV4_UDP |
				VIRTCHNL2_CAP_SEG_IPV6_UDP))
		tso_offloads |= NETIF_F_GSO_UDP_L4;
	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
		other_offloads |= NETIF_F_GRO_HW;
	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
		other_offloads |= NETIF_F_LOOPBACK;

	netdev->features |= dflt_features | csum_offloads | tso_offloads;
	netdev->hw_features |= netdev->features | other_offloads;
	netdev->vlan_features |= netdev->features | other_offloads;
	netdev->hw_enc_features |= dflt_features | other_offloads;
	idpf_xdp_set_features(vport);

	idpf_set_ethtool_ops(netdev);
	netif_set_affinity_auto(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	/* carrier off on init to avoid Tx hangs */
	netif_carrier_off(netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(netdev);

	/* The vport can be arbitrarily released so we need to also track
	 * netdevs in the adapter struct
	 */
	adapter->netdevs[idx] = netdev;

	return 0;
}

/**
 * idpf_get_free_slot - get the next free (NULL) slot index in the vports
 *			array
 * @adapter: adapter in which to look for a free vport slot
 */
static int idpf_get_free_slot(struct idpf_adapter *adapter)
{
	unsigned int i;

	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->vports[i])
			return i;
	}

	return IDPF_NO_FREE_SLOT;
}

/**
 * idpf_remove_features - Turn off feature configs
 * @vport: virtual port structure
 */
static void idpf_remove_features(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
		idpf_remove_mac_filters(vport);
}

/**
 * idpf_vport_stop - Disable a vport
 * @vport: vport to disable
 * @rtnl: whether to take RTNL lock
 */
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_queue_id_reg_info *chunks;
	u32 vport_id = vport->vport_id;

	if (!test_bit(IDPF_VPORT_UP, np->state))
		return;

	if (rtnl)
		rtnl_lock();

	netif_carrier_off(vport->netdev);
	netif_tx_disable(vport->netdev);

	chunks = &adapter->vport_config[vport->idx]->qid_reg_info;

	idpf_send_disable_vport_msg(adapter, vport_id);
	idpf_send_disable_queues_msg(vport);
	idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
	/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues has changed, for example via ethtool
	 * set channels, we delete the queues and then add them back instead
	 * of deleting and reallocating the vport.
	 */
	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
		idpf_send_delete_queues_msg(adapter, chunks, vport_id);

	idpf_remove_features(vport);

	vport->link_up = false;
	idpf_vport_intr_deinit(vport, rsrc);
	idpf_xdp_rxq_info_deinit_all(rsrc);
	idpf_vport_queues_rel(vport, rsrc);
	idpf_vport_intr_rel(rsrc);
	clear_bit(IDPF_VPORT_UP, np->state);

	if (rtnl)
		rtnl_unlock();
}

/**
 * idpf_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state.  The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int idpf_stop(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;

	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
		return 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	idpf_vport_stop(vport, false);

	idpf_vport_ctrl_unlock(netdev);

	return 0;
}

/**
 * idpf_decfg_netdev - Unregister the netdev
 * @vport: vport for which netdev to be unregistered
 */
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	u16 idx = vport->idx;

	if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
			       adapter->vport_config[idx]->flags)) {
		unregister_netdev(vport->netdev);
		free_netdev(vport->netdev);
	}
	vport->netdev = NULL;

	adapter->netdevs[idx] = NULL;
}

/**
 * idpf_vport_rel - Delete a vport and free its resources
 * @vport: the vport being removed
 */
static void idpf_vport_rel(struct idpf_vport *vport)
{
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	struct idpf_vector_info vec_info;
	struct idpf_rss_data *rss_data;
	struct idpf_vport_max_q max_q;
	u16 idx = vport->idx;

	vport_config = adapter->vport_config[vport->idx];
	rss_data = &vport_config->user_config.rss_data;
	idpf_deinit_rss_lut(rss_data);
	kfree(rss_data->rss_key);
	rss_data->rss_key = NULL;

	idpf_send_destroy_vport_msg(adapter, vport->vport_id);

	/* Release all max queues allocated to the adapter's pool */
	max_q.max_rxq = vport_config->max_q.max_rxq;
	max_q.max_txq = vport_config->max_q.max_txq;
	max_q.max_bufq = vport_config->max_q.max_bufq;
	max_q.max_complq = vport_config->max_q.max_complq;
	idpf_vport_dealloc_max_qs(adapter, &max_q);

	/* Release all the allocated vectors on the stack */
	vec_info.num_req_vecs = 0;
	vec_info.num_curr_vecs = rsrc->num_q_vectors;
	vec_info.default_vport = vport->default_vport;

	idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);

	kfree(rsrc->q_vector_idxs);
	rsrc->q_vector_idxs = NULL;

	idpf_vport_deinit_queue_reg_chunks(vport_config);

	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	kfree(vport);
	adapter->num_alloc_vports--;
}

/**
 * idpf_vport_dealloc - cleanup and release a given vport
 * @vport: pointer to idpf vport structure
 *
 * returns nothing
 */
static void idpf_vport_dealloc(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	unsigned int i = vport->idx;

	idpf_idc_deinit_vport_aux_device(vport->vdev_info);

	idpf_deinit_mac_addr(vport);

	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
		idpf_vport_stop(vport, true);
		idpf_decfg_netdev(vport);
	}
	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
		idpf_del_all_mac_filters(vport);
		idpf_del_all_flow_steer_filters(vport);
	}

	if (adapter->netdevs[i]) {
		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);

		np->vport = NULL;
	}

	idpf_vport_rel(vport);

	adapter->vports[i] = NULL;
	adapter->next_vport = idpf_get_free_slot(adapter);
}

/**
 * idpf_is_hsplit_supported - check whether the header split is supported
 * @vport: virtual port to check the capability for
 *
 * Return: true if it's supported by the HW/FW, false if not.
 */
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
	return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
	       idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
				   IDPF_CAP_HSPLIT);
}

/**
 * idpf_vport_get_hsplit - get the current header split feature state
 * @vport: virtual port to query the state for
 *
 * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
 *         ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
 *         ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
 */
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
{
	const struct idpf_vport_user_config_data *config;

	if (!idpf_is_hsplit_supported(vport))
		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;

	config = &vport->adapter->vport_config[vport->idx]->user_config;

	return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
	       ETHTOOL_TCP_DATA_SPLIT_ENABLED :
	       ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

/**
 * idpf_vport_set_hsplit - enable or disable header split on a given vport
 * @vport: virtual port to configure
 * @val: Ethtool flag controlling the header split state
 *
 * Return: true on success, false if not supported by the HW.
 */
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
{
	struct idpf_vport_user_config_data *config;

	if (!idpf_is_hsplit_supported(vport))
		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;

	config = &vport->adapter->vport_config[vport->idx]->user_config;

	switch (val) {
	case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
		/* Default is to enable */
	case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
		__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
		return true;
	case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
		__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
		return true;
	default:
		return false;
	}
}
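
/* Header split is exposed through the standard ethtool ring parameter, so
 * the toggle above is typically reached from userspace with (assuming an
 * ethtool new enough to know the option):
 *
 *	# ethtool -G <ifname> tcp-data-split on
 *	# ethtool -g <ifname>
 *
 * An ETHTOOL_TCP_DATA_SPLIT_UNKNOWN value arriving here means "driver
 * default", which idpf treats as enabled.
 */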

/**
 * idpf_vport_alloc - Allocates the next available struct vport in the adapter
 * @adapter: board private structure
 * @max_q: vport max queue info
 *
 * returns a pointer to a vport on success, NULL on failure.
 */
static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
					   struct idpf_vport_max_q *max_q)
{
	struct idpf_rss_data *rss_data;
	u16 idx = adapter->next_vport;
	struct idpf_q_vec_rsrc *rsrc;
	struct idpf_vport *vport;
	u16 num_max_q;
	int err;

	if (idx == IDPF_NO_FREE_SLOT)
		return NULL;

	vport = kzalloc_obj(*vport);
	if (!vport)
		return vport;

	num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
	if (!adapter->vport_config[idx]) {
		struct idpf_vport_config *vport_config;
		struct idpf_q_coalesce *q_coal;

		vport_config = kzalloc_obj(*vport_config);
		if (!vport_config) {
			kfree(vport);

			return NULL;
		}

		q_coal = kzalloc_objs(*q_coal, num_max_q);
		if (!q_coal) {
			kfree(vport_config);
			kfree(vport);

			return NULL;
		}
		for (int i = 0; i < num_max_q; i++) {
			q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
			q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
		}
		vport_config->user_config.q_coalesce = q_coal;

		adapter->vport_config[idx] = vport_config;
	}

	vport->idx = idx;
	vport->adapter = adapter;
	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
	vport->default_vport = adapter->num_alloc_vports <
			       idpf_get_default_vports(adapter);

	rsrc = &vport->dflt_qv_rsrc;
	rsrc->dev = &adapter->pdev->dev;
	rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
	if (!rsrc->q_vector_idxs)
		goto free_vport;

	err = idpf_vport_init(vport, max_q);
	if (err)
		goto free_vector_idxs;

	/* LUT and key are both initialized here. The key is not strictly
	 * dependent on how many queues we have. If we change the number of
	 * queues and a soft reset is initiated, the LUT will be freed and a
	 * new LUT will be allocated as per the updated number of queues
	 * during vport bringup. However, the key remains the same for as
	 * long as the vport exists.
	 */
	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
	if (!rss_data->rss_key)
		goto free_qreg_chunks;

	/* Initialize default RSS key */
	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);

	/* Initialize default RSS LUT */
	err = idpf_init_rss_lut(vport, rss_data);
	if (err)
		goto free_rss_key;

	/* fill vport slot in the adapter struct */
	adapter->vports[idx] = vport;
	adapter->vport_ids[idx] = idpf_get_vport_id(vport);

	adapter->num_alloc_vports++;
	/* prepare adapter->next_vport for next use */
	adapter->next_vport = idpf_get_free_slot(adapter);

	return vport;

free_rss_key:
	kfree(rss_data->rss_key);
free_qreg_chunks:
	idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
	kfree(rsrc->q_vector_idxs);
free_vport:
	kfree(vport);

	return NULL;
}

/**
 * idpf_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static void idpf_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *stats)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	spin_lock_bh(&np->stats_lock);
	*stats = np->netstats;
	spin_unlock_bh(&np->stats_lock);
}
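
/* np->netstats is a plain snapshot guarded by stats_lock; the writer side
 * (the stats virtchnl completion) is assumed to update it under the same
 * lock, roughly:
 *
 *	spin_lock_bh(&np->stats_lock);
 *	np->netstats.rx_packets = le64_to_cpu(stats_msg->rx_unicast);
 *	// ... remaining counters ...
 *	spin_unlock_bh(&np->stats_lock);
 *
 * 'stats_msg' is a hypothetical virtchnl stats payload.
 */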

/**
 * idpf_statistics_task - Delayed task to get statistics over mailbox
 * @work: work_struct handle to our data
 */
void idpf_statistics_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;
	int i;

	adapter = container_of(work, struct idpf_adapter, stats_task.work);

	for (i = 0; i < adapter->max_vports; i++) {
		struct idpf_vport *vport = adapter->vports[i];

		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
			idpf_send_get_stats_msg(netdev_priv(vport->netdev),
						&vport->port_stats);
	}

	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
			   msecs_to_jiffies(10000));
}

/**
 * idpf_mbx_task - Delayed task to handle mailbox responses
 * @work: work_struct handle
 */
void idpf_mbx_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, mbx_task.work);

	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
		idpf_mb_irq_enable(adapter);
	else
		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
				   usecs_to_jiffies(300));

	idpf_recv_mb_msg(adapter, adapter->hw.arq);
}
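
/* Two operating modes fall out of the logic above: with IDPF_MB_INTR_MODE
 * set, the task runs once per mailbox interrupt and simply re-arms the
 * IRQ; without it, the task degenerates into ~300 us self-polling:
 *
 *	interrupt mode: IRQ -> idpf_mb_intr_clean() -> mbx_task -> re-enable
 *	polling mode:   mbx_task -> requeue(300 us) -> mbx_task -> ...
 */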

/**
 * idpf_service_task - Delayed task for detecting hardware reset events
 * @work: work_struct handle to our data
 */
void idpf_service_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, serv_task.work);

	if (idpf_is_reset_detected(adapter) &&
	    !idpf_is_reset_in_prog(adapter) &&
	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
		dev_info(&adapter->pdev->dev, "HW reset detected\n");
		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
		queue_delayed_work(adapter->vc_event_wq,
				   &adapter->vc_event_task,
				   msecs_to_jiffies(10));
	}

	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
			   msecs_to_jiffies(300));
}

/**
 * idpf_restore_features - Restore feature configs
 * @vport: virtual port structure
 */
static void idpf_restore_features(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;

	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
		idpf_restore_mac_filters(vport);
}

/**
 * idpf_set_real_num_queues - set number of queues for netdev
 * @vport: virtual port structure
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
	int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;

	err = netif_set_real_num_rx_queues(vport->netdev,
					   vport->dflt_qv_rsrc.num_rxq);
	if (err)
		return err;

	return netif_set_real_num_tx_queues(vport->netdev, txq);
}

/**
 * idpf_up_complete - Complete interface up sequence
 * @vport: virtual port structure
 */
static void idpf_up_complete(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);

	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
		netif_carrier_on(vport->netdev);
		netif_tx_start_all_queues(vport->netdev);
	}

	set_bit(IDPF_VPORT_UP, np->state);
}

/**
 * idpf_rx_init_buf_tail - Write initial buffer ring tail value
 * @rsrc: pointer to queue and vector resources
 */
static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];

		if (idpf_is_queue_model_split(rsrc->rxq_model)) {
			for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
				const struct idpf_buf_queue *q =
					&grp->splitq.bufq_sets[j].bufq;

				writel(q->next_to_alloc, q->tail);
			}
		} else {
			for (unsigned int j = 0; j < grp->singleq.num_rxq; j++) {
				const struct idpf_rx_queue *q =
					grp->singleq.rxqs[j];

				writel(q->next_to_alloc, q->tail);
			}
		}
	}
}

/**
 * idpf_vport_open - Bring up a vport
 * @vport: vport to bring up
 * @rtnl: whether to take RTNL lock
 */
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vport_config *vport_config;
	struct idpf_queue_id_reg_info *chunks;
	struct idpf_rss_data *rss_data;
	u32 vport_id = vport->vport_id;
	int err;

	if (test_bit(IDPF_VPORT_UP, np->state))
		return -EBUSY;

	if (rtnl)
		rtnl_lock();

	/* we do not allow interface up just yet */
	netif_carrier_off(vport->netdev);

	err = idpf_vport_intr_alloc(vport, rsrc);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
			vport->vport_id, err);
		goto err_rtnl_unlock;
	}

	err = idpf_vport_queues_alloc(vport, rsrc);
	if (err)
		goto intr_rel;

	vport_config = adapter->vport_config[vport->idx];
	chunks = &vport_config->qid_reg_info;

	err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
			vport->vport_id, err);
		goto queues_rel;
	}

	err = idpf_vport_intr_init(vport, rsrc);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
			vport->vport_id, err);
		goto queues_rel;
	}

	err = idpf_queue_reg_init(vport, rsrc, chunks);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	err = idpf_rx_bufs_init_all(vport, rsrc);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
			vport->vport_id, err);
		goto intr_deinit;
	}

	idpf_rx_init_buf_tail(rsrc);

	err = idpf_xdp_rxq_info_init_all(rsrc);
	if (err) {
		netdev_err(vport->netdev,
			   "Failed to initialize XDP RxQ info for vport %u: %pe\n",
			   vport->vport_id, ERR_PTR(err));
		goto intr_deinit;
	}

	idpf_vport_intr_ena(vport, rsrc);

	err = idpf_send_config_queues_msg(adapter, rsrc, vport_id);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u: %d\n",
			vport->vport_id, err);
		goto rxq_deinit;
	}

	err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id,
						   true);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
			vport->vport_id, err);
		goto rxq_deinit;
	}

	err = idpf_send_enable_queues_msg(vport);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
			vport->vport_id, err);
		goto unmap_queue_vectors;
	}

	err = idpf_send_enable_vport_msg(adapter, vport_id);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
			vport->vport_id, err);
		err = -EAGAIN;
		goto disable_queues;
	}

	idpf_restore_features(vport);

	rss_data = &vport_config->user_config.rss_data;
	err = idpf_config_rss(vport, rss_data);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
			vport->vport_id, err);
		goto disable_vport;
	}

	idpf_up_complete(vport);

	if (rtnl)
		rtnl_unlock();

	return 0;

disable_vport:
	idpf_send_disable_vport_msg(adapter, vport_id);
disable_queues:
	idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
	idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
rxq_deinit:
	idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
	idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
	idpf_vport_queues_rel(vport, rsrc);
intr_rel:
	idpf_vport_intr_rel(rsrc);

err_rtnl_unlock:
	if (rtnl)
		rtnl_unlock();

	return err;
}

/**
 * idpf_init_task - Delayed initialization task
 * @work: work_struct handle to our data
 *
 * The init task finishes up pending work started in probe. Due to the
 * asynchronous nature in which the driver communicates with the device, we
 * may have to wait several milliseconds to get a response.  Instead of busy
 * polling in probe, pulling this work out into a delayed work task prevents
 * us from bogging down the whole system waiting for a response from
 * hardware.
 */
void idpf_init_task(struct work_struct *work)
{
	struct idpf_vport_config *vport_config;
	struct idpf_vport_max_q max_q;
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	u16 num_default_vports;
	struct pci_dev *pdev;
	bool default_vport;
	int index, err;

	adapter = container_of(work, struct idpf_adapter, init_task.work);

	num_default_vports = idpf_get_default_vports(adapter);
	if (adapter->num_alloc_vports < num_default_vports)
		default_vport = true;
	else
		default_vport = false;

	err = idpf_vport_alloc_max_qs(adapter, &max_q);
	if (err)
		goto unwind_vports;

	err = idpf_send_create_vport_msg(adapter, &max_q);
	if (err) {
		idpf_vport_dealloc_max_qs(adapter, &max_q);
		goto unwind_vports;
	}

	pdev = adapter->pdev;
	vport = idpf_vport_alloc(adapter, &max_q);
	if (!vport) {
		err = -EFAULT;
		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
			err);
		idpf_vport_dealloc_max_qs(adapter, &max_q);
		goto unwind_vports;
	}

	index = vport->idx;
	vport_config = adapter->vport_config[index];

	spin_lock_init(&vport_config->mac_filter_list_lock);
	spin_lock_init(&vport_config->flow_steer_list_lock);

	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
	INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);

	err = idpf_check_supported_desc_ids(vport);
	if (err) {
		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
		goto unwind_vports;
	}

	if (idpf_cfg_netdev(vport))
		goto unwind_vports;

	/* Requeue the init task and return until all the default vports
	 * are created
	 */
	if (adapter->num_alloc_vports < num_default_vports) {
		queue_delayed_work(adapter->init_wq, &adapter->init_task,
				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));

		return;
	}

	for (index = 0; index < adapter->max_vports; index++) {
		struct net_device *netdev = adapter->netdevs[index];
		struct idpf_vport_config *vport_config;

		vport_config = adapter->vport_config[index];

		if (!netdev ||
		    test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
			continue;

		err = register_netdev(netdev);
		if (err) {
			dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
				index, ERR_PTR(err));
			continue;
		}
		set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
	}

	/* Clear the reset and load bits as all vports are created */
	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
	clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	/* Start the statistics task now */
	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return;

unwind_vports:
	if (default_vport) {
		for (index = 0; index < adapter->max_vports; index++) {
			if (adapter->vports[index])
				idpf_vport_dealloc(adapter->vports[index]);
		}
	}
	/* Cleanup after vc_core_init, which has no way of knowing the
	 * init task failed on driver load.
	 */
	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
		cancel_delayed_work_sync(&adapter->serv_task);
		cancel_delayed_work_sync(&adapter->mbx_task);
	}
	idpf_ptp_release(adapter);

	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}
1757 
1758 /**
1759  * idpf_sriov_ena - Enable or change number of VFs
1760  * @adapter: private data struct
1761  * @num_vfs: number of VFs to allocate
1762  */
1763 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
1764 {
1765 	struct device *dev = &adapter->pdev->dev;
1766 	int err;
1767 
1768 	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
1769 	if (err) {
1770 		dev_err(dev, "Failed to allocate VFs: %d\n", err);
1771 
1772 		return err;
1773 	}
1774 
1775 	err = pci_enable_sriov(adapter->pdev, num_vfs);
1776 	if (err) {
1777 		idpf_send_set_sriov_vfs_msg(adapter, 0);
1778 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1779 
1780 		return err;
1781 	}
1782 
1783 	adapter->num_vfs = num_vfs;
1784 
1785 	return num_vfs;
1786 }
1787 
1788 /**
1789  * idpf_sriov_configure - Configure the requested VFs
1790  * @pdev: pointer to a pci_dev structure
1791  * @num_vfs: number of vfs to allocate
1792  *
1793  * Enable or change the number of VFs. Called when the user updates the number
1794  * of VFs in sysfs.
1795  **/
1796 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
1797 {
1798 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
1799 
1800 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
1801 		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
1802 
1803 		return -EOPNOTSUPP;
1804 	}
1805 
1806 	if (num_vfs)
1807 		return idpf_sriov_ena(adapter, num_vfs);
1808 
1809 	if (pci_vfs_assigned(pdev)) {
1810 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
1811 
1812 		return -EBUSY;
1813 	}
1814 
1815 	pci_disable_sriov(adapter->pdev);
1816 	idpf_send_set_sriov_vfs_msg(adapter, 0);
1817 	adapter->num_vfs = 0;
1818 
1819 	return 0;
1820 }
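
/*
 * Illustrative only (not part of the driver): userspace reaches the callback
 * above through the standard SR-IOV sysfs attribute, roughly:
 *
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *		-> idpf_sriov_configure(pdev, 4), enabling 4 VFs
 *	echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *		-> idpf_sriov_configure(pdev, 0), freeing them unless a VF is
 *		   still assigned to a VM (-EBUSY in that case)
 */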
1821 
1822 /**
1823  * idpf_deinit_task - Device deinit routine
1824  * @adapter: Driver specific private structure
1825  *
 * Extended remove logic which will also be used for hard reset
1828  */
1829 void idpf_deinit_task(struct idpf_adapter *adapter)
1830 {
1831 	unsigned int i;
1832 
	/* Wait until the init_task is done, else this thread might release
	 * the resources first and the init task might end up in a bad state
	 */
1836 	cancel_delayed_work_sync(&adapter->init_task);
1837 
1838 	if (!adapter->vports)
1839 		return;
1840 
1841 	cancel_delayed_work_sync(&adapter->stats_task);
1842 
1843 	for (i = 0; i < adapter->max_vports; i++) {
1844 		if (adapter->vports[i])
1845 			idpf_vport_dealloc(adapter->vports[i]);
1846 	}
1847 }
1848 
1849 /**
1850  * idpf_check_reset_complete - check that reset is complete
1851  * @hw: pointer to hw struct
1852  * @reset_reg: struct with reset registers
1853  *
1854  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
1855  **/
1856 static int idpf_check_reset_complete(struct idpf_hw *hw,
1857 				     struct idpf_reset_reg *reset_reg)
1858 {
1859 	struct idpf_adapter *adapter = hw->back;
1860 	int i;
1861 
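	/* Poll for up to 2000 iterations of 5-10 ms each, i.e. a worst case
	 * of roughly 10-20 seconds, before declaring the reset failed.
	 */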
1862 	for (i = 0; i < 2000; i++) {
1863 		u32 reg_val = readl(reset_reg->rstat);
1864 
		/* 0xFFFFFFFF might be read if the other side hasn't cleared
		 * the register for us yet; it is also not a valid value for
		 * the register, so treat such a read as invalid.
		 */
1869 		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
1870 			return 0;
1871 
1872 		usleep_range(5000, 10000);
1873 	}
1874 
1875 	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
1876 	/* Clear the reset flag unconditionally here since the reset
1877 	 * technically isn't in progress anymore from the driver's perspective
1878 	 */
1879 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1880 
1881 	return -EBUSY;
1882 }
1883 
1884 /**
1885  * idpf_init_hard_reset - Initiate a hardware reset
1886  * @adapter: Driver specific private structure
1887  *
1888  * Deallocate the vports and all the resources associated with them and
 * reallocate. Also reinitialize the mailbox.
 */
1892 static void idpf_init_hard_reset(struct idpf_adapter *adapter)
1893 {
1894 	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
1895 	struct device *dev = &adapter->pdev->dev;
1896 	int err;
1897 
1898 	idpf_detach_and_close(adapter);
1899 	mutex_lock(&adapter->vport_ctrl_lock);
1900 
1901 	dev_info(dev, "Device HW Reset initiated\n");
1902 
1903 	/* Prepare for reset */
1904 	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1905 		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
1906 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
1907 		bool is_reset = idpf_is_reset_detected(adapter);
1908 
1909 		idpf_idc_issue_reset_event(adapter->cdev_info);
1910 
1911 		idpf_vc_core_deinit(adapter);
1912 		if (!is_reset)
1913 			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
1914 		idpf_deinit_dflt_mbx(adapter);
1915 	} else {
1916 		dev_err(dev, "Unhandled hard reset cause\n");
1917 		err = -EBADRQC;
1918 		goto unlock_mutex;
1919 	}
1920 
1921 	/* Wait for reset to complete */
1922 	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
1923 	if (err) {
		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state = 0x%x\n",
1925 			adapter->state);
1926 		goto unlock_mutex;
1927 	}
1928 
	/* Reset is complete, so start building the driver resources again */
1930 	err = idpf_init_dflt_mbx(adapter);
1931 	if (err) {
1932 		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
1933 		goto unlock_mutex;
1934 	}
1935 
1936 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
1937 
1938 	/* Initialize the state machine, also allocate memory and request
1939 	 * resources
1940 	 */
1941 	err = idpf_vc_core_init(adapter);
1942 	if (err) {
1943 		cancel_delayed_work_sync(&adapter->mbx_task);
1944 		idpf_deinit_dflt_mbx(adapter);
1945 		goto unlock_mutex;
1946 	}
1947 
	/* Wait until all the vports are initialized to release the reset lock,
1949 	 * else user space callbacks may access uninitialized vports
1950 	 */
1951 	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1952 		msleep(100);
1953 
1954 unlock_mutex:
1955 	mutex_unlock(&adapter->vport_ctrl_lock);
1956 
1957 	/* Attempt to restore netdevs and initialize RDMA CORE AUX device,
1958 	 * provided vc_core_init succeeded. It is still possible that
1959 	 * vports are not allocated at this point if the init task failed.
1960 	 */
1961 	if (!err) {
1962 		idpf_attach_and_open(adapter);
1963 		idpf_idc_init(adapter);
1964 	}
1965 }
1966 
1967 /**
1968  * idpf_vc_event_task - Handle virtchannel event logic
1969  * @work: work queue struct
1970  */
1971 void idpf_vc_event_task(struct work_struct *work)
1972 {
1973 	struct idpf_adapter *adapter;
1974 
1975 	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
1976 
1977 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1978 		return;
1979 
1980 	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
1981 		goto func_reset;
1982 
1983 	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
1984 		goto drv_load;
1985 
1986 	return;
1987 
1988 func_reset:
1989 	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
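	/* fallthrough: both reset causes end in the common hard reset below */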
1990 drv_load:
1991 	set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1992 	idpf_init_hard_reset(adapter);
1993 }
1994 
1995 /**
1996  * idpf_initiate_soft_reset - Initiate a software reset
1997  * @vport: virtual port data struct
1998  * @reset_cause: reason for the soft reset
1999  *
2000  * Soft reset only reallocs vport queue resources. Returns 0 on success,
2001  * negative on failure.
2002  */
2003 int idpf_initiate_soft_reset(struct idpf_vport *vport,
2004 			     enum idpf_vport_reset_cause reset_cause)
2005 {
2006 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2007 	bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
2008 	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
2009 	struct idpf_adapter *adapter = vport->adapter;
2010 	struct idpf_vport_config *vport_config;
2011 	struct idpf_q_vec_rsrc *new_rsrc;
2012 	u32 vport_id = vport->vport_id;
2013 	struct idpf_vport *new_vport;
2014 	int err, tmp_err = 0;
2015 
	/* If the system is low on memory, we can end up in a bad state if we
	 * free all the memory for queue resources and try to allocate them
	 * again. Instead, we pre-allocate the new resources before doing
	 * anything and bail if the alloc fails.
2020 	 *
2021 	 * Make a clone of the existing vport to mimic its current
2022 	 * configuration, then modify the new structure with any requested
2023 	 * changes. Once the allocation of the new resources is done, stop the
2024 	 * existing vport and copy the configuration to the main vport. If an
	 * error occurs, the existing vport will be untouched.
	 */
2028 	new_vport = kzalloc_obj(*vport);
2029 	if (!new_vport)
2030 		return -ENOMEM;
2031 
2032 	/* This purposely avoids copying the end of the struct because it
2033 	 * contains wait_queues and mutexes and other stuff we don't want to
2034 	 * mess with. Nothing below should use those variables from new_vport
2035 	 * and should instead always refer to them in vport if they need to.
2036 	 */
2037 	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
2038 
2039 	new_rsrc = &new_vport->dflt_qv_rsrc;
2040 
2041 	/* Adjust resource parameters prior to reallocating resources */
2042 	switch (reset_cause) {
2043 	case IDPF_SR_Q_CHANGE:
2044 		err = idpf_vport_adjust_qs(new_vport, new_rsrc);
2045 		if (err)
2046 			goto free_vport;
2047 		break;
2048 	case IDPF_SR_Q_DESC_CHANGE:
2049 		/* Update queue parameters before allocating resources */
2050 		idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
2051 		break;
2052 	case IDPF_SR_MTU_CHANGE:
2053 		idpf_idc_vdev_mtu_event(vport->vdev_info,
2054 					IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
2055 		break;
2056 	case IDPF_SR_RSC_CHANGE:
2057 		break;
2058 	default:
2059 		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
2060 		err = -EINVAL;
2061 		goto free_vport;
2062 	}
2063 
2064 	vport_config = adapter->vport_config[vport->idx];
2065 
2066 	if (!vport_is_up) {
2067 		idpf_send_delete_queues_msg(adapter, &vport_config->qid_reg_info,
2068 					    vport_id);
2069 	} else {
2070 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
2071 		idpf_vport_stop(vport, false);
2072 	}
2073 
2074 	err = idpf_send_add_queues_msg(adapter, vport_config, new_rsrc,
2075 				       vport_id);
2076 	if (err)
2077 		goto err_reset;
2078 
2079 	/* Avoid copying the wait_queues and mutexes. We do not want to mess
2080 	 * with those if possible.
2081 	 */
2082 	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
2083 
2084 	if (reset_cause == IDPF_SR_Q_CHANGE)
2085 		idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
2086 
2087 	err = idpf_set_real_num_queues(vport);
2088 	if (err)
2089 		goto err_open;
2090 
2091 	if (reset_cause == IDPF_SR_Q_CHANGE &&
2092 	    !netif_is_rxfh_configured(vport->netdev)) {
2093 		struct idpf_rss_data *rss_data;
2094 
2095 		rss_data = &vport_config->user_config.rss_data;
2096 		idpf_fill_dflt_rss_lut(vport, rss_data);
2097 	}
2098 
2099 	if (vport_is_up)
2100 		err = idpf_vport_open(vport, false);
2101 
2102 	goto free_vport;
2103 
2104 err_reset:
2105 	tmp_err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
2106 					   vport_id);
2107 
2108 err_open:
2109 	if (!tmp_err && vport_is_up)
2110 		idpf_vport_open(vport, false);
2111 
2112 free_vport:
2113 	kfree(new_vport);
2114 
2115 	if (reset_cause == IDPF_SR_MTU_CHANGE)
2116 		idpf_idc_vdev_mtu_event(vport->vdev_info,
2117 					IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
2118 
2119 	return err;
2120 }
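
/*
 * Illustrative only: idpf_initiate_soft_reset() is the common backend for
 * several reconfiguration paths in this file, e.g.:
 *
 *	idpf_change_mtu()   -> idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE)
 *	idpf_set_features() -> idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE)
 *
 * Queue count and descriptor count changes (IDPF_SR_Q_CHANGE and
 * IDPF_SR_Q_DESC_CHANGE) are expected to come from the ethtool callbacks.
 */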
2121 
2122 /**
2123  * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
2124  * @netdev: the netdevice
2125  * @addr: address to add
2126  *
2127  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock
 * spinlock, meaning we cannot sleep in this context. Due to this, we add the
 * filter and send the virtchnl message asynchronously without waiting for the
 * response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
2133  * negative on failure.
2134  */
2135 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
2136 {
2137 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2138 
2139 	return idpf_add_mac_filter(np->vport, np, addr, true);
2140 }
2141 
2142 /**
2143  * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2144  * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock
 * spinlock, meaning we cannot sleep in this context. Due to this, we delete
 * the filter and send the virtchnl message asynchronously without waiting for
 * the response from the other side. We won't know whether or not the operation
 * actually succeeded until we get the message back. Returns 0 on success,
2153  * negative on failure.
2154  */
2155 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
2156 {
2157 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2158 
2159 	/* Under some circumstances, we might receive a request to delete
2160 	 * our own device address from our uc list. Because we store the
2161 	 * device address in the VSI's MAC filter list, we need to ignore
2162 	 * such requests and not delete our device address from this list.
2163 	 */
2164 	if (ether_addr_equal(addr, netdev->dev_addr))
2165 		return 0;
2166 
2167 	idpf_del_mac_filter(np->vport, np, addr, true);
2168 
2169 	return 0;
2170 }
2171 
2172 /**
2173  * idpf_set_rx_mode - NDO callback to set the netdev filters
2174  * @netdev: network interface device structure
2175  *
2176  * Stack takes addr_list_lock spinlock before calling our .set_rx_mode.  We
2177  * cannot sleep in this context.
2178  */
2179 static void idpf_set_rx_mode(struct net_device *netdev)
2180 {
2181 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2182 	struct idpf_vport_user_config_data *config_data;
2183 	struct idpf_adapter *adapter;
2184 	bool changed = false;
2185 	struct device *dev;
2186 	int err;
2187 
2188 	adapter = np->adapter;
2189 	dev = &adapter->pdev->dev;
2190 
2191 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
2192 		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2193 		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2194 	}
2195 
2196 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
2197 		return;
2198 
2199 	config_data = &adapter->vport_config[np->vport_idx]->user_config;
2200 	/* IFF_PROMISC enables both unicast and multicast promiscuous,
2201 	 * while IFF_ALLMULTI only enables multicast such that:
2202 	 *
2203 	 * promisc  + allmulti		= unicast | multicast
2204 	 * promisc  + !allmulti		= unicast | multicast
2205 	 * !promisc + allmulti		= multicast
2206 	 */
2207 	if ((netdev->flags & IFF_PROMISC) &&
2208 	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2209 		changed = true;
		dev_info(dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
			dev_info(dev, "Entering multicast promiscuous mode\n");
2213 	}
2214 
2215 	if (!(netdev->flags & IFF_PROMISC) &&
2216 	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2217 		changed = true;
2218 		dev_info(dev, "Leaving promiscuous mode\n");
2219 	}
2220 
2221 	if (netdev->flags & IFF_ALLMULTI &&
2222 	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2223 		changed = true;
2224 		dev_info(dev, "Entering multicast promiscuous mode\n");
2225 	}
2226 
2227 	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
2228 	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2229 		changed = true;
2230 		dev_info(dev, "Leaving multicast promiscuous mode\n");
2231 	}
2232 
2233 	if (!changed)
2234 		return;
2235 
2236 	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
2237 	if (err)
2238 		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
2239 }
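
/*
 * Illustrative only: `ip link set <dev> promisc on` sets IFF_PROMISC and
 * reaches the handler above via .ndo_set_rx_mode, enabling both unicast and
 * multicast promiscuous per the truth table; `allmulticast on` sets
 * IFF_ALLMULTI and enables only the multicast half.
 */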
2240 
2241 /**
2242  * idpf_set_features - set the netdev feature flags
2243  * @netdev: ptr to the netdev being adjusted
2244  * @features: the feature set that the stack is suggesting
2245  */
2246 static int idpf_set_features(struct net_device *netdev,
2247 			     netdev_features_t features)
2248 {
2249 	netdev_features_t changed = netdev->features ^ features;
2250 	struct idpf_adapter *adapter;
2251 	struct idpf_vport *vport;
2252 	int err = 0;
2253 
2254 	idpf_vport_ctrl_lock(netdev);
2255 	vport = idpf_netdev_to_vport(netdev);
2256 
2257 	adapter = vport->adapter;
2258 
2259 	if (idpf_is_reset_in_prog(adapter)) {
2260 		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
2261 		err = -EBUSY;
2262 		goto unlock_mutex;
2263 	}
2264 
2265 	if (changed & NETIF_F_RXHASH) {
2266 		struct idpf_netdev_priv *np = netdev_priv(netdev);
2267 
2268 		netdev->features ^= NETIF_F_RXHASH;
2269 
		/* If the interface is not up when changing the rxhash, the
		 * update to the HW is skipped. The updated LUT will be
		 * committed to the HW when the interface is brought up.
2273 		 */
2274 		if (test_bit(IDPF_VPORT_UP, np->state)) {
2275 			struct idpf_vport_config *vport_config;
2276 			struct idpf_rss_data *rss_data;
2277 
2278 			vport_config = adapter->vport_config[vport->idx];
2279 			rss_data = &vport_config->user_config.rss_data;
2280 			err = idpf_config_rss(vport, rss_data);
2281 			if (err)
2282 				goto unlock_mutex;
2283 		}
2284 	}
2285 
2286 	if (changed & NETIF_F_GRO_HW) {
2287 		netdev->features ^= NETIF_F_GRO_HW;
2288 		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
2289 		if (err)
2290 			goto unlock_mutex;
2291 	}
2292 
2293 	if (changed & NETIF_F_LOOPBACK) {
2294 		bool loopback_ena;
2295 
2296 		netdev->features ^= NETIF_F_LOOPBACK;
2297 		loopback_ena = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2298 
2299 		err = idpf_send_ena_dis_loopback_msg(adapter, vport->vport_id,
2300 						     loopback_ena);
2301 	}
2302 
2303 unlock_mutex:
2304 	idpf_vport_ctrl_unlock(netdev);
2305 
2306 	return err;
2307 }
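
/*
 * Illustrative only: `ethtool -K <dev> rx-gro-hw off` toggles NETIF_F_GRO_HW
 * and triggers the IDPF_SR_RSC_CHANGE soft reset above, while toggling
 * `loopback` only sends a virtchnl message and needs no queue rebuild.
 */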
2308 
2309 /**
2310  * idpf_open - Called when a network interface becomes active
2311  * @netdev: network interface device structure
2312  *
2313  * The open entry point is called when a network interface is made
2314  * active by the system (IFF_UP).  At this point all resources needed
2315  * for transmit and receive operations are allocated, the interrupt
2316  * handler is registered with the OS, the netdev watchdog is enabled,
2317  * and the stack is notified that the interface is ready.
2318  *
2319  * Returns 0 on success, negative value on failure
2320  */
2321 static int idpf_open(struct net_device *netdev)
2322 {
2323 	struct idpf_vport *vport;
2324 	int err;
2325 
2326 	idpf_vport_ctrl_lock(netdev);
2327 	vport = idpf_netdev_to_vport(netdev);
2328 
2329 	err = idpf_set_real_num_queues(vport);
2330 	if (err)
2331 		goto unlock;
2332 
2333 	err = idpf_vport_open(vport, false);
2334 
2335 unlock:
2336 	idpf_vport_ctrl_unlock(netdev);
2337 
2338 	return err;
2339 }
2340 
2341 /**
2342  * idpf_change_mtu - NDO callback to change the MTU
2343  * @netdev: network interface device structure
2344  * @new_mtu: new value for maximum frame size
2345  *
2346  * Returns 0 on success, negative on failure
2347  */
2348 static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
2349 {
2350 	struct idpf_vport *vport;
2351 	int err;
2352 
2353 	idpf_vport_ctrl_lock(netdev);
2354 	vport = idpf_netdev_to_vport(netdev);
2355 
2356 	WRITE_ONCE(netdev->mtu, new_mtu);
2357 
2358 	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
2359 
2360 	idpf_vport_ctrl_unlock(netdev);
2361 
2362 	return err;
2363 }
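
/*
 * Illustrative only: `ip link set <dev> mtu 9000` lands here via
 * .ndo_change_mtu; the new MTU is written first and the queues are then
 * rebuilt through the IDPF_SR_MTU_CHANGE soft reset above.
 */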
2364 
2365 /**
2366  * idpf_chk_tso_segment - Check skb is not using too many buffers
2367  * @skb: send buffer
2368  * @max_bufs: maximum number of buffers
2369  *
2370  * For TSO we need to count the TSO header and segment payload separately.  As
2371  * such we need to check cases where we have max_bufs-1 fragments or more as we
2372  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
 * for the segment payload in the first descriptor, and another max_bufs-1 for
2374  * the fragments.
2375  *
2376  * Returns true if the packet needs to be software segmented by core stack.
2377  */
2378 static bool idpf_chk_tso_segment(const struct sk_buff *skb,
2379 				 unsigned int max_bufs)
2380 {
2381 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2382 	const skb_frag_t *frag, *stale;
2383 	int nr_frags, sum;
2384 
2385 	/* no need to check if number of frags is less than max_bufs - 1 */
2386 	nr_frags = shinfo->nr_frags;
2387 	if (nr_frags < (max_bufs - 1))
2388 		return false;
2389 
2390 	/* We need to walk through the list and validate that each group
2391 	 * of max_bufs-2 fragments totals at least gso_size.
2392 	 */
2393 	nr_frags -= max_bufs - 2;
2394 	frag = &shinfo->frags[0];
2395 
	/* Initialize sum to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte, which is why we are limited to max_bufs-2
2399 	 * descriptors for a single transmit as the header and previous
2400 	 * fragment are already consuming 2 descriptors.
2401 	 */
2402 	sum = 1 - shinfo->gso_size;
2403 
2404 	/* Add size of frags 0 through 4 to create our initial sum */
2405 	sum += skb_frag_size(frag++);
2406 	sum += skb_frag_size(frag++);
2407 	sum += skb_frag_size(frag++);
2408 	sum += skb_frag_size(frag++);
2409 	sum += skb_frag_size(frag++);
2410 
2411 	/* Walk through fragments adding latest fragment, testing it, and
2412 	 * then removing stale fragments from the sum.
2413 	 */
2414 	for (stale = &shinfo->frags[0];; stale++) {
2415 		int stale_size = skb_frag_size(stale);
2416 
2417 		sum += skb_frag_size(frag++);
2418 
2419 		/* The stale fragment may present us with a smaller
2420 		 * descriptor than the actual fragment size. To account
2421 		 * for that we need to remove all the data on the front and
2422 		 * figure out what the remainder would be in the last
2423 		 * descriptor associated with the fragment.
2424 		 */
2425 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2426 			int align_pad = -(skb_frag_off(stale)) &
2427 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2428 
2429 			sum -= align_pad;
2430 			stale_size -= align_pad;
2431 
2432 			do {
2433 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2434 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2435 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2436 		}
2437 
2438 		/* if sum is negative we failed to make sufficient progress */
2439 		if (sum < 0)
2440 			return true;
2441 
2442 		if (!nr_frags--)
2443 			break;
2444 
2445 		sum -= stale_size;
2446 	}
2447 
2448 	return false;
2449 }
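
/*
 * Worked example (illustrative values): with max_bufs = 8 and
 * gso_size = 2000, an skb with seven 256-byte frags starts at
 * sum = 1 - 2000 = -1999; after the first six frags are added,
 * sum = -1999 + 6 * 256 = -463, which is still negative, so the function
 * returns true and the stack software-segments the skb. Larger frags that
 * cover gso_size within any max_bufs-2 window keep sum non-negative and the
 * skb is sent as-is.
 */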
2450 
2451 /**
2452  * idpf_features_check - Validate packet conforms to limits
2453  * @skb: skb buffer
2454  * @netdev: This port's netdev
2455  * @features: Offload features that the stack believes apply
2456  */
2457 static netdev_features_t idpf_features_check(struct sk_buff *skb,
2458 					     struct net_device *netdev,
2459 					     netdev_features_t features)
2460 {
2461 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2462 	u16 max_tx_hdr_size = np->max_tx_hdr_size;
2463 	size_t len;
2464 
2465 	/* No point in doing any of this if neither checksum nor GSO are
2466 	 * being requested for this frame.  We can rule out both by just
2467 	 * checking for CHECKSUM_PARTIAL
2468 	 */
2469 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2470 		return features;
2471 
2472 	if (skb_is_gso(skb)) {
2473 		/* We cannot support GSO if the MSS is going to be less than
2474 		 * 88 bytes. If it is then we need to drop support for GSO.
2475 		 */
2476 		if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
2477 			features &= ~NETIF_F_GSO_MASK;
2478 		else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
2479 			features &= ~NETIF_F_GSO_MASK;
2480 	}
2481 
2482 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
2483 	len = skb_network_offset(skb);
2484 	if (unlikely(len & ~(126)))
2485 		goto unsupported;
2486 
2487 	len = skb_network_header_len(skb);
2488 	if (unlikely(len > max_tx_hdr_size))
2489 		goto unsupported;
2490 
2491 	if (!skb->encapsulation)
2492 		return features;
2493 
2494 	/* L4TUNLEN can support 127 words */
2495 	len = skb_inner_network_header(skb) - skb_transport_header(skb);
2496 	if (unlikely(len & ~(127 * 2)))
2497 		goto unsupported;
2498 
2499 	/* IPLEN can support at most 127 dwords */
2500 	len = skb_inner_network_header_len(skb);
2501 	if (unlikely(len > max_tx_hdr_size))
2502 		goto unsupported;
2503 
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
2506 	 * by TCP, which is at most 15 dwords
2507 	 */
2508 
2509 	return features;
2510 
2511 unsupported:
2512 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2513 }
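
/*
 * Illustrative only: a plain Ethernet L2 header makes skb_network_offset()
 * return 14, and a single VLAN tag makes it 18; both are even and <= 126,
 * so (len & ~126) is 0 and the MACLEN check above passes. An odd or
 * oversized MACLEN clears both the checksum and GSO feature bits instead.
 */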
2514 
2515 /**
2516  * idpf_set_mac - NDO callback to set port mac address
2517  * @netdev: network interface device structure
2518  * @p: pointer to an address structure
2519  *
2520  * Returns 0 on success, negative on failure
2521  **/
2522 static int idpf_set_mac(struct net_device *netdev, void *p)
2523 {
2524 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2525 	struct idpf_vport_config *vport_config;
2526 	struct sockaddr *addr = p;
2527 	u8 old_mac_addr[ETH_ALEN];
2528 	struct idpf_vport *vport;
2529 	int err = 0;
2530 
2531 	idpf_vport_ctrl_lock(netdev);
2532 	vport = idpf_netdev_to_vport(netdev);
2533 
2534 	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
2535 			     VIRTCHNL2_CAP_MACFILTER)) {
2536 		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
2537 		err = -EOPNOTSUPP;
2538 		goto unlock_mutex;
2539 	}
2540 
2541 	if (!is_valid_ether_addr(addr->sa_data)) {
2542 		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
2543 			 addr->sa_data);
2544 		err = -EADDRNOTAVAIL;
2545 		goto unlock_mutex;
2546 	}
2547 
2548 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
2549 		goto unlock_mutex;
2550 
2551 	ether_addr_copy(old_mac_addr, vport->default_mac_addr);
2552 	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
2553 	vport_config = vport->adapter->vport_config[vport->idx];
2554 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
2555 	if (err) {
2556 		__idpf_del_mac_filter(vport_config, addr->sa_data);
2557 		ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
2558 		goto unlock_mutex;
2559 	}
2560 
2561 	if (is_valid_ether_addr(old_mac_addr))
2562 		__idpf_del_mac_filter(vport_config, old_mac_addr);
2563 
2564 	eth_hw_addr_set(netdev, addr->sa_data);
2565 
2566 unlock_mutex:
2567 	idpf_vport_ctrl_unlock(netdev);
2568 
2569 	return err;
2570 }
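
/*
 * Illustrative only: `ip link set <dev> address 02:11:22:33:44:55` ends up
 * in idpf_set_mac() above. Note the ordering: the new filter is added before
 * the old one is deleted, so a failure leaves the original address fully
 * functional.
 */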
2571 
2572 /**
2573  * idpf_alloc_dma_mem - Allocate dma memory
2574  * @hw: pointer to hw struct
2575  * @mem: pointer to dma_mem struct
2576  * @size: size of the memory to allocate
2577  */
2578 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
2579 {
2580 	struct idpf_adapter *adapter = hw->back;
2581 	size_t sz = ALIGN(size, 4096);
2582 
	/* The control queue resources are freed under a spinlock; contiguous
	 * pages avoid IOMMU remapping and the use of vmap (and vunmap in the
	 * dma_free_*() path).
	 */
2587 	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
2588 				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
2589 	mem->size = sz;
2590 
2591 	return mem->va;
2592 }
2593 
2594 /**
2595  * idpf_free_dma_mem - Free the allocated dma memory
2596  * @hw: pointer to hw struct
2597  * @mem: pointer to dma_mem struct
2598  */
2599 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2600 {
2601 	struct idpf_adapter *adapter = hw->back;
2602 
2603 	dma_free_attrs(&adapter->pdev->dev, mem->size,
2604 		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
2605 	mem->size = 0;
2606 	mem->va = NULL;
2607 	mem->pa = 0;
2608 }
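
/*
 * Minimal usage sketch (hypothetical caller, not taken from this driver):
 *
 *	struct idpf_dma_mem mem = {};
 *
 *	if (!idpf_alloc_dma_mem(hw, &mem, 1024))  // size rounds up to 4096
 *		return -ENOMEM;
 *	// ... use mem.va / mem.pa ...
 *	idpf_free_dma_mem(hw, &mem);              // va/pa are reset
 */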
2609 
2610 static int idpf_hwtstamp_set(struct net_device *netdev,
2611 			     struct kernel_hwtstamp_config *config,
2612 			     struct netlink_ext_ack *extack)
2613 {
2614 	struct idpf_vport *vport;
2615 	int err;
2616 
2617 	idpf_vport_ctrl_lock(netdev);
2618 	vport = idpf_netdev_to_vport(netdev);
2619 
2620 	if (!vport->link_up) {
2621 		idpf_vport_ctrl_unlock(netdev);
2622 		return -EPERM;
2623 	}
2624 
2625 	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
2626 	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
2627 		idpf_vport_ctrl_unlock(netdev);
2628 		return -EOPNOTSUPP;
2629 	}
2630 
2631 	err = idpf_ptp_set_timestamp_mode(vport, config);
2632 
2633 	idpf_vport_ctrl_unlock(netdev);
2634 
2635 	return err;
2636 }
2637 
2638 static int idpf_hwtstamp_get(struct net_device *netdev,
2639 			     struct kernel_hwtstamp_config *config)
2640 {
2641 	struct idpf_vport *vport;
2642 
2643 	idpf_vport_ctrl_lock(netdev);
2644 	vport = idpf_netdev_to_vport(netdev);
2645 
2646 	if (!vport->link_up) {
2647 		idpf_vport_ctrl_unlock(netdev);
2648 		return -EPERM;
2649 	}
2650 
2651 	if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
2652 	    !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
2653 		idpf_vport_ctrl_unlock(netdev);
2654 		return 0;
2655 	}
2656 
2657 	*config = vport->tstamp_config;
2658 
2659 	idpf_vport_ctrl_unlock(netdev);
2660 
2661 	return 0;
2662 }
2663 
2664 static const struct net_device_ops idpf_netdev_ops = {
2665 	.ndo_open = idpf_open,
2666 	.ndo_stop = idpf_stop,
2667 	.ndo_start_xmit = idpf_tx_start,
2668 	.ndo_features_check = idpf_features_check,
2669 	.ndo_set_rx_mode = idpf_set_rx_mode,
2670 	.ndo_validate_addr = eth_validate_addr,
2671 	.ndo_set_mac_address = idpf_set_mac,
2672 	.ndo_change_mtu = idpf_change_mtu,
2673 	.ndo_get_stats64 = idpf_get_stats64,
2674 	.ndo_set_features = idpf_set_features,
2675 	.ndo_tx_timeout = idpf_tx_timeout,
2676 	.ndo_hwtstamp_get = idpf_hwtstamp_get,
2677 	.ndo_hwtstamp_set = idpf_hwtstamp_set,
2678 	.ndo_bpf = idpf_xdp,
2679 	.ndo_xdp_xmit = idpf_xdp_xmit,
2680 	.ndo_xsk_wakeup = idpf_xsk_wakeup,
2681 };
2682