xref: /linux/drivers/net/ethernet/intel/idpf/idpf_lib.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 
6 static const struct net_device_ops idpf_netdev_ops_splitq;
7 static const struct net_device_ops idpf_netdev_ops_singleq;
8 
9 const char * const idpf_vport_vc_state_str[] = {
10 	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
11 };
12 
13 /**
14  * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
15  * @adapter: private data struct
16  *
17  * Return 0 on success, error on failure
18  */
19 static int idpf_init_vector_stack(struct idpf_adapter *adapter)
20 {
21 	struct idpf_vector_lifo *stack;
22 	u16 min_vec;
23 	u32 i;
24 
25 	mutex_lock(&adapter->vector_lock);
26 	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
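	/* min_vec counts the vectors already spoken for (the mailbox vector
	 * plus the per-default-vport minimums carved out in idpf_intr_req());
	 * only entries at or above it form the on-demand free pool.
	 */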
27 	stack = &adapter->vector_stack;
28 	stack->size = adapter->num_msix_entries;
29 	/* Set the base and top to point at the start of the 'free pool' so
30 	 * the unused vectors can be distributed on an on-demand basis
31 	 */
32 	stack->base = min_vec;
33 	stack->top = min_vec;
34 
35 	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
36 	if (!stack->vec_idx) {
37 		mutex_unlock(&adapter->vector_lock);
38 
39 		return -ENOMEM;
40 	}
41 
42 	for (i = 0; i < stack->size; i++)
43 		stack->vec_idx[i] = i;
44 
45 	mutex_unlock(&adapter->vector_lock);
46 
47 	return 0;
48 }
49 
50 /**
51  * idpf_deinit_vector_stack - free the MSIX vector stack
52  * @adapter: private data struct
53  */
54 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
55 {
56 	struct idpf_vector_lifo *stack;
57 
58 	mutex_lock(&adapter->vector_lock);
59 	stack = &adapter->vector_stack;
60 	kfree(stack->vec_idx);
61 	stack->vec_idx = NULL;
62 	mutex_unlock(&adapter->vector_lock);
63 }
64 
65 /**
66  * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
67  * @adapter: adapter structure
68  *
69  * This will also disable interrupt mode and queue up the mailbox task. The
70  * mailbox task will reschedule itself if not in interrupt mode.
71  */
72 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
73 {
74 	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
75 	free_irq(adapter->msix_entries[0].vector, adapter);
76 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
77 }
78 
79 /**
80  * idpf_intr_rel - Release interrupt capabilities and free memory
81  * @adapter: adapter to disable interrupts on
82  */
83 void idpf_intr_rel(struct idpf_adapter *adapter)
84 {
85 	int err;
86 
87 	if (!adapter->msix_entries)
88 		return;
89 
90 	idpf_mb_intr_rel_irq(adapter);
91 	pci_free_irq_vectors(adapter->pdev);
92 
93 	err = idpf_send_dealloc_vectors_msg(adapter);
94 	if (err)
95 		dev_err(&adapter->pdev->dev,
96 			"Failed to deallocate vectors: %d\n", err);
97 
98 	idpf_deinit_vector_stack(adapter);
99 	kfree(adapter->msix_entries);
100 	adapter->msix_entries = NULL;
101 }
102 
103 /**
104  * idpf_mb_intr_clean - Interrupt handler for the mailbox
105  * @irq: interrupt number
106  * @data: pointer to the adapter structure
107  */
108 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
109 {
110 	struct idpf_adapter *adapter = (struct idpf_adapter *)data;
111 
112 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
113 
114 	return IRQ_HANDLED;
115 }
116 
117 /**
118  * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
119  * @adapter: adapter to get the hardware address for register write
120  */
121 static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
122 {
123 	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
124 	u32 val;
125 
126 	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
127 	writel(val, intr->dyn_ctl);
128 	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
129 }
130 
131 /**
132  * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
133  * @adapter: adapter structure to pass to the mailbox irq handler
134  */
135 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
136 {
137 	struct idpf_q_vector *mb_vector = &adapter->mb_vector;
138 	int irq_num, mb_vidx = 0, err;
139 
140 	irq_num = adapter->msix_entries[mb_vidx].vector;
141 	mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
142 				    dev_driver_string(&adapter->pdev->dev),
143 				    "Mailbox", mb_vidx);
144 	err = request_irq(irq_num, adapter->irq_mb_handler, 0,
145 			  mb_vector->name, adapter);
146 	if (err) {
147 		dev_err(&adapter->pdev->dev,
148 			"IRQ request for mailbox failed, error: %d\n", err);
149 
150 		return err;
151 	}
152 
153 	set_bit(IDPF_MB_INTR_MODE, adapter->flags);
154 
155 	return 0;
156 }
157 
158 /**
159  * idpf_set_mb_vec_id - Set vector index for mailbox
160  * @adapter: adapter structure to access the vector chunks
161  *
162  * The first vector id in the requested vector chunks from the CP is for
163  * the mailbox
164  */
165 static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
166 {
167 	if (adapter->req_vec_chunks)
168 		adapter->mb_vector.v_idx =
169 			le16_to_cpu(adapter->caps.mailbox_vector_id);
170 	else
171 		adapter->mb_vector.v_idx = 0;
172 }
173 
174 /**
175  * idpf_mb_intr_init - Initialize the mailbox interrupt
176  * @adapter: adapter structure to store the mailbox vector
177  */
178 static int idpf_mb_intr_init(struct idpf_adapter *adapter)
179 {
180 	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
181 	adapter->irq_mb_handler = idpf_mb_intr_clean;
182 
183 	return idpf_mb_intr_req_irq(adapter);
184 }
185 
186 /**
187  * idpf_vector_lifo_push - push MSIX vector index onto stack
188  * @adapter: private data struct
189  * @vec_idx: vector index to store
190  */
191 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
192 {
193 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
194 
195 	lockdep_assert_held(&adapter->vector_lock);
196 
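	/* A push writes at --top, so once 'top' has dropped to 'base' another
	 * push would spill into the entries below 'base' that are reserved
	 * for the mailbox and the default vports.
	 */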
197 	if (stack->top == stack->base) {
198 		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
199 			stack->top);
200 		return -EINVAL;
201 	}
202 
203 	stack->vec_idx[--stack->top] = vec_idx;
204 
205 	return 0;
206 }
207 
208 /**
209  * idpf_vector_lifo_pop - pop MSIX vector index from stack
210  * @adapter: private data struct
211  */
212 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
213 {
214 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
215 
216 	lockdep_assert_held(&adapter->vector_lock);
217 
218 	if (stack->top == stack->size) {
219 		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
220 
221 		return -EINVAL;
222 	}
223 
224 	return stack->vec_idx[stack->top++];
225 }
226 
227 /**
228  * idpf_vector_stash - Store the vector indexes onto the stack
229  * @adapter: private data struct
230  * @q_vector_idxs: vector index array
231  * @vec_info: info related to the number of vectors
232  *
233  * This function is a no-op if there are no vector indexes to be stashed
234  */
235 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
236 			      struct idpf_vector_info *vec_info)
237 {
238 	int i, base = 0;
239 	u16 vec_idx;
240 
241 	lockdep_assert_held(&adapter->vector_lock);
242 
243 	if (!vec_info->num_curr_vecs)
244 		return;
245 
246 	/* For default vports, there is no need to stash vectors allocated
247 	 * from the default pool onto the stack
248 	 */
249 	if (vec_info->default_vport)
250 		base = IDPF_MIN_Q_VEC;
251 
252 	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
253 		vec_idx = q_vector_idxs[i];
254 		idpf_vector_lifo_push(adapter, vec_idx);
255 		adapter->num_avail_msix++;
256 	}
257 }
258 
259 /**
260  * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
261  * @adapter: driver specific private structure
262  * @q_vector_idxs: vector index array
263  * @vec_info: info related to the number of vectors
264  *
265  * This is the core function to distribute the MSIX vectors acquired from the
266  * OS. It expects the caller to pass the number of vectors required as well as
267  * the number previously allocated. First, it stashes the previously allocated
268  * vector indexes onto the stack and then figures out if it can allocate the
269  * requested vectors. It can block while acquiring the mutex lock. If the
270  * caller passes 0 as the number of requested vectors, this function just
271  * stashes the already allocated vectors and returns 0.
272  *
273  * Returns the actual number of vectors allocated on success, error value on
274  * failure. If 0 is returned, it implies the stack has no vectors to allocate,
275  * which is also a failure case for the caller.
276  */
277 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
278 				u16 *q_vector_idxs,
279 				struct idpf_vector_info *vec_info)
280 {
281 	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
282 	struct idpf_vector_lifo *stack;
283 	int i, j, vecid;
284 
285 	mutex_lock(&adapter->vector_lock);
286 	stack = &adapter->vector_stack;
287 	num_req_vecs = vec_info->num_req_vecs;
288 
289 	/* Stash interrupt vector indexes onto the stack if required */
290 	idpf_vector_stash(adapter, q_vector_idxs, vec_info);
291 
292 	if (!num_req_vecs)
293 		goto rel_lock;
294 
295 	if (vec_info->default_vport) {
296 		/* As IDPF_MIN_Q_VEC per default vport is put aside in the
297 		 * default pool of the stack, use them for default vports
298 		 */
299 		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
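		/* 'j' indexes the block of IDPF_MIN_Q_VEC entries set aside
		 * for this default vport, located just past the mailbox
		 * vector(s) at the bottom of the stack array.
		 */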
300 		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
301 			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
302 			num_req_vecs--;
303 		}
304 	}
305 
306 	/* Find out if the stack has enough vectors to allocate */
307 	max_vecs = min(adapter->num_avail_msix, num_req_vecs);
308 
309 	for (j = 0; j < max_vecs; j++) {
310 		vecid = idpf_vector_lifo_pop(adapter);
311 		q_vector_idxs[num_alloc_vecs++] = vecid;
312 	}
313 	adapter->num_avail_msix -= max_vecs;
314 
315 rel_lock:
316 	mutex_unlock(&adapter->vector_lock);
317 
318 	return num_alloc_vecs;
319 }
320 
321 /**
322  * idpf_intr_req - Request interrupt capabilities
323  * @adapter: adapter to enable interrupts on
324  *
325  * Returns 0 on success, negative on failure
326  */
327 int idpf_intr_req(struct idpf_adapter *adapter)
328 {
329 	u16 default_vports = idpf_get_default_vports(adapter);
330 	int num_q_vecs, total_vecs, num_vec_ids;
331 	int min_vectors, v_actual, err;
332 	unsigned int vector;
333 	u16 *vecids;
334 
335 	total_vecs = idpf_get_reserved_vecs(adapter);
336 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
337 
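	/* The mailbox vector(s) are carved out of the reserved total; only the
	 * remaining queue vectors are requested from the CP here.
	 */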
338 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
339 	if (err) {
340 		dev_err(&adapter->pdev->dev,
341 			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
342 
343 		return -EAGAIN;
344 	}
345 
346 	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
347 	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
348 					 total_vecs, PCI_IRQ_MSIX);
349 	if (v_actual < min_vectors) {
350 		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
351 			v_actual);
352 		err = -EAGAIN;
353 		goto send_dealloc_vecs;
354 	}
355 
356 	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
357 					GFP_KERNEL);
358 
359 	if (!adapter->msix_entries) {
360 		err = -ENOMEM;
361 		goto free_irq;
362 	}
363 
364 	idpf_set_mb_vec_id(adapter);
365 
366 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
367 	if (!vecids) {
368 		err = -ENOMEM;
369 		goto free_msix;
370 	}
371 
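	/* If the CP granted vector chunks, translate them into the actual
	 * vector IDs; otherwise fall back to a 1:1 mapping of 0..v_actual-1.
	 */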
372 	if (adapter->req_vec_chunks) {
373 		struct virtchnl2_vector_chunks *vchunks;
374 		struct virtchnl2_alloc_vectors *ac;
375 
376 		ac = adapter->req_vec_chunks;
377 		vchunks = &ac->vchunks;
378 
379 		num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
380 					       vchunks);
381 		if (num_vec_ids < v_actual) {
382 			err = -EINVAL;
383 			goto free_vecids;
384 		}
385 	} else {
386 		int i;
387 
388 		for (i = 0; i < v_actual; i++)
389 			vecids[i] = i;
390 	}
391 
392 	for (vector = 0; vector < v_actual; vector++) {
393 		adapter->msix_entries[vector].entry = vecids[vector];
394 		adapter->msix_entries[vector].vector =
395 			pci_irq_vector(adapter->pdev, vector);
396 	}
397 
398 	adapter->num_req_msix = total_vecs;
399 	adapter->num_msix_entries = v_actual;
400 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
401 	 * after accounting for the minimum vectors required for each default
402 	 * vport
403 	 */
404 	adapter->num_avail_msix = v_actual - min_vectors;
405 
406 	/* Fill MSIX vector lifo stack with vector indexes */
407 	err = idpf_init_vector_stack(adapter);
408 	if (err)
409 		goto free_vecids;
410 
411 	err = idpf_mb_intr_init(adapter);
412 	if (err)
413 		goto deinit_vec_stack;
414 	idpf_mb_irq_enable(adapter);
415 	kfree(vecids);
416 
417 	return 0;
418 
419 deinit_vec_stack:
420 	idpf_deinit_vector_stack(adapter);
421 free_vecids:
422 	kfree(vecids);
423 free_msix:
424 	kfree(adapter->msix_entries);
425 	adapter->msix_entries = NULL;
426 free_irq:
427 	pci_free_irq_vectors(adapter->pdev);
428 send_dealloc_vecs:
429 	idpf_send_dealloc_vectors_msg(adapter);
430 
431 	return err;
432 }
433 
434 /**
435  * idpf_find_mac_filter - Search filter list for specific mac filter
436  * @vconfig: Vport config structure
437  * @macaddr: The MAC address
438  *
439  * Returns ptr to the filter object or NULL. Must be called while holding the
440  * mac_filter_list_lock.
441  **/
442 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
443 						    const u8 *macaddr)
444 {
445 	struct idpf_mac_filter *f;
446 
447 	if (!macaddr)
448 		return NULL;
449 
450 	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
451 		if (ether_addr_equal(macaddr, f->macaddr))
452 			return f;
453 	}
454 
455 	return NULL;
456 }
457 
458 /**
459  * __idpf_del_mac_filter - Delete a MAC filter from the filter list
460  * @vport_config: Vport config structure
461  * @macaddr: The MAC address
462  *
463  * Returns 0 on success, error value on failure
464  **/
465 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
466 				 const u8 *macaddr)
467 {
468 	struct idpf_mac_filter *f;
469 
470 	spin_lock_bh(&vport_config->mac_filter_list_lock);
471 	f = idpf_find_mac_filter(vport_config, macaddr);
472 	if (f) {
473 		list_del(&f->list);
474 		kfree(f);
475 	}
476 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
477 
478 	return 0;
479 }
480 
481 /**
482  * idpf_del_mac_filter - Delete a MAC filter from the filter list
483  * @vport: Main vport structure
484  * @np: Netdev private structure
485  * @macaddr: The MAC address
486  * @async: Don't wait for return message
487  *
488  * Removes the filter from the list and, if the interface is up, tells
489  * hardware about the removed filter.
490  **/
491 static int idpf_del_mac_filter(struct idpf_vport *vport,
492 			       struct idpf_netdev_priv *np,
493 			       const u8 *macaddr, bool async)
494 {
495 	struct idpf_vport_config *vport_config;
496 	struct idpf_mac_filter *f;
497 
498 	vport_config = np->adapter->vport_config[np->vport_idx];
499 
500 	spin_lock_bh(&vport_config->mac_filter_list_lock);
501 	f = idpf_find_mac_filter(vport_config, macaddr);
502 	if (f) {
503 		f->remove = true;
504 	} else {
505 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
506 
507 		return -EINVAL;
508 	}
509 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
510 
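	/* The filter stays on the list flagged for removal until HW has been
	 * told about it (only needed while the vport is up); it is unlinked
	 * and freed afterwards.
	 */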
511 	if (np->state == __IDPF_VPORT_UP) {
512 		int err;
513 
514 		err = idpf_add_del_mac_filters(vport, np, false, async);
515 		if (err)
516 			return err;
517 	}
518 
519 	return __idpf_del_mac_filter(vport_config, macaddr);
520 }
521 
522 /**
523  * __idpf_add_mac_filter - Add mac filter helper function
524  * @vport_config: Vport config structure
525  * @macaddr: Address to add
526  *
527  * Takes mac_filter_list_lock spinlock to add new filter to list.
528  */
529 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
530 				 const u8 *macaddr)
531 {
532 	struct idpf_mac_filter *f;
533 
534 	spin_lock_bh(&vport_config->mac_filter_list_lock);
535 
536 	f = idpf_find_mac_filter(vport_config, macaddr);
537 	if (f) {
538 		f->remove = false;
539 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
540 
541 		return 0;
542 	}
543 
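	/* Atomic allocation: the filter list spinlock is held with BHs off */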
544 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
545 	if (!f) {
546 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
547 
548 		return -ENOMEM;
549 	}
550 
551 	ether_addr_copy(f->macaddr, macaddr);
552 	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
553 	f->add = true;
554 
555 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
556 
557 	return 0;
558 }
559 
560 /**
561  * idpf_add_mac_filter - Add a mac filter to the filter list
562  * @vport: Main vport structure
563  * @np: Netdev private structure
564  * @macaddr: The MAC address
565  * @async: Don't wait for return message
566  *
567  * Returns 0 on success or error on failure. If interface is up, we'll also
568  * send the virtchnl message to tell hardware about the filter.
569  **/
570 static int idpf_add_mac_filter(struct idpf_vport *vport,
571 			       struct idpf_netdev_priv *np,
572 			       const u8 *macaddr, bool async)
573 {
574 	struct idpf_vport_config *vport_config;
575 	int err;
576 
577 	vport_config = np->adapter->vport_config[np->vport_idx];
578 	err = __idpf_add_mac_filter(vport_config, macaddr);
579 	if (err)
580 		return err;
581 
582 	if (np->state == __IDPF_VPORT_UP)
583 		err = idpf_add_del_mac_filters(vport, np, true, async);
584 
585 	return err;
586 }
587 
588 /**
589  * idpf_del_all_mac_filters - Delete all MAC filters in list
590  * @vport: main vport struct
591  *
592  * Takes mac_filter_list_lock spinlock.  Deletes all filters
593  */
594 static void idpf_del_all_mac_filters(struct idpf_vport *vport)
595 {
596 	struct idpf_vport_config *vport_config;
597 	struct idpf_mac_filter *f, *ftmp;
598 
599 	vport_config = vport->adapter->vport_config[vport->idx];
600 	spin_lock_bh(&vport_config->mac_filter_list_lock);
601 
602 	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
603 				 list) {
604 		list_del(&f->list);
605 		kfree(f);
606 	}
607 
608 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
609 }
610 
611 /**
612  * idpf_restore_mac_filters - Re-add all MAC filters in list
613  * @vport: main vport struct
614  *
615  * Takes mac_filter_list_lock spinlock. Sets the add field to true for all
616  * filters to resync them back to HW.
617  */
618 static void idpf_restore_mac_filters(struct idpf_vport *vport)
619 {
620 	struct idpf_vport_config *vport_config;
621 	struct idpf_mac_filter *f;
622 
623 	vport_config = vport->adapter->vport_config[vport->idx];
624 	spin_lock_bh(&vport_config->mac_filter_list_lock);
625 
626 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
627 		f->add = true;
628 
629 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
630 
631 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
632 				 true, false);
633 }
634 
635 /**
636  * idpf_remove_mac_filters - Remove all MAC filters in list
637  * @vport: main vport struct
638  *
639  * Takes mac_filter_list_lock spinlock. Sets the remove field to true for all
640  * filters so they are removed from HW.
641  */
642 static void idpf_remove_mac_filters(struct idpf_vport *vport)
643 {
644 	struct idpf_vport_config *vport_config;
645 	struct idpf_mac_filter *f;
646 
647 	vport_config = vport->adapter->vport_config[vport->idx];
648 	spin_lock_bh(&vport_config->mac_filter_list_lock);
649 
650 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
651 		f->remove = true;
652 
653 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
654 
655 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
656 				 false, false);
657 }
658 
659 /**
660  * idpf_deinit_mac_addr - deinitialize mac address for vport
661  * @vport: main vport structure
662  */
663 static void idpf_deinit_mac_addr(struct idpf_vport *vport)
664 {
665 	struct idpf_vport_config *vport_config;
666 	struct idpf_mac_filter *f;
667 
668 	vport_config = vport->adapter->vport_config[vport->idx];
669 
670 	spin_lock_bh(&vport_config->mac_filter_list_lock);
671 
672 	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
673 	if (f) {
674 		list_del(&f->list);
675 		kfree(f);
676 	}
677 
678 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
679 }
680 
681 /**
682  * idpf_init_mac_addr - initialize mac address for vport
683  * @vport: main vport structure
684  * @netdev: pointer to netdev struct associated with this vport
685  */
686 static int idpf_init_mac_addr(struct idpf_vport *vport,
687 			      struct net_device *netdev)
688 {
689 	struct idpf_netdev_priv *np = netdev_priv(netdev);
690 	struct idpf_adapter *adapter = vport->adapter;
691 	int err;
692 
693 	if (is_valid_ether_addr(vport->default_mac_addr)) {
694 		eth_hw_addr_set(netdev, vport->default_mac_addr);
695 		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
696 
697 		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
698 					   false);
699 	}
700 
701 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
702 			     VIRTCHNL2_CAP_MACFILTER)) {
703 		dev_err(&adapter->pdev->dev,
704 			"MAC address is not provided and capability is not set\n");
705 
706 		return -EINVAL;
707 	}
708 
709 	eth_hw_addr_random(netdev);
710 	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
711 	if (err)
712 		return err;
713 
714 	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
715 		 vport->default_mac_addr, netdev->dev_addr);
716 	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
717 
718 	return 0;
719 }
720 
721 /**
722  * idpf_cfg_netdev - Allocate, configure and register a netdev
723  * @vport: main vport structure
724  *
725  * Returns 0 on success, negative value on failure.
726  */
727 static int idpf_cfg_netdev(struct idpf_vport *vport)
728 {
729 	struct idpf_adapter *adapter = vport->adapter;
730 	struct idpf_vport_config *vport_config;
731 	netdev_features_t dflt_features;
732 	netdev_features_t offloads = 0;
733 	struct idpf_netdev_priv *np;
734 	struct net_device *netdev;
735 	u16 idx = vport->idx;
736 	int err;
737 
738 	vport_config = adapter->vport_config[idx];
739 
740 	/* It's possible we already have a netdev allocated and registered for
741 	 * this vport
742 	 */
743 	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
744 		netdev = adapter->netdevs[idx];
745 		np = netdev_priv(netdev);
746 		np->vport = vport;
747 		np->vport_idx = vport->idx;
748 		np->vport_id = vport->vport_id;
749 		vport->netdev = netdev;
750 
751 		return idpf_init_mac_addr(vport, netdev);
752 	}
753 
754 	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
755 				    vport_config->max_q.max_txq,
756 				    vport_config->max_q.max_rxq);
757 	if (!netdev)
758 		return -ENOMEM;
759 
760 	vport->netdev = netdev;
761 	np = netdev_priv(netdev);
762 	np->vport = vport;
763 	np->adapter = adapter;
764 	np->vport_idx = vport->idx;
765 	np->vport_id = vport->vport_id;
766 
767 	spin_lock_init(&np->stats_lock);
768 
769 	err = idpf_init_mac_addr(vport, netdev);
770 	if (err) {
771 		free_netdev(vport->netdev);
772 		vport->netdev = NULL;
773 
774 		return err;
775 	}
776 
777 	/* assign netdev_ops */
778 	if (idpf_is_queue_model_split(vport->txq_model))
779 		netdev->netdev_ops = &idpf_netdev_ops_splitq;
780 	else
781 		netdev->netdev_ops = &idpf_netdev_ops_singleq;
782 
783 	/* set up the watchdog timeout value to be 5 seconds */
784 	netdev->watchdog_timeo = 5 * HZ;
785 
786 	netdev->dev_port = idx;
787 
788 	/* configure default MTU size */
789 	netdev->min_mtu = ETH_MIN_MTU;
790 	netdev->max_mtu = vport->max_mtu;
791 
792 	dflt_features = NETIF_F_SG	|
793 			NETIF_F_HIGHDMA;
794 
795 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
796 		dflt_features |= NETIF_F_RXHASH;
797 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
798 		dflt_features |= NETIF_F_IP_CSUM;
799 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
800 		dflt_features |= NETIF_F_IPV6_CSUM;
801 	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
802 		dflt_features |= NETIF_F_RXCSUM;
803 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
804 		dflt_features |= NETIF_F_SCTP_CRC;
805 
806 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
807 		dflt_features |= NETIF_F_TSO;
808 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
809 		dflt_features |= NETIF_F_TSO6;
810 	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
811 				VIRTCHNL2_CAP_SEG_IPV4_UDP |
812 				VIRTCHNL2_CAP_SEG_IPV6_UDP))
813 		dflt_features |= NETIF_F_GSO_UDP_L4;
814 	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
815 		offloads |= NETIF_F_GRO_HW;
816 	/* advertise to the stack only if offloads for encapsulated packets are
817 	 * supported
818 	 */
819 	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
820 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
821 		offloads |= NETIF_F_GSO_UDP_TUNNEL	|
822 			    NETIF_F_GSO_GRE		|
823 			    NETIF_F_GSO_GRE_CSUM	|
824 			    NETIF_F_GSO_PARTIAL		|
825 			    NETIF_F_GSO_UDP_TUNNEL_CSUM	|
826 			    NETIF_F_GSO_IPXIP4		|
827 			    NETIF_F_GSO_IPXIP6		|
828 			    0;
829 
830 		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
831 					 IDPF_CAP_TUNNEL_TX_CSUM))
832 			netdev->gso_partial_features |=
833 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
834 
835 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
836 		offloads |= NETIF_F_TSO_MANGLEID;
837 	}
838 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
839 		offloads |= NETIF_F_LOOPBACK;
840 
841 	netdev->features |= dflt_features;
842 	netdev->hw_features |= dflt_features | offloads;
843 	netdev->hw_enc_features |= dflt_features | offloads;
844 	idpf_set_ethtool_ops(netdev);
845 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
846 
847 	/* carrier off on init to avoid Tx hangs */
848 	netif_carrier_off(netdev);
849 
850 	/* make sure transmit queues start off as stopped */
851 	netif_tx_stop_all_queues(netdev);
852 
853 	/* The vport can be arbitrarily released so we need to also track
854 	 * netdevs in the adapter struct
855 	 */
856 	adapter->netdevs[idx] = netdev;
857 
858 	return 0;
859 }
860 
861 /**
862  * idpf_get_free_slot - get the next free (NULL) slot index in the vports array
863  * @adapter: adapter in which to look for a free vport slot
864  */
865 static int idpf_get_free_slot(struct idpf_adapter *adapter)
866 {
867 	unsigned int i;
868 
869 	for (i = 0; i < adapter->max_vports; i++) {
870 		if (!adapter->vports[i])
871 			return i;
872 	}
873 
874 	return IDPF_NO_FREE_SLOT;
875 }
876 
877 /**
878  * idpf_remove_features - Turn off feature configs
879  * @vport: virtual port structure
880  */
881 static void idpf_remove_features(struct idpf_vport *vport)
882 {
883 	struct idpf_adapter *adapter = vport->adapter;
884 
885 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
886 		idpf_remove_mac_filters(vport);
887 }
888 
889 /**
890  * idpf_vport_stop - Disable a vport
891  * @vport: vport to disable
892  */
893 static void idpf_vport_stop(struct idpf_vport *vport)
894 {
895 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
896 
897 	if (np->state <= __IDPF_VPORT_DOWN)
898 		return;
899 
900 	netif_carrier_off(vport->netdev);
901 	netif_tx_disable(vport->netdev);
902 
903 	idpf_send_disable_vport_msg(vport);
904 	idpf_send_disable_queues_msg(vport);
905 	idpf_send_map_unmap_queue_vector_msg(vport, false);
906 	/* Normally we ask for queues in create_vport, but if the number of
907 	 * initially requested queues has changed, for example via ethtool
908 	 * set channels, we delete the queues and then add them back
909 	 * instead of deleting and reallocating the vport.
910 	 */
911 	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
912 		idpf_send_delete_queues_msg(vport);
913 
914 	idpf_remove_features(vport);
915 
916 	vport->link_up = false;
917 	idpf_vport_intr_deinit(vport);
918 	idpf_vport_intr_rel(vport);
919 	idpf_vport_queues_rel(vport);
920 	np->state = __IDPF_VPORT_DOWN;
921 }
922 
923 /**
924  * idpf_stop - Disables a network interface
925  * @netdev: network interface device structure
926  *
927  * The stop entry point is called when an interface is deactivated by the OS,
928  * and the netdevice enters the DOWN state. The hardware is still under the
929  * driver's control, but the netdev interface is disabled.
930  *
931  * Returns success only - not allowed to fail
932  */
933 static int idpf_stop(struct net_device *netdev)
934 {
935 	struct idpf_netdev_priv *np = netdev_priv(netdev);
936 	struct idpf_vport *vport;
937 
938 	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
939 		return 0;
940 
941 	idpf_vport_ctrl_lock(netdev);
942 	vport = idpf_netdev_to_vport(netdev);
943 
944 	idpf_vport_stop(vport);
945 
946 	idpf_vport_ctrl_unlock(netdev);
947 
948 	return 0;
949 }
950 
951 /**
952  * @vport: vport whose netdev is to be unregistered
953  * @vport: vport for which netdev to be unregistered
954  */
955 static void idpf_decfg_netdev(struct idpf_vport *vport)
956 {
957 	struct idpf_adapter *adapter = vport->adapter;
958 
959 	unregister_netdev(vport->netdev);
960 	free_netdev(vport->netdev);
961 	vport->netdev = NULL;
962 
963 	adapter->netdevs[vport->idx] = NULL;
964 }
965 
966 /**
967  * idpf_vport_rel - Delete a vport and free its resources
968  * @vport: the vport being removed
969  */
970 static void idpf_vport_rel(struct idpf_vport *vport)
971 {
972 	struct idpf_adapter *adapter = vport->adapter;
973 	struct idpf_vport_config *vport_config;
974 	struct idpf_vector_info vec_info;
975 	struct idpf_rss_data *rss_data;
976 	struct idpf_vport_max_q max_q;
977 	u16 idx = vport->idx;
978 	int i;
979 
980 	vport_config = adapter->vport_config[vport->idx];
981 	idpf_deinit_rss(vport);
982 	rss_data = &vport_config->user_config.rss_data;
983 	kfree(rss_data->rss_key);
984 	rss_data->rss_key = NULL;
985 
986 	idpf_send_destroy_vport_msg(vport);
987 
988 	/* Set all bits as we don't know which vc_state the vport vchnl_wq
989 	 * is waiting on, and wake up the virtchnl workqueue even if it is
990 	 * waiting for the response as we are going down
991 	 */
992 	for (i = 0; i < IDPF_VC_NBITS; i++)
993 		set_bit(i, vport->vc_state);
994 	wake_up(&vport->vchnl_wq);
995 
996 	mutex_destroy(&vport->vc_buf_lock);
997 
998 	/* Clear all the bits */
999 	for (i = 0; i < IDPF_VC_NBITS; i++)
1000 		clear_bit(i, vport->vc_state);
1001 
1002 	/* Release all max queues allocated to the adapter's pool */
1003 	max_q.max_rxq = vport_config->max_q.max_rxq;
1004 	max_q.max_txq = vport_config->max_q.max_txq;
1005 	max_q.max_bufq = vport_config->max_q.max_bufq;
1006 	max_q.max_complq = vport_config->max_q.max_complq;
1007 	idpf_vport_dealloc_max_qs(adapter, &max_q);
1008 
1009 	/* Release all the allocated vectors on the stack */
1010 	vec_info.num_req_vecs = 0;
1011 	vec_info.num_curr_vecs = vport->num_q_vectors;
1012 	vec_info.default_vport = vport->default_vport;
1013 
1014 	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
1015 
1016 	kfree(vport->q_vector_idxs);
1017 	vport->q_vector_idxs = NULL;
1018 
1019 	kfree(adapter->vport_params_recvd[idx]);
1020 	adapter->vport_params_recvd[idx] = NULL;
1021 	kfree(adapter->vport_params_reqd[idx]);
1022 	adapter->vport_params_reqd[idx] = NULL;
1023 	if (adapter->vport_config[idx]) {
1024 		kfree(adapter->vport_config[idx]->req_qs_chunks);
1025 		adapter->vport_config[idx]->req_qs_chunks = NULL;
1026 	}
1027 	kfree(vport);
1028 	adapter->num_alloc_vports--;
1029 }
1030 
1031 /**
1032  * idpf_vport_dealloc - cleanup and release a given vport
1033  * @vport: pointer to idpf vport structure
1034  *
1035  * returns nothing
1036  */
1037 static void idpf_vport_dealloc(struct idpf_vport *vport)
1038 {
1039 	struct idpf_adapter *adapter = vport->adapter;
1040 	unsigned int i = vport->idx;
1041 
1042 	idpf_deinit_mac_addr(vport);
1043 	idpf_vport_stop(vport);
1044 
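	/* Keep the netdev registered across a HW reset so it can be attached
	 * to the replacement vport; flush the MAC filter list only when the
	 * driver itself is being removed.
	 */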
1045 	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1046 		idpf_decfg_netdev(vport);
1047 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1048 		idpf_del_all_mac_filters(vport);
1049 
1050 	if (adapter->netdevs[i]) {
1051 		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
1052 
1053 		np->vport = NULL;
1054 	}
1055 
1056 	idpf_vport_rel(vport);
1057 
1058 	adapter->vports[i] = NULL;
1059 	adapter->next_vport = idpf_get_free_slot(adapter);
1060 }
1061 
1062 /**
1063  * idpf_is_hsplit_supported - check whether the header split is supported
1064  * @vport: virtual port to check the capability for
1065  *
1066  * Return: true if it's supported by the HW/FW, false if not.
1067  */
1068 static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
1069 {
1070 	return idpf_is_queue_model_split(vport->rxq_model) &&
1071 	       idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
1072 				   IDPF_CAP_HSPLIT);
1073 }
1074 
1075 /**
1076  * idpf_vport_get_hsplit - get the current header split feature state
1077  * @vport: virtual port to query the state for
1078  *
1079  * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
1080  *         ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
1081  *         ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
1082  */
1083 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
1084 {
1085 	const struct idpf_vport_user_config_data *config;
1086 
1087 	if (!idpf_is_hsplit_supported(vport))
1088 		return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1089 
1090 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1091 
1092 	return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
1093 	       ETHTOOL_TCP_DATA_SPLIT_ENABLED :
1094 	       ETHTOOL_TCP_DATA_SPLIT_DISABLED;
1095 }
1096 
1097 /**
1098  * idpf_vport_set_hsplit - enable or disable header split on a given vport
1099  * @vport: virtual port to configure
1100  * @val: Ethtool flag controlling the header split state
1101  *
1102  * Return: true on success, false if not supported by the HW.
1103  */
1104 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
1105 {
1106 	struct idpf_vport_user_config_data *config;
1107 
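	/* Without header split support, only a request to leave the setting
	 * in the "unknown" state can succeed.
	 */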
1108 	if (!idpf_is_hsplit_supported(vport))
1109 		return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
1110 
1111 	config = &vport->adapter->vport_config[vport->idx]->user_config;
1112 
1113 	switch (val) {
1114 	case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
1115 		/* Default is to enable */
1116 	case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
1117 		__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1118 		return true;
1119 	case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
1120 		__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
1121 		return true;
1122 	default:
1123 		return false;
1124 	}
1125 }
1126 
1127 /**
1128  * idpf_vport_alloc - Allocates the next available struct vport in the adapter
1129  * @adapter: board private structure
1130  * @max_q: vport max queue info
1131  *
1132  * returns a pointer to a vport on success, NULL on failure.
1133  */
1134 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
1135 					   struct idpf_vport_max_q *max_q)
1136 {
1137 	struct idpf_rss_data *rss_data;
1138 	u16 idx = adapter->next_vport;
1139 	struct idpf_vport *vport;
1140 	u16 num_max_q;
1141 
1142 	if (idx == IDPF_NO_FREE_SLOT)
1143 		return NULL;
1144 
1145 	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1146 	if (!vport)
1147 		return vport;
1148 
1149 	if (!adapter->vport_config[idx]) {
1150 		struct idpf_vport_config *vport_config;
1151 
1152 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
1153 		if (!vport_config) {
1154 			kfree(vport);
1155 
1156 			return NULL;
1157 		}
1158 
1159 		adapter->vport_config[idx] = vport_config;
1160 	}
1161 
1162 	vport->idx = idx;
1163 	vport->adapter = adapter;
1164 	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
1165 	vport->default_vport = adapter->num_alloc_vports <
1166 			       idpf_get_default_vports(adapter);
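	/* A vport counts as a default vport only while fewer than
	 * idpf_get_default_vports() vports exist; default vports draw their
	 * minimum vectors from the reserved pool.
	 */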
1167 
1168 	num_max_q = max(max_q->max_txq, max_q->max_rxq);
1169 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
1170 	if (!vport->q_vector_idxs) {
1171 		kfree(vport);
1172 
1173 		return NULL;
1174 	}
1175 	idpf_vport_init(vport, max_q);
1176 
1177 	/* This alloc is done separately from the LUT because it's not strictly
1178 	 * dependent on how many queues we have. If we change the number of queues
1179 	 * and soft reset we'll need a new LUT but the key can remain the same
1180 	 * for as long as the vport exists.
1181 	 */
1182 	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
1183 	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
1184 	if (!rss_data->rss_key) {
1185 		kfree(vport);
1186 
1187 		return NULL;
1188 	}
1189 	/* Initialize default rss key */
1190 	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
1191 
1192 	/* fill vport slot in the adapter struct */
1193 	adapter->vports[idx] = vport;
1194 	adapter->vport_ids[idx] = idpf_get_vport_id(vport);
1195 
1196 	adapter->num_alloc_vports++;
1197 	/* prepare adapter->next_vport for next use */
1198 	adapter->next_vport = idpf_get_free_slot(adapter);
1199 
1200 	return vport;
1201 }
1202 
1203 /**
1204  * idpf_get_stats64 - get statistics for network device structure
1205  * @netdev: network interface device structure
1206  * @stats: main device statistics structure
1207  */
1208 static void idpf_get_stats64(struct net_device *netdev,
1209 			     struct rtnl_link_stats64 *stats)
1210 {
1211 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1212 
1213 	spin_lock_bh(&np->stats_lock);
1214 	*stats = np->netstats;
1215 	spin_unlock_bh(&np->stats_lock);
1216 }
1217 
1218 /**
1219  * idpf_statistics_task - Delayed task to get statistics over mailbox
1220  * @work: work_struct handle to our data
1221  */
1222 void idpf_statistics_task(struct work_struct *work)
1223 {
1224 	struct idpf_adapter *adapter;
1225 	int i;
1226 
1227 	adapter = container_of(work, struct idpf_adapter, stats_task.work);
1228 
1229 	for (i = 0; i < adapter->max_vports; i++) {
1230 		struct idpf_vport *vport = adapter->vports[i];
1231 
1232 		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1233 			idpf_send_get_stats_msg(vport);
1234 	}
1235 
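	/* Poll statistics over the mailbox every 10 seconds */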
1236 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1237 			   msecs_to_jiffies(10000));
1238 }
1239 
1240 /**
1241  * idpf_mbx_task - Delayed task to handle mailbox responses
1242  * @work: work_struct handle
1243  */
1244 void idpf_mbx_task(struct work_struct *work)
1245 {
1246 	struct idpf_adapter *adapter;
1247 
1248 	adapter = container_of(work, struct idpf_adapter, mbx_task.work);
1249 
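	/* In interrupt mode simply re-arm the mailbox interrupt; otherwise
	 * poll by rescheduling this task every 300 ms.
	 */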
1250 	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
1251 		idpf_mb_irq_enable(adapter);
1252 	else
1253 		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
1254 				   msecs_to_jiffies(300));
1255 
1256 	idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
1257 }
1258 
1259 /**
1260  * idpf_service_task - Delayed task for detecting and handling hardware resets
1261  * @work: work_struct handle to our data
1262  *
1263  */
1264 void idpf_service_task(struct work_struct *work)
1265 {
1266 	struct idpf_adapter *adapter;
1267 
1268 	adapter = container_of(work, struct idpf_adapter, serv_task.work);
1269 
1270 	if (idpf_is_reset_detected(adapter) &&
1271 	    !idpf_is_reset_in_prog(adapter) &&
1272 	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
1273 		dev_info(&adapter->pdev->dev, "HW reset detected\n");
1274 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
1275 		queue_delayed_work(adapter->vc_event_wq,
1276 				   &adapter->vc_event_task,
1277 				   msecs_to_jiffies(10));
1278 	}
1279 
1280 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
1281 			   msecs_to_jiffies(300));
1282 }
1283 
1284 /**
1285  * idpf_restore_features - Restore feature configs
1286  * @vport: virtual port structure
1287  */
1288 static void idpf_restore_features(struct idpf_vport *vport)
1289 {
1290 	struct idpf_adapter *adapter = vport->adapter;
1291 
1292 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
1293 		idpf_restore_mac_filters(vport);
1294 }
1295 
1296 /**
1297  * idpf_set_real_num_queues - set number of queues for netdev
1298  * @vport: virtual port structure
1299  *
1300  * Returns 0 on success, negative on failure.
1301  */
1302 static int idpf_set_real_num_queues(struct idpf_vport *vport)
1303 {
1304 	int err;
1305 
1306 	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
1307 	if (err)
1308 		return err;
1309 
1310 	return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
1311 }
1312 
1313 /**
1314  * idpf_up_complete - Complete interface up sequence
1315  * @vport: virtual port structure
1316  *
1317  * Returns 0 on success, negative on failure.
1318  */
1319 static int idpf_up_complete(struct idpf_vport *vport)
1320 {
1321 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1322 
1323 	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
1324 		netif_carrier_on(vport->netdev);
1325 		netif_tx_start_all_queues(vport->netdev);
1326 	}
1327 
1328 	np->state = __IDPF_VPORT_UP;
1329 
1330 	return 0;
1331 }
1332 
1333 /**
1334  * idpf_rx_init_buf_tail - Write initial buffer ring tail value
1335  * @vport: virtual port struct
1336  */
1337 static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
1338 {
1339 	int i, j;
1340 
1341 	for (i = 0; i < vport->num_rxq_grp; i++) {
1342 		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
1343 
1344 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1345 			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1346 				struct idpf_queue *q =
1347 					&grp->splitq.bufq_sets[j].bufq;
1348 
1349 				writel(q->next_to_alloc, q->tail);
1350 			}
1351 		} else {
1352 			for (j = 0; j < grp->singleq.num_rxq; j++) {
1353 				struct idpf_queue *q =
1354 					grp->singleq.rxqs[j];
1355 
1356 				writel(q->next_to_alloc, q->tail);
1357 			}
1358 		}
1359 	}
1360 }
1361 
1362 /**
1363  * idpf_vport_open - Bring up a vport
1364  * @vport: vport to bring up
1365  * @alloc_res: allocate queue resources
1366  */
1367 static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
1368 {
1369 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1370 	struct idpf_adapter *adapter = vport->adapter;
1371 	struct idpf_vport_config *vport_config;
1372 	int err;
1373 
1374 	if (np->state != __IDPF_VPORT_DOWN)
1375 		return -EBUSY;
1376 
1377 	/* we do not allow interface up just yet */
1378 	netif_carrier_off(vport->netdev);
1379 
1380 	if (alloc_res) {
1381 		err = idpf_vport_queues_alloc(vport);
1382 		if (err)
1383 			return err;
1384 	}
1385 
1386 	err = idpf_vport_intr_alloc(vport);
1387 	if (err) {
1388 		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
1389 			vport->vport_id, err);
1390 		goto queues_rel;
1391 	}
1392 
1393 	err = idpf_vport_queue_ids_init(vport);
1394 	if (err) {
1395 		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
1396 			vport->vport_id, err);
1397 		goto intr_rel;
1398 	}
1399 
1400 	err = idpf_vport_intr_init(vport);
1401 	if (err) {
1402 		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
1403 			vport->vport_id, err);
1404 		goto intr_rel;
1405 	}
1406 
1407 	err = idpf_rx_bufs_init_all(vport);
1408 	if (err) {
1409 		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
1410 			vport->vport_id, err);
1411 		goto intr_rel;
1412 	}
1413 
1414 	err = idpf_queue_reg_init(vport);
1415 	if (err) {
1416 		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
1417 			vport->vport_id, err);
1418 		goto intr_rel;
1419 	}
1420 
1421 	idpf_rx_init_buf_tail(vport);
1422 
1423 	err = idpf_send_config_queues_msg(vport);
1424 	if (err) {
1425 		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
1426 			vport->vport_id, err);
1427 		goto intr_deinit;
1428 	}
1429 
1430 	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
1431 	if (err) {
1432 		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
1433 			vport->vport_id, err);
1434 		goto intr_deinit;
1435 	}
1436 
1437 	err = idpf_send_enable_queues_msg(vport);
1438 	if (err) {
1439 		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
1440 			vport->vport_id, err);
1441 		goto unmap_queue_vectors;
1442 	}
1443 
1444 	err = idpf_send_enable_vport_msg(vport);
1445 	if (err) {
1446 		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
1447 			vport->vport_id, err);
1448 		err = -EAGAIN;
1449 		goto disable_queues;
1450 	}
1451 
1452 	idpf_restore_features(vport);
1453 
1454 	vport_config = adapter->vport_config[vport->idx];
1455 	if (vport_config->user_config.rss_data.rss_lut)
1456 		err = idpf_config_rss(vport);
1457 	else
1458 		err = idpf_init_rss(vport);
1459 	if (err) {
1460 		dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
1461 			vport->vport_id, err);
1462 		goto disable_vport;
1463 	}
1464 
1465 	err = idpf_up_complete(vport);
1466 	if (err) {
1467 		dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
1468 			vport->vport_id, err);
1469 		goto deinit_rss;
1470 	}
1471 
1472 	return 0;
1473 
1474 deinit_rss:
1475 	idpf_deinit_rss(vport);
1476 disable_vport:
1477 	idpf_send_disable_vport_msg(vport);
1478 disable_queues:
1479 	idpf_send_disable_queues_msg(vport);
1480 unmap_queue_vectors:
1481 	idpf_send_map_unmap_queue_vector_msg(vport, false);
1482 intr_deinit:
1483 	idpf_vport_intr_deinit(vport);
1484 intr_rel:
1485 	idpf_vport_intr_rel(vport);
1486 queues_rel:
1487 	idpf_vport_queues_rel(vport);
1488 
1489 	return err;
1490 }
1491 
1492 /**
1493  * idpf_init_task - Delayed initialization task
1494  * @work: work_struct handle to our data
1495  *
1496  * Init task finishes up pending work started in probe. Due to the asynchronous
1497  * nature in which the device communicates with hardware, we may have to wait
1498  * several milliseconds to get a response.  Instead of busy polling in probe,
1499  * pulling it out into a delayed work task prevents us from bogging down the
1500  * whole system waiting for a response from hardware.
1501  */
1502 void idpf_init_task(struct work_struct *work)
1503 {
1504 	struct idpf_vport_config *vport_config;
1505 	struct idpf_vport_max_q max_q;
1506 	struct idpf_adapter *adapter;
1507 	struct idpf_netdev_priv *np;
1508 	struct idpf_vport *vport;
1509 	u16 num_default_vports;
1510 	struct pci_dev *pdev;
1511 	bool default_vport;
1512 	int index, err;
1513 
1514 	adapter = container_of(work, struct idpf_adapter, init_task.work);
1515 
1516 	num_default_vports = idpf_get_default_vports(adapter);
1517 	if (adapter->num_alloc_vports < num_default_vports)
1518 		default_vport = true;
1519 	else
1520 		default_vport = false;
1521 
1522 	err = idpf_vport_alloc_max_qs(adapter, &max_q);
1523 	if (err)
1524 		goto unwind_vports;
1525 
1526 	err = idpf_send_create_vport_msg(adapter, &max_q);
1527 	if (err) {
1528 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1529 		goto unwind_vports;
1530 	}
1531 
1532 	pdev = adapter->pdev;
1533 	vport = idpf_vport_alloc(adapter, &max_q);
1534 	if (!vport) {
1535 		err = -EFAULT;
1536 		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
1537 			err);
1538 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1539 		goto unwind_vports;
1540 	}
1541 
1542 	index = vport->idx;
1543 	vport_config = adapter->vport_config[index];
1544 
1545 	init_waitqueue_head(&vport->sw_marker_wq);
1546 	init_waitqueue_head(&vport->vchnl_wq);
1547 
1548 	mutex_init(&vport->vc_buf_lock);
1549 	spin_lock_init(&vport_config->mac_filter_list_lock);
1550 
1551 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
1552 
1553 	err = idpf_check_supported_desc_ids(vport);
1554 	if (err) {
1555 		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
1556 		goto cfg_netdev_err;
1557 	}
1558 
1559 	if (idpf_cfg_netdev(vport))
1560 		goto cfg_netdev_err;
1561 
1562 	err = idpf_send_get_rx_ptype_msg(vport);
1563 	if (err)
1564 		goto handle_err;
1565 
1566 	/* Once state is put into DOWN, driver is ready for dev_open */
1567 	np = netdev_priv(vport->netdev);
1568 	np->state = __IDPF_VPORT_DOWN;
1569 	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
1570 		idpf_vport_open(vport, true);
1571 
1572 	/* Requeue 'idpf_init_task' and return until all the default vports
1573 	 * have been created
1574 	 */
1575 	if (adapter->num_alloc_vports < num_default_vports) {
1576 		queue_delayed_work(adapter->init_wq, &adapter->init_task,
1577 				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
1578 
1579 		return;
1580 	}
1581 
1582 	for (index = 0; index < adapter->max_vports; index++) {
1583 		if (adapter->netdevs[index] &&
1584 		    !test_bit(IDPF_VPORT_REG_NETDEV,
1585 			      adapter->vport_config[index]->flags)) {
1586 			register_netdev(adapter->netdevs[index]);
1587 			set_bit(IDPF_VPORT_REG_NETDEV,
1588 				adapter->vport_config[index]->flags);
1589 		}
1590 	}
1591 
1592 	/* As all the required vports are created, clear the reset flag
1593 	 * unconditionally here in case we were in reset and the link was down.
1594 	 */
1595 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1596 	/* Start the statistics task now */
1597 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1598 			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
1599 
1600 	return;
1601 
1602 handle_err:
1603 	idpf_decfg_netdev(vport);
1604 cfg_netdev_err:
1605 	idpf_vport_rel(vport);
1606 	adapter->vports[index] = NULL;
1607 unwind_vports:
1608 	if (default_vport) {
1609 		for (index = 0; index < adapter->max_vports; index++) {
1610 			if (adapter->vports[index])
1611 				idpf_vport_dealloc(adapter->vports[index]);
1612 		}
1613 	}
1614 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1615 }
1616 
1617 /**
1618  * idpf_sriov_ena - Enable or change number of VFs
1619  * @adapter: private data struct
1620  * @num_vfs: number of VFs to allocate
1621  */
1622 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
1623 {
1624 	struct device *dev = &adapter->pdev->dev;
1625 	int err;
1626 
1627 	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
1628 	if (err) {
1629 		dev_err(dev, "Failed to allocate VFs: %d\n", err);
1630 
1631 		return err;
1632 	}
1633 
1634 	err = pci_enable_sriov(adapter->pdev, num_vfs);
1635 	if (err) {
1636 		idpf_send_set_sriov_vfs_msg(adapter, 0);
1637 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1638 
1639 		return err;
1640 	}
1641 
1642 	adapter->num_vfs = num_vfs;
1643 
1644 	return num_vfs;
1645 }
1646 
1647 /**
1648  * idpf_sriov_configure - Configure the requested VFs
1649  * @pdev: pointer to a pci_dev structure
1650  * @num_vfs: number of vfs to allocate
1651  *
1652  * Enable or change the number of VFs. Called when the user updates the number
1653  * of VFs in sysfs.
1654  **/
1655 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
1656 {
1657 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
1658 
1659 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
1660 		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
1661 
1662 		return -EOPNOTSUPP;
1663 	}
1664 
1665 	if (num_vfs)
1666 		return idpf_sriov_ena(adapter, num_vfs);
1667 
1668 	if (pci_vfs_assigned(pdev)) {
1669 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
1670 
1671 		return -EBUSY;
1672 	}
1673 
1674 	pci_disable_sriov(adapter->pdev);
1675 	idpf_send_set_sriov_vfs_msg(adapter, 0);
1676 	adapter->num_vfs = 0;
1677 
1678 	return 0;
1679 }
1680 
1681 /**
1682  * idpf_deinit_task - Device deinit routine
1683  * @adapter: Driver specific private structure
1684  *
1685  * Extended remove logic which will be used for
1686  * hard reset as well
1687  */
1688 void idpf_deinit_task(struct idpf_adapter *adapter)
1689 {
1690 	unsigned int i;
1691 
1692 	/* Wait until the init_task is done, else this thread might release
1693 	 * the resources first and the other thread might end up in a bad state
1694 	 */
1695 	cancel_delayed_work_sync(&adapter->init_task);
1696 
1697 	if (!adapter->vports)
1698 		return;
1699 
1700 	cancel_delayed_work_sync(&adapter->stats_task);
1701 
1702 	for (i = 0; i < adapter->max_vports; i++) {
1703 		if (adapter->vports[i])
1704 			idpf_vport_dealloc(adapter->vports[i]);
1705 	}
1706 }
1707 
1708 /**
1709  * idpf_check_reset_complete - check that reset is complete
1710  * @hw: pointer to hw struct
1711  * @reset_reg: struct with reset registers
1712  *
1713  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
1714  **/
1715 static int idpf_check_reset_complete(struct idpf_hw *hw,
1716 				     struct idpf_reset_reg *reset_reg)
1717 {
1718 	struct idpf_adapter *adapter = hw->back;
1719 	int i;
1720 
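	/* Poll the reset status register for roughly 10-20 seconds
	 * (2000 iterations of a 5-10 ms sleep)
	 */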
1721 	for (i = 0; i < 2000; i++) {
1722 		u32 reg_val = readl(reset_reg->rstat);
1723 
1724 		/* 0xFFFFFFFF might be read if the other side hasn't cleared the
1725 		 * register for us yet and 0xFFFFFFFF is not a valid value for
1726 		 * the register, so treat that as invalid.
1727 		 */
1728 		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
1729 			return 0;
1730 
1731 		usleep_range(5000, 10000);
1732 	}
1733 
1734 	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
1735 	/* Clear the reset flag unconditionally here since the reset
1736 	 * technically isn't in progress anymore from the driver's perspective
1737 	 */
1738 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1739 
1740 	return -EBUSY;
1741 }
1742 
1743 /**
1744  * idpf_set_vport_state - Save which vports to bring back up after the reset
1745  * @adapter: Driver specific private structure
1746  */
1747 static void idpf_set_vport_state(struct idpf_adapter *adapter)
1748 {
1749 	u16 i;
1750 
1751 	for (i = 0; i < adapter->max_vports; i++) {
1752 		struct idpf_netdev_priv *np;
1753 
1754 		if (!adapter->netdevs[i])
1755 			continue;
1756 
1757 		np = netdev_priv(adapter->netdevs[i]);
1758 		if (np->state == __IDPF_VPORT_UP)
1759 			set_bit(IDPF_VPORT_UP_REQUESTED,
1760 				adapter->vport_config[i]->flags);
1761 	}
1762 }
1763 
1764 /**
1765  * idpf_init_hard_reset - Initiate a hardware reset
1766  * @adapter: Driver specific private structure
1767  *
1768  * Deallocate the vports and all the resources associated with them and
1769  * reallocate. Also reinitialize the mailbox. Return 0 on success,
1770  * negative on failure.
1771  */
1772 static int idpf_init_hard_reset(struct idpf_adapter *adapter)
1773 {
1774 	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
1775 	struct device *dev = &adapter->pdev->dev;
1776 	struct net_device *netdev;
1777 	int err;
1778 	u16 i;
1779 
1780 	mutex_lock(&adapter->vport_ctrl_lock);
1781 
1782 	dev_info(dev, "Device HW Reset initiated\n");
1783 
1784 	/* Avoid TX hangs on reset */
1785 	for (i = 0; i < adapter->max_vports; i++) {
1786 		netdev = adapter->netdevs[i];
1787 		if (!netdev)
1788 			continue;
1789 
1790 		netif_carrier_off(netdev);
1791 		netif_tx_disable(netdev);
1792 	}
1793 
1794 	/* Prepare for reset */
1795 	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1796 		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
1797 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
1798 		bool is_reset = idpf_is_reset_detected(adapter);
1799 
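		/* Remember which vports were up so they can be reopened after
		 * the reset, tear down virtchnl state, and only trigger a new
		 * function reset if HW hasn't already signalled one.
		 */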
1800 		idpf_set_vport_state(adapter);
1801 		idpf_vc_core_deinit(adapter);
1802 		if (!is_reset)
1803 			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
1804 		idpf_deinit_dflt_mbx(adapter);
1805 	} else {
1806 		dev_err(dev, "Unhandled hard reset cause\n");
1807 		err = -EBADRQC;
1808 		goto unlock_mutex;
1809 	}
1810 
1811 	/* Wait for reset to complete */
1812 	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
1813 	if (err) {
1814 		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
1815 			adapter->state);
1816 		goto unlock_mutex;
1817 	}
1818 
1819 	/* Reset is complete and so start building the driver resources again */
1820 	err = idpf_init_dflt_mbx(adapter);
1821 	if (err) {
1822 		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
1823 		goto unlock_mutex;
1824 	}
1825 
1826 	/* Initialize the state machine, also allocate memory and request
1827 	 * resources
1828 	 */
1829 	err = idpf_vc_core_init(adapter);
1830 	if (err) {
1831 		idpf_deinit_dflt_mbx(adapter);
1832 		goto unlock_mutex;
1833 	}
1834 
1835 	/* Wait until all the vports are initialized to release the reset lock,
1836 	 * else user space callbacks may access uninitialized vports
1837 	 */
1838 	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1839 		msleep(100);
1840 
1841 unlock_mutex:
1842 	mutex_unlock(&adapter->vport_ctrl_lock);
1843 
1844 	return err;
1845 }
1846 
1847 /**
1848  * idpf_vc_event_task - Handle virtchannel event logic
1849  * @work: work queue struct
1850  */
1851 void idpf_vc_event_task(struct work_struct *work)
1852 {
1853 	struct idpf_adapter *adapter;
1854 
1855 	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
1856 
1857 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1858 		return;
1859 
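	/* A function reset or initial driver load is handled as a hard reset;
	 * mark it in progress so other driver paths back off until the reset
	 * flow completes and IDPF_HR_RESET_IN_PROG is cleared.
	 */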
1860 	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
1861 	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1862 		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1863 		idpf_init_hard_reset(adapter);
1864 	}
1865 }
1866 
1867 /**
1868  * idpf_initiate_soft_reset - Initiate a software reset
1869  * @vport: virtual port data struct
1870  * @reset_cause: reason for the soft reset
1871  *
1872  * Soft reset only reallocs vport queue resources. Returns 0 on success,
1873  * negative on failure.
1874  */
1875 int idpf_initiate_soft_reset(struct idpf_vport *vport,
1876 			     enum idpf_vport_reset_cause reset_cause)
1877 {
1878 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1879 	enum idpf_vport_state current_state = np->state;
1880 	struct idpf_adapter *adapter = vport->adapter;
1881 	struct idpf_vport *new_vport;
1882 	int err, i;
1883 
	/* If the system is low on memory, we can end up in a bad state if we
	 * free all the memory for queue resources and then try to allocate
	 * them again. Instead, pre-allocate the new resources before touching
	 * anything and bail if the allocation fails.
	 *
	 * Make a clone of the existing vport to mimic its current
	 * configuration, then modify the new structure with any requested
	 * changes. Once the allocation of the new resources is done, stop the
	 * existing vport and copy the configuration to the main vport. If an
	 * error occurred, the existing vport will be untouched.
	 */
1896 	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1897 	if (!new_vport)
1898 		return -ENOMEM;
1899 
1900 	/* This purposely avoids copying the end of the struct because it
1901 	 * contains wait_queues and mutexes and other stuff we don't want to
1902 	 * mess with. Nothing below should use those variables from new_vport
1903 	 * and should instead always refer to them in vport if they need to.
1904 	 */
1905 	memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
1906 
1907 	/* Adjust resource parameters prior to reallocating resources */
1908 	switch (reset_cause) {
1909 	case IDPF_SR_Q_CHANGE:
1910 		err = idpf_vport_adjust_qs(new_vport);
1911 		if (err)
1912 			goto free_vport;
1913 		break;
1914 	case IDPF_SR_Q_DESC_CHANGE:
1915 		/* Update queue parameters before allocating resources */
1916 		idpf_vport_calc_num_q_desc(new_vport);
1917 		break;
1918 	case IDPF_SR_MTU_CHANGE:
1919 	case IDPF_SR_RSC_CHANGE:
1920 		break;
1921 	default:
1922 		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
1923 		err = -EINVAL;
1924 		goto free_vport;
1925 	}
1926 
1927 	err = idpf_vport_queues_alloc(new_vport);
1928 	if (err)
1929 		goto free_vport;
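
	/* If the vport is already down, only the queues on the device side
	 * need to be deleted; otherwise request queue deletion as part of
	 * stopping the vport.
	 */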
1930 	if (current_state <= __IDPF_VPORT_DOWN) {
1931 		idpf_send_delete_queues_msg(vport);
1932 	} else {
1933 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
1934 		idpf_vport_stop(vport);
1935 	}
1936 
1937 	idpf_deinit_rss(vport);
	/* We're passing in vport here because we need its wait_queue to send
	 * the message, and it should be getting all the vport config data out
	 * of the adapter. Be careful not to add code to add_queues that
	 * changes the vport config within vport itself, as it will be wiped
	 * out by the memcpy below.
	 */
1944 	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
1945 				       new_vport->num_complq,
1946 				       new_vport->num_rxq,
1947 				       new_vport->num_bufq);
1948 	if (err)
1949 		goto err_reset;
1950 
1951 	/* Same comment as above regarding avoiding copying the wait_queues and
1952 	 * mutexes applies here. We do not want to mess with those if possible.
1953 	 */
1954 	memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
1955 
	/* Since idpf_vport_queues_alloc was called with new_vport, the queue
	 * back pointers are currently pointing to the local new_vport. Reset
	 * the back pointers to the original vport here
1959 	 */
1960 	for (i = 0; i < vport->num_txq_grp; i++) {
1961 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1962 		int j;
1963 
1964 		tx_qgrp->vport = vport;
1965 		for (j = 0; j < tx_qgrp->num_txq; j++)
1966 			tx_qgrp->txqs[j]->vport = vport;
1967 
1968 		if (idpf_is_queue_model_split(vport->txq_model))
1969 			tx_qgrp->complq->vport = vport;
1970 	}
1971 
1972 	for (i = 0; i < vport->num_rxq_grp; i++) {
1973 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1974 		struct idpf_queue *q;
1975 		u16 num_rxq;
1976 		int j;
1977 
1978 		rx_qgrp->vport = vport;
1979 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
1980 			rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
1981 
1982 		if (idpf_is_queue_model_split(vport->rxq_model))
1983 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1984 		else
1985 			num_rxq = rx_qgrp->singleq.num_rxq;
1986 
1987 		for (j = 0; j < num_rxq; j++) {
1988 			if (idpf_is_queue_model_split(vport->rxq_model))
1989 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1990 			else
1991 				q = rx_qgrp->singleq.rxqs[j];
1992 			q->vport = vport;
1993 		}
1994 	}
1995 
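	/* A queue count change may also change how many vectors the vport
	 * needs, so re-derive the vector indexes for the new queue layout.
	 */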
1996 	if (reset_cause == IDPF_SR_Q_CHANGE)
1997 		idpf_vport_alloc_vec_indexes(vport);
1998 
1999 	err = idpf_set_real_num_queues(vport);
2000 	if (err)
2001 		goto err_reset;
2002 
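	/* Only bring the vport back up if it was up before the soft reset */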
2003 	if (current_state == __IDPF_VPORT_UP)
2004 		err = idpf_vport_open(vport, false);
2005 
2006 	kfree(new_vport);
2007 
2008 	return err;
2009 
2010 err_reset:
2011 	idpf_vport_queues_rel(new_vport);
2012 free_vport:
2013 	kfree(new_vport);
2014 
2015 	return err;
2016 }
2017 
2018 /**
2019  * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
2020  * @netdev: the netdevice
2021  * @addr: address to add
2022  *
2023  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock
 * spinlock, meaning we cannot sleep in this context. Due to this, we have to
 * add the filter and send the virtchnl message asynchronously without waiting
 * for the response from the other side. We won't know whether or not the
 * operation actually succeeded until we get the message back. Returns 0 on
 * success, negative on failure.
2030  */
2031 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
2032 {
2033 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2034 
2035 	return idpf_add_mac_filter(np->vport, np, addr, true);
2036 }
2037 
2038 /**
2039  * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2040  * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode. The kernel takes the addr_list_lock
 * spinlock, meaning we cannot sleep in this context. Due to this we have to
 * delete the filter and send the virtchnl message asynchronously without
 * waiting for the response from the other side. We won't know whether or not
 * the operation actually succeeded until we get the message back. Returns 0
 * on success, negative on failure.
2050  */
2051 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
2052 {
2053 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2054 
2055 	/* Under some circumstances, we might receive a request to delete
2056 	 * our own device address from our uc list. Because we store the
2057 	 * device address in the VSI's MAC filter list, we need to ignore
2058 	 * such requests and not delete our device address from this list.
2059 	 */
2060 	if (ether_addr_equal(addr, netdev->dev_addr))
2061 		return 0;
2062 
2063 	idpf_del_mac_filter(np->vport, np, addr, true);
2064 
2065 	return 0;
2066 }
2067 
2068 /**
2069  * idpf_set_rx_mode - NDO callback to set the netdev filters
2070  * @netdev: network interface device structure
2071  *
 * The stack takes the addr_list_lock spinlock before calling our
 * .set_rx_mode. We cannot sleep in this context.
2074  */
2075 static void idpf_set_rx_mode(struct net_device *netdev)
2076 {
2077 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2078 	struct idpf_vport_user_config_data *config_data;
2079 	struct idpf_adapter *adapter;
2080 	bool changed = false;
2081 	struct device *dev;
2082 	int err;
2083 
2084 	adapter = np->adapter;
2085 	dev = &adapter->pdev->dev;
2086 
2087 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
2088 		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2089 		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2090 	}
2091 
2092 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
2093 		return;
2094 
2095 	config_data = &adapter->vport_config[np->vport_idx]->user_config;
2096 	/* IFF_PROMISC enables both unicast and multicast promiscuous,
2097 	 * while IFF_ALLMULTI only enables multicast such that:
2098 	 *
2099 	 * promisc  + allmulti		= unicast | multicast
2100 	 * promisc  + !allmulti		= unicast | multicast
2101 	 * !promisc + allmulti		= multicast
2102 	 */
2103 	if ((netdev->flags & IFF_PROMISC) &&
2104 	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2105 		changed = true;
		dev_info(dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
2108 			dev_info(dev, "Entering multicast promiscuous mode\n");
2109 	}
2110 
2111 	if (!(netdev->flags & IFF_PROMISC) &&
2112 	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2113 		changed = true;
2114 		dev_info(dev, "Leaving promiscuous mode\n");
2115 	}
2116 
2117 	if (netdev->flags & IFF_ALLMULTI &&
2118 	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2119 		changed = true;
2120 		dev_info(dev, "Entering multicast promiscuous mode\n");
2121 	}
2122 
2123 	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
2124 	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2125 		changed = true;
2126 		dev_info(dev, "Leaving multicast promiscuous mode\n");
2127 	}
2128 
2129 	if (!changed)
2130 		return;
2131 
2132 	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
2133 	if (err)
2134 		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
2135 }
2136 
2137 /**
2138  * idpf_vport_manage_rss_lut - disable/enable RSS
2139  * @vport: the vport being changed
2140  *
 * When RSS is being disabled, this function zeroes out the RSS LUT; when RSS
 * is being re-enabled, it restores the LUT from the cached (default or
 * user-configured) copy.
 *
 * Returns 0 on success, negative on failure.
 */
2145 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
2146 {
2147 	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
2148 	struct idpf_rss_data *rss_data;
2149 	u16 idx = vport->idx;
2150 	int lut_size;
2151 
2152 	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
2153 	lut_size = rss_data->rss_lut_size * sizeof(u32);
2154 
2155 	if (ena) {
2156 		/* This will contain the default or user configured LUT */
2157 		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
2158 	} else {
2159 		/* Save a copy of the current LUT to be restored later if
2160 		 * requested.
2161 		 */
2162 		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
2163 
2164 		/* Zero out the current LUT to disable */
2165 		memset(rss_data->rss_lut, 0, lut_size);
2166 	}
2167 
2168 	return idpf_config_rss(vport);
2169 }
2170 
2171 /**
2172  * idpf_set_features - set the netdev feature flags
2173  * @netdev: ptr to the netdev being adjusted
2174  * @features: the feature set that the stack is suggesting
2175  */
2176 static int idpf_set_features(struct net_device *netdev,
2177 			     netdev_features_t features)
2178 {
2179 	netdev_features_t changed = netdev->features ^ features;
2180 	struct idpf_adapter *adapter;
2181 	struct idpf_vport *vport;
2182 	int err = 0;
2183 
2184 	idpf_vport_ctrl_lock(netdev);
2185 	vport = idpf_netdev_to_vport(netdev);
2186 
2187 	adapter = vport->adapter;
2188 
2189 	if (idpf_is_reset_in_prog(adapter)) {
2190 		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
2191 		err = -EBUSY;
2192 		goto unlock_mutex;
2193 	}
2194 
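	/* Toggle each feature bit in netdev->features before reconfiguring so
	 * helpers that check the feature state, e.g. idpf_is_feature_ena(),
	 * see the requested setting while it is being applied.
	 */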
2195 	if (changed & NETIF_F_RXHASH) {
2196 		netdev->features ^= NETIF_F_RXHASH;
2197 		err = idpf_vport_manage_rss_lut(vport);
2198 		if (err)
2199 			goto unlock_mutex;
2200 	}
2201 
2202 	if (changed & NETIF_F_GRO_HW) {
2203 		netdev->features ^= NETIF_F_GRO_HW;
2204 		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
2205 		if (err)
2206 			goto unlock_mutex;
2207 	}
2208 
2209 	if (changed & NETIF_F_LOOPBACK) {
2210 		netdev->features ^= NETIF_F_LOOPBACK;
2211 		err = idpf_send_ena_dis_loopback_msg(vport);
2212 	}
2213 
2214 unlock_mutex:
2215 	idpf_vport_ctrl_unlock(netdev);
2216 
2217 	return err;
2218 }
2219 
2220 /**
2221  * idpf_open - Called when a network interface becomes active
2222  * @netdev: network interface device structure
2223  *
2224  * The open entry point is called when a network interface is made
2225  * active by the system (IFF_UP).  At this point all resources needed
2226  * for transmit and receive operations are allocated, the interrupt
2227  * handler is registered with the OS, the netdev watchdog is enabled,
2228  * and the stack is notified that the interface is ready.
2229  *
2230  * Returns 0 on success, negative value on failure
2231  */
2232 static int idpf_open(struct net_device *netdev)
2233 {
2234 	struct idpf_vport *vport;
2235 	int err;
2236 
2237 	idpf_vport_ctrl_lock(netdev);
2238 	vport = idpf_netdev_to_vport(netdev);
2239 
2240 	err = idpf_vport_open(vport, true);
2241 
2242 	idpf_vport_ctrl_unlock(netdev);
2243 
2244 	return err;
2245 }
2246 
2247 /**
2248  * idpf_change_mtu - NDO callback to change the MTU
2249  * @netdev: network interface device structure
2250  * @new_mtu: new value for maximum frame size
2251  *
2252  * Returns 0 on success, negative on failure
2253  */
2254 static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
2255 {
2256 	struct idpf_vport *vport;
2257 	int err;
2258 
2259 	idpf_vport_ctrl_lock(netdev);
2260 	vport = idpf_netdev_to_vport(netdev);
2261 
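	/* Store the new MTU first; the soft reset below reallocates the vport
	 * queue resources to account for it.
	 */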
2262 	netdev->mtu = new_mtu;
2263 
2264 	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
2265 
2266 	idpf_vport_ctrl_unlock(netdev);
2267 
2268 	return err;
2269 }
2270 
2271 /**
2272  * idpf_features_check - Validate packet conforms to limits
2273  * @skb: skb buffer
2274  * @netdev: This port's netdev
2275  * @features: Offload features that the stack believes apply
2276  */
2277 static netdev_features_t idpf_features_check(struct sk_buff *skb,
2278 					     struct net_device *netdev,
2279 					     netdev_features_t features)
2280 {
2281 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2282 	struct idpf_adapter *adapter = vport->adapter;
2283 	size_t len;
2284 
	/* No point in doing any of this if neither checksum nor GSO is
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
2288 	 */
2289 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2290 		return features;
2291 
	/* We cannot support GSO if the MSS is going to be less than
	 * IDPF_TX_TSO_MIN_MSS (88 bytes); if it is, drop GSO for this skb.
2294 	 */
2295 	if (skb_is_gso(skb) &&
2296 	    (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
2297 		features &= ~NETIF_F_GSO_MASK;
2298 
2299 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
2300 	len = skb_network_offset(skb);
2301 	if (unlikely(len & ~(126)))
2302 		goto unsupported;
2303 
2304 	len = skb_network_header_len(skb);
2305 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2306 		goto unsupported;
2307 
2308 	if (!skb->encapsulation)
2309 		return features;
2310 
2311 	/* L4TUNLEN can support 127 words */
2312 	len = skb_inner_network_header(skb) - skb_transport_header(skb);
2313 	if (unlikely(len & ~(127 * 2)))
2314 		goto unsupported;
2315 
2316 	/* IPLEN can support at most 127 dwords */
2317 	len = skb_inner_network_header_len(skb);
2318 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2319 		goto unsupported;
2320 
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
2323 	 * by TCP, which is at most 15 dwords
2324 	 */
2325 
2326 	return features;
2327 
2328 unsupported:
2329 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2330 }
2331 
2332 /**
2333  * idpf_set_mac - NDO callback to set port mac address
2334  * @netdev: network interface device structure
2335  * @p: pointer to an address structure
2336  *
2337  * Returns 0 on success, negative on failure
2338  **/
 */
2340 {
2341 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2342 	struct idpf_vport_config *vport_config;
2343 	struct sockaddr *addr = p;
2344 	struct idpf_vport *vport;
2345 	int err = 0;
2346 
2347 	idpf_vport_ctrl_lock(netdev);
2348 	vport = idpf_netdev_to_vport(netdev);
2349 
2350 	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
2351 			     VIRTCHNL2_CAP_MACFILTER)) {
2352 		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
2353 		err = -EOPNOTSUPP;
2354 		goto unlock_mutex;
2355 	}
2356 
2357 	if (!is_valid_ether_addr(addr->sa_data)) {
2358 		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
2359 			 addr->sa_data);
2360 		err = -EADDRNOTAVAIL;
2361 		goto unlock_mutex;
2362 	}
2363 
2364 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
2365 		goto unlock_mutex;
2366 
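	/* Add the new filter before removing the old one so that a failure
	 * leaves the current MAC address untouched; on failure, drop any
	 * partially added entry for the new address.
	 */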
2367 	vport_config = vport->adapter->vport_config[vport->idx];
2368 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
2369 	if (err) {
2370 		__idpf_del_mac_filter(vport_config, addr->sa_data);
2371 		goto unlock_mutex;
2372 	}
2373 
2374 	if (is_valid_ether_addr(vport->default_mac_addr))
2375 		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
2376 
2377 	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
2378 	eth_hw_addr_set(netdev, addr->sa_data);
2379 
2380 unlock_mutex:
2381 	idpf_vport_ctrl_unlock(netdev);
2382 
2383 	return err;
2384 }
2385 
2386 /**
2387  * idpf_alloc_dma_mem - Allocate dma memory
2388  * @hw: pointer to hw struct
2389  * @mem: pointer to dma_mem struct
2390  * @size: size of the memory to allocate
2391  */
2392 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
2393 {
2394 	struct idpf_adapter *adapter = hw->back;
2395 	size_t sz = ALIGN(size, 4096);
2396 
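	/* Allocate a coherent buffer rounded up to a 4K multiple; mem->size
	 * records the rounded size so idpf_free_dma_mem() releases the full
	 * allocation.
	 */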
2397 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
2398 				     &mem->pa, GFP_KERNEL);
2399 	mem->size = sz;
2400 
2401 	return mem->va;
2402 }
2403 
2404 /**
2405  * idpf_free_dma_mem - Free the allocated dma memory
2406  * @hw: pointer to hw struct
2407  * @mem: pointer to dma_mem struct
2408  */
2409 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2410 {
2411 	struct idpf_adapter *adapter = hw->back;
2412 
2413 	dma_free_coherent(&adapter->pdev->dev, mem->size,
2414 			  mem->va, mem->pa);
2415 	mem->size = 0;
2416 	mem->va = NULL;
2417 	mem->pa = 0;
2418 }
2419 
2420 static const struct net_device_ops idpf_netdev_ops_splitq = {
2421 	.ndo_open = idpf_open,
2422 	.ndo_stop = idpf_stop,
2423 	.ndo_start_xmit = idpf_tx_splitq_start,
2424 	.ndo_features_check = idpf_features_check,
2425 	.ndo_set_rx_mode = idpf_set_rx_mode,
2426 	.ndo_validate_addr = eth_validate_addr,
2427 	.ndo_set_mac_address = idpf_set_mac,
2428 	.ndo_change_mtu = idpf_change_mtu,
2429 	.ndo_get_stats64 = idpf_get_stats64,
2430 	.ndo_set_features = idpf_set_features,
2431 	.ndo_tx_timeout = idpf_tx_timeout,
2432 };
2433 
2434 static const struct net_device_ops idpf_netdev_ops_singleq = {
2435 	.ndo_open = idpf_open,
2436 	.ndo_stop = idpf_stop,
2437 	.ndo_start_xmit = idpf_tx_singleq_start,
2438 	.ndo_features_check = idpf_features_check,
2439 	.ndo_set_rx_mode = idpf_set_rx_mode,
2440 	.ndo_validate_addr = eth_validate_addr,
2441 	.ndo_set_mac_address = idpf_set_mac,
2442 	.ndo_change_mtu = idpf_change_mtu,
2443 	.ndo_get_stats64 = idpf_get_stats64,
2444 	.ndo_set_features = idpf_set_features,
2445 	.ndo_tx_timeout = idpf_tx_timeout,
2446 };
2447