xref: /linux/drivers/net/ethernet/intel/idpf/idpf_lib.c (revision ab475966455ce285c2c9978a3e3bfe97d75ff8d4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 
6 static const struct net_device_ops idpf_netdev_ops_splitq;
7 static const struct net_device_ops idpf_netdev_ops_singleq;
8 
9 const char * const idpf_vport_vc_state_str[] = {
10 	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
11 };
12 
13 /**
14  * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
15  * @adapter: private data struct
16  *
17  * Return 0 on success, error on failure
18  */
19 static int idpf_init_vector_stack(struct idpf_adapter *adapter)
20 {
21 	struct idpf_vector_lifo *stack;
22 	u16 min_vec;
23 	u32 i;
24 
25 	mutex_lock(&adapter->vector_lock);
26 	min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
27 	stack = &adapter->vector_stack;
28 	stack->size = adapter->num_msix_entries;
29 	/* set the base and top to point at start of the 'free pool' to
30 	 * distribute the unused vectors on an on-demand basis
31 	 */
32 	stack->base = min_vec;
33 	stack->top = min_vec;
34 
35 	stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
36 	if (!stack->vec_idx) {
37 		mutex_unlock(&adapter->vector_lock);
38 
39 		return -ENOMEM;
40 	}
41 
42 	for (i = 0; i < stack->size; i++)
43 		stack->vec_idx[i] = i;
44 
45 	mutex_unlock(&adapter->vector_lock);
46 
47 	return 0;
48 }
49 
50 /**
51  * idpf_deinit_vector_stack - free the MSIX vector stack
52  * @adapter: private data struct
53  */
54 static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
55 {
56 	struct idpf_vector_lifo *stack;
57 
58 	mutex_lock(&adapter->vector_lock);
59 	stack = &adapter->vector_stack;
60 	kfree(stack->vec_idx);
61 	stack->vec_idx = NULL;
62 	mutex_unlock(&adapter->vector_lock);
63 }
64 
65 /**
66  * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
67  * @adapter: adapter structure
68  *
69  * This will also disable interrupt mode and queue up the mailbox task. The
70  * mailbox task will reschedule itself if not in interrupt mode.
71  */
72 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
73 {
74 	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
75 	free_irq(adapter->msix_entries[0].vector, adapter);
76 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
77 }
78 
79 /**
80  * idpf_intr_rel - Release interrupt capabilities and free memory
81  * @adapter: adapter to disable interrupts on
82  */
83 void idpf_intr_rel(struct idpf_adapter *adapter)
84 {
85 	int err;
86 
87 	if (!adapter->msix_entries)
88 		return;
89 
90 	idpf_mb_intr_rel_irq(adapter);
91 	pci_free_irq_vectors(adapter->pdev);
92 
93 	err = idpf_send_dealloc_vectors_msg(adapter);
94 	if (err)
95 		dev_err(&adapter->pdev->dev,
96 			"Failed to deallocate vectors: %d\n", err);
97 
98 	idpf_deinit_vector_stack(adapter);
99 	kfree(adapter->msix_entries);
100 	adapter->msix_entries = NULL;
101 }
102 
103 /**
104  * idpf_mb_intr_clean - Interrupt handler for the mailbox
105  * @irq: interrupt number
106  * @data: pointer to the adapter structure
107  */
108 static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
109 {
110 	struct idpf_adapter *adapter = (struct idpf_adapter *)data;
111 
112 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
113 
114 	return IRQ_HANDLED;
115 }
116 
117 /**
118  * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
119  * @adapter: adapter to get the hardware address for register write
120  */
121 static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
122 {
123 	struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
124 	u32 val;
125 
126 	val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
127 	writel(val, intr->dyn_ctl);
128 	writel(intr->icr_ena_ctlq_m, intr->icr_ena);
129 }
130 
131 /**
132  * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
133  * @adapter: adapter structure to pass to the mailbox irq handler
134  */
135 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
136 {
137 	struct idpf_q_vector *mb_vector = &adapter->mb_vector;
138 	int irq_num, mb_vidx = 0, err;
139 
140 	irq_num = adapter->msix_entries[mb_vidx].vector;
141 	mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
142 				    dev_driver_string(&adapter->pdev->dev),
143 				    "Mailbox", mb_vidx);
144 	err = request_irq(irq_num, adapter->irq_mb_handler, 0,
145 			  mb_vector->name, adapter);
146 	if (err) {
147 		dev_err(&adapter->pdev->dev,
148 			"IRQ request for mailbox failed, error: %d\n", err);
149 
150 		return err;
151 	}
152 
153 	set_bit(IDPF_MB_INTR_MODE, adapter->flags);
154 
155 	return 0;
156 }
157 
158 /**
159  * idpf_set_mb_vec_id - Set vector index for mailbox
160  * @adapter: adapter structure to access the vector chunks
161  *
162  * The first vector id in the requested vector chunks from the CP is for
163  * the mailbox
164  */
165 static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
166 {
167 	if (adapter->req_vec_chunks)
168 		adapter->mb_vector.v_idx =
169 			le16_to_cpu(adapter->caps.mailbox_vector_id);
170 	else
171 		adapter->mb_vector.v_idx = 0;
172 }
173 
174 /**
175  * idpf_mb_intr_init - Initialize the mailbox interrupt
176  * @adapter: adapter structure to store the mailbox vector
177  */
178 static int idpf_mb_intr_init(struct idpf_adapter *adapter)
179 {
180 	adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
181 	adapter->irq_mb_handler = idpf_mb_intr_clean;
182 
183 	return idpf_mb_intr_req_irq(adapter);
184 }
185 
186 /**
187  * idpf_vector_lifo_push - push MSIX vector index onto stack
188  * @adapter: private data struct
189  * @vec_idx: vector index to store
190  */
191 static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
192 {
193 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
194 
195 	lockdep_assert_held(&adapter->vector_lock);
196 
197 	if (stack->top == stack->base) {
198 		dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
199 			stack->top);
200 		return -EINVAL;
201 	}
202 
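	/* Slots [0, base) hold the mailbox and default vport vectors and are
	 * never redistributed; the free pool lives in [top, size). Pushing
	 * moves top down toward base to return a vector to the free pool.
	 */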
203 	stack->vec_idx[--stack->top] = vec_idx;
204 
205 	return 0;
206 }
207 
208 /**
209  * idpf_vector_lifo_pop - pop MSIX vector index from stack
210  * @adapter: private data struct
211  */
212 static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
213 {
214 	struct idpf_vector_lifo *stack = &adapter->vector_stack;
215 
216 	lockdep_assert_held(&adapter->vector_lock);
217 
218 	if (stack->top == stack->size) {
219 		dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
220 
221 		return -EINVAL;
222 	}
223 
224 	return stack->vec_idx[stack->top++];
225 }
226 
227 /**
228  * idpf_vector_stash - Store the vector indexes onto the stack
229  * @adapter: private data struct
230  * @q_vector_idxs: vector index array
231  * @vec_info: info related to the number of vectors
232  *
233  * This function is a no-op if there are no vectors indexes to be stashed
234  * This function is a no-op if there are no vector indexes to be stashed
235 static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
236 			      struct idpf_vector_info *vec_info)
237 {
238 	int i, base = 0;
239 	u16 vec_idx;
240 
241 	lockdep_assert_held(&adapter->vector_lock);
242 
243 	if (!vec_info->num_curr_vecs)
244 		return;
245 
246 	/* For default vports, no need to stash vectors allocated from the
247 	 * default pool onto the stack
248 	 */
249 	if (vec_info->default_vport)
250 		base = IDPF_MIN_Q_VEC;
251 
252 	for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
253 		vec_idx = q_vector_idxs[i];
254 		idpf_vector_lifo_push(adapter, vec_idx);
255 		adapter->num_avail_msix++;
256 	}
257 }
258 
259 /**
260  * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
261  * @adapter: driver specific private structure
262  * @q_vector_idxs: vector index array
263  * @vec_info: info related to the number of vectors
264  *
265  * This is the core function to distribute the MSIX vectors acquired from the
266  * OS. It expects the caller to pass the number of vectors required and
267  * also previously allocated. First, it stashes previously allocated vector
268  * indexes on to the stack and then figures out if it can allocate requested
269  * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as
270  * requested vectors, then this function just stashes the already allocated
271  * vectors and returns 0.
272  *
273  * Returns the actual number of vectors allocated on success, which may be
274  * fewer than requested. If 0 is returned, the stack had no vectors left to
275  * distribute, which the caller must treat as a failure.
276  */
277 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
278 				u16 *q_vector_idxs,
279 				struct idpf_vector_info *vec_info)
280 {
281 	u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
282 	struct idpf_vector_lifo *stack;
283 	int i, j, vecid;
284 
285 	mutex_lock(&adapter->vector_lock);
286 	stack = &adapter->vector_stack;
287 	num_req_vecs = vec_info->num_req_vecs;
288 
289 	/* Stash interrupt vector indexes onto the stack if required */
290 	idpf_vector_stash(adapter, q_vector_idxs, vec_info);
291 
292 	if (!num_req_vecs)
293 		goto rel_lock;
294 
295 	if (vec_info->default_vport) {
296 		/* IDPF_MIN_Q_VEC vectors per default vport are set aside in the
297 		 * default pool of the stack; use that reserved slice here
298 		 */
299 		j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
300 		for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
301 			q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
302 			num_req_vecs--;
303 		}
304 	}
305 
306 	/* Check whether the stack has enough vectors to allocate */
307 	max_vecs = min(adapter->num_avail_msix, num_req_vecs);
308 
309 	for (j = 0; j < max_vecs; j++) {
310 		vecid = idpf_vector_lifo_pop(adapter);
311 		q_vector_idxs[num_alloc_vecs++] = vecid;
312 	}
313 	adapter->num_avail_msix -= max_vecs;
314 
315 rel_lock:
316 	mutex_unlock(&adapter->vector_lock);
317 
318 	return num_alloc_vecs;
319 }
320 
321 /**
322  * idpf_intr_req - Request interrupt capabilities
323  * @adapter: adapter to enable interrupts on
324  *
325  * Returns 0 on success, negative on failure
326  */
327 int idpf_intr_req(struct idpf_adapter *adapter)
328 {
329 	u16 default_vports = idpf_get_default_vports(adapter);
330 	int num_q_vecs, total_vecs, num_vec_ids;
331 	int min_vectors, v_actual, err;
332 	unsigned int vector;
333 	u16 *vecids;
334 
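	/* Of the total vectors reserved for this function, set aside the
	 * mailbox vector(s) and ask the CP to allocate the rest for queue use.
	 */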
335 	total_vecs = idpf_get_reserved_vecs(adapter);
336 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
337 
338 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
339 	if (err) {
340 		dev_err(&adapter->pdev->dev,
341 			"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
342 
343 		return -EAGAIN;
344 	}
345 
346 	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
347 	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
348 					 total_vecs, PCI_IRQ_MSIX);
349 	if (v_actual < min_vectors) {
350 		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
351 			v_actual);
352 		err = -EAGAIN;
353 		goto send_dealloc_vecs;
354 	}
355 
356 	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
357 					GFP_KERNEL);
358 
359 	if (!adapter->msix_entries) {
360 		err = -ENOMEM;
361 		goto free_irq;
362 	}
363 
364 	idpf_set_mb_vec_id(adapter);
365 
366 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
367 	if (!vecids) {
368 		err = -ENOMEM;
369 		goto free_msix;
370 	}
371 
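	/* Translate the vector chunks returned by the CP into absolute vector
	 * ids; without chunks, fall back to a contiguous 0..N-1 mapping.
	 */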
372 	if (adapter->req_vec_chunks) {
373 		struct virtchnl2_vector_chunks *vchunks;
374 		struct virtchnl2_alloc_vectors *ac;
375 
376 		ac = adapter->req_vec_chunks;
377 		vchunks = &ac->vchunks;
378 
379 		num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
380 					       vchunks);
381 		if (num_vec_ids < v_actual) {
382 			err = -EINVAL;
383 			goto free_vecids;
384 		}
385 	} else {
386 		int i;
387 
388 		for (i = 0; i < v_actual; i++)
389 			vecids[i] = i;
390 	}
391 
392 	for (vector = 0; vector < v_actual; vector++) {
393 		adapter->msix_entries[vector].entry = vecids[vector];
394 		adapter->msix_entries[vector].vector =
395 			pci_irq_vector(adapter->pdev, vector);
396 	}
397 
398 	adapter->num_req_msix = total_vecs;
399 	adapter->num_msix_entries = v_actual;
400 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
401 	 * after considering the minimum vectors required for each default
402 	 * vport
403 	 */
404 	adapter->num_avail_msix = v_actual - min_vectors;
405 
406 	/* Fill MSIX vector lifo stack with vector indexes */
407 	err = idpf_init_vector_stack(adapter);
408 	if (err)
409 		goto free_vecids;
410 
411 	err = idpf_mb_intr_init(adapter);
412 	if (err)
413 		goto deinit_vec_stack;
414 	idpf_mb_irq_enable(adapter);
415 	kfree(vecids);
416 
417 	return 0;
418 
419 deinit_vec_stack:
420 	idpf_deinit_vector_stack(adapter);
421 free_vecids:
422 	kfree(vecids);
423 free_msix:
424 	kfree(adapter->msix_entries);
425 	adapter->msix_entries = NULL;
426 free_irq:
427 	pci_free_irq_vectors(adapter->pdev);
428 send_dealloc_vecs:
429 	idpf_send_dealloc_vectors_msg(adapter);
430 
431 	return err;
432 }
433 
434 /**
435  * idpf_find_mac_filter - Search filter list for specific mac filter
436  * @vconfig: Vport config structure
437  * @macaddr: The MAC address
438  *
439  * Returns ptr to the filter object or NULL. Must be called while holding the
440  * mac_filter_list_lock.
441  **/
442 static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
443 						    const u8 *macaddr)
444 {
445 	struct idpf_mac_filter *f;
446 
447 	if (!macaddr)
448 		return NULL;
449 
450 	list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
451 		if (ether_addr_equal(macaddr, f->macaddr))
452 			return f;
453 	}
454 
455 	return NULL;
456 }
457 
458 /**
459  * __idpf_del_mac_filter - Delete a MAC filter from the filter list
460  * @vport_config: Vport config structure
461  * @macaddr: The MAC address
462  *
463  * Returns 0 on success, error value on failure
464  **/
465 static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
466 				 const u8 *macaddr)
467 {
468 	struct idpf_mac_filter *f;
469 
470 	spin_lock_bh(&vport_config->mac_filter_list_lock);
471 	f = idpf_find_mac_filter(vport_config, macaddr);
472 	if (f) {
473 		list_del(&f->list);
474 		kfree(f);
475 	}
476 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
477 
478 	return 0;
479 }
480 
481 /**
482  * idpf_del_mac_filter - Delete a MAC filter from the filter list
483  * @vport: Main vport structure
484  * @np: Netdev private structure
485  * @macaddr: The MAC address
486  * @async: Don't wait for return message
487  *
488  * Removes filter from list and if interface is up, tells hardware about the
489  * removed filter.
490  **/
491 static int idpf_del_mac_filter(struct idpf_vport *vport,
492 			       struct idpf_netdev_priv *np,
493 			       const u8 *macaddr, bool async)
494 {
495 	struct idpf_vport_config *vport_config;
496 	struct idpf_mac_filter *f;
497 
498 	vport_config = np->adapter->vport_config[np->vport_idx];
499 
500 	spin_lock_bh(&vport_config->mac_filter_list_lock);
501 	f = idpf_find_mac_filter(vport_config, macaddr);
502 	if (f) {
503 		f->remove = true;
504 	} else {
505 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
506 
507 		return -EINVAL;
508 	}
509 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
510 
511 	if (np->state == __IDPF_VPORT_UP) {
512 		int err;
513 
514 		err = idpf_add_del_mac_filters(vport, np, false, async);
515 		if (err)
516 			return err;
517 	}
518 
519 	return __idpf_del_mac_filter(vport_config, macaddr);
520 }
521 
522 /**
523  * __idpf_add_mac_filter - Add mac filter helper function
524  * @vport_config: Vport config structure
525  * @macaddr: Address to add
526  *
527  * Takes the mac_filter_list_lock spinlock to add a new filter to the list.
528  */
529 static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
530 				 const u8 *macaddr)
531 {
532 	struct idpf_mac_filter *f;
533 
534 	spin_lock_bh(&vport_config->mac_filter_list_lock);
535 
536 	f = idpf_find_mac_filter(vport_config, macaddr);
537 	if (f) {
538 		f->remove = false;
539 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
540 
541 		return 0;
542 	}
543 
544 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
545 	if (!f) {
546 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
547 
548 		return -ENOMEM;
549 	}
550 
551 	ether_addr_copy(f->macaddr, macaddr);
552 	list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
553 	f->add = true;
554 
555 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
556 
557 	return 0;
558 }
559 
560 /**
561  * idpf_add_mac_filter - Add a mac filter to the filter list
562  * @vport: Main vport structure
563  * @np: Netdev private structure
564  * @macaddr: The MAC address
565  * @async: Don't wait for return message
566  *
567  * Returns 0 on success or error on failure. If interface is up, we'll also
568  * send the virtchnl message to tell hardware about the filter.
569  **/
570 static int idpf_add_mac_filter(struct idpf_vport *vport,
571 			       struct idpf_netdev_priv *np,
572 			       const u8 *macaddr, bool async)
573 {
574 	struct idpf_vport_config *vport_config;
575 	int err;
576 
577 	vport_config = np->adapter->vport_config[np->vport_idx];
578 	err = __idpf_add_mac_filter(vport_config, macaddr);
579 	if (err)
580 		return err;
581 
582 	if (np->state == __IDPF_VPORT_UP)
583 		err = idpf_add_del_mac_filters(vport, np, true, async);
584 
585 	return err;
586 }
587 
588 /**
589  * idpf_del_all_mac_filters - Delete all MAC filters in list
590  * @vport: main vport struct
591  *
592  * Takes the mac_filter_list_lock spinlock and deletes all filters.
593  */
594 static void idpf_del_all_mac_filters(struct idpf_vport *vport)
595 {
596 	struct idpf_vport_config *vport_config;
597 	struct idpf_mac_filter *f, *ftmp;
598 
599 	vport_config = vport->adapter->vport_config[vport->idx];
600 	spin_lock_bh(&vport_config->mac_filter_list_lock);
601 
602 	list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
603 				 list) {
604 		list_del(&f->list);
605 		kfree(f);
606 	}
607 
608 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
609 }
610 
611 /**
612  * idpf_restore_mac_filters - Re-add all MAC filters in list
613  * @vport: main vport struct
614  *
615  * Takes the mac_filter_list_lock spinlock. Sets the add field to true so the
616  * filters get resynced back to HW.
617  */
618 static void idpf_restore_mac_filters(struct idpf_vport *vport)
619 {
620 	struct idpf_vport_config *vport_config;
621 	struct idpf_mac_filter *f;
622 
623 	vport_config = vport->adapter->vport_config[vport->idx];
624 	spin_lock_bh(&vport_config->mac_filter_list_lock);
625 
626 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
627 		f->add = true;
628 
629 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
630 
631 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
632 				 true, false);
633 }
634 
635 /**
636  * idpf_remove_mac_filters - Remove all MAC filters in list
637  * @vport: main vport struct
638  *
639  * Takes the mac_filter_list_lock spinlock. Sets the remove field to true so
640  * the filters get removed from HW.
641  */
642 static void idpf_remove_mac_filters(struct idpf_vport *vport)
643 {
644 	struct idpf_vport_config *vport_config;
645 	struct idpf_mac_filter *f;
646 
647 	vport_config = vport->adapter->vport_config[vport->idx];
648 	spin_lock_bh(&vport_config->mac_filter_list_lock);
649 
650 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
651 		f->remove = true;
652 
653 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
654 
655 	idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
656 				 false, false);
657 }
658 
659 /**
660  * idpf_deinit_mac_addr - deinitialize mac address for vport
661  * @vport: main vport structure
662  */
663 static void idpf_deinit_mac_addr(struct idpf_vport *vport)
664 {
665 	struct idpf_vport_config *vport_config;
666 	struct idpf_mac_filter *f;
667 
668 	vport_config = vport->adapter->vport_config[vport->idx];
669 
670 	spin_lock_bh(&vport_config->mac_filter_list_lock);
671 
672 	f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
673 	if (f) {
674 		list_del(&f->list);
675 		kfree(f);
676 	}
677 
678 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
679 }
680 
681 /**
682  * idpf_init_mac_addr - initialize mac address for vport
683  * @vport: main vport structure
684  * @netdev: pointer to netdev struct associated with this vport
685  */
686 static int idpf_init_mac_addr(struct idpf_vport *vport,
687 			      struct net_device *netdev)
688 {
689 	struct idpf_netdev_priv *np = netdev_priv(netdev);
690 	struct idpf_adapter *adapter = vport->adapter;
691 	int err;
692 
693 	if (is_valid_ether_addr(vport->default_mac_addr)) {
694 		eth_hw_addr_set(netdev, vport->default_mac_addr);
695 		ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
696 
697 		return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
698 					   false);
699 	}
700 
701 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
702 			     VIRTCHNL2_CAP_MACFILTER)) {
703 		dev_err(&adapter->pdev->dev,
704 			"MAC address is not provided and capability is not set\n");
705 
706 		return -EINVAL;
707 	}
708 
709 	eth_hw_addr_random(netdev);
710 	err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
711 	if (err)
712 		return err;
713 
714 	dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
715 		 vport->default_mac_addr, netdev->dev_addr);
716 	ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
717 
718 	return 0;
719 }
720 
721 /**
722  * idpf_cfg_netdev - Allocate, configure and register a netdev
723  * @vport: main vport structure
724  *
725  * Returns 0 on success, negative value on failure.
726  */
727 static int idpf_cfg_netdev(struct idpf_vport *vport)
728 {
729 	struct idpf_adapter *adapter = vport->adapter;
730 	struct idpf_vport_config *vport_config;
731 	netdev_features_t dflt_features;
732 	netdev_features_t offloads = 0;
733 	struct idpf_netdev_priv *np;
734 	struct net_device *netdev;
735 	u16 idx = vport->idx;
736 	int err;
737 
738 	vport_config = adapter->vport_config[idx];
739 
740 	/* It's possible we already have a netdev allocated and registered for
741 	 * this vport
742 	 */
743 	if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
744 		netdev = adapter->netdevs[idx];
745 		np = netdev_priv(netdev);
746 		np->vport = vport;
747 		np->vport_idx = vport->idx;
748 		np->vport_id = vport->vport_id;
749 		vport->netdev = netdev;
750 
751 		return idpf_init_mac_addr(vport, netdev);
752 	}
753 
754 	netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
755 				    vport_config->max_q.max_txq,
756 				    vport_config->max_q.max_rxq);
757 	if (!netdev)
758 		return -ENOMEM;
759 
760 	vport->netdev = netdev;
761 	np = netdev_priv(netdev);
762 	np->vport = vport;
763 	np->adapter = adapter;
764 	np->vport_idx = vport->idx;
765 	np->vport_id = vport->vport_id;
766 
767 	spin_lock_init(&np->stats_lock);
768 
769 	err = idpf_init_mac_addr(vport, netdev);
770 	if (err) {
771 		free_netdev(vport->netdev);
772 		vport->netdev = NULL;
773 
774 		return err;
775 	}
776 
777 	/* assign netdev_ops */
778 	if (idpf_is_queue_model_split(vport->txq_model))
779 		netdev->netdev_ops = &idpf_netdev_ops_splitq;
780 	else
781 		netdev->netdev_ops = &idpf_netdev_ops_singleq;
782 
783 	/* set the watchdog timeout value to 5 seconds */
784 	netdev->watchdog_timeo = 5 * HZ;
785 
786 	/* configure the allowed MTU range */
787 	netdev->min_mtu = ETH_MIN_MTU;
788 	netdev->max_mtu = vport->max_mtu;
789 
790 	dflt_features = NETIF_F_SG	|
791 			NETIF_F_HIGHDMA;
792 
793 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
794 		dflt_features |= NETIF_F_RXHASH;
795 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
796 		dflt_features |= NETIF_F_IP_CSUM;
797 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
798 		dflt_features |= NETIF_F_IPV6_CSUM;
799 	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
800 		dflt_features |= NETIF_F_RXCSUM;
801 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
802 		dflt_features |= NETIF_F_SCTP_CRC;
803 
804 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
805 		dflt_features |= NETIF_F_TSO;
806 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
807 		dflt_features |= NETIF_F_TSO6;
808 	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
809 				VIRTCHNL2_CAP_SEG_IPV4_UDP |
810 				VIRTCHNL2_CAP_SEG_IPV6_UDP))
811 		dflt_features |= NETIF_F_GSO_UDP_L4;
812 	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
813 		offloads |= NETIF_F_GRO_HW;
814 	/* advertise to stack only if offloads for encapsulated packets are
815 	 * supported
816 	 */
817 	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
818 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
819 		offloads |= NETIF_F_GSO_UDP_TUNNEL	|
820 			    NETIF_F_GSO_GRE		|
821 			    NETIF_F_GSO_GRE_CSUM	|
822 			    NETIF_F_GSO_PARTIAL		|
823 			    NETIF_F_GSO_UDP_TUNNEL_CSUM	|
824 			    NETIF_F_GSO_IPXIP4		|
825 			    NETIF_F_GSO_IPXIP6		|
826 			    0;
827 
828 		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
829 					 IDPF_CAP_TUNNEL_TX_CSUM))
830 			netdev->gso_partial_features |=
831 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
832 
833 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
834 		offloads |= NETIF_F_TSO_MANGLEID;
835 	}
836 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
837 		offloads |= NETIF_F_LOOPBACK;
838 
839 	netdev->features |= dflt_features;
840 	netdev->hw_features |= dflt_features | offloads;
841 	netdev->hw_enc_features |= dflt_features | offloads;
842 	idpf_set_ethtool_ops(netdev);
843 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
844 
845 	/* carrier off on init to avoid Tx hangs */
846 	netif_carrier_off(netdev);
847 
848 	/* make sure transmit queues start off as stopped */
849 	netif_tx_stop_all_queues(netdev);
850 
851 	/* The vport can be arbitrarily released so we need to also track
852 	 * netdevs in the adapter struct
853 	 */
854 	adapter->netdevs[idx] = netdev;
855 
856 	return 0;
857 }
858 
859 /**
860  * idpf_get_free_slot - get the next free (NULL) slot index in the vports array
861  * @adapter: adapter in which to look for a free vport slot
862  */
863 static int idpf_get_free_slot(struct idpf_adapter *adapter)
864 {
865 	unsigned int i;
866 
867 	for (i = 0; i < adapter->max_vports; i++) {
868 		if (!adapter->vports[i])
869 			return i;
870 	}
871 
872 	return IDPF_NO_FREE_SLOT;
873 }
874 
875 /**
876  * idpf_remove_features - Turn off feature configs
877  * @vport: virtual port structure
878  */
879 static void idpf_remove_features(struct idpf_vport *vport)
880 {
881 	struct idpf_adapter *adapter = vport->adapter;
882 
883 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
884 		idpf_remove_mac_filters(vport);
885 }
886 
887 /**
888  * idpf_vport_stop - Disable a vport
889  * @vport: vport to disable
890  */
891 static void idpf_vport_stop(struct idpf_vport *vport)
892 {
893 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
894 
895 	if (np->state <= __IDPF_VPORT_DOWN)
896 		return;
897 
898 	netif_carrier_off(vport->netdev);
899 	netif_tx_disable(vport->netdev);
900 
901 	idpf_send_disable_vport_msg(vport);
902 	idpf_send_disable_queues_msg(vport);
903 	idpf_send_map_unmap_queue_vector_msg(vport, false);
904 	/* Normally we ask for queues in create_vport, but if the number of
905 	 * initially requested queues has changed, for example via ethtool
906 	 * set channels, we delete the queues and then add them back
907 	 * instead of deleting and reallocating the vport.
908 	 */
909 	if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
910 		idpf_send_delete_queues_msg(vport);
911 
912 	idpf_remove_features(vport);
913 
914 	vport->link_up = false;
915 	idpf_vport_intr_deinit(vport);
916 	idpf_vport_intr_rel(vport);
917 	idpf_vport_queues_rel(vport);
918 	np->state = __IDPF_VPORT_DOWN;
919 }
920 
921 /**
922  * idpf_stop - Disables a network interface
923  * @netdev: network interface device structure
924  *
925  * The stop entry point is called when an interface is de-activated by the OS,
926  * and the netdevice enters the DOWN state.  The hardware is still under the
927  * driver's control, but the netdev interface is disabled.
928  *
929  * Returns success only - not allowed to fail
930  */
931 static int idpf_stop(struct net_device *netdev)
932 {
933 	struct idpf_netdev_priv *np = netdev_priv(netdev);
934 	struct idpf_vport *vport;
935 
936 	if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
937 		return 0;
938 
939 	idpf_vport_ctrl_lock(netdev);
940 	vport = idpf_netdev_to_vport(netdev);
941 
942 	idpf_vport_stop(vport);
943 
944 	idpf_vport_ctrl_unlock(netdev);
945 
946 	return 0;
947 }
948 
949 /**
950  * idpf_decfg_netdev - Unregister the netdev
951  * @vport: vport whose netdev is to be unregistered and freed
952  */
953 static void idpf_decfg_netdev(struct idpf_vport *vport)
954 {
955 	struct idpf_adapter *adapter = vport->adapter;
956 
957 	unregister_netdev(vport->netdev);
958 	free_netdev(vport->netdev);
959 	vport->netdev = NULL;
960 
961 	adapter->netdevs[vport->idx] = NULL;
962 }
963 
964 /**
965  * idpf_vport_rel - Delete a vport and free its resources
966  * @vport: the vport being removed
967  */
968 static void idpf_vport_rel(struct idpf_vport *vport)
969 {
970 	struct idpf_adapter *adapter = vport->adapter;
971 	struct idpf_vport_config *vport_config;
972 	struct idpf_vector_info vec_info;
973 	struct idpf_rss_data *rss_data;
974 	struct idpf_vport_max_q max_q;
975 	u16 idx = vport->idx;
976 	int i;
977 
978 	vport_config = adapter->vport_config[vport->idx];
979 	idpf_deinit_rss(vport);
980 	rss_data = &vport_config->user_config.rss_data;
981 	kfree(rss_data->rss_key);
982 	rss_data->rss_key = NULL;
983 
984 	idpf_send_destroy_vport_msg(vport);
985 
986 	/* Set all bits as we don't know which vc_state the vport vchnl_wq
987 	 * is waiting on, and wake up the virtchnl wait queue even if it is
988 	 * waiting for a response, as we are going down
989 	 */
990 	for (i = 0; i < IDPF_VC_NBITS; i++)
991 		set_bit(i, vport->vc_state);
992 	wake_up(&vport->vchnl_wq);
993 
994 	mutex_destroy(&vport->vc_buf_lock);
995 
996 	/* Clear all the bits */
997 	for (i = 0; i < IDPF_VC_NBITS; i++)
998 		clear_bit(i, vport->vc_state);
999 
1000 	/* Release all max queues back to the adapter's pool */
1001 	max_q.max_rxq = vport_config->max_q.max_rxq;
1002 	max_q.max_txq = vport_config->max_q.max_txq;
1003 	max_q.max_bufq = vport_config->max_q.max_bufq;
1004 	max_q.max_complq = vport_config->max_q.max_complq;
1005 	idpf_vport_dealloc_max_qs(adapter, &max_q);
1006 
1007 	/* Release all the allocated vectors on the stack */
1008 	vec_info.num_req_vecs = 0;
1009 	vec_info.num_curr_vecs = vport->num_q_vectors;
1010 	vec_info.default_vport = vport->default_vport;
1011 
1012 	idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
1013 
1014 	kfree(vport->q_vector_idxs);
1015 	vport->q_vector_idxs = NULL;
1016 
1017 	kfree(adapter->vport_params_recvd[idx]);
1018 	adapter->vport_params_recvd[idx] = NULL;
1019 	kfree(adapter->vport_params_reqd[idx]);
1020 	adapter->vport_params_reqd[idx] = NULL;
1021 	if (adapter->vport_config[idx]) {
1022 		kfree(adapter->vport_config[idx]->req_qs_chunks);
1023 		adapter->vport_config[idx]->req_qs_chunks = NULL;
1024 	}
1025 	kfree(vport);
1026 	adapter->num_alloc_vports--;
1027 }
1028 
1029 /**
1030  * idpf_vport_dealloc - cleanup and release a given vport
1031  * @vport: pointer to idpf vport structure
1032  *
1033  * returns nothing
1034  */
1035 static void idpf_vport_dealloc(struct idpf_vport *vport)
1036 {
1037 	struct idpf_adapter *adapter = vport->adapter;
1038 	unsigned int i = vport->idx;
1039 
1040 	idpf_deinit_mac_addr(vport);
1041 	idpf_vport_stop(vport);
1042 
1043 	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1044 		idpf_decfg_netdev(vport);
1045 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1046 		idpf_del_all_mac_filters(vport);
1047 
1048 	if (adapter->netdevs[i]) {
1049 		struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
1050 
1051 		np->vport = NULL;
1052 	}
1053 
1054 	idpf_vport_rel(vport);
1055 
1056 	adapter->vports[i] = NULL;
1057 	adapter->next_vport = idpf_get_free_slot(adapter);
1058 }
1059 
1060 /**
1061  * idpf_vport_alloc - Allocates the next available struct vport in the adapter
1062  * @adapter: board private structure
1063  * @max_q: vport max queue info
1064  *
1065  * returns a pointer to a vport on success, NULL on failure.
1066  */
1067 static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
1068 					   struct idpf_vport_max_q *max_q)
1069 {
1070 	struct idpf_rss_data *rss_data;
1071 	u16 idx = adapter->next_vport;
1072 	struct idpf_vport *vport;
1073 	u16 num_max_q;
1074 
1075 	if (idx == IDPF_NO_FREE_SLOT)
1076 		return NULL;
1077 
1078 	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1079 	if (!vport)
1080 		return vport;
1081 
1082 	if (!adapter->vport_config[idx]) {
1083 		struct idpf_vport_config *vport_config;
1084 
1085 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
1086 		if (!vport_config) {
1087 			kfree(vport);
1088 
1089 			return NULL;
1090 		}
1091 
1092 		adapter->vport_config[idx] = vport_config;
1093 	}
1094 
1095 	vport->idx = idx;
1096 	vport->adapter = adapter;
1097 	vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
1098 	vport->default_vport = adapter->num_alloc_vports <
1099 			       idpf_get_default_vports(adapter);
1100 
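	/* Size the q_vector index array for the worst case of one vector per
	 * queue, bounded by the larger of the TX and RX queue counts.
	 */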
1101 	num_max_q = max(max_q->max_txq, max_q->max_rxq);
1102 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
1103 	if (!vport->q_vector_idxs) {
1104 		kfree(vport);
1105 
1106 		return NULL;
1107 	}
1108 	idpf_vport_init(vport, max_q);
1109 
1110 	/* This alloc is done separately from the LUT because it's not strictly
1111 	 * dependent on how many queues we have. If we change the number of queues
1112 	 * and soft reset, we'll need a new LUT, but the key can remain the same
1113 	 * for as long as the vport exists.
1114 	 */
1115 	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
1116 	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
1117 	if (!rss_data->rss_key) {
1118 		kfree(vport);
1119 
1120 		return NULL;
1121 	}
1122 	/* Initialize default rss key */
1123 	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
1124 
1125 	/* fill vport slot in the adapter struct */
1126 	adapter->vports[idx] = vport;
1127 	adapter->vport_ids[idx] = idpf_get_vport_id(vport);
1128 
1129 	adapter->num_alloc_vports++;
1130 	/* prepare adapter->next_vport for next use */
1131 	adapter->next_vport = idpf_get_free_slot(adapter);
1132 
1133 	return vport;
1134 }
1135 
1136 /**
1137  * idpf_get_stats64 - get statistics for network device structure
1138  * @netdev: network interface device structure
1139  * @stats: main device statistics structure
1140  */
1141 static void idpf_get_stats64(struct net_device *netdev,
1142 			     struct rtnl_link_stats64 *stats)
1143 {
1144 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1145 
1146 	spin_lock_bh(&np->stats_lock);
1147 	*stats = np->netstats;
1148 	spin_unlock_bh(&np->stats_lock);
1149 }
1150 
1151 /**
1152  * idpf_statistics_task - Delayed task to get statistics over mailbox
1153  * @work: work_struct handle to our data
1154  */
1155 void idpf_statistics_task(struct work_struct *work)
1156 {
1157 	struct idpf_adapter *adapter;
1158 	int i;
1159 
1160 	adapter = container_of(work, struct idpf_adapter, stats_task.work);
1161 
1162 	for (i = 0; i < adapter->max_vports; i++) {
1163 		struct idpf_vport *vport = adapter->vports[i];
1164 
1165 		if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1166 			idpf_send_get_stats_msg(vport);
1167 	}
1168 
1169 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1170 			   msecs_to_jiffies(10000));
1171 }
1172 
1173 /**
1174  * idpf_mbx_task - Delayed task to handle mailbox responses
1175  * @work: work_struct handle
1176  */
1177 void idpf_mbx_task(struct work_struct *work)
1178 {
1179 	struct idpf_adapter *adapter;
1180 
1181 	adapter = container_of(work, struct idpf_adapter, mbx_task.work);
1182 
1183 	if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
1184 		idpf_mb_irq_enable(adapter);
1185 	else
1186 		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
1187 				   msecs_to_jiffies(300));
1188 
1189 	idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
1190 }
1191 
1192 /**
1193  * idpf_service_task - Delayed task to detect a pending hardware reset
1194  * @work: work_struct handle to our data
1195  *
1196  */
1197 void idpf_service_task(struct work_struct *work)
1198 {
1199 	struct idpf_adapter *adapter;
1200 
1201 	adapter = container_of(work, struct idpf_adapter, serv_task.work);
1202 
1203 	if (idpf_is_reset_detected(adapter) &&
1204 	    !idpf_is_reset_in_prog(adapter) &&
1205 	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
1206 		dev_info(&adapter->pdev->dev, "HW reset detected\n");
1207 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
1208 		queue_delayed_work(adapter->vc_event_wq,
1209 				   &adapter->vc_event_task,
1210 				   msecs_to_jiffies(10));
1211 	}
1212 
1213 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
1214 			   msecs_to_jiffies(300));
1215 }
1216 
1217 /**
1218  * idpf_restore_features - Restore feature configs
1219  * @vport: virtual port structure
1220  */
1221 static void idpf_restore_features(struct idpf_vport *vport)
1222 {
1223 	struct idpf_adapter *adapter = vport->adapter;
1224 
1225 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
1226 		idpf_restore_mac_filters(vport);
1227 }
1228 
1229 /**
1230  * idpf_set_real_num_queues - set number of queues for netdev
1231  * @vport: virtual port structure
1232  *
1233  * Returns 0 on success, negative on failure.
1234  */
1235 static int idpf_set_real_num_queues(struct idpf_vport *vport)
1236 {
1237 	int err;
1238 
1239 	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
1240 	if (err)
1241 		return err;
1242 
1243 	return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
1244 }
1245 
1246 /**
1247  * idpf_up_complete - Complete interface up sequence
1248  * @vport: virtual port structure
1249  *
1250  * Returns 0 on success, negative on failure.
1251  */
1252 static int idpf_up_complete(struct idpf_vport *vport)
1253 {
1254 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1255 
1256 	if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
1257 		netif_carrier_on(vport->netdev);
1258 		netif_tx_start_all_queues(vport->netdev);
1259 	}
1260 
1261 	np->state = __IDPF_VPORT_UP;
1262 
1263 	return 0;
1264 }
1265 
1266 /**
1267  * idpf_rx_init_buf_tail - Write initial buffer ring tail value
1268  * @vport: virtual port struct
1269  */
1270 static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
1271 {
1272 	int i, j;
1273 
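	/* Writing next_to_alloc to each queue's tail register hands the
	 * initially posted buffers over to the hardware.
	 */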
1274 	for (i = 0; i < vport->num_rxq_grp; i++) {
1275 		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
1276 
1277 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1278 			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1279 				struct idpf_queue *q =
1280 					&grp->splitq.bufq_sets[j].bufq;
1281 
1282 				writel(q->next_to_alloc, q->tail);
1283 			}
1284 		} else {
1285 			for (j = 0; j < grp->singleq.num_rxq; j++) {
1286 				struct idpf_queue *q =
1287 					grp->singleq.rxqs[j];
1288 
1289 				writel(q->next_to_alloc, q->tail);
1290 			}
1291 		}
1292 	}
1293 }
1294 
1295 /**
1296  * idpf_vport_open - Bring up a vport
1297  * @vport: vport to bring up
1298  * @alloc_res: allocate queue resources
1299  */
1300 static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
1301 {
1302 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1303 	struct idpf_adapter *adapter = vport->adapter;
1304 	struct idpf_vport_config *vport_config;
1305 	int err;
1306 
1307 	if (np->state != __IDPF_VPORT_DOWN)
1308 		return -EBUSY;
1309 
1310 	/* we do not allow interface up just yet */
1311 	netif_carrier_off(vport->netdev);
1312 
1313 	if (alloc_res) {
1314 		err = idpf_vport_queues_alloc(vport);
1315 		if (err)
1316 			return err;
1317 	}
1318 
1319 	err = idpf_vport_intr_alloc(vport);
1320 	if (err) {
1321 		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
1322 			vport->vport_id, err);
1323 		goto queues_rel;
1324 	}
1325 
1326 	err = idpf_vport_queue_ids_init(vport);
1327 	if (err) {
1328 		dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
1329 			vport->vport_id, err);
1330 		goto intr_rel;
1331 	}
1332 
1333 	err = idpf_vport_intr_init(vport);
1334 	if (err) {
1335 		dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
1336 			vport->vport_id, err);
1337 		goto intr_rel;
1338 	}
1339 
1340 	err = idpf_rx_bufs_init_all(vport);
1341 	if (err) {
1342 		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
1343 			vport->vport_id, err);
1344 		goto intr_rel;
1345 	}
1346 
1347 	err = idpf_queue_reg_init(vport);
1348 	if (err) {
1349 		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
1350 			vport->vport_id, err);
1351 		goto intr_rel;
1352 	}
1353 
1354 	idpf_rx_init_buf_tail(vport);
1355 
1356 	err = idpf_send_config_queues_msg(vport);
1357 	if (err) {
1358 		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
1359 			vport->vport_id, err);
1360 		goto intr_deinit;
1361 	}
1362 
1363 	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
1364 	if (err) {
1365 		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
1366 			vport->vport_id, err);
1367 		goto intr_deinit;
1368 	}
1369 
1370 	err = idpf_send_enable_queues_msg(vport);
1371 	if (err) {
1372 		dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
1373 			vport->vport_id, err);
1374 		goto unmap_queue_vectors;
1375 	}
1376 
1377 	err = idpf_send_enable_vport_msg(vport);
1378 	if (err) {
1379 		dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
1380 			vport->vport_id, err);
1381 		err = -EAGAIN;
1382 		goto disable_queues;
1383 	}
1384 
1385 	idpf_restore_features(vport);
1386 
1387 	vport_config = adapter->vport_config[vport->idx];
1388 	if (vport_config->user_config.rss_data.rss_lut)
1389 		err = idpf_config_rss(vport);
1390 	else
1391 		err = idpf_init_rss(vport);
1392 	if (err) {
1393 		dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
1394 			vport->vport_id, err);
1395 		goto disable_vport;
1396 	}
1397 
1398 	err = idpf_up_complete(vport);
1399 	if (err) {
1400 		dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
1401 			vport->vport_id, err);
1402 		goto deinit_rss;
1403 	}
1404 
1405 	return 0;
1406 
1407 deinit_rss:
1408 	idpf_deinit_rss(vport);
1409 disable_vport:
1410 	idpf_send_disable_vport_msg(vport);
1411 disable_queues:
1412 	idpf_send_disable_queues_msg(vport);
1413 unmap_queue_vectors:
1414 	idpf_send_map_unmap_queue_vector_msg(vport, false);
1415 intr_deinit:
1416 	idpf_vport_intr_deinit(vport);
1417 intr_rel:
1418 	idpf_vport_intr_rel(vport);
1419 queues_rel:
1420 	idpf_vport_queues_rel(vport);
1421 
1422 	return err;
1423 }
1424 
1425 /**
1426  * idpf_init_task - Delayed initialization task
1427  * @work: work_struct handle to our data
1428  *
1429  * Init task finishes up pending work started in probe. Due to the asynchronous
1430  * nature in which the driver communicates with hardware, we may have to wait
1431  * several milliseconds to get a response. Instead of busy polling in probe,
1432  * pulling it out into a delayed work task prevents us from bogging down the
1433  * whole system waiting for a response from hardware.
1434  */
1435 void idpf_init_task(struct work_struct *work)
1436 {
1437 	struct idpf_vport_config *vport_config;
1438 	struct idpf_vport_max_q max_q;
1439 	struct idpf_adapter *adapter;
1440 	struct idpf_netdev_priv *np;
1441 	struct idpf_vport *vport;
1442 	u16 num_default_vports;
1443 	struct pci_dev *pdev;
1444 	bool default_vport;
1445 	int index, err;
1446 
1447 	adapter = container_of(work, struct idpf_adapter, init_task.work);
1448 
1449 	num_default_vports = idpf_get_default_vports(adapter);
1450 	if (adapter->num_alloc_vports < num_default_vports)
1451 		default_vport = true;
1452 	else
1453 		default_vport = false;
1454 
1455 	err = idpf_vport_alloc_max_qs(adapter, &max_q);
1456 	if (err)
1457 		goto unwind_vports;
1458 
1459 	err = idpf_send_create_vport_msg(adapter, &max_q);
1460 	if (err) {
1461 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1462 		goto unwind_vports;
1463 	}
1464 
1465 	pdev = adapter->pdev;
1466 	vport = idpf_vport_alloc(adapter, &max_q);
1467 	if (!vport) {
1468 		err = -EFAULT;
1469 		dev_err(&pdev->dev, "failed to allocate vport: %d\n",
1470 			err);
1471 		idpf_vport_dealloc_max_qs(adapter, &max_q);
1472 		goto unwind_vports;
1473 	}
1474 
1475 	index = vport->idx;
1476 	vport_config = adapter->vport_config[index];
1477 
1478 	init_waitqueue_head(&vport->sw_marker_wq);
1479 	init_waitqueue_head(&vport->vchnl_wq);
1480 
1481 	mutex_init(&vport->vc_buf_lock);
1482 	spin_lock_init(&vport_config->mac_filter_list_lock);
1483 
1484 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
1485 
1486 	err = idpf_check_supported_desc_ids(vport);
1487 	if (err) {
1488 		dev_err(&pdev->dev, "failed to get required descriptor ids\n");
1489 		goto cfg_netdev_err;
1490 	}
1491 
1492 	if (idpf_cfg_netdev(vport))
1493 		goto cfg_netdev_err;
1494 
1495 	err = idpf_send_get_rx_ptype_msg(vport);
1496 	if (err)
1497 		goto handle_err;
1498 
1499 	/* Once state is put into DOWN, driver is ready for dev_open */
1500 	np = netdev_priv(vport->netdev);
1501 	np->state = __IDPF_VPORT_DOWN;
1502 	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
1503 		idpf_vport_open(vport, true);
1504 
1505 	/* Requeue the init task and return until all the default vports
1506 	 * have been created
1507 	 */
1508 	if (adapter->num_alloc_vports < num_default_vports) {
1509 		queue_delayed_work(adapter->init_wq, &adapter->init_task,
1510 				   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
1511 
1512 		return;
1513 	}
1514 
1515 	for (index = 0; index < adapter->max_vports; index++) {
1516 		if (adapter->netdevs[index] &&
1517 		    !test_bit(IDPF_VPORT_REG_NETDEV,
1518 			      adapter->vport_config[index]->flags)) {
1519 			register_netdev(adapter->netdevs[index]);
1520 			set_bit(IDPF_VPORT_REG_NETDEV,
1521 				adapter->vport_config[index]->flags);
1522 		}
1523 	}
1524 
1525 	/* As all the required vports are created, clear the reset flag
1526 	 * unconditionally here in case we were in reset and the link was down.
1527 	 */
1528 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1529 	/* Start the statistics task now */
1530 	queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
1531 			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
1532 
1533 	return;
1534 
1535 handle_err:
1536 	idpf_decfg_netdev(vport);
1537 cfg_netdev_err:
1538 	idpf_vport_rel(vport);
1539 	adapter->vports[index] = NULL;
1540 unwind_vports:
1541 	if (default_vport) {
1542 		for (index = 0; index < adapter->max_vports; index++) {
1543 			if (adapter->vports[index])
1544 				idpf_vport_dealloc(adapter->vports[index]);
1545 		}
1546 	}
1547 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1548 }
1549 
1550 /**
1551  * idpf_sriov_ena - Enable or change number of VFs
1552  * @adapter: private data struct
1553  * @num_vfs: number of VFs to allocate
1554  */
1555 static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
1556 {
1557 	struct device *dev = &adapter->pdev->dev;
1558 	int err;
1559 
1560 	err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
1561 	if (err) {
1562 		dev_err(dev, "Failed to allocate VFs: %d\n", err);
1563 
1564 		return err;
1565 	}
1566 
1567 	err = pci_enable_sriov(adapter->pdev, num_vfs);
1568 	if (err) {
1569 		idpf_send_set_sriov_vfs_msg(adapter, 0);
1570 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1571 
1572 		return err;
1573 	}
1574 
1575 	adapter->num_vfs = num_vfs;
1576 
1577 	return num_vfs;
1578 }
1579 
1580 /**
1581  * idpf_sriov_configure - Configure the requested VFs
1582  * @pdev: pointer to a pci_dev structure
1583  * @num_vfs: number of vfs to allocate
1584  *
1585  * Enable or change the number of VFs. Called when the user updates the number
1586  * of VFs in sysfs.
1587  **/
1588 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
1589 {
1590 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
1591 
1592 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
1593 		dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
1594 
1595 		return -EOPNOTSUPP;
1596 	}
1597 
1598 	if (num_vfs)
1599 		return idpf_sriov_ena(adapter, num_vfs);
1600 
1601 	if (pci_vfs_assigned(pdev)) {
1602 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
1603 
1604 		return -EBUSY;
1605 	}
1606 
1607 	pci_disable_sriov(adapter->pdev);
1608 	idpf_send_set_sriov_vfs_msg(adapter, 0);
1609 	adapter->num_vfs = 0;
1610 
1611 	return 0;
1612 }
1613 
1614 /**
1615  * idpf_deinit_task - Device deinit routine
1616  * @adapter: Driver specific private structure
1617  *
1618  * Extended remove logic which will be used for
1619  * hard reset as well
1620  */
1621 void idpf_deinit_task(struct idpf_adapter *adapter)
1622 {
1623 	unsigned int i;
1624 
1625 	/* Wait until the init_task is done, else this thread might release
1626 	 * the resources first and the other thread might end up in a bad state
1627 	 */
1628 	cancel_delayed_work_sync(&adapter->init_task);
1629 
1630 	if (!adapter->vports)
1631 		return;
1632 
1633 	cancel_delayed_work_sync(&adapter->stats_task);
1634 
1635 	for (i = 0; i < adapter->max_vports; i++) {
1636 		if (adapter->vports[i])
1637 			idpf_vport_dealloc(adapter->vports[i]);
1638 	}
1639 }
1640 
1641 /**
1642  * idpf_check_reset_complete - check that reset is complete
1643  * @hw: pointer to hw struct
1644  * @reset_reg: struct with reset registers
1645  *
1646  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
1647  **/
1648 static int idpf_check_reset_complete(struct idpf_hw *hw,
1649 				     struct idpf_reset_reg *reset_reg)
1650 {
1651 	struct idpf_adapter *adapter = hw->back;
1652 	int i;
1653 
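	/* Poll the reset status register for roughly 10 to 20 seconds
	 * (2000 iterations of 5-10 ms each) before giving up.
	 */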
1654 	for (i = 0; i < 2000; i++) {
1655 		u32 reg_val = readl(reset_reg->rstat);
1656 
1657 		/* 0xFFFFFFFF might be read if the other side hasn't cleared the
1658 		 * register for us yet and 0xFFFFFFFF is not a valid value for
1659 		 * the register, so treat that as invalid.
1660 		 */
1661 		if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
1662 			return 0;
1663 
1664 		usleep_range(5000, 10000);
1665 	}
1666 
1667 	dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
1668 	/* Clear the reset flag unconditionally here since the reset
1669 	 * technically isn't in progress anymore from the driver's perspective
1670 	 */
1671 	clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1672 
1673 	return -EBUSY;
1674 }
1675 
1676 /**
1677  * idpf_set_vport_state - Save the vport state to be restored after the reset
1678  * @adapter: Driver specific private structure
1679  */
1680 static void idpf_set_vport_state(struct idpf_adapter *adapter)
1681 {
1682 	u16 i;
1683 
1684 	for (i = 0; i < adapter->max_vports; i++) {
1685 		struct idpf_netdev_priv *np;
1686 
1687 		if (!adapter->netdevs[i])
1688 			continue;
1689 
1690 		np = netdev_priv(adapter->netdevs[i]);
1691 		if (np->state == __IDPF_VPORT_UP)
1692 			set_bit(IDPF_VPORT_UP_REQUESTED,
1693 				adapter->vport_config[i]->flags);
1694 	}
1695 }
1696 
1697 /**
1698  * idpf_init_hard_reset - Initiate a hardware reset
1699  * @adapter: Driver specific private structure
1700  *
1701  * Deallocate the vports and all the resources associated with them and
1702  * reallocate. Also reinitialize the mailbox. Return 0 on success,
1703  * negative on failure.
1704  */
1705 static int idpf_init_hard_reset(struct idpf_adapter *adapter)
1706 {
1707 	struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
1708 	struct device *dev = &adapter->pdev->dev;
1709 	struct net_device *netdev;
1710 	int err;
1711 	u16 i;
1712 
1713 	mutex_lock(&adapter->vport_ctrl_lock);
1714 
1715 	dev_info(dev, "Device HW Reset initiated\n");
1716 
1717 	/* Avoid TX hangs on reset */
1718 	for (i = 0; i < adapter->max_vports; i++) {
1719 		netdev = adapter->netdevs[i];
1720 		if (!netdev)
1721 			continue;
1722 
1723 		netif_carrier_off(netdev);
1724 		netif_tx_disable(netdev);
1725 	}
1726 
1727 	/* Prepare for reset */
1728 	if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1729 		reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
1730 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
1731 		bool is_reset = idpf_is_reset_detected(adapter);
1732 
1733 		idpf_set_vport_state(adapter);
1734 		idpf_vc_core_deinit(adapter);
1735 		if (!is_reset)
1736 			reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
1737 		idpf_deinit_dflt_mbx(adapter);
1738 	} else {
1739 		dev_err(dev, "Unhandled hard reset cause\n");
1740 		err = -EBADRQC;
1741 		goto unlock_mutex;
1742 	}
1743 
1744 	/* Wait for reset to complete */
1745 	err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
1746 	if (err) {
1747 		dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
1748 			adapter->state);
1749 		goto unlock_mutex;
1750 	}
1751 
1752 	/* Reset is complete and so start building the driver resources again */
1753 	err = idpf_init_dflt_mbx(adapter);
1754 	if (err) {
1755 		dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
1756 		goto unlock_mutex;
1757 	}
1758 
1759 	/* Initialize the state machine, also allocate memory and request
1760 	 * resources
1761 	 */
1762 	err = idpf_vc_core_init(adapter);
1763 	if (err) {
1764 		idpf_deinit_dflt_mbx(adapter);
1765 		goto unlock_mutex;
1766 	}
1767 
1768 	/* Wait until all the vports are initialized to release the reset lock,
1769 	 * else user space callbacks may access uninitialized vports
1770 	 */
1771 	while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
1772 		msleep(100);
1773 
1774 unlock_mutex:
1775 	mutex_unlock(&adapter->vport_ctrl_lock);
1776 
1777 	return err;
1778 }
1779 
1780 /**
1781  * idpf_vc_event_task - Handle virtchannel event logic
1782  * @work: work queue struct
1783  */
1784 void idpf_vc_event_task(struct work_struct *work)
1785 {
1786 	struct idpf_adapter *adapter;
1787 
1788 	adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
1789 
1790 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
1791 		return;
1792 
1793 	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
1794 	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
1795 		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
1796 		idpf_init_hard_reset(adapter);
1797 	}
1798 }
1799 
1800 /**
1801  * idpf_initiate_soft_reset - Initiate a software reset
1802  * @vport: virtual port data struct
1803  * @reset_cause: reason for the soft reset
1804  *
1805  * Soft reset only reallocates vport queue resources. Returns 0 on success,
1806  * negative on failure.
1807  */
1808 int idpf_initiate_soft_reset(struct idpf_vport *vport,
1809 			     enum idpf_vport_reset_cause reset_cause)
1810 {
1811 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
1812 	enum idpf_vport_state current_state = np->state;
1813 	struct idpf_adapter *adapter = vport->adapter;
1814 	struct idpf_vport *new_vport;
1815 	int err, i;
1816 
1817 	/* If the system is low on memory, we can end up in a bad state if we
1818 	 * free all the memory for queue resources and try to allocate them
1819 	 * again. Instead, we pre-allocate the new resources before doing
1820 	 * anything and bail if the alloc fails.
1821 	 *
1822 	 * Make a clone of the existing vport to mimic its current
1823 	 * configuration, then modify the new structure with any requested
1824 	 * changes. Once the allocation of the new resources is done, stop the
1825 	 * existing vport and copy the configuration to the main vport. If an
1826 	 * error occurred, the existing vport will be untouched.
1827 	 *
1828 	 */
1829 	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1830 	if (!new_vport)
1831 		return -ENOMEM;
1832 
1833 	/* This purposely avoids copying the end of the struct because it
1834 	 * contains wait_queues and mutexes and other stuff we don't want to
1835 	 * mess with. Nothing below should use those variables from new_vport
1836 	 * and should instead always refer to them in vport if they need to.
1837 	 */
1838 	memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
1839 
1840 	/* Adjust resource parameters prior to reallocating resources */
1841 	switch (reset_cause) {
1842 	case IDPF_SR_Q_CHANGE:
1843 		err = idpf_vport_adjust_qs(new_vport);
1844 		if (err)
1845 			goto free_vport;
1846 		break;
1847 	case IDPF_SR_Q_DESC_CHANGE:
1848 		/* Update queue parameters before allocating resources */
1849 		idpf_vport_calc_num_q_desc(new_vport);
1850 		break;
1851 	case IDPF_SR_MTU_CHANGE:
1852 	case IDPF_SR_RSC_CHANGE:
1853 		break;
1854 	default:
1855 		dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
1856 		err = -EINVAL;
1857 		goto free_vport;
1858 	}
1859 
1860 	err = idpf_vport_queues_alloc(new_vport);
1861 	if (err)
1862 		goto free_vport;
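	/* If the vport is already down, just ask the device to delete the old
	 * queues now; otherwise flag them for deletion as part of stopping the
	 * vport.
	 */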
1863 	if (current_state <= __IDPF_VPORT_DOWN) {
1864 		idpf_send_delete_queues_msg(vport);
1865 	} else {
1866 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
1867 		idpf_vport_stop(vport);
1868 	}
1869 
1870 	idpf_deinit_rss(vport);
1871 	/* We pass in vport here because we need its wait_queue to send the
1872 	 * message, and it should be getting all the vport config data out of
1873 	 * the adapter. Be careful not to add code to add_queues that changes
1874 	 * the vport config within vport itself, as it will be wiped out by the
1875 	 * memcpy below.
1876 	 */
1877 	err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
1878 				       new_vport->num_complq,
1879 				       new_vport->num_rxq,
1880 				       new_vport->num_bufq);
1881 	if (err)
1882 		goto err_reset;
1883 
1884 	/* Same comment as above regarding avoiding copying the wait_queues and
1885 	 * mutexes applies here. We do not want to mess with those if possible.
1886 	 */
1887 	memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
1888 
1889 	/* Since idpf_vport_queues_alloc was called with new_vport, the queue
1890 	 * back pointers are currently pointing to the local new_vport. Reset
1891 	 * the back pointers to the original vport here.
1892 	 */
1893 	for (i = 0; i < vport->num_txq_grp; i++) {
1894 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1895 		int j;
1896 
1897 		tx_qgrp->vport = vport;
1898 		for (j = 0; j < tx_qgrp->num_txq; j++)
1899 			tx_qgrp->txqs[j]->vport = vport;
1900 
1901 		if (idpf_is_queue_model_split(vport->txq_model))
1902 			tx_qgrp->complq->vport = vport;
1903 	}
1904 
1905 	for (i = 0; i < vport->num_rxq_grp; i++) {
1906 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1907 		struct idpf_queue *q;
1908 		u16 num_rxq;
1909 		int j;
1910 
1911 		rx_qgrp->vport = vport;
1912 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
1913 			rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
1914 
1915 		if (idpf_is_queue_model_split(vport->rxq_model))
1916 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1917 		else
1918 			num_rxq = rx_qgrp->singleq.num_rxq;
1919 
1920 		for (j = 0; j < num_rxq; j++) {
1921 			if (idpf_is_queue_model_split(vport->rxq_model))
1922 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1923 			else
1924 				q = rx_qgrp->singleq.rxqs[j];
1925 			q->vport = vport;
1926 		}
1927 	}
1928 
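	/* A queue count change may alter how many vectors are needed, so
	 * redistribute vector indexes in that case; other reset causes keep
	 * the existing mapping.
	 */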
1929 	if (reset_cause == IDPF_SR_Q_CHANGE)
1930 		idpf_vport_alloc_vec_indexes(vport);
1931 
1932 	err = idpf_set_real_num_queues(vport);
1933 	if (err)
1934 		goto err_reset;
1935 
1936 	if (current_state == __IDPF_VPORT_UP)
1937 		err = idpf_vport_open(vport, false);
1938 
1939 	kfree(new_vport);
1940 
1941 	return err;
1942 
1943 err_reset:
1944 	idpf_vport_queues_rel(new_vport);
1945 free_vport:
1946 	kfree(new_vport);
1947 
1948 	return err;
1949 }
1950 
1951 /**
1952  * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1953  * @netdev: the netdevice
1954  * @addr: address to add
1955  *
1956  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1957  * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
1958  * meaning we cannot sleep in this context. Due to this, we have to add the
1959  * filter and send the virtchnl message asynchronously without waiting for the
1960  * response from the other side. We won't know whether or not the operation
1961  * actually succeeded until we get the message back.  Returns 0 on success,
1962  * negative on failure.
1963  */
1964 static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
1965 {
1966 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1967 
1968 	return idpf_add_mac_filter(np->vport, np, addr, true);
1969 }
1970 
1971 /**
1972  * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1973  * @netdev: the netdevice
1974  * @addr: address to remove
1975  *
1976  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1977  * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
1978  * meaning we cannot sleep in this context. Due to this we have to delete the
1979  * filter and send the virtchnl message asynchronously without waiting for the
1980  * response from the other side. We won't know whether or not the operation
1981  * actually succeeded until we get the message back. Returns 0 on success,
1982  * negative on failure.
1983  */
1984 static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
1985 {
1986 	struct idpf_netdev_priv *np = netdev_priv(netdev);
1987 
1988 	/* Under some circumstances, we might receive a request to delete
1989 	 * our own device address from our uc list. Because we store the
1990 	 * device address in the VSI's MAC filter list, we need to ignore
1991 	 * such requests and not delete our device address from this list.
1992 	 */
1993 	if (ether_addr_equal(addr, netdev->dev_addr))
1994 		return 0;
1995 
1996 	idpf_del_mac_filter(np->vport, np, addr, true);
1997 
1998 	return 0;
1999 }
2000 
2001 /**
2002  * idpf_set_rx_mode - NDO callback to set the netdev filters
2003  * @netdev: network interface device structure
2004  *
2005  * Stack takes addr_list_lock spinlock before calling our .set_rx_mode.  We
2006  * cannot sleep in this context.
2007  */
2008 static void idpf_set_rx_mode(struct net_device *netdev)
2009 {
2010 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2011 	struct idpf_vport_user_config_data *config_data;
2012 	struct idpf_adapter *adapter;
2013 	bool changed = false;
2014 	struct device *dev;
2015 	int err;
2016 
2017 	adapter = np->adapter;
2018 	dev = &adapter->pdev->dev;
2019 
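	/* Let the core walk the unicast/multicast lists and call our
	 * idpf_addr_sync()/idpf_addr_unsync() callbacks for each change.
	 */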
2020 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
2021 		__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2022 		__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
2023 	}
2024 
2025 	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
2026 		return;
2027 
2028 	config_data = &adapter->vport_config[np->vport_idx]->user_config;
2029 	/* IFF_PROMISC enables both unicast and multicast promiscuous,
2030 	 * while IFF_ALLMULTI only enables multicast such that:
2031 	 *
2032 	 * promisc  + allmulti		= unicast | multicast
2033 	 * promisc  + !allmulti		= unicast | multicast
2034 	 * !promisc + allmulti		= multicast
2035 	 */
2036 	if ((netdev->flags & IFF_PROMISC) &&
2037 	    !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2038 		changed = true;
2039 		dev_info(dev, "Entering promiscuous mode\n");
2040 		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
2041 			dev_info(dev, "Entering multicast promiscuous mode\n");
2042 	}
2043 
2044 	if (!(netdev->flags & IFF_PROMISC) &&
2045 	    test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
2046 		changed = true;
2047 		dev_info(dev, "Leaving promiscuous mode\n");
2048 	}
2049 
2050 	if (netdev->flags & IFF_ALLMULTI &&
2051 	    !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2052 		changed = true;
2053 		dev_info(dev, "Entering multicast promiscuous mode\n");
2054 	}
2055 
2056 	if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
2057 	    test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
2058 		changed = true;
2059 		dev_info(dev, "Leaving multicast promiscuous mode\n");
2060 	}
2061 
2062 	if (!changed)
2063 		return;
2064 
2065 	err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
2066 	if (err)
2067 		dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
2068 }
2069 
2070 /**
2071  * idpf_vport_manage_rss_lut - disable/enable RSS
2072  * @vport: the vport being changed
2073  *
2074  * When a request is made to disable RSS, this function zeroes out the RSS
2075  * LUT, while on a request to enable RSS it restores the LUT from the cached
2076  * (default or user configured) copy.
2077  */
2078 static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
2079 {
2080 	bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
2081 	struct idpf_rss_data *rss_data;
2082 	u16 idx = vport->idx;
2083 	int lut_size;
2084 
2085 	rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
2086 	lut_size = rss_data->rss_lut_size * sizeof(u32);
2087 
2088 	if (ena) {
2089 		/* This will contain the default or user configured LUT */
2090 		memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
2091 	} else {
2092 		/* Save a copy of the current LUT to be restored later if
2093 		 * requested.
2094 		 */
2095 		memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
2096 
2097 		/* Zero out the current LUT to disable */
2098 		memset(rss_data->rss_lut, 0, lut_size);
2099 	}
2100 
2101 	return idpf_config_rss(vport);
2102 }
2103 
2104 /**
2105  * idpf_set_features - set the netdev feature flags
2106  * @netdev: ptr to the netdev being adjusted
2107  * @features: the feature set that the stack is suggesting
2108  */
2109 static int idpf_set_features(struct net_device *netdev,
2110 			     netdev_features_t features)
2111 {
2112 	netdev_features_t changed = netdev->features ^ features;
2113 	struct idpf_adapter *adapter;
2114 	struct idpf_vport *vport;
2115 	int err = 0;
2116 
2117 	idpf_vport_ctrl_lock(netdev);
2118 	vport = idpf_netdev_to_vport(netdev);
2119 
2120 	adapter = vport->adapter;
2121 
2122 	if (idpf_is_reset_in_prog(adapter)) {
2123 		dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features is temporarily unavailable.\n");
2124 		err = -EBUSY;
2125 		goto unlock_mutex;
2126 	}
2127 
2128 	if (changed & NETIF_F_RXHASH) {
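		/* Toggle the feature bit first so idpf_vport_manage_rss_lut()
		 * sees the new RXHASH state when deciding whether to restore
		 * or zero the LUT.
		 */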
2129 		netdev->features ^= NETIF_F_RXHASH;
2130 		err = idpf_vport_manage_rss_lut(vport);
2131 		if (err)
2132 			goto unlock_mutex;
2133 	}
2134 
2135 	if (changed & NETIF_F_GRO_HW) {
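		/* Hardware GRO is done via RSC, so applying the change
		 * requires reallocating queue resources through a soft reset.
		 */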
2136 		netdev->features ^= NETIF_F_GRO_HW;
2137 		err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
2138 		if (err)
2139 			goto unlock_mutex;
2140 	}
2141 
2142 	if (changed & NETIF_F_LOOPBACK) {
2143 		netdev->features ^= NETIF_F_LOOPBACK;
2144 		err = idpf_send_ena_dis_loopback_msg(vport);
2145 	}
2146 
2147 unlock_mutex:
2148 	idpf_vport_ctrl_unlock(netdev);
2149 
2150 	return err;
2151 }
2152 
2153 /**
2154  * idpf_open - Called when a network interface becomes active
2155  * @netdev: network interface device structure
2156  *
2157  * The open entry point is called when a network interface is made
2158  * active by the system (IFF_UP).  At this point all resources needed
2159  * for transmit and receive operations are allocated, the interrupt
2160  * handler is registered with the OS, the netdev watchdog is enabled,
2161  * and the stack is notified that the interface is ready.
2162  *
2163  * Returns 0 on success, negative value on failure
2164  */
2165 static int idpf_open(struct net_device *netdev)
2166 {
2167 	struct idpf_vport *vport;
2168 	int err;
2169 
2170 	idpf_vport_ctrl_lock(netdev);
2171 	vport = idpf_netdev_to_vport(netdev);
2172 
2173 	err = idpf_vport_open(vport, true);
2174 
2175 	idpf_vport_ctrl_unlock(netdev);
2176 
2177 	return err;
2178 }
2179 
2180 /**
2181  * idpf_change_mtu - NDO callback to change the MTU
2182  * @netdev: network interface device structure
2183  * @new_mtu: new value for maximum frame size
2184  *
2185  * Returns 0 on success, negative on failure
2186  */
2187 static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
2188 {
2189 	struct idpf_vport *vport;
2190 	int err;
2191 
2192 	idpf_vport_ctrl_lock(netdev);
2193 	vport = idpf_netdev_to_vport(netdev);
2194 
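	/* Record the new MTU before triggering the soft reset that rebuilds
	 * the vport queue resources.
	 */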
2195 	netdev->mtu = new_mtu;
2196 
2197 	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
2198 
2199 	idpf_vport_ctrl_unlock(netdev);
2200 
2201 	return err;
2202 }
2203 
2204 /**
2205  * idpf_features_check - Validate packet conforms to limits
2206  * @skb: skb buffer
2207  * @netdev: This port's netdev
2208  * @features: Offload features that the stack believes apply
2209  */
2210 static netdev_features_t idpf_features_check(struct sk_buff *skb,
2211 					     struct net_device *netdev,
2212 					     netdev_features_t features)
2213 {
2214 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2215 	struct idpf_adapter *adapter = vport->adapter;
2216 	size_t len;
2217 
2218 	/* No point in doing any of this if neither checksum nor GSO is
2219 	 * being requested for this frame. We can rule out both by just
2220 	 * checking for CHECKSUM_PARTIAL.
2221 	 */
2222 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2223 		return features;
2224 
2225 	/* We cannot support GSO if the MSS is going to be less than
2226 	 * 88 bytes. If it is then we need to drop support for GSO.
2227 	 */
2228 	if (skb_is_gso(skb) &&
2229 	    (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
2230 		features &= ~NETIF_F_GSO_MASK;
2231 
2232 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
2233 	len = skb_network_offset(skb);
2234 	if (unlikely(len & ~(126)))
2235 		goto unsupported;
2236 
2237 	len = skb_network_header_len(skb);
2238 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2239 		goto unsupported;
2240 
2241 	if (!skb->encapsulation)
2242 		return features;
2243 
2244 	/* L4TUNLEN can support 127 words */
2245 	len = skb_inner_network_header(skb) - skb_transport_header(skb);
2246 	if (unlikely(len & ~(127 * 2)))
2247 		goto unsupported;
2248 
2249 	/* IPLEN can support at most 127 dwords */
2250 	len = skb_inner_network_header_len(skb);
2251 	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
2252 		goto unsupported;
2253 
2254 	/* No need to validate L4LEN as TCP is the only protocol with a
2255 	 * flexible value, and we support all possible values supported
2256 	 * by TCP, which is at most 15 dwords
2257 	 */
2258 
2259 	return features;
2260 
2261 unsupported:
2262 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2263 }
2264 
2265 /**
2266  * idpf_set_mac - NDO callback to set port mac address
2267  * @netdev: network interface device structure
2268  * @p: pointer to an address structure
2269  *
2270  * Returns 0 on success, negative on failure
2271  */
2272 static int idpf_set_mac(struct net_device *netdev, void *p)
2273 {
2274 	struct idpf_netdev_priv *np = netdev_priv(netdev);
2275 	struct idpf_vport_config *vport_config;
2276 	struct sockaddr *addr = p;
2277 	struct idpf_vport *vport;
2278 	int err = 0;
2279 
2280 	idpf_vport_ctrl_lock(netdev);
2281 	vport = idpf_netdev_to_vport(netdev);
2282 
2283 	if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
2284 			     VIRTCHNL2_CAP_MACFILTER)) {
2285 		dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
2286 		err = -EOPNOTSUPP;
2287 		goto unlock_mutex;
2288 	}
2289 
2290 	if (!is_valid_ether_addr(addr->sa_data)) {
2291 		dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
2292 			 addr->sa_data);
2293 		err = -EADDRNOTAVAIL;
2294 		goto unlock_mutex;
2295 	}
2296 
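	/* Nothing to do if the requested address is already in use */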
2297 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
2298 		goto unlock_mutex;
2299 
2300 	vport_config = vport->adapter->vport_config[vport->idx];
2301 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
2302 	if (err) {
2303 		__idpf_del_mac_filter(vport_config, addr->sa_data);
2304 		goto unlock_mutex;
2305 	}
2306 
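	/* The new filter is in place; drop the filter for the old address */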
2307 	if (is_valid_ether_addr(vport->default_mac_addr))
2308 		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
2309 
2310 	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
2311 	eth_hw_addr_set(netdev, addr->sa_data);
2312 
2313 unlock_mutex:
2314 	idpf_vport_ctrl_unlock(netdev);
2315 
2316 	return err;
2317 }
2318 
2319 /**
2320  * idpf_alloc_dma_mem - Allocate dma memory
2321  * @hw: pointer to hw struct
2322  * @mem: pointer to dma_mem struct
2323  * @size: size of the memory to allocate
2324  */
2325 void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
2326 {
2327 	struct idpf_adapter *adapter = hw->back;
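	/* Round the request up to a 4K boundary and remember the rounded size
	 * so the same value is used when freeing.
	 */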
2328 	size_t sz = ALIGN(size, 4096);
2329 
2330 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
2331 				     &mem->pa, GFP_KERNEL);
2332 	mem->size = sz;
2333 
2334 	return mem->va;
2335 }
2336 
2337 /**
2338  * idpf_free_dma_mem - Free the allocated dma memory
2339  * @hw: pointer to hw struct
2340  * @mem: pointer to dma_mem struct
2341  */
2342 void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
2343 {
2344 	struct idpf_adapter *adapter = hw->back;
2345 
2346 	dma_free_coherent(&adapter->pdev->dev, mem->size,
2347 			  mem->va, mem->pa);
2348 	mem->size = 0;
2349 	mem->va = NULL;
2350 	mem->pa = 0;
2351 }
2352 
2353 static const struct net_device_ops idpf_netdev_ops_splitq = {
2354 	.ndo_open = idpf_open,
2355 	.ndo_stop = idpf_stop,
2356 	.ndo_start_xmit = idpf_tx_splitq_start,
2357 	.ndo_features_check = idpf_features_check,
2358 	.ndo_set_rx_mode = idpf_set_rx_mode,
2359 	.ndo_validate_addr = eth_validate_addr,
2360 	.ndo_set_mac_address = idpf_set_mac,
2361 	.ndo_change_mtu = idpf_change_mtu,
2362 	.ndo_get_stats64 = idpf_get_stats64,
2363 	.ndo_set_features = idpf_set_features,
2364 	.ndo_tx_timeout = idpf_tx_timeout,
2365 };
2366 
2367 static const struct net_device_ops idpf_netdev_ops_singleq = {
2368 	.ndo_open = idpf_open,
2369 	.ndo_stop = idpf_stop,
2370 	.ndo_start_xmit = idpf_tx_singleq_start,
2371 	.ndo_features_check = idpf_features_check,
2372 	.ndo_set_rx_mode = idpf_set_rx_mode,
2373 	.ndo_validate_addr = eth_validate_addr,
2374 	.ndo_set_mac_address = idpf_set_mac,
2375 	.ndo_change_mtu = idpf_change_mtu,
2376 	.ndo_get_stats64 = idpf_get_stats64,
2377 	.ndo_set_features = idpf_set_features,
2378 	.ndo_tx_timeout = idpf_tx_timeout,
2379 };
2380