// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <net/libeth/rx.h>

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make the cookie unique for every message
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
	u16 num_max_vports = idpf_get_max_vports(adapter);
	int i;

	for (i = 0; i < num_max_vports; i++)
		if (adapter->vport_ids[i] == v_id)
			return adapter->vports[i];

	return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
				   const struct virtchnl2_event *v2e)
{
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
	if (!vport) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
				    le32_to_cpu(v2e->vport_id));
		return;
	}
	np = netdev_priv(vport->netdev);

	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);

	if (vport->link_up == v2e->link_status)
		return;

	vport->link_up = v2e->link_status;

	if (np->state != __IDPF_VPORT_UP)
		return;

	if (vport->link_up) {
		netif_tx_start_all_queues(vport->netdev);
		netif_carrier_on(vport->netdev);
	} else {
		netif_tx_stop_all_queues(vport->netdev);
		netif_carrier_off(vport->netdev);
	}
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
				struct idpf_ctlq_msg *ctlq_msg)
{
	int payload_size = ctlq_msg->ctx.indirect.payload->size;
	struct virtchnl2_event *v2e;
	u32 event;

	if (payload_size < sizeof(*v2e)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    payload_size);
		return;
	}

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		idpf_handle_event_link(adapter, v2e);
		return;
	default:
		dev_err(&adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

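	/* Each cleaned descriptor was posted by idpf_send_mb_msg(), which
	 * handed both the ctlq_msg and its DMA payload buffer over to the
	 * control queue; reclaim both here.
	 */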
	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * idpf_ptp_is_mb_msg - Check if the message is PTP-related
 * @op: virtchnl opcode
 *
 * Return: true if msg is PTP-related, false otherwise.
 */
static bool idpf_ptp_is_mb_msg(u32 op)
{
	switch (op) {
	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
		return true;
	default:
		return false;
	}
}

/**
 * idpf_prepare_ptp_mb_msg - Prepare PTP related message
 *
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @ctlq_msg: Corresponding control queue message
 */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{
	/* If the message is PTP-related and the secondary mailbox is available,
	 * send the message through the secondary mailbox.
	 */
	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
		return;

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
}
#else /* !CONFIG_PTP_1588_CLOCK */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{ }
#endif /* CONFIG_PTP_1588_CLOCK */

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Prepares the control queue message and initiates the send API
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and is expected to
	 * be corrected with a new run, either by user or driver
	 * flows after reset.
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;

	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);

	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate synchro
 * as well. For now, this API is only used from within a workqueue context;
 * raw_spin_lock() is enough.
 */
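/* Typical synchronous usage (sketch only; this is the same pattern the
 * idpf_send_*_msg() helpers below follow):
 *
 *	struct idpf_vc_xn_params xn_params = {};
 *	ssize_t reply_sz;
 *
 *	xn_params.vc_op = VIRTCHNL2_OP_...;
 *	xn_params.send_buf.iov_base = &req;
 *	xn_params.send_buf.iov_len = sizeof(req);
 *	xn_params.recv_buf.iov_base = &resp;
 *	xn_params.recv_buf.iov_len = sizeof(resp);
 *	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 *	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *	if (reply_sz < 0)
 *		return reply_sz;
 */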
/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn)			\
	raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn)		\
	raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
	xn->reply.iov_base = NULL;
	xn->reply.iov_len = 0;

	if (xn->state != IDPF_VC_XN_SHUTDOWN)
		xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_init(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		xn->state = IDPF_VC_XN_IDLE;
		xn->idx = i;
		idpf_vc_xn_release_bufs(xn);
		init_completion(&xn->completed);
	}

	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}

/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken up and their transactions aborted.
 * Further operations on that object will fail.
 */
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
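		/* Wake every waiter; each will observe the
		 * IDPF_VC_XN_SHUTDOWN state and fail its transaction with
		 * -ENXIO (see idpf_vc_xn_exec()).
		 */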
		complete_all(&xn->completed);
	}
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
	struct idpf_vc_xn *xn = NULL;
	unsigned long free_idx;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	if (free_idx == IDPF_VC_XN_RING_LEN)
		goto do_unlock;

	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
	xn = &vcxn_mngr->ring[free_idx];
	xn->salt = vcxn_mngr->salt++;

do_unlock:
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}

/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *   -vc_op: virtchnl operation to send
 *   -send_buf: kvec iov for send buf and len
 *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *   -timeout_ms: timeout waiting for a reply (milliseconds)
 *   -async: don't wait for message reply, will lose caller context
 *   -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able to
		 * send any messages. At least this way we make an attempt to
		 * remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
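	/* The cookie rides in the descriptor's SW cookie field and is echoed
	 * back in the reply: the idx half locates this transaction in the
	 * ring and the salt half rejects stale replies (see
	 * idpf_vc_xn_forward_reply()).
	 */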
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

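	/* For an async transaction, ownership of the ring entry passes to
	 * the reply path: idpf_vc_xn_forward_async() runs the handler (if
	 * any) and pushes the transaction back onto the free list.
	 */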
	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * time than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be a non-issue,
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		dev_notice_ratelimited(&adapter->pdev->dev,
				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
				       params->vc_op, cookie, xn->vc_op,
				       xn->salt, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}

/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll just
 * check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	int err = 0;

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		err = -EINVAL;
		goto release_bufs;
	}

	if (xn->async_handler) {
		err = xn->async_handler(adapter, xn, ctlq_msg);
		goto release_bufs;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
	}

release_bufs:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

	return err;
}

/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	idpf_vc_xn_lock(xn);
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
				    xn->vc_op, xn->salt, xn->state,
				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
		idpf_vc_xn_unlock(xn);
		return -EINVAL;
	}

	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * to know if it should stop trying to clean the ring if we
		 * lost the virtchnl. We need to stop playing with registers
		 * and yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->data_len;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* we _cannot_ hold lock while calling complete */
	complete(&xn->completed);

	return err;
}

/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: Driver specific private structure
 *
 * Receives a control queue message and posts the receive buffer. Returns 0
 * on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will get <= num_recv messages and output how many
		 * were actually received in num_recv.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
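			/* No indirect buffer came with this message, so
			 * there is nothing to hand back to
			 * idpf_ctlq_post_rx_buffs() below.
			 */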
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
						   adapter->hw.arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dmam_free_coherent(&adapter->pdev->dev,
						   dma_mem->size, dma_mem->va,
						   dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 **/
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	int event;
	int i;

	for (i = 0; i < vport->num_txq; i++)
		idpf_queue_set(SW_MARKER, vport->txqs[i]);

	event = wait_event_timeout(vport->sw_marker_wq,
				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
						      vport->flags),
				   msecs_to_jiffies(500));

	for (i = 0; i < vport->num_txq; i++)
		idpf_queue_clear(POLL_MODE, vport->txqs[i]);

	if (event)
		return 0;

	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");

	return -ETIMEDOUT;
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message.  Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}

/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP		|
			    VIRTCHNL2_CAP_RSS_IPV4_UDP		|
			    VIRTCHNL2_CAP_RSS_IPV4_SCTP		|
			    VIRTCHNL2_CAP_RSS_IPV4_OTHER	|
			    VIRTCHNL2_CAP_RSS_IPV6_TCP		|
			    VIRTCHNL2_CAP_RSS_IPV6_UDP		|
			    VIRTCHNL2_CAP_RSS_IPV6_SCTP		|
			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);

	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
			    VIRTCHNL2_CAP_MACFILTER		|
			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
			    VIRTCHNL2_CAP_PROMISC		|
			    VIRTCHNL2_CAP_LOOPBACK		|
			    VIRTCHNL2_CAP_PTP);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}

/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;
	u16 default_vports = idpf_get_default_vports(adapter);
	int max_rx_q, max_tx_q;

	mutex_lock(&adapter->queue_lock);

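	/* Give each default vport an equal share of the device's queue
	 * capability; vports allocated beyond the default count fall back
	 * to the IDPF_MIN_Q minimum.
	 */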
	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
	if (adapter->num_alloc_vports < default_vports) {
		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
	} else {
		max_q->max_rxq = IDPF_MIN_Q;
		max_q->max_txq = IDPF_MIN_Q;
	}
	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
	max_q->max_complq = max_q->max_txq;

	if (avail_queues->avail_rxq < max_q->max_rxq ||
	    avail_queues->avail_txq < max_q->max_txq ||
	    avail_queues->avail_bufq < max_q->max_bufq ||
	    avail_queues->avail_complq < max_q->max_complq) {
		mutex_unlock(&adapter->queue_lock);

		return -EINVAL;
	}

	avail_queues->avail_rxq -= max_q->max_rxq;
	avail_queues->avail_txq -= max_q->max_txq;
	avail_queues->avail_bufq -= max_q->max_bufq;
	avail_queues->avail_complq -= max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);

	return 0;
}

/**
 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues;

	mutex_lock(&adapter->queue_lock);
	avail_queues = &adapter->avail_queues;

	avail_queues->avail_rxq += max_q->max_rxq;
	avail_queues->avail_txq += max_q->max_txq;
	avail_queues->avail_bufq += max_q->max_bufq;
	avail_queues->avail_complq += max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);
}

/**
 * idpf_init_avail_queues - Initialize available queues on the device
 * @adapter: Driver specific private structure
 */
static void idpf_init_avail_queues(struct idpf_adapter *adapter)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;

	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}

/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @vport: virtual port structure
 * @reg_vals: Register offsets to store in
 *
 * Returns number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
			   struct idpf_vec_regs *reg_vals)
{
	struct virtchnl2_vector_chunks *chunks;
	struct idpf_vec_regs reg_val;
	u16 num_vchunks, num_vec;
	int num_regs = 0, i, j;

	chunks = &vport->adapter->req_vec_chunks->vchunks;
	num_vchunks = le16_to_cpu(chunks->num_vchunks);

	for (j = 0; j < num_vchunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u32 dynctl_reg_spacing;
		u32 itrn_reg_spacing;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);

		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);

		for (i = 0; i < num_vec; i++) {
			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
			reg_vals[num_regs].itrn_index_spacing =
						reg_val.itrn_index_spacing;

			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
			reg_val.itrn_reg += itrn_reg_spacing;
			num_regs++;
		}
	}

	return num_regs;
}

/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information for a specific queue type and stores them in the array
 * passed as an argument. It returns the actual number of queue registers
 * that are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
				struct virtchnl2_queue_reg_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
	int reg_filled = 0, i;
	u32 reg_val;

	while (num_chunks--) {
		struct virtchnl2_queue_reg_chunk *chunk;
		u16 num_q;

		chunk = &chunks->chunks[num_chunks];
		if (le32_to_cpu(chunk->type) != q_type)
			continue;

		num_q = le32_to_cpu(chunk->num_queues);
		reg_val = le64_to_cpu(chunk->qtail_reg_start);
		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
			reg_vals[reg_filled++] = reg_val;
			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
		}
	}

	return reg_filled;
}

/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter, reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}

/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 *
 * Return 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_regs, ret = 0;
	u32 *reg_vals;

	/* We may never deal with more than 256 queues of the same type */
	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
	if (!reg_vals)
		return -ENOMEM;

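	/* Queue chunk info comes from the ADD_QUEUES response if queues were
	 * requested at runtime, otherwise from the CREATE_VPORT response.
	 */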
	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
		  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	/* Initialize Tx queue tail register address */
	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
					VIRTCHNL2_QUEUE_TYPE_TX,
					chunks);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
					 VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	/* Initialize Rx/buffer queue tail register address based on Rx queue
	 * model
	 */
	if (idpf_is_queue_model_split(vport->rxq_model)) {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
						chunks);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	} else {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX,
						chunks);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	}

free_reg_vals:
	kfree(reg_vals);

	return ret;
}

/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
	ssize_t reply_sz;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
							  GFP_KERNEL);
		if (!adapter->vport_params_reqd[idx])
			return -ENOMEM;
	}

	vport_msg = adapter->vport_params_reqd[idx];
	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	vport_msg->vport_index = cpu_to_le16(idx);

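	/* Splitq is the default queue model; singleq is only requested when
	 * support is compiled in and the user asked for it.
	 */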
	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
	if (err) {
		dev_err(&adapter->pdev->dev, "Enough queues are not available");

		return err;
	}

	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
			goto free_vport_params;
		}
	}

	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
	xn_params.send_buf.iov_base = vport_msg;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto free_vport_params;
	}

	return 0;

free_vport_params:
	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	return err;
}

/**
 * idpf_check_supported_desc_ids - Verify we have required descriptor support
 * @vport: virtual port structure
 *
 * Return 0 on success, error on failure
 */
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport *vport_msg;
	u64 rx_desc_ids, tx_desc_ids;

	vport_msg = adapter->vport_params_recvd[vport->idx];

	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
		return -EOPNOTSUPP;
	}

	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);

	if (idpf_is_queue_model_split(vport->rxq_model)) {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
		}
	} else {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
			vport->base_rxd = true;
	}

	if (!idpf_is_queue_model_split(vport->txq_model))
		return 0;

	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
	}

	return 0;
}

/**
 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
 * @vport: virtual port data structure
 *
 * Send virtchnl destroy vport message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
 * @vport: virtual port data structure
 *
 * Send enable vport virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
 * @vport: virtual port data structure
 *
 * Send disable vport virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
 * @vport: virtual port data structure
 *
 * Send config tx queues virtchnl message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	u32 config_sz, chunk_sz, buf_sz;
	int totqs, num_msgs, num_chunks;
	ssize_t reply_sz;
	int i, k = 0;

	totqs = vport->num_txq + vport->num_complq;
	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
	if (!qi)
		return -ENOMEM;

	/* Populate the queue info buffer with all queue context info */
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
		int j, sched_mode;

		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
			qi[k].queue_id =
				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
			qi[k].model =
				cpu_to_le16(vport->txq_model);
			qi[k].type =
				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
			qi[k].ring_len =
				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
			qi[k].dma_ring_addr =
				cpu_to_le64(tx_qgrp->txqs[j]->dma);
			if (idpf_is_queue_model_split(vport->txq_model)) {
				struct idpf_tx_queue *q = tx_qgrp->txqs[j];

				qi[k].tx_compl_queue_id =
					cpu_to_le16(tx_qgrp->complq->q_id);
				qi[k].relative_queue_id = cpu_to_le16(j);

				if (idpf_queue_has(FLOW_SCH_EN, q))
					qi[k].sched_mode =
					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
				else
					qi[k].sched_mode =
					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
			} else {
				qi[k].sched_mode =
					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
			}
		}

		if (!idpf_is_queue_model_split(vport->txq_model))
			continue;

		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
		qi[k].model = cpu_to_le16(vport->txq_model);
		qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);

		if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
		else
			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
		qi[k].sched_mode = cpu_to_le16(sched_mode);

		k++;
	}

	/* Make sure accounting agrees */
	if (k != totqs)
		return -EINVAL;

	/* Chunk up the queue contexts into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	config_sz = sizeof(struct virtchnl2_config_tx_queues);
	chunk_sz = sizeof(struct virtchnl2_txq_info);

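	/* IDPF_NUM_CHUNKS_PER_MSG() caps the chunk count at the number of
	 * virtchnl2_txq_info entries that fit in one mailbox buffer after
	 * the virtchnl2_config_tx_queues header (presumably bounded by
	 * IDPF_CTLQ_MAX_BUF_LEN, the buffer size used in idpf_send_mb_msg()).
	 */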
	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
			   totqs);
	num_msgs = DIV_ROUND_UP(totqs, num_chunks);

	buf_sz = struct_size(ctq, qinfo, num_chunks);
	ctq = kzalloc(buf_sz, GFP_KERNEL);
	if (!ctq)
		return -ENOMEM;

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(ctq, 0, buf_sz);
		ctq->vport_id = cpu_to_le32(vport->vport_id);
		ctq->num_qinfo = cpu_to_le16(num_chunks);
		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);

		xn_params.send_buf.iov_base = ctq;
		xn_params.send_buf.iov_len = buf_sz;
		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_chunks;
		totqs -= num_chunks;
		num_chunks = min(num_chunks, totqs);
		/* Recalculate buffer size */
		buf_sz = struct_size(ctq, qinfo, num_chunks);
	}

	return 0;
}
1521 
1522 /**
1523  * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1524  * @vport: virtual port data structure
1525  *
1526  * Send config rx queues virtchnl message.  Returns 0 on success, negative on
1527  * failure.
1528  */
idpf_send_config_rx_queues_msg(struct idpf_vport * vport)1529 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
1530 {
1531 	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
1532 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
1533 	struct idpf_vc_xn_params xn_params = {};
1534 	u32 config_sz, chunk_sz, buf_sz;
1535 	int totqs, num_msgs, num_chunks;
1536 	ssize_t reply_sz;
1537 	int i, k = 0;
1538 
1539 	totqs = vport->num_rxq + vport->num_bufq;
1540 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
1541 	if (!qi)
1542 		return -ENOMEM;
1543 
1544 	/* Populate the queue info buffer with all queue context info */
1545 	for (i = 0; i < vport->num_rxq_grp; i++) {
1546 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1547 		u16 num_rxq;
1548 		int j;
1549 
1550 		if (!idpf_is_queue_model_split(vport->rxq_model))
1551 			goto setup_rxqs;
1552 
1553 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1554 			struct idpf_buf_queue *bufq =
1555 				&rx_qgrp->splitq.bufq_sets[j].bufq;
1556 
1557 			qi[k].queue_id = cpu_to_le32(bufq->q_id);
1558 			qi[k].model = cpu_to_le16(vport->rxq_model);
1559 			qi[k].type =
1560 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1561 			qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1562 			qi[k].ring_len = cpu_to_le16(bufq->desc_count);
1563 			qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
1564 			qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
1565 			qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1566 			qi[k].rx_buffer_low_watermark =
1567 				cpu_to_le16(bufq->rx_buffer_low_watermark);
1568 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1569 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1570 		}
1571 
1572 setup_rxqs:
1573 		if (idpf_is_queue_model_split(vport->rxq_model))
1574 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1575 		else
1576 			num_rxq = rx_qgrp->singleq.num_rxq;
1577 
1578 		for (j = 0; j < num_rxq; j++, k++) {
1579 			const struct idpf_bufq_set *sets;
1580 			struct idpf_rx_queue *rxq;
1581 
1582 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1583 				rxq = rx_qgrp->singleq.rxqs[j];
1584 				goto common_qi_fields;
1585 			}
1586 
1587 			rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1588 			sets = rxq->bufq_sets;
1589 
1590 			/* In splitq mode, RXQ buffer size should be
1591 			 * set to that of the first buffer queue
1592 			 * associated with this RXQ.
1593 			 */
1594 			rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
1595 
1596 			qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
1597 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1598 				qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
1599 				qi[k].rx_bufq2_id =
1600 					cpu_to_le16(sets[1].bufq.q_id);
1601 			}
1602 			qi[k].rx_buffer_low_watermark =
1603 				cpu_to_le16(rxq->rx_buffer_low_watermark);
1604 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1605 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1606 
1607 			rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
1608 
1609 			if (idpf_queue_has(HSPLIT_EN, rxq)) {
1610 				qi[k].qflags |=
1611 					cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1612 				qi[k].hdr_buffer_size =
1613 					cpu_to_le16(rxq->rx_hbuf_size);
1614 			}
1615 
1616 common_qi_fields:
1617 			qi[k].queue_id = cpu_to_le32(rxq->q_id);
1618 			qi[k].model = cpu_to_le16(vport->rxq_model);
1619 			qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1620 			qi[k].ring_len = cpu_to_le16(rxq->desc_count);
1621 			qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
1622 			qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
1623 			qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
1624 			qi[k].qflags |=
1625 				cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1626 			qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
1627 		}
1628 	}
1629 
1630 	/* Make sure accounting agrees */
1631 	if (k != totqs)
1632 		return -EINVAL;
1633 
1634 	/* Chunk up the queue contexts into multiple messages to avoid
1635 	 * sending a control queue message buffer that is too large
1636 	 */
1637 	config_sz = sizeof(struct virtchnl2_config_rx_queues);
1638 	chunk_sz = sizeof(struct virtchnl2_rxq_info);
1639 
1640 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1641 			   totqs);
1642 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1643 
1644 	buf_sz = struct_size(crq, qinfo, num_chunks);
1645 	crq = kzalloc(buf_sz, GFP_KERNEL);
1646 	if (!crq)
1647 		return -ENOMEM;
1648 
1649 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1650 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1651 
1652 	for (i = 0, k = 0; i < num_msgs; i++) {
1653 		memset(crq, 0, buf_sz);
1654 		crq->vport_id = cpu_to_le32(vport->vport_id);
1655 		crq->num_qinfo = cpu_to_le16(num_chunks);
1656 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
1657 
1658 		xn_params.send_buf.iov_base = crq;
1659 		xn_params.send_buf.iov_len = buf_sz;
1660 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1661 		if (reply_sz < 0)
1662 			return reply_sz;
1663 
1664 		k += num_chunks;
1665 		totqs -= num_chunks;
1666 		num_chunks = min(num_chunks, totqs);
1667 		/* Recalculate buffer size */
1668 		buf_sz = struct_size(crq, qinfo, num_chunks);
1669 	}
1670 
1671 	return 0;
1672 }
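
/*
 * Illustrative sketch, not part of the driver: the chunking arithmetic
 * used by the chunked senders in this file, reduced to its core. It
 * assumes IDPF_NUM_CHUNKS_PER_MSG() divides the mailbox buffer space
 * left after the fixed header by the per-chunk size; the helper name
 * and parameters are hypothetical.
 */
static u32 idpf_example_num_msgs(u32 max_buf_len, u32 hdr_sz, u32 chunk_sz,
				 u32 totqs)
{
	/* chunks that fit in one mailbox buffer after the fixed header */
	u32 per_msg = (max_buf_len - hdr_sz) / chunk_sz;
	u32 num_chunks = min(per_msg, totqs);

	/* e.g. per_msg = 70, totqs = 100 -> 2 messages (70 + 30 chunks) */
	return DIV_ROUND_UP(totqs, num_chunks);
}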
1673 
1674 /**
1675  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1676  * queues message
1677  * @vport: virtual port data structure
1678  * @ena: if true enable, false disable
1679  *
1680  * Send enable or disable queues virtchnl message. Returns 0 on success,
1681  * negative on failure.
1682  */
1683 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
1684 {
1685 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
1686 	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
1687 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
1688 	struct idpf_vc_xn_params xn_params = {};
1689 	struct virtchnl2_queue_chunks *qcs;
1690 	u32 config_sz, chunk_sz, buf_sz;
1691 	ssize_t reply_sz;
1692 	int i, j, k = 0;
1693 
1694 	num_txq = vport->num_txq + vport->num_complq;
1695 	num_rxq = vport->num_rxq + vport->num_bufq;
1696 	num_q = num_txq + num_rxq;
1697 	buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
1698 	qc = kzalloc(buf_sz, GFP_KERNEL);
1699 	if (!qc)
1700 		return -ENOMEM;
1701 
1702 	for (i = 0; i < vport->num_txq_grp; i++) {
1703 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1704 
1705 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1706 			qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1707 			qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1708 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1709 		}
1710 	}
1711 	if (vport->num_txq != k)
1712 		return -EINVAL;
1713 
1714 	if (!idpf_is_queue_model_split(vport->txq_model))
1715 		goto setup_rx;
1716 
1717 	for (i = 0; i < vport->num_txq_grp; i++, k++) {
1718 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1719 
1720 		qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1721 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1722 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1723 	}
1724 	if (vport->num_complq != (k - vport->num_txq))
1725 		return -EINVAL;
1726 
1727 setup_rx:
1728 	for (i = 0; i < vport->num_rxq_grp; i++) {
1729 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1730 
1731 		if (idpf_is_queue_model_split(vport->rxq_model))
1732 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1733 		else
1734 			num_rxq = rx_qgrp->singleq.num_rxq;
1735 
1736 		for (j = 0; j < num_rxq; j++, k++) {
1737 			if (idpf_is_queue_model_split(vport->rxq_model)) {
1738 				qc[k].start_queue_id =
1739 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
1740 				qc[k].type =
1741 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1742 			} else {
1743 				qc[k].start_queue_id =
1744 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
1745 				qc[k].type =
1746 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1747 			}
1748 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1749 		}
1750 	}
1751 	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
1752 		return -EINVAL;
1753 
1754 	if (!idpf_is_queue_model_split(vport->rxq_model))
1755 		goto send_msg;
1756 
1757 	for (i = 0; i < vport->num_rxq_grp; i++) {
1758 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1759 
1760 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1761 			const struct idpf_buf_queue *q;
1762 
1763 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1764 			qc[k].type =
1765 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1766 			qc[k].start_queue_id = cpu_to_le32(q->q_id);
1767 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1768 		}
1769 	}
1770 	if (vport->num_bufq != k - (vport->num_txq +
1771 				    vport->num_complq +
1772 				    vport->num_rxq))
1773 		return -EINVAL;
1774 
1775 send_msg:
1776 	/* Chunk up the queue info into multiple messages */
1777 	config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
1778 	chunk_sz = sizeof(struct virtchnl2_queue_chunk);
1779 
1780 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1781 			   num_q);
1782 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1783 
1784 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1785 	eq = kzalloc(buf_sz, GFP_KERNEL);
1786 	if (!eq)
1787 		return -ENOMEM;
1788 
1789 	if (ena) {
1790 		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
1791 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1792 	} else {
1793 		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
1794 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1795 	}
1796 
1797 	for (i = 0, k = 0; i < num_msgs; i++) {
1798 		memset(eq, 0, buf_sz);
1799 		eq->vport_id = cpu_to_le32(vport->vport_id);
1800 		eq->chunks.num_chunks = cpu_to_le16(num_chunks);
1801 		qcs = &eq->chunks;
1802 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
1803 
1804 		xn_params.send_buf.iov_base = eq;
1805 		xn_params.send_buf.iov_len = buf_sz;
1806 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1807 		if (reply_sz < 0)
1808 			return reply_sz;
1809 
1810 		k += num_chunks;
1811 		num_q -= num_chunks;
1812 		num_chunks = min(num_chunks, num_q);
1813 		/* Recalculate buffer size */
1814 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1815 	}
1816 
1817 	return 0;
1818 }
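
/*
 * Illustrative sketch, not part of the driver: the timeout choice made
 * above. Enable runs on the bring-up path and can afford the default
 * transaction timeout, while disable runs on teardown/reset paths that
 * should not stall for long; treating that as the rationale is an
 * assumption, and the helper name is hypothetical.
 */
static u32 idpf_example_xn_timeout(bool bringing_up)
{
	/* short timeout on the way down, default on the way up */
	return bringing_up ? IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC :
			     IDPF_VC_XN_MIN_TIMEOUT_MSEC;
}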
1819 
1820 /**
1821  * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1822  * vector message
1823  * @vport: virtual port data structure
1824  * @map: true for map and false for unmap
1825  *
1826  * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
1827  * negative on failure.
1828  */
1829 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
1830 {
1831 	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
1832 	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
1833 	struct idpf_vc_xn_params xn_params = {};
1834 	u32 config_sz, chunk_sz, buf_sz;
1835 	u32 num_msgs, num_chunks, num_q;
1836 	ssize_t reply_sz;
1837 	int i, j, k = 0;
1838 
1839 	num_q = vport->num_txq + vport->num_rxq;
1840 
1841 	buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
1842 	vqv = kzalloc(buf_sz, GFP_KERNEL);
1843 	if (!vqv)
1844 		return -ENOMEM;
1845 
1846 	for (i = 0; i < vport->num_txq_grp; i++) {
1847 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1848 
1849 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1850 			vqv[k].queue_type =
1851 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1852 			vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1853 
1854 			if (idpf_is_queue_model_split(vport->txq_model)) {
1855 				vqv[k].vector_id =
1856 				cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
1857 				vqv[k].itr_idx =
1858 				cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
1859 			} else {
1860 				vqv[k].vector_id =
1861 				cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
1862 				vqv[k].itr_idx =
1863 				cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
1864 			}
1865 		}
1866 	}
1867 
1868 	if (vport->num_txq != k)
1869 		return -EINVAL;
1870 
1871 	for (i = 0; i < vport->num_rxq_grp; i++) {
1872 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1873 		u16 num_rxq;
1874 
1875 		if (idpf_is_queue_model_split(vport->rxq_model))
1876 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1877 		else
1878 			num_rxq = rx_qgrp->singleq.num_rxq;
1879 
1880 		for (j = 0; j < num_rxq; j++, k++) {
1881 			struct idpf_rx_queue *rxq;
1882 
1883 			if (idpf_is_queue_model_split(vport->rxq_model))
1884 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1885 			else
1886 				rxq = rx_qgrp->singleq.rxqs[j];
1887 
1888 			vqv[k].queue_type =
1889 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1890 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
1891 			vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
1892 			vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
1893 		}
1894 	}
1895 
1896 	if (idpf_is_queue_model_split(vport->txq_model)) {
1897 		if (vport->num_rxq != k - vport->num_complq)
1898 			return -EINVAL;
1899 	} else {
1900 		if (vport->num_rxq != k - vport->num_txq)
1901 			return -EINVAL;
1902 	}
1903 
1904 	/* Chunk up the vector info into multiple messages */
1905 	config_sz = sizeof(struct virtchnl2_queue_vector_maps);
1906 	chunk_sz = sizeof(struct virtchnl2_queue_vector);
1907 
1908 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1909 			   num_q);
1910 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1911 
1912 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
1913 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
1914 	if (!vqvm)
1915 		return -ENOMEM;
1916 
1917 	if (map) {
1918 		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
1919 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1920 	} else {
1921 		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
1922 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1923 	}
1924 
1925 	for (i = 0, k = 0; i < num_msgs; i++) {
1926 		memset(vqvm, 0, buf_sz);
1927 		xn_params.send_buf.iov_base = vqvm;
1928 		xn_params.send_buf.iov_len = buf_sz;
1929 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
1930 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
1931 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
1932 
1933 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1934 		if (reply_sz < 0)
1935 			return reply_sz;
1936 
1937 		k += num_chunks;
1938 		num_q -= num_chunks;
1939 		num_chunks = min(num_chunks, num_q);
1940 		/* Recalculate buffer size */
1941 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
1942 	}
1943 
1944 	return 0;
1945 }
1946 
1947 /**
1948  * idpf_send_enable_queues_msg - send enable queues virtchnl message
1949  * @vport: Virtual port private data structure
1950  *
1951  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
1952  * failure.
1953  */
1954 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
1955 {
1956 	return idpf_send_ena_dis_queues_msg(vport, true);
1957 }
1958 
1959 /**
1960  * idpf_send_disable_queues_msg - send disable queues virtchnl message
1961  * @vport: Virtual port private data structure
1962  *
1963  * Will send disable queues virtchnl message.  Returns 0 on success, negative
1964  * on failure.
1965  */
1966 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
1967 {
1968 	int err, i;
1969 
1970 	err = idpf_send_ena_dis_queues_msg(vport, false);
1971 	if (err)
1972 		return err;
1973 
1974 	/* switch to poll mode as interrupts will be disabled after disable
1975 	 * queues virtchnl message is sent
1976 	 */
1977 	for (i = 0; i < vport->num_txq; i++)
1978 		idpf_queue_set(POLL_MODE, vport->txqs[i]);
1979 
1980 	/* schedule the napi to receive all the marker packets */
1981 	local_bh_disable();
1982 	for (i = 0; i < vport->num_q_vectors; i++)
1983 		napi_schedule(&vport->q_vectors[i].napi);
1984 	local_bh_enable();
1985 
1986 	return idpf_wait_for_marker_event(vport);
1987 }
1988 
1989 /**
1990  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
1991  * structure
1992  * @dchunks: Destination chunks to store data to
1993  * @schunks: Source chunks to copy data from
1994  * @num_chunks: number of chunks to copy
1995  */
1996 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
1997 					     struct virtchnl2_queue_reg_chunk *schunks,
1998 					     u16 num_chunks)
1999 {
2000 	u16 i;
2001 
2002 	for (i = 0; i < num_chunks; i++) {
2003 		dchunks[i].type = schunks[i].type;
2004 		dchunks[i].start_queue_id = schunks[i].start_queue_id;
2005 		dchunks[i].num_queues = schunks[i].num_queues;
2006 	}
2007 }
2008 
2009 /**
2010  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2011  * @vport: Virtual port private data structure
2012  *
2013  * Will send delete queues virtchnl message. Returns 0 on success, negative on
2014  * failure.
2015  */
2016 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
2017 {
2018 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
2019 	struct virtchnl2_create_vport *vport_params;
2020 	struct virtchnl2_queue_reg_chunks *chunks;
2021 	struct idpf_vc_xn_params xn_params = {};
2022 	struct idpf_vport_config *vport_config;
2023 	u16 vport_idx = vport->idx;
2024 	ssize_t reply_sz;
2025 	u16 num_chunks;
2026 	int buf_size;
2027 
2028 	vport_config = vport->adapter->vport_config[vport_idx];
2029 	if (vport_config->req_qs_chunks) {
2030 		chunks = &vport_config->req_qs_chunks->chunks;
2031 	} else {
2032 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
2033 		chunks = &vport_params->chunks;
2034 	}
2035 
2036 	num_chunks = le16_to_cpu(chunks->num_chunks);
2037 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2038 
2039 	eq = kzalloc(buf_size, GFP_KERNEL);
2040 	if (!eq)
2041 		return -ENOMEM;
2042 
2043 	eq->vport_id = cpu_to_le32(vport->vport_id);
2044 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2045 
2046 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2047 					 num_chunks);
2048 
2049 	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
2050 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2051 	xn_params.send_buf.iov_base = eq;
2052 	xn_params.send_buf.iov_len = buf_size;
2053 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2054 
2055 	return reply_sz < 0 ? reply_sz : 0;
2056 }
2057 
2058 /**
2059  * idpf_send_config_queues_msg - Send config queues virtchnl message
2060  * @vport: Virtual port private data structure
2061  *
2062  * Will send config queues virtchnl message. Returns 0 on success, negative on
2063  * failure.
2064  */
2065 int idpf_send_config_queues_msg(struct idpf_vport *vport)
2066 {
2067 	int err;
2068 
2069 	err = idpf_send_config_tx_queues_msg(vport);
2070 	if (err)
2071 		return err;
2072 
2073 	return idpf_send_config_rx_queues_msg(vport);
2074 }
2075 
2076 /**
2077  * idpf_send_add_queues_msg - Send virtchnl add queues message
2078  * @vport: Virtual port private data structure
2079  * @num_tx_q: number of transmit queues
2080  * @num_complq: number of transmit completion queues
2081  * @num_rx_q: number of receive queues
2082  * @num_rx_bufq: number of receive buffer queues
2083  *
2084  * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2085  * we should not change any fields within vport itself in this function.
2086  */
2087 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
2088 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
2089 {
2090 	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
2091 	struct idpf_vc_xn_params xn_params = {};
2092 	struct idpf_vport_config *vport_config;
2093 	struct virtchnl2_add_queues aq = {};
2094 	u16 vport_idx = vport->idx;
2095 	ssize_t reply_sz;
2096 	int size;
2097 
2098 	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2099 	if (!vc_msg)
2100 		return -ENOMEM;
2101 
2102 	vport_config = vport->adapter->vport_config[vport_idx];
2103 	kfree(vport_config->req_qs_chunks);
2104 	vport_config->req_qs_chunks = NULL;
2105 
2106 	aq.vport_id = cpu_to_le32(vport->vport_id);
2107 	aq.num_tx_q = cpu_to_le16(num_tx_q);
2108 	aq.num_tx_complq = cpu_to_le16(num_complq);
2109 	aq.num_rx_q = cpu_to_le16(num_rx_q);
2110 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
2111 
2112 	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
2113 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2114 	xn_params.send_buf.iov_base = &aq;
2115 	xn_params.send_buf.iov_len = sizeof(aq);
2116 	xn_params.recv_buf.iov_base = vc_msg;
2117 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2118 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2119 	if (reply_sz < 0)
2120 		return reply_sz;
2121 
2122 	/* compare vc_msg num queues with vport num queues */
2123 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
2124 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
2125 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
2126 	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
2127 		return -EINVAL;
2128 
2129 	size = struct_size(vc_msg, chunks.chunks,
2130 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2131 	if (reply_sz < size)
2132 		return -EIO;
2133 
2134 	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2135 	if (!vport_config->req_qs_chunks)
2136 		return -ENOMEM;
2137 
2138 	return 0;
2139 }
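
/*
 * Illustrative sketch, not part of the driver: the reply-validation
 * pattern used above. A reply carrying a flexible array is trusted
 * only once the received length covers the header plus the element
 * count the header itself claims; struct_size() does that arithmetic
 * with overflow checking. The helper name is hypothetical.
 */
static int idpf_example_check_reply(const struct virtchnl2_add_queues *msg,
				    ssize_t reply_sz)
{
	ssize_t need = struct_size(msg, chunks.chunks,
				   le16_to_cpu(msg->chunks.num_chunks));

	/* a shorter reply means the claimed chunk count cannot be real */
	return reply_sz < need ? -EIO : 0;
}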
2140 
2141 /**
2142  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2143  * @adapter: Driver specific private structure
2144  * @num_vectors: number of vectors to be allocated
2145  *
2146  * Returns 0 on success, negative on failure.
2147  */
2148 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2149 {
2150 	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
2151 	struct idpf_vc_xn_params xn_params = {};
2152 	struct virtchnl2_alloc_vectors ac = {};
2153 	ssize_t reply_sz;
2154 	u16 num_vchunks;
2155 	int size;
2156 
2157 	ac.num_vectors = cpu_to_le16(num_vectors);
2158 
2159 	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2160 	if (!rcvd_vec)
2161 		return -ENOMEM;
2162 
2163 	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
2164 	xn_params.send_buf.iov_base = &ac;
2165 	xn_params.send_buf.iov_len = sizeof(ac);
2166 	xn_params.recv_buf.iov_base = rcvd_vec;
2167 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2168 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2169 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2170 	if (reply_sz < 0)
2171 		return reply_sz;
2172 
2173 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2174 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
2175 	if (reply_sz < size)
2176 		return -EIO;
2177 
2178 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
2179 		return -EINVAL;
2180 
2181 	kfree(adapter->req_vec_chunks);
2182 	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2183 	if (!adapter->req_vec_chunks)
2184 		return -ENOMEM;
2185 
2186 	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
2187 		kfree(adapter->req_vec_chunks);
2188 		adapter->req_vec_chunks = NULL;
2189 		return -EINVAL;
2190 	}
2191 
2192 	return 0;
2193 }
2194 
2195 /**
2196  * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2197  * @adapter: Driver specific private structure
2198  *
2199  * Returns 0 on success, negative on failure.
2200  */
2201 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2202 {
2203 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2204 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2205 	struct idpf_vc_xn_params xn_params = {};
2206 	ssize_t reply_sz;
2207 	int buf_size;
2208 
2209 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2210 
2211 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2212 	xn_params.send_buf.iov_base = vcs;
2213 	xn_params.send_buf.iov_len = buf_size;
2214 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2215 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2216 	if (reply_sz < 0)
2217 		return reply_sz;
2218 
2219 	kfree(adapter->req_vec_chunks);
2220 	adapter->req_vec_chunks = NULL;
2221 
2222 	return 0;
2223 }
2224 
2225 /**
2226  * idpf_get_max_vfs - Get max number of VFs supported
2227  * @adapter: Driver specific private structure
2228  *
2229  * Returns max number of VFs
2230  */
2231 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2232 {
2233 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2234 }
2235 
2236 /**
2237  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2238  * @adapter: Driver specific private structure
2239  * @num_vfs: number of virtual functions to be created
2240  *
2241  * Returns 0 on success, negative on failure.
2242  */
2243 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2244 {
2245 	struct virtchnl2_sriov_vfs_info svi = {};
2246 	struct idpf_vc_xn_params xn_params = {};
2247 	ssize_t reply_sz;
2248 
2249 	svi.num_vfs = cpu_to_le16(num_vfs);
2250 	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
2251 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2252 	xn_params.send_buf.iov_base = &svi;
2253 	xn_params.send_buf.iov_len = sizeof(svi);
2254 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2255 
2256 	return reply_sz < 0 ? reply_sz : 0;
2257 }
2258 
2259 /**
2260  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2261  * @vport: vport to get stats for
2262  *
2263  * Returns 0 on success, negative on failure.
2264  */
2265 int idpf_send_get_stats_msg(struct idpf_vport *vport)
2266 {
2267 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2268 	struct rtnl_link_stats64 *netstats = &np->netstats;
2269 	struct virtchnl2_vport_stats stats_msg = {};
2270 	struct idpf_vc_xn_params xn_params = {};
2271 	ssize_t reply_sz;
2272 
2274 	/* Don't send get_stats message if the link is down */
2275 	if (np->state <= __IDPF_VPORT_DOWN)
2276 		return 0;
2277 
2278 	stats_msg.vport_id = cpu_to_le32(vport->vport_id);
2279 
2280 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2281 	xn_params.send_buf.iov_base = &stats_msg;
2282 	xn_params.send_buf.iov_len = sizeof(stats_msg);
2283 	xn_params.recv_buf = xn_params.send_buf;
2284 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2285 
2286 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2287 	if (reply_sz < 0)
2288 		return reply_sz;
2289 	if (reply_sz < sizeof(stats_msg))
2290 		return -EIO;
2291 
2292 	spin_lock_bh(&np->stats_lock);
2293 
2294 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2295 			       le64_to_cpu(stats_msg.rx_multicast) +
2296 			       le64_to_cpu(stats_msg.rx_broadcast);
2297 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2298 			       le64_to_cpu(stats_msg.tx_multicast) +
2299 			       le64_to_cpu(stats_msg.tx_broadcast);
2300 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2301 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2302 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2303 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2304 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2305 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2306 
2307 	vport->port_stats.vport_stats = stats_msg;
2308 
2309 	spin_unlock_bh(&np->stats_lock);
2310 
2311 	return 0;
2312 }
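
/*
 * Illustrative sketch, not part of this file: roughly how the netdev
 * side consumes the counters filled in above. Copying under the same
 * stats_lock that idpf_send_get_stats_msg() takes means a reader never
 * sees a half-updated snapshot. The function name is hypothetical.
 */
static void idpf_example_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	spin_lock_bh(&np->stats_lock);
	*stats = np->netstats;	/* one coherent snapshot */
	spin_unlock_bh(&np->stats_lock);
}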
2313 
2314 /**
2315  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2316  * @vport: virtual port data structure
2317  * @get: flag to get or set RSS lookup table
2318  *
2319  * Returns 0 on success, negative on failure.
2320  */
2321 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2322 {
2323 	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
2324 	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
2325 	struct idpf_vc_xn_params xn_params = {};
2326 	struct idpf_rss_data *rss_data;
2327 	int buf_size, lut_buf_size;
2328 	ssize_t reply_sz;
2329 	int i;
2330 
2331 	rss_data =
2332 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2333 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2334 	rl = kzalloc(buf_size, GFP_KERNEL);
2335 	if (!rl)
2336 		return -ENOMEM;
2337 
2338 	rl->vport_id = cpu_to_le32(vport->vport_id);
2339 
2340 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2341 	xn_params.send_buf.iov_base = rl;
2342 	xn_params.send_buf.iov_len = buf_size;
2343 
2344 	if (get) {
2345 		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2346 		if (!recv_rl)
2347 			return -ENOMEM;
2348 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
2349 		xn_params.recv_buf.iov_base = recv_rl;
2350 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2351 	} else {
2352 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2353 		for (i = 0; i < rss_data->rss_lut_size; i++)
2354 			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
2355 
2356 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
2357 	}
2358 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2359 	if (reply_sz < 0)
2360 		return reply_sz;
2361 	if (!get)
2362 		return 0;
2363 	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
2364 		return -EIO;
2365 
2366 	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
2367 	if (reply_sz < lut_buf_size)
2368 		return -EIO;
2369 
2370 	/* size didn't change, we can reuse existing lut buf */
2371 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2372 		goto do_memcpy;
2373 
2374 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2375 	kfree(rss_data->rss_lut);
2376 
2377 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2378 	if (!rss_data->rss_lut) {
2379 		rss_data->rss_lut_size = 0;
2380 		return -ENOMEM;
2381 	}
2382 
2383 do_memcpy:
2384 	memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
2385 
2386 	return 0;
2387 }
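
/*
 * Illustrative sketch, not part of the driver: what the LUT managed
 * above means on the datapath. The hardware indexes the table with a
 * packet's RSS hash and the stored entry selects the receive queue;
 * this is generic RSS behaviour shown purely for orientation.
 */
static u32 idpf_example_rss_queue(const u32 *rss_lut, u16 lut_size, u32 hash)
{
	/* the hash is folded into the table, the entry picks the queue */
	return rss_lut[hash % lut_size];
}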
2388 
2389 /**
2390  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
2391  * @vport: virtual port data structure
2392  * @get: flag to get or set RSS key
2393  *
2394  * Returns 0 on success, negative on failure.
2395  */
2396 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2397 {
2398 	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
2399 	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
2400 	struct idpf_vc_xn_params xn_params = {};
2401 	struct idpf_rss_data *rss_data;
2402 	ssize_t reply_sz;
2403 	int i, buf_size;
2404 	u16 key_size;
2405 
2406 	rss_data =
2407 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2408 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2409 	rk = kzalloc(buf_size, GFP_KERNEL);
2410 	if (!rk)
2411 		return -ENOMEM;
2412 
2413 	rk->vport_id = cpu_to_le32(vport->vport_id);
2414 	xn_params.send_buf.iov_base = rk;
2415 	xn_params.send_buf.iov_len = buf_size;
2416 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2417 	if (get) {
2418 		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2419 		if (!recv_rk)
2420 			return -ENOMEM;
2421 
2422 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
2423 		xn_params.recv_buf.iov_base = recv_rk;
2424 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2425 	} else {
2426 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2427 		for (i = 0; i < rss_data->rss_key_size; i++)
2428 			rk->key_flex[i] = rss_data->rss_key[i];
2429 
2430 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
2431 	}
2432 
2433 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2434 	if (reply_sz < 0)
2435 		return reply_sz;
2436 	if (!get)
2437 		return 0;
2438 	if (reply_sz < sizeof(struct virtchnl2_rss_key))
2439 		return -EIO;
2440 
2441 	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
2442 			 le16_to_cpu(recv_rk->key_len));
2443 	if (reply_sz < key_size)
2444 		return -EIO;
2445 
2446 	/* key len didn't change, reuse existing buf */
2447 	if (rss_data->rss_key_size == key_size)
2448 		goto do_memcpy;
2449 
2450 	rss_data->rss_key_size = key_size;
2451 	kfree(rss_data->rss_key);
2452 	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
2453 	if (!rss_data->rss_key) {
2454 		rss_data->rss_key_size = 0;
2455 		return -ENOMEM;
2456 	}
2457 
2458 do_memcpy:
2459 	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
2460 
2461 	return 0;
2462 }
2463 
2464 /**
2465  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2466  * @ptype: ptype lookup table
2467  * @pstate: state machine for ptype lookup table
2468  * @ipv4: ipv4 or ipv6
2469  * @frag: fragmentation allowed
2470  *
2471  */
2472 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
2473 				   struct idpf_ptype_state *pstate,
2474 				   bool ipv4, bool frag)
2475 {
2476 	if (!pstate->outer_ip || !pstate->outer_frag) {
2477 		pstate->outer_ip = true;
2478 
2479 		if (ipv4)
2480 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
2481 		else
2482 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
2483 
2484 		if (frag) {
2485 			ptype->outer_frag = LIBETH_RX_PT_FRAG;
2486 			pstate->outer_frag = true;
2487 		}
2488 	} else {
2489 		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
2490 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
2491 
2492 		if (ipv4)
2493 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
2494 		else
2495 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
2496 
2497 		if (frag)
2498 			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
2499 	}
2500 }
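
/*
 * Illustrative sketch, not part of the driver: a contrived call
 * sequence showing when idpf_fill_ptype_lookup() reaches its tunnel
 * branch. That branch is taken only once both pstate->outer_ip and
 * pstate->outer_frag are set, so a later IP header in the proto list
 * is what gets recorded as IP-in-IP.
 */
static void idpf_example_ip_in_ip(struct libeth_rx_pt *pt)
{
	struct idpf_ptype_state st = { };

	idpf_fill_ptype_lookup(pt, &st, true, false);	/* outer IPv4 */
	idpf_fill_ptype_lookup(pt, &st, true, true);	/* outer IPv4 frag */
	idpf_fill_ptype_lookup(pt, &st, false, false);	/* tunnelled IPv6 */

	/* now pt->tunnel_type == LIBETH_RX_PT_TUNNEL_IP_IP and
	 * pt->tunnel_end_prot == LIBETH_RX_PT_TUNNEL_END_IPV6
	 */
}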
2501 
2502 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
2503 {
2504 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2505 	    ptype->inner_prot)
2506 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
2507 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2508 		 ptype->outer_ip)
2509 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
2510 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
2511 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
2512 	else
2513 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
2514 
2515 	libeth_rx_pt_gen_hash_type(ptype);
2516 }
2517 
2518 /**
2519  * idpf_send_get_rx_ptype_msg - Send virtchnl get RX ptype info message
2520  * @vport: virtual port data structure
2521  *
2522  * Returns 0 on success, negative on failure.
2523  */
2524 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
2525 {
2526 	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
2527 	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
2528 	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
2529 	int max_ptype, ptypes_recvd = 0, ptype_offset;
2530 	struct idpf_adapter *adapter = vport->adapter;
2531 	struct idpf_vc_xn_params xn_params = {};
2532 	u16 next_ptype_id = 0;
2533 	ssize_t reply_sz;
2534 	int i, j, k;
2535 
2536 	if (vport->rx_ptype_lkup)
2537 		return 0;
2538 
2539 	if (idpf_is_queue_model_split(vport->rxq_model))
2540 		max_ptype = IDPF_RX_MAX_PTYPE;
2541 	else
2542 		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
2543 
2544 	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
2545 	if (!ptype_lkup)
2546 		return -ENOMEM;
2547 
2548 	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
2549 	if (!get_ptype_info)
2550 		return -ENOMEM;
2551 
2552 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2553 	if (!ptype_info)
2554 		return -ENOMEM;
2555 
2556 	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
2557 	xn_params.send_buf.iov_base = get_ptype_info;
2558 	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
2559 	xn_params.recv_buf.iov_base = ptype_info;
2560 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2561 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2562 
2563 	while (next_ptype_id < max_ptype) {
2564 		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
2565 
2566 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2567 			get_ptype_info->num_ptypes =
2568 				cpu_to_le16(max_ptype - next_ptype_id);
2569 		else
2570 			get_ptype_info->num_ptypes =
2571 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2572 
2573 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2574 		if (reply_sz < 0)
2575 			return reply_sz;
2576 
2577 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2578 		if (ptypes_recvd > max_ptype)
2579 			return -EINVAL;
2580 
2581 		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
2582 				le16_to_cpu(get_ptype_info->num_ptypes);
2583 
2584 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2585 
2586 		for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2587 			struct idpf_ptype_state pstate = { };
2588 			struct virtchnl2_ptype *ptype;
2589 			u16 id;
2590 
2591 			ptype = (struct virtchnl2_ptype *)
2592 					((u8 *)ptype_info + ptype_offset);
2593 
2594 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2595 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
2596 				return -EINVAL;
2597 
2598 			/* 0xFFFF indicates end of ptypes */
2599 			if (le16_to_cpu(ptype->ptype_id_10) ==
2600 							IDPF_INVALID_PTYPE_ID)
2601 				goto out;
2602 
2603 			if (idpf_is_queue_model_split(vport->rxq_model))
2604 				k = le16_to_cpu(ptype->ptype_id_10);
2605 			else
2606 				k = ptype->ptype_id_8;
2607 
2608 			for (j = 0; j < ptype->proto_id_count; j++) {
2609 				id = le16_to_cpu(ptype->proto_id[j]);
2610 				switch (id) {
2611 				case VIRTCHNL2_PROTO_HDR_GRE:
2612 					if (pstate.tunnel_state ==
2613 							IDPF_PTYPE_TUNNEL_IP) {
2614 						ptype_lkup[k].tunnel_type =
2615 						LIBETH_RX_PT_TUNNEL_IP_GRENAT;
2616 						pstate.tunnel_state |=
2617 						IDPF_PTYPE_TUNNEL_IP_GRENAT;
2618 					}
2619 					break;
2620 				case VIRTCHNL2_PROTO_HDR_MAC:
2621 					ptype_lkup[k].outer_ip =
2622 						LIBETH_RX_PT_OUTER_L2;
2623 					if (pstate.tunnel_state ==
2624 							IDPF_TUN_IP_GRE) {
2625 						ptype_lkup[k].tunnel_type =
2626 						LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
2627 						pstate.tunnel_state |=
2628 						IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2629 					}
2630 					break;
2631 				case VIRTCHNL2_PROTO_HDR_IPV4:
2632 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2633 							       &pstate, true,
2634 							       false);
2635 					break;
2636 				case VIRTCHNL2_PROTO_HDR_IPV6:
2637 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2638 							       &pstate, false,
2639 							       false);
2640 					break;
2641 				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2642 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2643 							       &pstate, true,
2644 							       true);
2645 					break;
2646 				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2647 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2648 							       &pstate, false,
2649 							       true);
2650 					break;
2651 				case VIRTCHNL2_PROTO_HDR_UDP:
2652 					ptype_lkup[k].inner_prot =
2653 					LIBETH_RX_PT_INNER_UDP;
2654 					break;
2655 				case VIRTCHNL2_PROTO_HDR_TCP:
2656 					ptype_lkup[k].inner_prot =
2657 					LIBETH_RX_PT_INNER_TCP;
2658 					break;
2659 				case VIRTCHNL2_PROTO_HDR_SCTP:
2660 					ptype_lkup[k].inner_prot =
2661 					LIBETH_RX_PT_INNER_SCTP;
2662 					break;
2663 				case VIRTCHNL2_PROTO_HDR_ICMP:
2664 					ptype_lkup[k].inner_prot =
2665 					LIBETH_RX_PT_INNER_ICMP;
2666 					break;
2667 				case VIRTCHNL2_PROTO_HDR_PAY:
2668 					ptype_lkup[k].payload_layer =
2669 						LIBETH_RX_PT_PAYLOAD_L2;
2670 					break;
2671 				case VIRTCHNL2_PROTO_HDR_ICMPV6:
2672 				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2673 				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2674 				case VIRTCHNL2_PROTO_HDR_POST_MAC:
2675 				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2676 				case VIRTCHNL2_PROTO_HDR_SVLAN:
2677 				case VIRTCHNL2_PROTO_HDR_CVLAN:
2678 				case VIRTCHNL2_PROTO_HDR_MPLS:
2679 				case VIRTCHNL2_PROTO_HDR_MMPLS:
2680 				case VIRTCHNL2_PROTO_HDR_PTP:
2681 				case VIRTCHNL2_PROTO_HDR_CTRL:
2682 				case VIRTCHNL2_PROTO_HDR_LLDP:
2683 				case VIRTCHNL2_PROTO_HDR_ARP:
2684 				case VIRTCHNL2_PROTO_HDR_ECP:
2685 				case VIRTCHNL2_PROTO_HDR_EAPOL:
2686 				case VIRTCHNL2_PROTO_HDR_PPPOD:
2687 				case VIRTCHNL2_PROTO_HDR_PPPOE:
2688 				case VIRTCHNL2_PROTO_HDR_IGMP:
2689 				case VIRTCHNL2_PROTO_HDR_AH:
2690 				case VIRTCHNL2_PROTO_HDR_ESP:
2691 				case VIRTCHNL2_PROTO_HDR_IKE:
2692 				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2693 				case VIRTCHNL2_PROTO_HDR_L2TPV2:
2694 				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2695 				case VIRTCHNL2_PROTO_HDR_L2TPV3:
2696 				case VIRTCHNL2_PROTO_HDR_GTP:
2697 				case VIRTCHNL2_PROTO_HDR_GTP_EH:
2698 				case VIRTCHNL2_PROTO_HDR_GTPCV2:
2699 				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2700 				case VIRTCHNL2_PROTO_HDR_GTPU:
2701 				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2702 				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2703 				case VIRTCHNL2_PROTO_HDR_ECPRI:
2704 				case VIRTCHNL2_PROTO_HDR_VRRP:
2705 				case VIRTCHNL2_PROTO_HDR_OSPF:
2706 				case VIRTCHNL2_PROTO_HDR_TUN:
2707 				case VIRTCHNL2_PROTO_HDR_NVGRE:
2708 				case VIRTCHNL2_PROTO_HDR_VXLAN:
2709 				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2710 				case VIRTCHNL2_PROTO_HDR_GENEVE:
2711 				case VIRTCHNL2_PROTO_HDR_NSH:
2712 				case VIRTCHNL2_PROTO_HDR_QUIC:
2713 				case VIRTCHNL2_PROTO_HDR_PFCP:
2714 				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2715 				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2716 				case VIRTCHNL2_PROTO_HDR_RTP:
2717 				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2718 					break;
2719 				default:
2720 					break;
2721 				}
2722 			}
2723 
2724 			idpf_finalize_ptype_lookup(&ptype_lkup[k]);
2725 		}
2726 	}
2727 
2728 out:
2729 	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
2730 
2731 	return 0;
2732 }
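
/*
 * Illustrative sketch, not part of the driver: the record walk used by
 * idpf_send_get_rx_ptype_msg() above. The reply is a fixed header
 * followed by packed, variable-size ptype records, so the cursor must
 * advance by each record's own size rather than a constant stride.
 * The helper name is hypothetical.
 */
static int idpf_example_walk_ptypes(const void *buf, u16 num_ptypes)
{
	int off = IDPF_RX_PTYPE_HDR_SZ;	/* skip the fixed reply header */
	u16 i;

	for (i = 0; i < num_ptypes; i++) {
		const struct virtchnl2_ptype *ptype = buf + off;

		/* proto_id_count decides how big this record is */
		off += IDPF_GET_PTYPE_SIZE(ptype);
		if (off > IDPF_CTLQ_MAX_BUF_LEN)
			return -EINVAL;	/* record overruns the buffer */
	}

	return 0;
}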
2733 
2734 /**
2735  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2736  *				    message
2737  * @vport: virtual port data structure
2738  *
2739  * Returns 0 on success, negative on failure.
2740  */
2741 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2742 {
2743 	struct idpf_vc_xn_params xn_params = {};
2744 	struct virtchnl2_loopback loopback;
2745 	ssize_t reply_sz;
2746 
2747 	loopback.vport_id = cpu_to_le32(vport->vport_id);
2748 	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2749 
2750 	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
2751 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2752 	xn_params.send_buf.iov_base = &loopback;
2753 	xn_params.send_buf.iov_len = sizeof(loopback);
2754 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2755 
2756 	return reply_sz < 0 ? reply_sz : 0;
2757 }
2758 
2759 /**
2760  * idpf_find_ctlq - Given a type and id, find ctlq info
2761  * @hw: hardware struct
2762  * @type: type of ctrlq to find
2763  * @id: ctlq id to find
2764  *
2765  * Returns pointer to found ctlq info struct, NULL otherwise.
2766  */
2767 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2768 					     enum idpf_ctlq_type type, int id)
2769 {
2770 	struct idpf_ctlq_info *cq, *tmp;
2771 
2772 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
2773 		if (cq->q_id == id && cq->cq_type == type)
2774 			return cq;
2775 
2776 	return NULL;
2777 }
2778 
2779 /**
2780  * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
2781  * @adapter: adapter info struct
2782  *
2783  * Returns 0 on success, negative otherwise
2784  */
2785 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2786 {
2787 	struct idpf_ctlq_create_info ctlq_info[] = {
2788 		{
2789 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2790 			.id = IDPF_DFLT_MBX_ID,
2791 			.len = IDPF_DFLT_MBX_Q_LEN,
2792 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2793 		},
2794 		{
2795 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2796 			.id = IDPF_DFLT_MBX_ID,
2797 			.len = IDPF_DFLT_MBX_Q_LEN,
2798 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2799 		}
2800 	};
2801 	struct idpf_hw *hw = &adapter->hw;
2802 	int err;
2803 
2804 	adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
2805 
2806 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2807 	if (err)
2808 		return err;
2809 
2810 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2811 				 IDPF_DFLT_MBX_ID);
2812 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2813 				 IDPF_DFLT_MBX_ID);
2814 
2815 	if (!hw->asq || !hw->arq) {
2816 		idpf_ctlq_deinit(hw);
2817 
2818 		return -ENOENT;
2819 	}
2820 
2821 	adapter->state = __IDPF_VER_CHECK;
2822 
2823 	return 0;
2824 }
2825 
2826 /**
2827  * idpf_deinit_dflt_mbx - Free up the ctlqs that were set up
2828  * @adapter: Driver specific private data structure
2829  */
2830 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
2831 {
2832 	if (adapter->hw.arq && adapter->hw.asq) {
2833 		idpf_mb_clean(adapter);
2834 		idpf_ctlq_deinit(&adapter->hw);
2835 	}
2836 	adapter->hw.arq = NULL;
2837 	adapter->hw.asq = NULL;
2838 }
2839 
2840 /**
2841  * idpf_vport_params_buf_rel - Release memory for mailbox resources
2842  * @adapter: Driver specific private data structure
2843  *
2844  * Will release the memory holding the vport parameters received over the mailbox
2845  */
2846 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
2847 {
2848 	kfree(adapter->vport_params_recvd);
2849 	adapter->vport_params_recvd = NULL;
2850 	kfree(adapter->vport_params_reqd);
2851 	adapter->vport_params_reqd = NULL;
2852 	kfree(adapter->vport_ids);
2853 	adapter->vport_ids = NULL;
2854 }
2855 
2856 /**
2857  * idpf_vport_params_buf_alloc - Allocate memory for mailbox resources
2858  * @adapter: Driver specific private data structure
2859  *
2860  * Will allocate memory to hold the vport parameters received over the mailbox
2861  */
2862 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
2863 {
2864 	u16 num_max_vports = idpf_get_max_vports(adapter);
2865 
2866 	adapter->vport_params_reqd = kcalloc(num_max_vports,
2867 					     sizeof(*adapter->vport_params_reqd),
2868 					     GFP_KERNEL);
2869 	if (!adapter->vport_params_reqd)
2870 		return -ENOMEM;
2871 
2872 	adapter->vport_params_recvd = kcalloc(num_max_vports,
2873 					      sizeof(*adapter->vport_params_recvd),
2874 					      GFP_KERNEL);
2875 	if (!adapter->vport_params_recvd)
2876 		goto err_mem;
2877 
2878 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
2879 	if (!adapter->vport_ids)
2880 		goto err_mem;
2881 
2882 	if (adapter->vport_config)
2883 		return 0;
2884 
2885 	adapter->vport_config = kcalloc(num_max_vports,
2886 					sizeof(*adapter->vport_config),
2887 					GFP_KERNEL);
2888 	if (!adapter->vport_config)
2889 		goto err_mem;
2890 
2891 	return 0;
2892 
2893 err_mem:
2894 	idpf_vport_params_buf_rel(adapter);
2895 
2896 	return -ENOMEM;
2897 }
2898 
2899 /**
2900  * idpf_vc_core_init - Initialize state machine and get driver specific
2901  * resources
2902  * @adapter: Driver specific private structure
2903  *
2904  * This function will initialize the state machine and request all necessary
2905  * resources required by the device driver. Once the state machine is
2906  * initialized, it allocates memory to store vport specific information and
2907  * requests the required interrupts.
2908  *
2909  * Returns 0 on success, -EAGAIN if the function should be called again,
2910  * otherwise negative on failure.
2911  */
2912 int idpf_vc_core_init(struct idpf_adapter *adapter)
2913 {
2914 	int task_delay = 30;
2915 	u16 num_max_vports;
2916 	int err = 0;
2917 
2918 	if (!adapter->vcxn_mngr) {
2919 		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
2920 		if (!adapter->vcxn_mngr) {
2921 			err = -ENOMEM;
2922 			goto init_failed;
2923 		}
2924 	}
2925 	idpf_vc_xn_init(adapter->vcxn_mngr);
2926 
2927 	while (adapter->state != __IDPF_INIT_SW) {
2928 		switch (adapter->state) {
2929 		case __IDPF_VER_CHECK:
2930 			err = idpf_send_ver_msg(adapter);
2931 			switch (err) {
2932 			case 0:
2933 				/* success, move state machine forward */
2934 				adapter->state = __IDPF_GET_CAPS;
2935 				fallthrough;
2936 			case -EAGAIN:
2937 				goto restart;
2938 			default:
2939 				/* Something bad happened, try again but only a
2940 				 * few times.
2941 				 */
2942 				goto init_failed;
2943 			}
2944 		case __IDPF_GET_CAPS:
2945 			err = idpf_send_get_caps_msg(adapter);
2946 			if (err)
2947 				goto init_failed;
2948 			adapter->state = __IDPF_INIT_SW;
2949 			break;
2950 		default:
2951 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
2952 				adapter->state);
2953 			err = -EINVAL;
2954 			goto init_failed;
2955 		}
2956 		break;
2957 restart:
2958 		/* Give enough time before proceeding further with
2959 		 * state machine
2960 		 */
2961 		msleep(task_delay);
2962 	}
2963 
2964 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
2965 	num_max_vports = idpf_get_max_vports(adapter);
2966 	adapter->max_vports = num_max_vports;
2967 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
2968 				  GFP_KERNEL);
2969 	if (!adapter->vports)
2970 		return -ENOMEM;
2971 
2972 	if (!adapter->netdevs) {
2973 		adapter->netdevs = kcalloc(num_max_vports,
2974 					   sizeof(struct net_device *),
2975 					   GFP_KERNEL);
2976 		if (!adapter->netdevs) {
2977 			err = -ENOMEM;
2978 			goto err_netdev_alloc;
2979 		}
2980 	}
2981 
2982 	err = idpf_vport_params_buf_alloc(adapter);
2983 	if (err) {
2984 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
2985 			err);
2986 		goto err_netdev_alloc;
2987 	}
2988 
2989 	/* Start the mailbox task before requesting vectors. This will ensure
2990 	 * the vector information response from the mailbox is handled
2991 	 */
2992 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
2993 
2994 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
2995 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
2996 
2997 	err = idpf_intr_req(adapter);
2998 	if (err) {
2999 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3000 			err);
3001 		goto err_intr_req;
3002 	}
3003 
3004 	err = idpf_ptp_init(adapter);
3005 	if (err)
3006 		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
3007 			ERR_PTR(err));
3008 
3009 	idpf_init_avail_queues(adapter);
3010 
3011 	/* Skew the delay for init tasks for each function based on fn number
3012 	 * to prevent every function from making the same call simultaneously.
3013 	 */
3014 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3015 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3016 
3017 	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3018 
3019 	return 0;
3020 
3021 err_intr_req:
3022 	cancel_delayed_work_sync(&adapter->serv_task);
3023 	cancel_delayed_work_sync(&adapter->mbx_task);
3024 	idpf_vport_params_buf_rel(adapter);
3025 err_netdev_alloc:
3026 	kfree(adapter->vports);
3027 	adapter->vports = NULL;
3028 	return err;
3029 
3030 init_failed:
3031 	/* Don't retry if we're trying to go down, just bail. */
3032 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3033 		return err;
3034 
3035 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3036 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3037 
3038 		return -EFAULT;
3039 	}
3040 	/* If it reached here, it is possible that mailbox queue initialization
3041 	 * register writes might not have taken effect. Retry initializing
3042 	 * the mailbox
3043 	 */
3044 	adapter->state = __IDPF_VER_CHECK;
3045 	if (adapter->vcxn_mngr)
3046 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3047 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3048 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3049 			   msecs_to_jiffies(task_delay));
3050 
3051 	return -EAGAIN;
3052 }
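
/*
 * Illustrative sketch, not part of the driver: the work-queue skew
 * used in idpf_vc_core_init() above. Each PCI function delays its
 * init/service task by 5 ms times its function number (the low three
 * bits of devfn), so function 0 starts immediately, function 3 after
 * 15 ms, spreading out otherwise simultaneous mailbox traffic.
 */
static unsigned long idpf_example_task_skew(const struct pci_dev *pdev)
{
	return msecs_to_jiffies(5 * (pdev->devfn & 0x07));
}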
3053 
3054 /**
3055  * idpf_vc_core_deinit - Device deinit routine
3056  * @adapter: Driver specific private structure
3057  *
3058  */
3059 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3060 {
3061 	bool remove_in_prog;
3062 
3063 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3064 		return;
3065 
3066 	/* Avoid transaction timeouts when called during reset */
3067 	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
3068 	if (!remove_in_prog)
3069 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3070 
3071 	idpf_ptp_release(adapter);
3072 	idpf_deinit_task(adapter);
3073 	idpf_intr_rel(adapter);
3074 
3075 	if (remove_in_prog)
3076 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3077 
3078 	cancel_delayed_work_sync(&adapter->serv_task);
3079 	cancel_delayed_work_sync(&adapter->mbx_task);
3080 
3081 	idpf_vport_params_buf_rel(adapter);
3082 
3083 	kfree(adapter->vports);
3084 	adapter->vports = NULL;
3085 
3086 	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3087 }
3088 
3089 /**
3090  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3091  * @vport: virtual port data struct
3092  *
3093  * This function requests the vector information required for the vport and
3094  * stores the vector indexes received from the 'global vector distribution'
3095  * in the vport's queue vectors array.
3096  *
3097  * Returns 0 on success, error on failure
3098  */
3099 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3100 {
3101 	struct idpf_vector_info vec_info;
3102 	int num_alloc_vecs;
3103 
3104 	vec_info.num_curr_vecs = vport->num_q_vectors;
3105 	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
3106 	vec_info.default_vport = vport->default_vport;
3107 	vec_info.index = vport->idx;
3108 
3109 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3110 						     vport->q_vector_idxs,
3111 						     &vec_info);
3112 	if (num_alloc_vecs <= 0) {
3113 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3114 			num_alloc_vecs);
3115 		return -EINVAL;
3116 	}
3117 
3118 	vport->num_q_vectors = num_alloc_vecs;
3119 
3120 	return 0;
3121 }
3122 
3123 /**
3124  * idpf_vport_init - Initialize virtual port
3125  * @vport: virtual port to be initialized
3126  * @max_q: vport max queue info
3127  *
3128  * Will initialize the vport with the info received earlier over the mailbox
3129  */
3130 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3131 {
3132 	struct idpf_adapter *adapter = vport->adapter;
3133 	struct virtchnl2_create_vport *vport_msg;
3134 	struct idpf_vport_config *vport_config;
3135 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3136 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3137 	struct idpf_rss_data *rss_data;
3138 	u16 idx = vport->idx;
3139 	int err;
3140 
3141 	vport_config = adapter->vport_config[idx];
3142 	rss_data = &vport_config->user_config.rss_data;
3143 	vport_msg = adapter->vport_params_recvd[idx];
3144 
3145 	vport_config->max_q.max_txq = max_q->max_txq;
3146 	vport_config->max_q.max_rxq = max_q->max_rxq;
3147 	vport_config->max_q.max_complq = max_q->max_complq;
3148 	vport_config->max_q.max_bufq = max_q->max_bufq;
3149 
3150 	vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3151 	vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3152 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3153 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3154 
3155 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3156 				       le16_to_cpu(vport_msg->rss_key_size));
3157 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3158 
3159 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3160 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3161 
3162 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3163 	memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
3164 	memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
3165 
3166 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3167 
3168 	idpf_vport_init_num_qs(vport, vport_msg);
3169 	idpf_vport_calc_num_q_desc(vport);
3170 	idpf_vport_calc_num_q_groups(vport);
3171 	idpf_vport_alloc_vec_indexes(vport);
3172 
3173 	vport->crc_enable = adapter->crc_enable;
3174 
3175 	if (!(vport_msg->vport_flags &
3176 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3177 		return;
3178 
3179 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3180 	if (err) {
3181 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3182 		return;
3183 	}
3184 
3185 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3186 }
3187 
3188 /**
3189  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3190  * @adapter: adapter structure to get the mailbox vector id
3191  * @vecids: Array of vector ids
3192  * @num_vecids: number of vector ids
3193  * @chunks: vector ids received over mailbox
3194  *
3195  * Will initialize the mailbox vector id received from the get capabilities
3196  * message and the data queue vector ids with the ids received as
3197  * mailbox parameters.
3198  * Returns number of ids filled
3199  */
3200 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3201 		     u16 *vecids, int num_vecids,
3202 		     struct virtchnl2_vector_chunks *chunks)
3203 {
3204 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3205 	int num_vecid_filled = 0;
3206 	int i, j;
3207 
3208 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3209 	num_vecid_filled++;
3210 
3211 	for (j = 0; j < num_chunks; j++) {
3212 		struct virtchnl2_vector_chunk *chunk;
3213 		u16 start_vecid, num_vec;
3214 
3215 		chunk = &chunks->vchunks[j];
3216 		num_vec = le16_to_cpu(chunk->num_vectors);
3217 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3218 
3219 		for (i = 0; i < num_vec; i++) {
3220 			if ((num_vecid_filled + i) < num_vecids) {
3221 				vecids[num_vecid_filled + i] = start_vecid;
3222 				start_vecid++;
3223 			} else {
3224 				break;
3225 			}
3226 		}
3227 		num_vecid_filled = num_vecid_filled + i;
3228 	}
3229 
3230 	return num_vecid_filled;
3231 }
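
/*
 * Illustrative sketch, not part of the driver: how idpf_get_vec_ids()
 * flattens one chunk into consecutive ids. A chunk { start 5, num 3 }
 * becomes 5, 6, 7 in the output array; the mailbox vector always
 * occupies slot 0 before any chunks are expanded. The helper name is
 * hypothetical.
 */
static int idpf_example_expand_chunk(u16 *vecids, int room, u16 start_vecid,
				     u16 num_vec)
{
	int i;

	/* one contiguous chunk becomes num_vec consecutive vector ids */
	for (i = 0; i < num_vec && i < room; i++)
		vecids[i] = start_vecid + i;

	return i;
}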
3232 
3233 /**
3234  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3235  * @qids: Array of queue ids
3236  * @num_qids: number of queue ids
3237  * @q_type: type of queue
3238  * @chunks: queue ids received over mailbox
3239  *
3240  * Will initialize all queue ids with ids received as mailbox parameters
3241  * Returns number of ids filled
3242  */
3243 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3244 				    struct virtchnl2_queue_reg_chunks *chunks)
3245 {
3246 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3247 	u32 num_q_id_filled = 0, i;
3248 	u32 start_q_id, num_q;
3249 
3250 	while (num_chunks--) {
3251 		struct virtchnl2_queue_reg_chunk *chunk;
3252 
3253 		chunk = &chunks->chunks[num_chunks];
3254 		if (le32_to_cpu(chunk->type) != q_type)
3255 			continue;
3256 
3257 		num_q = le32_to_cpu(chunk->num_queues);
3258 		start_q_id = le32_to_cpu(chunk->start_queue_id);
3259 
3260 		for (i = 0; i < num_q; i++) {
3261 			if ((num_q_id_filled + i) < num_qids) {
3262 				qids[num_q_id_filled + i] = start_q_id;
3263 				start_q_id++;
3264 			} else {
3265 				break;
3266 			}
3267 		}
3268 		num_q_id_filled = num_q_id_filled + i;
3269 	}
3270 
3271 	return num_q_id_filled;
3272 }
3273 
3274 /**
3275  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3276  * @vport: virtual port for which the queues ids are initialized
3277  * @qids: queue ids
3278  * @num_qids: number of queue ids
3279  * @q_type: type of queue
3280  *
3281  * Will initialize all queue ids with ids received as mailbox
3282  * parameters. Returns number of queue ids initialized.
3283  */
3284 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3285 				       const u32 *qids,
3286 				       int num_qids,
3287 				       u32 q_type)
3288 {
3289 	int i, j, k = 0;
3290 
3291 	switch (q_type) {
3292 	case VIRTCHNL2_QUEUE_TYPE_TX:
3293 		for (i = 0; i < vport->num_txq_grp; i++) {
3294 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3295 
3296 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3297 				tx_qgrp->txqs[j]->q_id = qids[k];
3298 		}
3299 		break;
3300 	case VIRTCHNL2_QUEUE_TYPE_RX:
3301 		for (i = 0; i < vport->num_rxq_grp; i++) {
3302 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3303 			u16 num_rxq;
3304 
3305 			if (idpf_is_queue_model_split(vport->rxq_model))
3306 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3307 			else
3308 				num_rxq = rx_qgrp->singleq.num_rxq;
3309 
3310 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3311 				struct idpf_rx_queue *q;
3312 
3313 				if (idpf_is_queue_model_split(vport->rxq_model))
3314 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3315 				else
3316 					q = rx_qgrp->singleq.rxqs[j];
3317 				q->q_id = qids[k];
3318 			}
3319 		}
3320 		break;
3321 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3322 		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3323 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3324 
3325 			tx_qgrp->complq->q_id = qids[k];
3326 		}
3327 		break;
3328 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3329 		for (i = 0; i < vport->num_rxq_grp; i++) {
3330 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3331 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
3332 
3333 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3334 				struct idpf_buf_queue *q;
3335 
3336 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3337 				q->q_id = qids[k];
3338 			}
3339 		}
3340 		break;
3341 	default:
3342 		break;
3343 	}
3344 
3345 	return k;
3346 }
3347 
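/* Assignment-order sketch (assuming a hypothetical vport with two TX queue
 * groups of two txqs each): for q_type VIRTCHNL2_QUEUE_TYPE_TX the ids are
 * consumed linearly across groups, so qids[] = { 64, 65, 66, 67 } maps as
 *
 *	txq_grps[0].txqs[0]->q_id = 64, txq_grps[0].txqs[1]->q_id = 65,
 *	txq_grps[1].txqs[0]->q_id = 66, txq_grps[1].txqs[1]->q_id = 67,
 *
 * and the return value k == 4 lets the caller verify that enough ids were
 * received for every queue.
 */
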
/**
 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 * Returns 0 on success, negative if not all queues could be initialized.
 */
int idpf_vport_queue_ids_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_ids, err = 0;
	u16 q_type;
	u32 *qids;

	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
	if (!qids)
		return -ENOMEM;

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_TX,
					   chunks);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_RX,
					   chunks);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_RX);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}

	/* Completion queues exist only in the split TX queue model */
	if (!idpf_is_queue_model_split(vport->txq_model))
		goto check_rxq;

	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}

check_rxq:
	/* Buffer queues exist only in the split RX queue model */
	if (!idpf_is_queue_model_split(vport->rxq_model))
		goto mem_rel;

	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_bufq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_bufq)
		err = -EINVAL;

mem_rel:
	kfree(qids);

	return err;
}

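/* Call-flow sketch: the function above reuses one qids[] scratch buffer
 * for each queue type in turn, e.g. for a fully split-queue vport:
 *
 *	TX            -> get ids -> assign to txqs
 *	RX            -> get ids -> assign to rxqs
 *	TX_COMPLETION -> get ids -> assign to complqs
 *	RX_BUFFER     -> get ids -> assign to bufqs
 *
 * A single-queue model skips the completion and buffer queue steps via
 * the txq_model/rxq_model checks.
 */
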
/**
 * idpf_vport_adjust_qs - Adjust to new requested queues
 * @vport: virtual port data struct
 *
 * Renegotiate queues.  Returns 0 on success, negative on failure.
 */
int idpf_vport_adjust_qs(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport vport_msg;
	int err;

	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
				       NULL);
	if (err)
		return err;

	idpf_vport_init_num_qs(vport, &vport_msg);
	idpf_vport_calc_num_q_groups(vport);

	return 0;
}

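/* Usage sketch (hypothetical caller): after the requested queue counts
 * change, the expectation is roughly
 *
 *	err = idpf_vport_adjust_qs(vport);
 *	if (err)
 *		return err;
 *
 * i.e. recompute num_txq/num_rxq and the derived queue-group counts
 * before queue resources are re-allocated for the new configuration.
 */
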
/**
 * idpf_is_capability_ena - Default implementation of capability checking
 * @adapter: Private data struct
 * @all: require all given flags instead of any one of them
 * @field: caps field to check for flags
 * @flag: flag(s) to check
 *
 * Return true if all of the given capability flags are set when @all is
 * true, or if at least one of them is set when @all is false.
 */
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag)
{
	u8 *caps = (u8 *)&adapter->caps;
	u32 *cap_field;

	if (field == IDPF_BASE_CAPS)
		return false;

	cap_field = (u32 *)(caps + field);

	if (all)
		return (*cap_field & flag) == flag;
	else
		return !!(*cap_field & flag);
}

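/* Semantics sketch (hypothetical flag values): with a caps field holding
 * 0x3 (bits A = 0x1 and B = 0x2 set, C = 0x4 clear), the checks behave as
 *
 *	idpf_is_capability_ena(adapter, true,  field, A | B) == true
 *	idpf_is_capability_ena(adapter, false, field, A | C) == true
 *	idpf_is_capability_ena(adapter, true,  field, A | C) == false
 *
 * i.e. @all selects an AND-match of every flag versus an OR-match of any.
 */
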
/**
 * idpf_get_vport_id - Get vport id
 * @vport: virtual port structure
 *
 * Return vport id from the adapter persistent data
 */
u32 idpf_get_vport_id(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->vport_id);
}

/**
 * idpf_mac_filter_async_handler - Async callback for mac filters
 * @adapter: private data struct
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * In some scenarios the driver can't sleep and wait for a reply (e.g. the
 * stack is holding rtnl_lock) when adding a new mac filter, which makes it
 * difficult to deal with errors returned on the reply. The best we can
 * ultimately do is remove the filter from our list of mac filters and
 * report the error.
 */
static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
					 struct idpf_vc_xn *xn,
					 const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_mac_addr_list *ma_list;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_mac_addr *mac_addr;
	struct idpf_mac_filter *f, *tmp;
	struct list_head *ma_list_head;
	struct idpf_vport *vport;
	u16 num_entries;
	int i;

	/* if success we're done, we're only here if something bad happened */
	if (!ctlq_msg->cookie.mbx.chnl_retval)
		return 0;

	/* make sure at least the struct is there */
	if (xn->reply_sz < sizeof(*ma_list))
		goto invalid_payload;

	ma_list = ctlq_msg->ctx.indirect.payload->va;
	mac_addr = ma_list->mac_addr_list;
	num_entries = le16_to_cpu(ma_list->num_mac_addr);
	/* we should have received a buffer at least this big */
	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
		goto invalid_payload;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
	if (!vport)
		goto invalid_payload;

	vport_config = adapter->vport_config[vport->idx];
	ma_list_head = &vport_config->user_config.mac_filter_list;

	/* We can't do much to reconcile bad filters at this point, however we
	 * should at least remove them from our list one way or the other so we
	 * have some idea what good filters we have.
	 */
	spin_lock_bh(&vport_config->mac_filter_list_lock);
	list_for_each_entry_safe(f, tmp, ma_list_head, list)
		for (i = 0; i < num_entries; i++)
			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
				list_del(&f->list);
	spin_unlock_bh(&vport_config->mac_filter_list_lock);
	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
			    xn->vc_op);

	return 0;

invalid_payload:
	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
			    xn->vc_op, xn->reply_sz);

	return -EINVAL;
}

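/* Validation sketch: the struct_size() check above scales with the
 * reported entry count; a reply claiming num_mac_addr == 3 must carry at
 * least struct_size(ma_list, mac_addr_list, 3) bytes, i.e. the list
 * header plus three struct virtchnl2_mac_addr entries, or it is rejected
 * before the filter list is walked.
 */
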
/**
 * idpf_add_del_mac_filters - Add/del mac filters
 * @vport: Virtual port data structure
 * @np: Netdev private structure
 * @add: Add or delete flag
 * @async: Don't wait for return message
 *
 * Returns 0 on success, error on failure.
 */
int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async)
{
	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
	struct idpf_adapter *adapter = np->adapter;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	u32 num_msgs, total_filters = 0;
	struct idpf_mac_filter *f;
	ssize_t reply_sz;
	int i = 0, k;

	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
				VIRTCHNL2_OP_DEL_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = async;
	xn_params.async_handler = idpf_mac_filter_async_handler;

	vport_config = adapter->vport_config[np->vport_idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	/* Count the number of filters to be added or deleted */
	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add)
			total_filters++;
		else if (!add && f->remove)
			total_filters++;
	}

	if (!total_filters) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

	/* Fill all the pending filters into the virtchnl message */
	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
			   GFP_ATOMIC);
	if (!mac_addr) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == total_filters)
				break;
		}
		if (!add && f->remove) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			i++;
			f->remove = false;
			if (i == total_filters)
				break;
		}
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	/* Chunk up the filters into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);

	for (i = 0, k = 0; i < num_msgs; i++) {
		u32 entries_size, buf_size, num_entries;

		num_entries = min_t(u32, total_filters,
				    IDPF_NUM_FILTERS_PER_MSG);
		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
		buf_size = struct_size(ma_list, mac_addr_list, num_entries);

		/* Allocate on first use and re-allocate for the final,
		 * smaller message; full-size messages in between reuse the
		 * same buffer.
		 */
		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
			kfree(ma_list);
			ma_list = kzalloc(buf_size, GFP_ATOMIC);
			if (!ma_list)
				return -ENOMEM;
		} else {
			memset(ma_list, 0, buf_size);
		}

		ma_list->vport_id = cpu_to_le32(np->vport_id);
		ma_list->num_mac_addr = cpu_to_le16(num_entries);
		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);

		xn_params.send_buf.iov_base = ma_list;
		xn_params.send_buf.iov_len = buf_size;
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_entries;
		total_filters -= num_entries;
	}

	return 0;
}

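/* Chunking example (hypothetical numbers): assuming, for illustration, a
 * limit of 20 filters per message, 45 pending filters give
 * DIV_ROUND_UP(45, 20) == 3 messages: two full messages of 20 entries and
 * a final message of 5, since num_entries is re-clamped against the
 * shrinking total_filters on every iteration.
 */
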
/**
 * idpf_set_promiscuous - set promiscuous and send message to mailbox
 * @adapter: Driver specific private structure
 * @config_data: Vport specific config data
 * @vport_id: Vport identifier
 *
 * Request to configure promiscuous mode for the vport. The message is sent
 * asynchronously and does not wait for a response. Returns 0 on success,
 * negative on failure.
 */
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_promisc_info vpi;
	ssize_t reply_sz;
	u16 flags = 0;

	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
		flags |= VIRTCHNL2_UNICAST_PROMISC;
	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
		flags |= VIRTCHNL2_MULTICAST_PROMISC;

	vpi.vport_id = cpu_to_le32(vport_id);
	vpi.flags = cpu_to_le16(flags);

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &vpi;
	xn_params.send_buf.iov_len = sizeof(vpi);
	/* setting promiscuous is only ever done asynchronously */
	xn_params.async = true;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
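
/* Flag mapping sketch: user state translates to virtchnl flags as
 *
 *	__IDPF_PROMISC_UC -> VIRTCHNL2_UNICAST_PROMISC
 *	__IDPF_PROMISC_MC -> VIRTCHNL2_MULTICAST_PROMISC
 *
 * so e.g. a vport with only __IDPF_PROMISC_MC set sends flags ==
 * VIRTCHNL2_MULTICAST_PROMISC, and clearing both bits sends flags == 0
 * to disable promiscuous mode; any reply is handled asynchronously.
 */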