xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <linux/export.h>
5 #include <net/libeth/rx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 #include "idpf_ptp.h"
10 
11 /**
12  * struct idpf_vc_xn_manager - Manager for tracking transactions
13  * @ring: backing and lookup for transactions
14  * @free_xn_bm: bitmap for free transactions
15  * @xn_bm_lock: make bitmap access synchronous where necessary
16  * @salt: used to make the cookie unique for every message
17  */
18 struct idpf_vc_xn_manager {
19 	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
20 	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
21 	spinlock_t xn_bm_lock;
22 	u8 salt;
23 };
24 
25 /**
26  * idpf_vid_to_vport - Translate vport id to vport pointer
27  * @adapter: private data struct
28  * @v_id: vport id to translate
29  *
30  * Returns vport matching v_id, NULL if not found.
31  */
32 static
33 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
34 {
35 	u16 num_max_vports = idpf_get_max_vports(adapter);
36 	int i;
37 
38 	for (i = 0; i < num_max_vports; i++)
39 		if (adapter->vport_ids[i] == v_id)
40 			return adapter->vports[i];
41 
42 	return NULL;
43 }
44 
45 /**
46  * idpf_handle_event_link - Handle link event message
47  * @adapter: private data struct
48  * @v2e: virtchnl event message
49  */
50 static void idpf_handle_event_link(struct idpf_adapter *adapter,
51 				   const struct virtchnl2_event *v2e)
52 {
53 	struct idpf_netdev_priv *np;
54 	struct idpf_vport *vport;
55 
56 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
57 	if (!vport) {
58 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
59 				    v2e->vport_id);
60 		return;
61 	}
62 	np = netdev_priv(vport->netdev);
63 
64 	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
65 
66 	if (vport->link_up == v2e->link_status)
67 		return;
68 
69 	vport->link_up = v2e->link_status;
70 
71 	if (np->state != __IDPF_VPORT_UP)
72 		return;
73 
74 	if (vport->link_up) {
75 		netif_tx_start_all_queues(vport->netdev);
76 		netif_carrier_on(vport->netdev);
77 	} else {
78 		netif_tx_stop_all_queues(vport->netdev);
79 		netif_carrier_off(vport->netdev);
80 	}
81 }
82 
83 /**
84  * idpf_recv_event_msg - Receive virtchnl event message
85  * @adapter: Driver specific private structure
86  * @ctlq_msg: message to copy from
87  *
88  * Receive virtchnl event message
89  */
90 static void idpf_recv_event_msg(struct idpf_adapter *adapter,
91 				struct idpf_ctlq_msg *ctlq_msg)
92 {
93 	int payload_size = ctlq_msg->ctx.indirect.payload->size;
94 	struct virtchnl2_event *v2e;
95 	u32 event;
96 
97 	if (payload_size < sizeof(*v2e)) {
98 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
99 				    ctlq_msg->cookie.mbx.chnl_opcode,
100 				    payload_size);
101 		return;
102 	}
103 
104 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
105 	event = le32_to_cpu(v2e->event);
106 
107 	switch (event) {
108 	case VIRTCHNL2_EVENT_LINK_CHANGE:
109 		idpf_handle_event_link(adapter, v2e);
110 		return;
111 	default:
112 		dev_err(&adapter->pdev->dev,
113 			"Unknown event %d from PF\n", event);
114 		break;
115 	}
116 }
117 
118 /**
119  * idpf_mb_clean - Reclaim the send mailbox queue entries
120  * @adapter: Driver specific private structure
121  *
122  * Reclaim the send mailbox queue entries to be used to send further messages
123  *
124  * Returns 0 on success, negative on failure
125  */
126 static int idpf_mb_clean(struct idpf_adapter *adapter)
127 {
128 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
129 	struct idpf_ctlq_msg **q_msg;
130 	struct idpf_dma_mem *dma_mem;
131 	int err;
132 
133 	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
134 	if (!q_msg)
135 		return -ENOMEM;
136 
137 	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
138 	if (err)
139 		goto err_kfree;
140 
141 	for (i = 0; i < num_q_msg; i++) {
142 		if (!q_msg[i])
143 			continue;
144 		dma_mem = q_msg[i]->ctx.indirect.payload;
145 		if (dma_mem)
146 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
147 					  dma_mem->va, dma_mem->pa);
148 		kfree(q_msg[i]);
149 		kfree(dma_mem);
150 	}
151 
152 err_kfree:
153 	kfree(q_msg);
154 
155 	return err;
156 }
157 
158 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
159 /**
160  * idpf_ptp_is_mb_msg - Check if the message is PTP-related
161  * @op: virtchnl opcode
162  *
163  * Return: true if msg is PTP-related, false otherwise.
164  */
165 static bool idpf_ptp_is_mb_msg(u32 op)
166 {
167 	switch (op) {
168 	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
169 	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
170 	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
171 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
172 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
173 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
174 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
175 		return true;
176 	default:
177 		return false;
178 	}
179 }
180 
181 /**
182  * idpf_prepare_ptp_mb_msg - Prepare PTP related message
183  *
184  * @adapter: Driver specific private structure
185  * @op: virtchnl opcode
186  * @ctlq_msg: Corresponding control queue message
187  */
188 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
189 				    struct idpf_ctlq_msg *ctlq_msg)
190 {
191 	/* If the message is PTP-related and the secondary mailbox is available,
192 	 * send the message through the secondary mailbox.
193 	 */
194 	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
195 		return;
196 
197 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
198 	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
199 	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
200 }
201 #else /* !CONFIG_PTP_1588_CLOCK */
202 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
203 				    struct idpf_ctlq_msg *ctlq_msg)
204 { }
205 #endif /* CONFIG_PTP_1588_CLOCK */
206 
207 /**
208  * idpf_send_mb_msg - Send message over mailbox
209  * @adapter: Driver specific private structure
210  * @op: virtchnl opcode
211  * @msg_size: size of the payload
212  * @msg: pointer to buffer holding the payload
213  * @cookie: unique SW generated cookie per message
214  *
215  * Will prepare the control queue message and initiate the send API
216  *
217  * Returns 0 on success, negative on failure
218  */
219 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
220 		     u16 msg_size, u8 *msg, u16 cookie)
221 {
222 	struct idpf_ctlq_msg *ctlq_msg;
223 	struct idpf_dma_mem *dma_mem;
224 	int err;
225 
226 	/* If we are here and a reset is detected nothing much can be
227 	 * done. This thread should silently abort and is expected to
228 	 * be corrected with a new run, either by user or driver
229 	 * flows, after the reset.
230 	 */
231 	if (idpf_is_reset_detected(adapter))
232 		return 0;
233 
234 	err = idpf_mb_clean(adapter);
235 	if (err)
236 		return err;
237 
238 	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
239 	if (!ctlq_msg)
240 		return -ENOMEM;
241 
242 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
243 	if (!dma_mem) {
244 		err = -ENOMEM;
245 		goto dma_mem_error;
246 	}
247 
248 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
249 	ctlq_msg->func_id = 0;
250 
251 	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
252 
253 	ctlq_msg->data_len = msg_size;
254 	ctlq_msg->cookie.mbx.chnl_opcode = op;
255 	ctlq_msg->cookie.mbx.chnl_retval = 0;
256 	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
257 	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
258 					 &dma_mem->pa, GFP_ATOMIC);
259 	if (!dma_mem->va) {
260 		err = -ENOMEM;
261 		goto dma_alloc_error;
262 	}
263 
264 	/* It's possible we're just sending an opcode but no buffer */
265 	if (msg && msg_size)
266 		memcpy(dma_mem->va, msg, msg_size);
267 	ctlq_msg->ctx.indirect.payload = dma_mem;
268 	ctlq_msg->ctx.sw_cookie.data = cookie;
269 
270 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
271 	if (err)
272 		goto send_error;
273 
274 	return 0;
275 
276 send_error:
277 	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
278 			  dma_mem->pa);
279 dma_alloc_error:
280 	kfree(dma_mem);
281 dma_mem_error:
282 	kfree(ctlq_msg);
283 
284 	return err;
285 }
286 
287 /* API for virtchnl "transaction" support ("xn" for short).
288  *
289  * We are reusing the completion lock to serialize the accesses to the
290  * transaction state for simplicity, but it could be its own separate synchro
291  * as well. For now, this API is only used from within a workqueue context;
292  * raw_spin_lock() is enough.
293  */
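/* A minimal usage sketch (illustrative only; see idpf_send_ver_msg() below
 * for a real caller): fill an idpf_vc_xn_params on the stack, point the
 * kvecs at the send/receive buffers, and let idpf_vc_xn_exec() run the
 * transaction:
 *
 *	struct idpf_vc_xn_params xn_params = {};
 *	ssize_t reply_sz;
 *
 *	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
 *	xn_params.send_buf.iov_base = &vvi;
 *	xn_params.send_buf.iov_len = sizeof(vvi);
 *	xn_params.recv_buf = xn_params.send_buf;
 *	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 *	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *	if (reply_sz < 0)
 *		return reply_sz;
 */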
294 /**
295  * idpf_vc_xn_lock - Request exclusive access to vc transaction
296  * @xn: struct idpf_vc_xn* to access
297  */
298 #define idpf_vc_xn_lock(xn)			\
299 	raw_spin_lock(&(xn)->completed.wait.lock)
300 
301 /**
302  * idpf_vc_xn_unlock - Release exclusive access to vc transaction
303  * @xn: struct idpf_vc_xn* to access
304  */
305 #define idpf_vc_xn_unlock(xn)		\
306 	raw_spin_unlock(&(xn)->completed.wait.lock)
307 
308 /**
309  * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
310  * reset the transaction state.
311  * @xn: struct idpf_vc_xn to update
312  */
313 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
314 {
315 	xn->reply.iov_base = NULL;
316 	xn->reply.iov_len = 0;
317 
318 	if (xn->state != IDPF_VC_XN_SHUTDOWN)
319 		xn->state = IDPF_VC_XN_IDLE;
320 }
321 
322 /**
323  * idpf_vc_xn_init - Initialize virtchnl transaction object
324  * @vcxn_mngr: pointer to vc transaction manager struct
325  */
326 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
327 {
328 	int i;
329 
330 	spin_lock_init(&vcxn_mngr->xn_bm_lock);
331 
332 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
333 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
334 
335 		xn->state = IDPF_VC_XN_IDLE;
336 		xn->idx = i;
337 		idpf_vc_xn_release_bufs(xn);
338 		init_completion(&xn->completed);
339 	}
340 
341 	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
342 }
343 
344 /**
345  * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
346  * @vcxn_mngr: pointer to vc transaction manager struct
347  *
348  * All waiting threads will be woken up and their transactions aborted. Further
349  * operations on that object will fail.
350  */
351 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
352 {
353 	int i;
354 
355 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
356 	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
357 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
358 
359 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
360 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
361 
362 		idpf_vc_xn_lock(xn);
363 		xn->state = IDPF_VC_XN_SHUTDOWN;
364 		idpf_vc_xn_release_bufs(xn);
365 		idpf_vc_xn_unlock(xn);
366 		complete_all(&xn->completed);
367 	}
368 }
369 
370 /**
371  * idpf_vc_xn_pop_free - Pop a free transaction from free list
372  * @vcxn_mngr: transaction manager to pop from
373  *
374  * Returns NULL if no free transactions
375  */
376 static
377 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
378 {
379 	struct idpf_vc_xn *xn = NULL;
380 	unsigned long free_idx;
381 
382 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
383 	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
384 	if (free_idx == IDPF_VC_XN_RING_LEN)
385 		goto do_unlock;
386 
387 	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
388 	xn = &vcxn_mngr->ring[free_idx];
389 	xn->salt = vcxn_mngr->salt++;
390 
391 do_unlock:
392 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
393 
394 	return xn;
395 }
396 
397 /**
398  * idpf_vc_xn_push_free - Push a free transaction to free list
399  * @vcxn_mngr: transaction manager to push to
400  * @xn: transaction to push
401  */
402 static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
403 				 struct idpf_vc_xn *xn)
404 {
405 	idpf_vc_xn_release_bufs(xn);
406 	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
407 }
408 
409 /**
410  * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
411  * @adapter: driver specific private structure with vcxn_mngr
412  * @params: parameters for this particular transaction including
413  *   -vc_op: virtchannel operation to send
414  *   -send_buf: kvec iov for send buf and len
415  *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
416  *   -timeout_ms: timeout waiting for a reply (milliseconds)
417  *   -async: don't wait for message reply, will lose caller context
418  *   -async_handler: callback to handle async replies
419  *
420  * @returns >= 0 for success, the size of the initial reply (may or may not be
421  * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
422  * error.
423  */
424 ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
425 			const struct idpf_vc_xn_params *params)
426 {
427 	const struct kvec *send_buf = &params->send_buf;
428 	struct idpf_vc_xn *xn;
429 	ssize_t retval;
430 	u16 cookie;
431 
432 	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
433 	/* no free transactions available */
434 	if (!xn)
435 		return -ENOSPC;
436 
437 	idpf_vc_xn_lock(xn);
438 	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
439 		retval = -ENXIO;
440 		goto only_unlock;
441 	} else if (xn->state != IDPF_VC_XN_IDLE) {
442 		/* We're just going to clobber this transaction even though
443 		 * it's not IDLE. If we don't reuse it we could theoretically
444 		 * eventually leak all the free transactions and not be able to
445 		 * send any messages. At least this way we make an attempt to
446 		 * remain functional even though something really bad is
447 		 * happening that's corrupting what was supposed to be free
448 		 * transactions.
449 		 */
450 		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
451 			  xn->idx, xn->vc_op);
452 	}
453 
454 	xn->reply = params->recv_buf;
455 	xn->reply_sz = 0;
456 	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
457 	xn->vc_op = params->vc_op;
458 	xn->async_handler = params->async_handler;
459 	idpf_vc_xn_unlock(xn);
460 
461 	if (!params->async)
462 		reinit_completion(&xn->completed);
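	/* The cookie packs this transaction's ring index and per-message salt
	 * (IDPF_VC_XN_IDX_M / IDPF_VC_XN_SALT_M) so idpf_vc_xn_forward_reply()
	 * can match the reply back to this transaction.
	 */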
463 	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
464 		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
465 
466 	retval = idpf_send_mb_msg(adapter, params->vc_op,
467 				  send_buf->iov_len, send_buf->iov_base,
468 				  cookie);
469 	if (retval) {
470 		idpf_vc_xn_lock(xn);
471 		goto release_and_unlock;
472 	}
473 
474 	if (params->async)
475 		return 0;
476 
477 	wait_for_completion_timeout(&xn->completed,
478 				    msecs_to_jiffies(params->timeout_ms));
479 
480 	/* No need to check the return value; we check the final state of the
481 	 * transaction below. It's possible the transaction actually gets more
482 	 * time than the specified timeout if we get preempted here but after
483 	 * wait_for_completion_timeout() returns. This should be a non-issue,
484 	 * however.
485 	 */
486 	idpf_vc_xn_lock(xn);
487 	switch (xn->state) {
488 	case IDPF_VC_XN_SHUTDOWN:
489 		retval = -ENXIO;
490 		goto only_unlock;
491 	case IDPF_VC_XN_WAITING:
492 		dev_notice_ratelimited(&adapter->pdev->dev,
493 				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
494 				       params->vc_op, cookie, xn->vc_op,
495 				       xn->salt, params->timeout_ms);
496 		retval = -ETIME;
497 		break;
498 	case IDPF_VC_XN_COMPLETED_SUCCESS:
499 		retval = xn->reply_sz;
500 		break;
501 	case IDPF_VC_XN_COMPLETED_FAILED:
502 		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
503 				       params->vc_op);
504 		retval = -EIO;
505 		break;
506 	default:
507 		/* Invalid state. */
508 		WARN_ON_ONCE(1);
509 		retval = -EIO;
510 		break;
511 	}
512 
513 release_and_unlock:
514 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
515 	/* If we receive a VC reply after here, it will be dropped. */
516 only_unlock:
517 	idpf_vc_xn_unlock(xn);
518 
519 	return retval;
520 }
521 
522 /**
523  * idpf_vc_xn_forward_async - Handle async reply receives
524  * @adapter: private data struct
525  * @xn: transaction to handle
526  * @ctlq_msg: corresponding ctlq_msg
527  *
528  * For async sends we're going to lose the caller's context so, if an
529  * async_handler was provided, it can deal with the reply, otherwise we'll just
530  * check and report if there is an error.
531  */
532 static int
533 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
534 			 const struct idpf_ctlq_msg *ctlq_msg)
535 {
536 	int err = 0;
537 
538 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
539 		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
540 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
541 		xn->reply_sz = 0;
542 		err = -EINVAL;
543 		goto release_bufs;
544 	}
545 
546 	if (xn->async_handler) {
547 		err = xn->async_handler(adapter, xn, ctlq_msg);
548 		goto release_bufs;
549 	}
550 
551 	if (ctlq_msg->cookie.mbx.chnl_retval) {
552 		xn->reply_sz = 0;
553 		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
554 				    ctlq_msg->cookie.mbx.chnl_opcode);
555 		err = -EINVAL;
556 	}
557 
558 release_bufs:
559 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
560 
561 	return err;
562 }
563 
564 /**
565  * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
566  * @adapter: driver specific private structure with vcxn_mngr
567  * @ctlq_msg: controlq message to send back to receiving thread
568  */
569 static int
570 idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
571 			 const struct idpf_ctlq_msg *ctlq_msg)
572 {
573 	const void *payload = NULL;
574 	size_t payload_size = 0;
575 	struct idpf_vc_xn *xn;
576 	u16 msg_info;
577 	int err = 0;
578 	u16 xn_idx;
579 	u16 salt;
580 
581 	msg_info = ctlq_msg->ctx.sw_cookie.data;
582 	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
583 	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
584 		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
585 				    xn_idx);
586 		return -EINVAL;
587 	}
588 	xn = &adapter->vcxn_mngr->ring[xn_idx];
589 	idpf_vc_xn_lock(xn);
590 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
591 	if (xn->salt != salt) {
592 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
593 				    xn->vc_op, xn->salt, xn->state,
594 				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
595 		idpf_vc_xn_unlock(xn);
596 		return -EINVAL;
597 	}
598 
599 	switch (xn->state) {
600 	case IDPF_VC_XN_WAITING:
601 		/* success */
602 		break;
603 	case IDPF_VC_XN_IDLE:
604 		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
605 				    ctlq_msg->cookie.mbx.chnl_opcode);
606 		err = -EINVAL;
607 		goto out_unlock;
608 	case IDPF_VC_XN_SHUTDOWN:
609 		/* ENXIO is a bit special here as the recv msg loop uses that to
610 		 * know if it should stop trying to clean the ring if we lost
611 		 * the virtchnl. We need to stop playing with registers and
612 		 * yield.
613 		 */
614 		err = -ENXIO;
615 		goto out_unlock;
616 	case IDPF_VC_XN_ASYNC:
617 		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
618 		idpf_vc_xn_unlock(xn);
619 		return err;
620 	default:
621 		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
622 				    ctlq_msg->cookie.mbx.chnl_opcode);
623 		err = -EBUSY;
624 		goto out_unlock;
625 	}
626 
627 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
628 		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
629 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
630 		xn->reply_sz = 0;
631 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
632 		err = -EINVAL;
633 		goto out_unlock;
634 	}
635 
636 	if (ctlq_msg->cookie.mbx.chnl_retval) {
637 		xn->reply_sz = 0;
638 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
639 		err = -EINVAL;
640 		goto out_unlock;
641 	}
642 
643 	if (ctlq_msg->data_len) {
644 		payload = ctlq_msg->ctx.indirect.payload->va;
645 		payload_size = ctlq_msg->data_len;
646 	}
647 
648 	xn->reply_sz = payload_size;
649 	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
650 
651 	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
652 		memcpy(xn->reply.iov_base, payload,
653 		       min_t(size_t, xn->reply.iov_len, payload_size));
654 
655 out_unlock:
656 	idpf_vc_xn_unlock(xn);
657 	/* we _cannot_ hold lock while calling complete */
658 	complete(&xn->completed);
659 
660 	return err;
661 }
662 
663 /**
664  * idpf_recv_mb_msg - Receive message over mailbox
665  * @adapter: Driver specific private structure
666  *
667  * Will receive control queue messages and post the receive buffer. Returns 0
668  * on success and negative on failure.
669  */
670 int idpf_recv_mb_msg(struct idpf_adapter *adapter)
671 {
672 	struct idpf_ctlq_msg ctlq_msg;
673 	struct idpf_dma_mem *dma_mem;
674 	int post_err, err;
675 	u16 num_recv;
676 
677 	while (1) {
678 		/* This will get <= num_recv messages and output how many
679 		 * were actually received in num_recv.
680 		 */
681 		num_recv = 1;
682 		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
683 		if (err || !num_recv)
684 			break;
685 
686 		if (ctlq_msg.data_len) {
687 			dma_mem = ctlq_msg.ctx.indirect.payload;
688 		} else {
689 			dma_mem = NULL;
690 			num_recv = 0;
691 		}
692 
693 		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
694 			idpf_recv_event_msg(adapter, &ctlq_msg);
695 		else
696 			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
697 
698 		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
699 						   adapter->hw.arq,
700 						   &num_recv, &dma_mem);
701 
702 		/* If post failed clear the only buffer we supplied */
703 		if (post_err) {
704 			if (dma_mem)
705 				dmam_free_coherent(&adapter->pdev->dev,
706 						   dma_mem->size, dma_mem->va,
707 						   dma_mem->pa);
708 			break;
709 		}
710 
711 		/* virtchnl trying to shutdown, stop cleaning */
712 		if (err == -ENXIO)
713 			break;
714 	}
715 
716 	return err;
717 }
718 
719 /**
720  * idpf_wait_for_marker_event - wait for software marker response
721  * @vport: virtual port data structure
722  *
723  * Returns 0 on success, negative on failure.
724  **/
725 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
726 {
727 	int event;
728 	int i;
729 
730 	for (i = 0; i < vport->num_txq; i++)
731 		idpf_queue_set(SW_MARKER, vport->txqs[i]);
732 
733 	event = wait_event_timeout(vport->sw_marker_wq,
734 				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
735 						      vport->flags),
736 				   msecs_to_jiffies(500));
737 
738 	for (i = 0; i < vport->num_txq; i++)
739 		idpf_queue_clear(POLL_MODE, vport->txqs[i]);
740 
741 	if (event)
742 		return 0;
743 
744 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
745 
746 	return -ETIMEDOUT;
747 }
748 
749 /**
750  * idpf_send_ver_msg - send virtchnl version message
751  * @adapter: Driver specific private structure
752  *
753  * Send virtchnl version message.  Returns 0 on success, negative on failure.
754  */
755 static int idpf_send_ver_msg(struct idpf_adapter *adapter)
756 {
757 	struct idpf_vc_xn_params xn_params = {};
758 	struct virtchnl2_version_info vvi;
759 	ssize_t reply_sz;
760 	u32 major, minor;
761 	int err = 0;
762 
763 	if (adapter->virt_ver_maj) {
764 		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
765 		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
766 	} else {
767 		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
768 		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
769 	}
770 
771 	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
772 	xn_params.send_buf.iov_base = &vvi;
773 	xn_params.send_buf.iov_len = sizeof(vvi);
774 	xn_params.recv_buf = xn_params.send_buf;
775 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
776 
777 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
778 	if (reply_sz < 0)
779 		return reply_sz;
780 	if (reply_sz < sizeof(vvi))
781 		return -EIO;
782 
783 	major = le32_to_cpu(vvi.major);
784 	minor = le32_to_cpu(vvi.minor);
785 
786 	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
787 		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
788 		return -EINVAL;
789 	}
790 
791 	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
792 	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
793 		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
794 
795 	/* If we have a mismatch, resend version to update receiver on what
796 	 * version we will use.
797 	 */
798 	if (!adapter->virt_ver_maj &&
799 	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
800 	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
801 		err = -EAGAIN;
802 
803 	adapter->virt_ver_maj = major;
804 	adapter->virt_ver_min = minor;
805 
806 	return err;
807 }
808 
809 /**
810  * idpf_send_get_caps_msg - Send virtchnl get capabilities message
811  * @adapter: Driver specific private structure
812  *
813  * Send virtchnl get capabilities message. Returns 0 on success, negative on
814  * failure.
815  */
816 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
817 {
818 	struct virtchnl2_get_capabilities caps = {};
819 	struct idpf_vc_xn_params xn_params = {};
820 	ssize_t reply_sz;
821 
822 	caps.csum_caps =
823 		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
824 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
825 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
826 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
827 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
828 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
829 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
830 			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
831 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
832 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
833 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
834 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
835 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
836 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
837 			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
838 			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
839 			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
840 			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
841 			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);
842 
843 	caps.seg_caps =
844 		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
845 			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
846 			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
847 			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
848 			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
849 			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
850 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
851 
852 	caps.rss_caps =
853 		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP		|
854 			    VIRTCHNL2_FLOW_IPV4_UDP		|
855 			    VIRTCHNL2_FLOW_IPV4_SCTP		|
856 			    VIRTCHNL2_FLOW_IPV4_OTHER		|
857 			    VIRTCHNL2_FLOW_IPV6_TCP		|
858 			    VIRTCHNL2_FLOW_IPV6_UDP		|
859 			    VIRTCHNL2_FLOW_IPV6_SCTP		|
860 			    VIRTCHNL2_FLOW_IPV6_OTHER);
861 
862 	caps.hsplit_caps =
863 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
864 			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
865 
866 	caps.rsc_caps =
867 		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
868 			    VIRTCHNL2_CAP_RSC_IPV6_TCP);
869 
870 	caps.other_caps =
871 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
872 			    VIRTCHNL2_CAP_RDMA                  |
873 			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	|
874 			    VIRTCHNL2_CAP_MACFILTER		|
875 			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
876 			    VIRTCHNL2_CAP_PROMISC		|
877 			    VIRTCHNL2_CAP_LOOPBACK		|
878 			    VIRTCHNL2_CAP_PTP);
879 
880 	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
881 	xn_params.send_buf.iov_base = &caps;
882 	xn_params.send_buf.iov_len = sizeof(caps);
883 	xn_params.recv_buf.iov_base = &adapter->caps;
884 	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
885 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
886 
887 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
888 	if (reply_sz < 0)
889 		return reply_sz;
890 	if (reply_sz < sizeof(adapter->caps))
891 		return -EIO;
892 
893 	return 0;
894 }
895 
896 /**
897  * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
898  * @adapter: Driver specific private struct
899  *
900  * Return: 0 on success or error code on failure.
901  */
902 static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
903 {
904 	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
905 	struct idpf_vc_xn_params xn_params = {
906 		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
907 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
908 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
909 	};
910 	int num_regions, size;
911 	struct idpf_hw *hw;
912 	ssize_t reply_sz;
913 	int err = 0;
914 
915 	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
916 	if (!rcvd_regions)
917 		return -ENOMEM;
918 
919 	xn_params.recv_buf.iov_base = rcvd_regions;
920 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
921 	if (reply_sz < 0)
922 		return reply_sz;
923 
924 	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
925 	size = struct_size(rcvd_regions, mem_reg, num_regions);
926 	if (reply_sz < size)
927 		return -EIO;
928 
929 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
930 		return -EINVAL;
931 
932 	hw = &adapter->hw;
933 	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
934 	if (!hw->lan_regs)
935 		return -ENOMEM;
936 
937 	for (int i = 0; i < num_regions; i++) {
938 		hw->lan_regs[i].addr_len =
939 			le64_to_cpu(rcvd_regions->mem_reg[i].size);
940 		hw->lan_regs[i].addr_start =
941 			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
942 	}
943 	hw->num_lan_regs = num_regions;
944 
945 	return err;
946 }
947 
948 /**
949  * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
950  * @adapter: Driver specific private structure
951  *
952  * Called when idpf_send_get_lan_memory_regions is not supported. This will
953  * calculate the offsets and sizes for the regions before, in between, and
954  * after the mailbox and rstat MMIO mappings.
955  *
956  * Return: 0 on success or error code on failure.
957  */
958 static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
959 {
960 	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
961 	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
962 	struct idpf_hw *hw = &adapter->hw;
963 
964 	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
965 	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
966 			       GFP_KERNEL);
967 	if (!hw->lan_regs)
968 		return -ENOMEM;
969 
970 	/* Region preceding mailbox */
971 	hw->lan_regs[0].addr_start = 0;
972 	hw->lan_regs[0].addr_len = mbx_reg->start;
973 	/* Region between mailbox and rstat */
974 	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
975 	hw->lan_regs[1].addr_len = rstat_reg->start -
976 					hw->lan_regs[1].addr_start;
977 	/* Region after rstat */
978 	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
979 	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
980 					hw->lan_regs[2].addr_start;
981 
982 	return 0;
983 }
984 
985 /**
986  * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
987  * @adapter: Driver specific private structure
988  *
989  * Return: 0 on success or error code on failure.
990  */
991 static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
992 {
993 	struct pci_dev *pdev = adapter->pdev;
994 	struct idpf_hw *hw = &adapter->hw;
995 	resource_size_t res_start;
996 
997 	res_start = pci_resource_start(pdev, 0);
998 
999 	for (int i = 0; i < hw->num_lan_regs; i++) {
1000 		resource_size_t start;
1001 		long len;
1002 
1003 		len = hw->lan_regs[i].addr_len;
1004 		if (!len)
1005 			continue;
1006 		start = hw->lan_regs[i].addr_start + res_start;
1007 
1008 		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
1009 		if (!hw->lan_regs[i].vaddr) {
1010 			pci_err(pdev, "failed to allocate BAR0 region\n");
1011 			return -ENOMEM;
1012 		}
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 /**
1019  * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
1020  * @adapter: adapter info struct
1021  * @rule: Flow steering rule to add/delete
1022  * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
1023  *          VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
1024  *
1025  * Send ADD/DELETE flow steering virtchnl message and receive the result.
1026  *
1027  * Return: 0 on success, negative on failure.
1028  */
1029 int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
1030 				struct virtchnl2_flow_rule_add_del *rule,
1031 				enum virtchnl2_op opcode)
1032 {
1033 	int rule_count = le32_to_cpu(rule->count);
1034 	struct idpf_vc_xn_params xn_params = {};
1035 	ssize_t reply_sz;
1036 
1037 	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
1038 	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
1039 		return -EINVAL;
1040 
1041 	xn_params.vc_op = opcode;
1042 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1043 	xn_params.async = false;
1044 	xn_params.send_buf.iov_base = rule;
1045 	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
1046 	xn_params.recv_buf.iov_base = rule;
1047 	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
1048 
1049 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1050 	return reply_sz < 0 ? reply_sz : 0;
1051 }
1052 
1053 /**
1054  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1055  * @adapter: Driver specific private structure
1056  * @max_q: vport max queue structure
1057  */
1058 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
1059 			    struct idpf_vport_max_q *max_q)
1060 {
1061 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1062 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1063 	u16 default_vports = idpf_get_default_vports(adapter);
1064 	int max_rx_q, max_tx_q;
1065 
1066 	mutex_lock(&adapter->queue_lock);
1067 
1068 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
1069 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
1070 	if (adapter->num_alloc_vports < default_vports) {
1071 		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
1072 		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
1073 	} else {
1074 		max_q->max_rxq = IDPF_MIN_Q;
1075 		max_q->max_txq = IDPF_MIN_Q;
1076 	}
1077 	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
1078 	max_q->max_complq = max_q->max_txq;
1079 
1080 	if (avail_queues->avail_rxq < max_q->max_rxq ||
1081 	    avail_queues->avail_txq < max_q->max_txq ||
1082 	    avail_queues->avail_bufq < max_q->max_bufq ||
1083 	    avail_queues->avail_complq < max_q->max_complq) {
1084 		mutex_unlock(&adapter->queue_lock);
1085 
1086 		return -EINVAL;
1087 	}
1088 
1089 	avail_queues->avail_rxq -= max_q->max_rxq;
1090 	avail_queues->avail_txq -= max_q->max_txq;
1091 	avail_queues->avail_bufq -= max_q->max_bufq;
1092 	avail_queues->avail_complq -= max_q->max_complq;
1093 
1094 	mutex_unlock(&adapter->queue_lock);
1095 
1096 	return 0;
1097 }
1098 
1099 /**
1100  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1101  * @adapter: Driver specific private structure
1102  * @max_q: vport max queue structure
1103  */
1104 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
1105 			       struct idpf_vport_max_q *max_q)
1106 {
1107 	struct idpf_avail_queue_info *avail_queues;
1108 
1109 	mutex_lock(&adapter->queue_lock);
1110 	avail_queues = &adapter->avail_queues;
1111 
1112 	avail_queues->avail_rxq += max_q->max_rxq;
1113 	avail_queues->avail_txq += max_q->max_txq;
1114 	avail_queues->avail_bufq += max_q->max_bufq;
1115 	avail_queues->avail_complq += max_q->max_complq;
1116 
1117 	mutex_unlock(&adapter->queue_lock);
1118 }
1119 
1120 /**
1121  * idpf_init_avail_queues - Initialize available queues on the device
1122  * @adapter: Driver specific private structure
1123  */
1124 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1125 {
1126 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1127 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1128 
1129 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1130 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1131 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1132 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1133 }
1134 
1135 /**
1136  * idpf_get_reg_intr_vecs - Get vector queue register offset
1137  * @vport: virtual port structure
1138  * @reg_vals: Register offsets to store in
1139  *
1140  * Returns number of registers that got populated
1141  */
1142 int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
1143 			   struct idpf_vec_regs *reg_vals)
1144 {
1145 	struct virtchnl2_vector_chunks *chunks;
1146 	struct idpf_vec_regs reg_val;
1147 	u16 num_vchunks, num_vec;
1148 	int num_regs = 0, i, j;
1149 
1150 	chunks = &vport->adapter->req_vec_chunks->vchunks;
1151 	num_vchunks = le16_to_cpu(chunks->num_vchunks);
1152 
1153 	for (j = 0; j < num_vchunks; j++) {
1154 		struct virtchnl2_vector_chunk *chunk;
1155 		u32 dynctl_reg_spacing;
1156 		u32 itrn_reg_spacing;
1157 
1158 		chunk = &chunks->vchunks[j];
1159 		num_vec = le16_to_cpu(chunk->num_vectors);
1160 		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
1161 		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
1162 		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
1163 
1164 		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
1165 		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
1166 
1167 		for (i = 0; i < num_vec; i++) {
1168 			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
1169 			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
1170 			reg_vals[num_regs].itrn_index_spacing =
1171 						reg_val.itrn_index_spacing;
1172 
1173 			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
1174 			reg_val.itrn_reg += itrn_reg_spacing;
1175 			num_regs++;
1176 		}
1177 	}
1178 
1179 	return num_regs;
1180 }
1181 
1182 /**
1183  * idpf_vport_get_q_reg - Get the queue registers for the vport
1184  * @reg_vals: register values needing to be set
1185  * @num_regs: amount we expect to fill
1186  * @q_type: queue model
1187  * @chunks: queue regs received over mailbox
1188  *
1189  * This function parses the queue register offsets from the queue register
1190  * chunk information for a specific queue type and stores them into the array
1191  * passed as an argument. It returns the actual number of queue registers that
1192  * are filled.
1193  */
1194 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1195 				struct virtchnl2_queue_reg_chunks *chunks)
1196 {
1197 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
1198 	int reg_filled = 0, i;
1199 	u32 reg_val;
1200 
1201 	while (num_chunks--) {
1202 		struct virtchnl2_queue_reg_chunk *chunk;
1203 		u16 num_q;
1204 
1205 		chunk = &chunks->chunks[num_chunks];
1206 		if (le32_to_cpu(chunk->type) != q_type)
1207 			continue;
1208 
1209 		num_q = le32_to_cpu(chunk->num_queues);
1210 		reg_val = le64_to_cpu(chunk->qtail_reg_start);
1211 		for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
1212 			reg_vals[reg_filled++] = reg_val;
1213 			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
1214 		}
1215 	}
1216 
1217 	return reg_filled;
1218 }
1219 
1220 /**
1221  * __idpf_queue_reg_init - initialize queue registers
1222  * @vport: virtual port structure
1223  * @reg_vals: registers we are initializing
1224  * @num_regs: how many registers there are in total
1225  * @q_type: queue model
1226  *
1227  * Return number of queues that are initialized
1228  */
1229 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
1230 				 int num_regs, u32 q_type)
1231 {
1232 	struct idpf_adapter *adapter = vport->adapter;
1233 	int i, j, k = 0;
1234 
1235 	switch (q_type) {
1236 	case VIRTCHNL2_QUEUE_TYPE_TX:
1237 		for (i = 0; i < vport->num_txq_grp; i++) {
1238 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1239 
1240 			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
1241 				tx_qgrp->txqs[j]->tail =
1242 					idpf_get_reg_addr(adapter, reg_vals[k]);
1243 		}
1244 		break;
1245 	case VIRTCHNL2_QUEUE_TYPE_RX:
1246 		for (i = 0; i < vport->num_rxq_grp; i++) {
1247 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1248 			u16 num_rxq = rx_qgrp->singleq.num_rxq;
1249 
1250 			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
1251 				struct idpf_rx_queue *q;
1252 
1253 				q = rx_qgrp->singleq.rxqs[j];
1254 				q->tail = idpf_get_reg_addr(adapter,
1255 							    reg_vals[k]);
1256 			}
1257 		}
1258 		break;
1259 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1260 		for (i = 0; i < vport->num_rxq_grp; i++) {
1261 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1262 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
1263 
1264 			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
1265 				struct idpf_buf_queue *q;
1266 
1267 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1268 				q->tail = idpf_get_reg_addr(adapter,
1269 							    reg_vals[k]);
1270 			}
1271 		}
1272 		break;
1273 	default:
1274 		break;
1275 	}
1276 
1277 	return k;
1278 }
1279 
1280 /**
1281  * idpf_queue_reg_init - initialize queue registers
1282  * @vport: virtual port structure
1283  *
1284  * Return 0 on success, negative on failure
1285  */
1286 int idpf_queue_reg_init(struct idpf_vport *vport)
1287 {
1288 	struct virtchnl2_create_vport *vport_params;
1289 	struct virtchnl2_queue_reg_chunks *chunks;
1290 	struct idpf_vport_config *vport_config;
1291 	u16 vport_idx = vport->idx;
1292 	int num_regs, ret = 0;
1293 	u32 *reg_vals;
1294 
1295 	/* We should never deal with more than 256 queues of the same type */
1296 	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
1297 	if (!reg_vals)
1298 		return -ENOMEM;
1299 
1300 	vport_config = vport->adapter->vport_config[vport_idx];
1301 	if (vport_config->req_qs_chunks) {
1302 		struct virtchnl2_add_queues *vc_aq =
1303 		  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
1304 		chunks = &vc_aq->chunks;
1305 	} else {
1306 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
1307 		chunks = &vport_params->chunks;
1308 	}
1309 
1310 	/* Initialize Tx queue tail register address */
1311 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1312 					VIRTCHNL2_QUEUE_TYPE_TX,
1313 					chunks);
1314 	if (num_regs < vport->num_txq) {
1315 		ret = -EINVAL;
1316 		goto free_reg_vals;
1317 	}
1318 
1319 	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1320 					 VIRTCHNL2_QUEUE_TYPE_TX);
1321 	if (num_regs < vport->num_txq) {
1322 		ret = -EINVAL;
1323 		goto free_reg_vals;
1324 	}
1325 
1326 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1327 	 * model
1328 	 */
1329 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1330 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1331 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1332 						chunks);
1333 		if (num_regs < vport->num_bufq) {
1334 			ret = -EINVAL;
1335 			goto free_reg_vals;
1336 		}
1337 
1338 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1339 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1340 		if (num_regs < vport->num_bufq) {
1341 			ret = -EINVAL;
1342 			goto free_reg_vals;
1343 		}
1344 	} else {
1345 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1346 						VIRTCHNL2_QUEUE_TYPE_RX,
1347 						chunks);
1348 		if (num_regs < vport->num_rxq) {
1349 			ret = -EINVAL;
1350 			goto free_reg_vals;
1351 		}
1352 
1353 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1354 						 VIRTCHNL2_QUEUE_TYPE_RX);
1355 		if (num_regs < vport->num_rxq) {
1356 			ret = -EINVAL;
1357 			goto free_reg_vals;
1358 		}
1359 	}
1360 
1361 free_reg_vals:
1362 	kfree(reg_vals);
1363 
1364 	return ret;
1365 }
1366 
1367 /**
1368  * idpf_send_create_vport_msg - Send virtchnl create vport message
1369  * @adapter: Driver specific private structure
1370  * @max_q: vport max queue info
1371  *
1372  * Send virtchnl create vport message
1373  *
1374  * Returns 0 on success, negative on failure
1375  */
1376 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
1377 			       struct idpf_vport_max_q *max_q)
1378 {
1379 	struct virtchnl2_create_vport *vport_msg;
1380 	struct idpf_vc_xn_params xn_params = {};
1381 	u16 idx = adapter->next_vport;
1382 	int err, buf_size;
1383 	ssize_t reply_sz;
1384 
1385 	buf_size = sizeof(struct virtchnl2_create_vport);
1386 	if (!adapter->vport_params_reqd[idx]) {
1387 		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
1388 							  GFP_KERNEL);
1389 		if (!adapter->vport_params_reqd[idx])
1390 			return -ENOMEM;
1391 	}
1392 
1393 	vport_msg = adapter->vport_params_reqd[idx];
1394 	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
1395 	vport_msg->vport_index = cpu_to_le16(idx);
1396 
1397 	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1398 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1399 	else
1400 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1401 
1402 	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1403 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1404 	else
1405 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1406 
1407 	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
1408 	if (err) {
1409 		dev_err(&adapter->pdev->dev, "Enough queues are not available");
1410 
1411 		return err;
1412 	}
1413 
1414 	if (!adapter->vport_params_recvd[idx]) {
1415 		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
1416 							   GFP_KERNEL);
1417 		if (!adapter->vport_params_recvd[idx]) {
1418 			err = -ENOMEM;
1419 			goto free_vport_params;
1420 		}
1421 	}
1422 
1423 	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
1424 	xn_params.send_buf.iov_base = vport_msg;
1425 	xn_params.send_buf.iov_len = buf_size;
1426 	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
1427 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
1428 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1429 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1430 	if (reply_sz < 0) {
1431 		err = reply_sz;
1432 		goto free_vport_params;
1433 	}
1434 
1435 	return 0;
1436 
1437 free_vport_params:
1438 	kfree(adapter->vport_params_recvd[idx]);
1439 	adapter->vport_params_recvd[idx] = NULL;
1440 	kfree(adapter->vport_params_reqd[idx]);
1441 	adapter->vport_params_reqd[idx] = NULL;
1442 
1443 	return err;
1444 }
1445 
1446 /**
1447  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1448  * @vport: virtual port structure
1449  *
1450  * Return 0 on success, error on failure
1451  */
1452 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1453 {
1454 	struct idpf_adapter *adapter = vport->adapter;
1455 	struct virtchnl2_create_vport *vport_msg;
1456 	u64 rx_desc_ids, tx_desc_ids;
1457 
1458 	vport_msg = adapter->vport_params_recvd[vport->idx];
1459 
1460 	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
1461 	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
1462 	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
1463 		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
1464 		return -EOPNOTSUPP;
1465 	}
1466 
1467 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1468 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1469 
1470 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1471 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1472 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1473 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1474 		}
1475 	} else {
1476 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1477 			vport->base_rxd = true;
1478 	}
1479 
1480 	if (!idpf_is_queue_model_split(vport->txq_model))
1481 		return 0;
1482 
1483 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1484 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1485 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 /**
1492  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1493  * @vport: virtual port data structure
1494  *
1495  * Send virtchnl destroy vport message.  Returns 0 on success, negative on
1496  * failure.
1497  */
1498 int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
1499 {
1500 	struct idpf_vc_xn_params xn_params = {};
1501 	struct virtchnl2_vport v_id;
1502 	ssize_t reply_sz;
1503 
1504 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1505 
1506 	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
1507 	xn_params.send_buf.iov_base = &v_id;
1508 	xn_params.send_buf.iov_len = sizeof(v_id);
1509 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1510 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1511 
1512 	return reply_sz < 0 ? reply_sz : 0;
1513 }
1514 
1515 /**
1516  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1517  * @vport: virtual port data structure
1518  *
1519  * Send enable vport virtchnl message.  Returns 0 on success, negative on
1520  * failure.
1521  */
1522 int idpf_send_enable_vport_msg(struct idpf_vport *vport)
1523 {
1524 	struct idpf_vc_xn_params xn_params = {};
1525 	struct virtchnl2_vport v_id;
1526 	ssize_t reply_sz;
1527 
1528 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1529 
1530 	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
1531 	xn_params.send_buf.iov_base = &v_id;
1532 	xn_params.send_buf.iov_len = sizeof(v_id);
1533 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1534 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1535 
1536 	return reply_sz < 0 ? reply_sz : 0;
1537 }
1538 
1539 /**
1540  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1541  * @vport: virtual port data structure
1542  *
1543  * Send disable vport virtchnl message.  Returns 0 on success, negative on
1544  * failure.
1545  */
1546 int idpf_send_disable_vport_msg(struct idpf_vport *vport)
1547 {
1548 	struct idpf_vc_xn_params xn_params = {};
1549 	struct virtchnl2_vport v_id;
1550 	ssize_t reply_sz;
1551 
1552 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1553 
1554 	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
1555 	xn_params.send_buf.iov_base = &v_id;
1556 	xn_params.send_buf.iov_len = sizeof(v_id);
1557 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1558 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1559 
1560 	return reply_sz < 0 ? reply_sz : 0;
1561 }
1562 
1563 /**
1564  * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
1565  * @vport: virtual port data structure
1566  *
1567  * Send config tx queues virtchnl message. Returns 0 on success, negative on
1568  * failure.
1569  */
1570 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
1571 {
1572 	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
1573 	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1574 	struct idpf_vc_xn_params xn_params = {};
1575 	u32 config_sz, chunk_sz, buf_sz;
1576 	int totqs, num_msgs, num_chunks;
1577 	ssize_t reply_sz;
1578 	int i, k = 0;
1579 
1580 	totqs = vport->num_txq + vport->num_complq;
1581 	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
1582 	if (!qi)
1583 		return -ENOMEM;
1584 
1585 	/* Populate the queue info buffer with all queue context info */
1586 	for (i = 0; i < vport->num_txq_grp; i++) {
1587 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1588 		int j, sched_mode;
1589 
1590 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1591 			qi[k].queue_id =
1592 				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1593 			qi[k].model =
1594 				cpu_to_le16(vport->txq_model);
1595 			qi[k].type =
1596 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1597 			qi[k].ring_len =
1598 				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
1599 			qi[k].dma_ring_addr =
1600 				cpu_to_le64(tx_qgrp->txqs[j]->dma);
1601 			if (idpf_is_queue_model_split(vport->txq_model)) {
1602 				struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1603 
1604 				qi[k].tx_compl_queue_id =
1605 					cpu_to_le16(tx_qgrp->complq->q_id);
1606 				qi[k].relative_queue_id = cpu_to_le16(j);
1607 
1608 				if (idpf_queue_has(FLOW_SCH_EN, q))
1609 					qi[k].sched_mode =
1610 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
1611 				else
1612 					qi[k].sched_mode =
1613 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1614 			} else {
1615 				qi[k].sched_mode =
1616 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1617 			}
1618 		}
1619 
1620 		if (!idpf_is_queue_model_split(vport->txq_model))
1621 			continue;
1622 
1623 		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1624 		qi[k].model = cpu_to_le16(vport->txq_model);
1625 		qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1626 		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
1627 		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
1628 
1629 		if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
1630 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1631 		else
1632 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1633 		qi[k].sched_mode = cpu_to_le16(sched_mode);
1634 
1635 		k++;
1636 	}
1637 
1638 	/* Make sure accounting agrees */
1639 	if (k != totqs)
1640 		return -EINVAL;
1641 
1642 	/* Chunk up the queue contexts into multiple messages to avoid
1643 	 * sending a control queue message buffer that is too large
1644 	 */
1645 	config_sz = sizeof(struct virtchnl2_config_tx_queues);
1646 	chunk_sz = sizeof(struct virtchnl2_txq_info);
1647 
1648 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1649 			   totqs);
1650 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1651 
1652 	buf_sz = struct_size(ctq, qinfo, num_chunks);
1653 	ctq = kzalloc(buf_sz, GFP_KERNEL);
1654 	if (!ctq)
1655 		return -ENOMEM;
1656 
1657 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1658 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1659 
1660 	for (i = 0, k = 0; i < num_msgs; i++) {
1661 		memset(ctq, 0, buf_sz);
1662 		ctq->vport_id = cpu_to_le32(vport->vport_id);
1663 		ctq->num_qinfo = cpu_to_le16(num_chunks);
1664 		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
1665 
1666 		xn_params.send_buf.iov_base = ctq;
1667 		xn_params.send_buf.iov_len = buf_sz;
1668 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1669 		if (reply_sz < 0)
1670 			return reply_sz;
1671 
1672 		k += num_chunks;
1673 		totqs -= num_chunks;
1674 		num_chunks = min(num_chunks, totqs);
1675 		/* Recalculate buffer size */
1676 		buf_sz = struct_size(ctq, qinfo, num_chunks);
1677 	}
1678 
1679 	return 0;
1680 }
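
/* Illustrative chunking walk-through (not driver code; the real per-message
 * capacity depends on IDPF_NUM_CHUNKS_PER_MSG() and IDPF_CTLQ_MAX_BUF_LEN,
 * so the numbers below are hypothetical):
 *
 *	totqs      = 96;                          // queue contexts to send
 *	num_chunks = min(63, totqs);              // = 63, assumed capacity
 *	num_msgs   = DIV_ROUND_UP(totqs, 63);     // = 2
 *
 * Message 0 would then carry qinfo[0..62] and message 1 qinfo[63..95], with
 * num_chunks reduced to 33 and buf_sz recalculated before the final send.
 * idpf_send_config_rx_queues_msg() below chunks its rxq_info array the same
 * way.
 */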
1681 
1682 /**
1683  * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1684  * @vport: virtual port data structure
1685  *
1686  * Send config rx queues virtchnl message.  Returns 0 on success, negative on
1687  * failure.
1688  */
1689 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
1690 {
1691 	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
1692 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
1693 	struct idpf_vc_xn_params xn_params = {};
1694 	u32 config_sz, chunk_sz, buf_sz;
1695 	int totqs, num_msgs, num_chunks;
1696 	ssize_t reply_sz;
1697 	int i, k = 0;
1698 
1699 	totqs = vport->num_rxq + vport->num_bufq;
1700 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
1701 	if (!qi)
1702 		return -ENOMEM;
1703 
1704 	/* Populate the queue info buffer with all queue context info */
1705 	for (i = 0; i < vport->num_rxq_grp; i++) {
1706 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1707 		u16 num_rxq;
1708 		int j;
1709 
1710 		if (!idpf_is_queue_model_split(vport->rxq_model))
1711 			goto setup_rxqs;
1712 
1713 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1714 			struct idpf_buf_queue *bufq =
1715 				&rx_qgrp->splitq.bufq_sets[j].bufq;
1716 
1717 			qi[k].queue_id = cpu_to_le32(bufq->q_id);
1718 			qi[k].model = cpu_to_le16(vport->rxq_model);
1719 			qi[k].type =
1720 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1721 			qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1722 			qi[k].ring_len = cpu_to_le16(bufq->desc_count);
1723 			qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
1724 			qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
1725 			qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1726 			qi[k].rx_buffer_low_watermark =
1727 				cpu_to_le16(bufq->rx_buffer_low_watermark);
1728 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1729 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1730 		}
1731 
1732 setup_rxqs:
1733 		if (idpf_is_queue_model_split(vport->rxq_model))
1734 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1735 		else
1736 			num_rxq = rx_qgrp->singleq.num_rxq;
1737 
1738 		for (j = 0; j < num_rxq; j++, k++) {
1739 			const struct idpf_bufq_set *sets;
1740 			struct idpf_rx_queue *rxq;
1741 
1742 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1743 				rxq = rx_qgrp->singleq.rxqs[j];
1744 				goto common_qi_fields;
1745 			}
1746 
1747 			rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1748 			sets = rxq->bufq_sets;
1749 
1750 			/* In splitq mode, RXQ buffer size should be
1751 			 * set to that of the first buffer queue
1752 			 * associated with this RXQ.
1753 			 */
1754 			rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
1755 
1756 			qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
1757 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1758 				qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
1759 				qi[k].rx_bufq2_id =
1760 					cpu_to_le16(sets[1].bufq.q_id);
1761 			}
1762 			qi[k].rx_buffer_low_watermark =
1763 				cpu_to_le16(rxq->rx_buffer_low_watermark);
1764 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1765 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1766 
1767 			rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
1768 
1769 			if (idpf_queue_has(HSPLIT_EN, rxq)) {
1770 				qi[k].qflags |=
1771 					cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1772 				qi[k].hdr_buffer_size =
1773 					cpu_to_le16(rxq->rx_hbuf_size);
1774 			}
1775 
1776 common_qi_fields:
1777 			qi[k].queue_id = cpu_to_le32(rxq->q_id);
1778 			qi[k].model = cpu_to_le16(vport->rxq_model);
1779 			qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1780 			qi[k].ring_len = cpu_to_le16(rxq->desc_count);
1781 			qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
1782 			qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
1783 			qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
1784 			qi[k].qflags |=
1785 				cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1786 			qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
1787 		}
1788 	}
1789 
1790 	/* Make sure accounting agrees */
1791 	if (k != totqs)
1792 		return -EINVAL;
1793 
1794 	/* Chunk up the queue contexts into multiple messages to avoid
1795 	 * sending a control queue message buffer that is too large
1796 	 */
1797 	config_sz = sizeof(struct virtchnl2_config_rx_queues);
1798 	chunk_sz = sizeof(struct virtchnl2_rxq_info);
1799 
1800 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1801 			   totqs);
1802 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1803 
1804 	buf_sz = struct_size(crq, qinfo, num_chunks);
1805 	crq = kzalloc(buf_sz, GFP_KERNEL);
1806 	if (!crq)
1807 		return -ENOMEM;
1808 
1809 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1810 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1811 
1812 	for (i = 0, k = 0; i < num_msgs; i++) {
1813 		memset(crq, 0, buf_sz);
1814 		crq->vport_id = cpu_to_le32(vport->vport_id);
1815 		crq->num_qinfo = cpu_to_le16(num_chunks);
1816 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
1817 
1818 		xn_params.send_buf.iov_base = crq;
1819 		xn_params.send_buf.iov_len = buf_sz;
1820 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1821 		if (reply_sz < 0)
1822 			return reply_sz;
1823 
1824 		k += num_chunks;
1825 		totqs -= num_chunks;
1826 		num_chunks = min(num_chunks, totqs);
1827 		/* Recalculate buffer size */
1828 		buf_sz = struct_size(crq, qinfo, num_chunks);
1829 	}
1830 
1831 	return 0;
1832 }
1833 
1834 /**
1835  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1836  * queues message
1837  * @vport: virtual port data structure
1838  * @ena: if true enable, false disable
1839  *
1840  * Send enable or disable queues virtchnl message. Returns 0 on success,
1841  * negative on failure.
1842  */
1843 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
1844 {
1845 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
1846 	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
1847 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
1848 	struct idpf_vc_xn_params xn_params = {};
1849 	struct virtchnl2_queue_chunks *qcs;
1850 	u32 config_sz, chunk_sz, buf_sz;
1851 	ssize_t reply_sz;
1852 	int i, j, k = 0;
1853 
1854 	num_txq = vport->num_txq + vport->num_complq;
1855 	num_rxq = vport->num_rxq + vport->num_bufq;
1856 	num_q = num_txq + num_rxq;
1857 	buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
1858 	qc = kzalloc(buf_sz, GFP_KERNEL);
1859 	if (!qc)
1860 		return -ENOMEM;
1861 
1862 	for (i = 0; i < vport->num_txq_grp; i++) {
1863 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1864 
1865 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1866 			qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1867 			qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1868 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1869 		}
1870 	}
1871 	if (vport->num_txq != k)
1872 		return -EINVAL;
1873 
1874 	if (!idpf_is_queue_model_split(vport->txq_model))
1875 		goto setup_rx;
1876 
1877 	for (i = 0; i < vport->num_txq_grp; i++, k++) {
1878 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1879 
1880 		qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1881 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1882 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1883 	}
1884 	if (vport->num_complq != (k - vport->num_txq))
1885 		return -EINVAL;
1886 
1887 setup_rx:
1888 	for (i = 0; i < vport->num_rxq_grp; i++) {
1889 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1890 
1891 		if (idpf_is_queue_model_split(vport->rxq_model))
1892 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1893 		else
1894 			num_rxq = rx_qgrp->singleq.num_rxq;
1895 
1896 		for (j = 0; j < num_rxq; j++, k++) {
1897 			if (idpf_is_queue_model_split(vport->rxq_model)) {
1898 				qc[k].start_queue_id =
1899 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
1900 				qc[k].type =
1901 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1902 			} else {
1903 				qc[k].start_queue_id =
1904 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
1905 				qc[k].type =
1906 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1907 			}
1908 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1909 		}
1910 	}
1911 	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
1912 		return -EINVAL;
1913 
1914 	if (!idpf_is_queue_model_split(vport->rxq_model))
1915 		goto send_msg;
1916 
1917 	for (i = 0; i < vport->num_rxq_grp; i++) {
1918 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1919 
1920 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1921 			const struct idpf_buf_queue *q;
1922 
1923 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1924 			qc[k].type =
1925 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1926 			qc[k].start_queue_id = cpu_to_le32(q->q_id);
1927 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1928 		}
1929 	}
1930 	if (vport->num_bufq != k - (vport->num_txq +
1931 				    vport->num_complq +
1932 				    vport->num_rxq))
1933 		return -EINVAL;
1934 
1935 send_msg:
1936 	/* Chunk up the queue info into multiple messages */
1937 	config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
1938 	chunk_sz = sizeof(struct virtchnl2_queue_chunk);
1939 
1940 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1941 			   num_q);
1942 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1943 
1944 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1945 	eq = kzalloc(buf_sz, GFP_KERNEL);
1946 	if (!eq)
1947 		return -ENOMEM;
1948 
1949 	if (ena) {
1950 		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
1951 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1952 	} else {
1953 		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
1954 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1955 	}
1956 
1957 	for (i = 0, k = 0; i < num_msgs; i++) {
1958 		memset(eq, 0, buf_sz);
1959 		eq->vport_id = cpu_to_le32(vport->vport_id);
1960 		eq->chunks.num_chunks = cpu_to_le16(num_chunks);
1961 		qcs = &eq->chunks;
1962 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
1963 
1964 		xn_params.send_buf.iov_base = eq;
1965 		xn_params.send_buf.iov_len = buf_sz;
1966 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1967 		if (reply_sz < 0)
1968 			return reply_sz;
1969 
1970 		k += num_chunks;
1971 		num_q -= num_chunks;
1972 		num_chunks = min(num_chunks, num_q);
1973 		/* Recalculate buffer size */
1974 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1975 	}
1976 
1977 	return 0;
1978 }
1979 
1980 /**
1981  * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1982  * vector message
1983  * @vport: virtual port data structure
1984  * @map: true for map and false for unmap
1985  *
1986  * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
1987  * negative on failure.
1988  */
1989 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
1990 {
1991 	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
1992 	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
1993 	struct idpf_vc_xn_params xn_params = {};
1994 	u32 config_sz, chunk_sz, buf_sz;
1995 	u32 num_msgs, num_chunks, num_q;
1996 	ssize_t reply_sz;
1997 	int i, j, k = 0;
1998 
1999 	num_q = vport->num_txq + vport->num_rxq;
2000 
2001 	buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
2002 	vqv = kzalloc(buf_sz, GFP_KERNEL);
2003 	if (!vqv)
2004 		return -ENOMEM;
2005 
2006 	for (i = 0; i < vport->num_txq_grp; i++) {
2007 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
2008 
2009 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
2010 			vqv[k].queue_type =
2011 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
2012 			vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
2013 
2014 			if (idpf_is_queue_model_split(vport->txq_model)) {
2015 				vqv[k].vector_id =
2016 				cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
2017 				vqv[k].itr_idx =
2018 				cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
2019 			} else {
2020 				vqv[k].vector_id =
2021 				cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
2022 				vqv[k].itr_idx =
2023 				cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
2024 			}
2025 		}
2026 	}
2027 
2028 	if (vport->num_txq != k)
2029 		return -EINVAL;
2030 
2031 	for (i = 0; i < vport->num_rxq_grp; i++) {
2032 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
2033 		u16 num_rxq;
2034 
2035 		if (idpf_is_queue_model_split(vport->rxq_model))
2036 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2037 		else
2038 			num_rxq = rx_qgrp->singleq.num_rxq;
2039 
2040 		for (j = 0; j < num_rxq; j++, k++) {
2041 			struct idpf_rx_queue *rxq;
2042 
2043 			if (idpf_is_queue_model_split(vport->rxq_model))
2044 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
2045 			else
2046 				rxq = rx_qgrp->singleq.rxqs[j];
2047 
2048 			vqv[k].queue_type =
2049 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
2050 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
2051 			vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
2052 			vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
2053 		}
2054 	}
2055 
2056 	if (idpf_is_queue_model_split(vport->txq_model)) {
2057 		if (vport->num_rxq != k - vport->num_complq)
2058 			return -EINVAL;
2059 	} else {
2060 		if (vport->num_rxq != k - vport->num_txq)
2061 			return -EINVAL;
2062 	}
2063 
2064 	/* Chunk up the vector info into multiple messages */
2065 	config_sz = sizeof(struct virtchnl2_queue_vector_maps);
2066 	chunk_sz = sizeof(struct virtchnl2_queue_vector);
2067 
2068 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
2069 			   num_q);
2070 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
2071 
2072 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2073 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
2074 	if (!vqvm)
2075 		return -ENOMEM;
2076 
2077 	if (map) {
2078 		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
2079 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2080 	} else {
2081 		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
2082 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2083 	}
2084 
2085 	for (i = 0, k = 0; i < num_msgs; i++) {
2086 		memset(vqvm, 0, buf_sz);
2087 		xn_params.send_buf.iov_base = vqvm;
2088 		xn_params.send_buf.iov_len = buf_sz;
2089 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
2090 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2091 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
2092 
2093 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2094 		if (reply_sz < 0)
2095 			return reply_sz;
2096 
2097 		k += num_chunks;
2098 		num_q -= num_chunks;
2099 		num_chunks = min(num_chunks, num_q);
2100 		/* Recalculate buffer size */
2101 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2102 	}
2103 
2104 	return 0;
2105 }
2106 
2107 /**
2108  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2109  * @vport: Virtual port private data structure
2110  *
2111  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2112  * failure.
2113  */
2114 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2115 {
2116 	return idpf_send_ena_dis_queues_msg(vport, true);
2117 }
2118 
2119 /**
2120  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2121  * @vport: Virtual port private data structure
2122  *
2123  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2124  * on failure.
2125  */
2126 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2127 {
2128 	int err, i;
2129 
2130 	err = idpf_send_ena_dis_queues_msg(vport, false);
2131 	if (err)
2132 		return err;
2133 
2134 	/* switch to poll mode as interrupts will be disabled after disable
2135 	 * queues virtchnl message is sent
2136 	 */
2137 	for (i = 0; i < vport->num_txq; i++)
2138 		idpf_queue_set(POLL_MODE, vport->txqs[i]);
2139 
2140 	/* schedule the napi to receive all the marker packets */
2141 	local_bh_disable();
2142 	for (i = 0; i < vport->num_q_vectors; i++)
2143 		napi_schedule(&vport->q_vectors[i].napi);
2144 	local_bh_enable();
2145 
2146 	return idpf_wait_for_marker_event(vport);
2147 }
2148 
2149 /**
2150  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2151  * structure
2152  * @dchunks: Destination chunks to store data to
2153  * @schunks: Source chunks to copy data from
2154  * @num_chunks: number of chunks to copy
2155  */
2156 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2157 					     struct virtchnl2_queue_reg_chunk *schunks,
2158 					     u16 num_chunks)
2159 {
2160 	u16 i;
2161 
2162 	for (i = 0; i < num_chunks; i++) {
2163 		dchunks[i].type = schunks[i].type;
2164 		dchunks[i].start_queue_id = schunks[i].start_queue_id;
2165 		dchunks[i].num_queues = schunks[i].num_queues;
2166 	}
2167 }
2168 
2169 /**
2170  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2171  * @vport: Virtual port private data structure
2172  *
2173  * Will send delete queues virtchnl message. Return 0 on success, negative on
2174  * failure.
2175  */
2176 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
2177 {
2178 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
2179 	struct virtchnl2_create_vport *vport_params;
2180 	struct virtchnl2_queue_reg_chunks *chunks;
2181 	struct idpf_vc_xn_params xn_params = {};
2182 	struct idpf_vport_config *vport_config;
2183 	u16 vport_idx = vport->idx;
2184 	ssize_t reply_sz;
2185 	u16 num_chunks;
2186 	int buf_size;
2187 
2188 	vport_config = vport->adapter->vport_config[vport_idx];
2189 	if (vport_config->req_qs_chunks) {
2190 		chunks = &vport_config->req_qs_chunks->chunks;
2191 	} else {
2192 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
2193 		chunks = &vport_params->chunks;
2194 	}
2195 
2196 	num_chunks = le16_to_cpu(chunks->num_chunks);
2197 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2198 
2199 	eq = kzalloc(buf_size, GFP_KERNEL);
2200 	if (!eq)
2201 		return -ENOMEM;
2202 
2203 	eq->vport_id = cpu_to_le32(vport->vport_id);
2204 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2205 
2206 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2207 					 num_chunks);
2208 
2209 	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
2210 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2211 	xn_params.send_buf.iov_base = eq;
2212 	xn_params.send_buf.iov_len = buf_size;
2213 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2214 
2215 	return reply_sz < 0 ? reply_sz : 0;
2216 }
2217 
2218 /**
2219  * idpf_send_config_queues_msg - Send config queues virtchnl message
2220  * @vport: Virtual port private data structure
2221  *
2222  * Will send config queues virtchnl message. Returns 0 on success, negative on
2223  * failure.
2224  */
2225 int idpf_send_config_queues_msg(struct idpf_vport *vport)
2226 {
2227 	int err;
2228 
2229 	err = idpf_send_config_tx_queues_msg(vport);
2230 	if (err)
2231 		return err;
2232 
2233 	return idpf_send_config_rx_queues_msg(vport);
2234 }
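
/* Minimal bring-up sketch (assumed ordering, error handling elided; not
 * driver code): a caller would typically configure the queues, map them to
 * interrupt vectors, and only then enable them, using the helpers in this
 * file:
 *
 *	err = idpf_send_config_queues_msg(vport);
 *	if (!err)
 *		err = idpf_send_map_unmap_queue_vector_msg(vport, true);
 *	if (!err)
 *		err = idpf_send_enable_queues_msg(vport);
 */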
2235 
2236 /**
2237  * idpf_send_add_queues_msg - Send virtchnl add queues message
2238  * @vport: Virtual port private data structure
2239  * @num_tx_q: number of transmit queues
2240  * @num_complq: number of transmit completion queues
2241  * @num_rx_q: number of receive queues
2242  * @num_rx_bufq: number of receive buffer queues
2243  *
2244  * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2245  * we should not change any fields within vport itself in this function.
2246  */
2247 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
2248 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
2249 {
2250 	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
2251 	struct idpf_vc_xn_params xn_params = {};
2252 	struct idpf_vport_config *vport_config;
2253 	struct virtchnl2_add_queues aq = {};
2254 	u16 vport_idx = vport->idx;
2255 	ssize_t reply_sz;
2256 	int size;
2257 
2258 	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2259 	if (!vc_msg)
2260 		return -ENOMEM;
2261 
2262 	vport_config = vport->adapter->vport_config[vport_idx];
2263 	kfree(vport_config->req_qs_chunks);
2264 	vport_config->req_qs_chunks = NULL;
2265 
2266 	aq.vport_id = cpu_to_le32(vport->vport_id);
2267 	aq.num_tx_q = cpu_to_le16(num_tx_q);
2268 	aq.num_tx_complq = cpu_to_le16(num_complq);
2269 	aq.num_rx_q = cpu_to_le16(num_rx_q);
2270 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
2271 
2272 	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
2273 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2274 	xn_params.send_buf.iov_base = &aq;
2275 	xn_params.send_buf.iov_len = sizeof(aq);
2276 	xn_params.recv_buf.iov_base = vc_msg;
2277 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2278 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2279 	if (reply_sz < 0)
2280 		return reply_sz;
2281 
2282 	/* compare vc_msg num queues with vport num queues */
2283 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
2284 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
2285 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
2286 	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
2287 		return -EINVAL;
2288 
2289 	size = struct_size(vc_msg, chunks.chunks,
2290 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2291 	if (reply_sz < size)
2292 		return -EIO;
2293 
2294 	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2295 	if (!vport_config->req_qs_chunks)
2296 		return -ENOMEM;
2297 
2298 	return 0;
2299 }
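
/* Note: the queue register chunks cached in vport_config->req_qs_chunks above
 * are preferred at runtime over the original create_vport response by
 * idpf_send_delete_queues_msg(), which converts them with
 * idpf_convert_reg_to_queue_chunks() before issuing VIRTCHNL2_OP_DEL_QUEUES.
 */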
2300 
2301 /**
2302  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2303  * @adapter: Driver specific private structure
2304  * @num_vectors: number of vectors to be allocated
2305  *
2306  * Returns 0 on success, negative on failure.
2307  */
2308 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2309 {
2310 	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
2311 	struct idpf_vc_xn_params xn_params = {};
2312 	struct virtchnl2_alloc_vectors ac = {};
2313 	ssize_t reply_sz;
2314 	u16 num_vchunks;
2315 	int size;
2316 
2317 	ac.num_vectors = cpu_to_le16(num_vectors);
2318 
2319 	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2320 	if (!rcvd_vec)
2321 		return -ENOMEM;
2322 
2323 	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
2324 	xn_params.send_buf.iov_base = &ac;
2325 	xn_params.send_buf.iov_len = sizeof(ac);
2326 	xn_params.recv_buf.iov_base = rcvd_vec;
2327 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2328 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2329 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2330 	if (reply_sz < 0)
2331 		return reply_sz;
2332 
2333 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2334 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
2335 	if (reply_sz < size)
2336 		return -EIO;
2337 
2338 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
2339 		return -EINVAL;
2340 
2341 	kfree(adapter->req_vec_chunks);
2342 	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2343 	if (!adapter->req_vec_chunks)
2344 		return -ENOMEM;
2345 
2346 	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
2347 		kfree(adapter->req_vec_chunks);
2348 		adapter->req_vec_chunks = NULL;
2349 		return -EINVAL;
2350 	}
2351 
2352 	return 0;
2353 }
2354 
2355 /**
2356  * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2357  * @adapter: Driver specific private structure
2358  *
2359  * Returns 0 on success, negative on failure.
2360  */
2361 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2362 {
2363 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2364 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2365 	struct idpf_vc_xn_params xn_params = {};
2366 	ssize_t reply_sz;
2367 	int buf_size;
2368 
2369 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2370 
2371 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2372 	xn_params.send_buf.iov_base = vcs;
2373 	xn_params.send_buf.iov_len = buf_size;
2374 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2375 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2376 	if (reply_sz < 0)
2377 		return reply_sz;
2378 
2379 	kfree(adapter->req_vec_chunks);
2380 	adapter->req_vec_chunks = NULL;
2381 
2382 	return 0;
2383 }
2384 
2385 /**
2386  * idpf_get_max_vfs - Get max number of vfs supported
2387  * @adapter: Driver specific private structure
2388  *
2389  * Returns max number of VFs
2390  */
2391 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2392 {
2393 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2394 }
2395 
2396 /**
2397  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2398  * @adapter: Driver specific private structure
2399  * @num_vfs: number of virtual functions to be created
2400  *
2401  * Returns 0 on success, negative on failure.
2402  */
2403 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2404 {
2405 	struct virtchnl2_sriov_vfs_info svi = {};
2406 	struct idpf_vc_xn_params xn_params = {};
2407 	ssize_t reply_sz;
2408 
2409 	svi.num_vfs = cpu_to_le16(num_vfs);
2410 	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
2411 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2412 	xn_params.send_buf.iov_base = &svi;
2413 	xn_params.send_buf.iov_len = sizeof(svi);
2414 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2415 
2416 	return reply_sz < 0 ? reply_sz : 0;
2417 }
2418 
2419 /**
2420  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2421  * @vport: vport to get stats for
2422  *
2423  * Returns 0 on success, negative on failure.
2424  */
2425 int idpf_send_get_stats_msg(struct idpf_vport *vport)
2426 {
2427 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2428 	struct rtnl_link_stats64 *netstats = &np->netstats;
2429 	struct virtchnl2_vport_stats stats_msg = {};
2430 	struct idpf_vc_xn_params xn_params = {};
2431 	ssize_t reply_sz;
2432 
2434 	/* Don't send get_stats message if the link is down */
2435 	if (np->state <= __IDPF_VPORT_DOWN)
2436 		return 0;
2437 
2438 	stats_msg.vport_id = cpu_to_le32(vport->vport_id);
2439 
2440 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2441 	xn_params.send_buf.iov_base = &stats_msg;
2442 	xn_params.send_buf.iov_len = sizeof(stats_msg);
2443 	xn_params.recv_buf = xn_params.send_buf;
2444 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2445 
2446 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2447 	if (reply_sz < 0)
2448 		return reply_sz;
2449 	if (reply_sz < sizeof(stats_msg))
2450 		return -EIO;
2451 
2452 	spin_lock_bh(&np->stats_lock);
2453 
2454 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2455 			       le64_to_cpu(stats_msg.rx_multicast) +
2456 			       le64_to_cpu(stats_msg.rx_broadcast);
2457 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2458 			       le64_to_cpu(stats_msg.tx_multicast) +
2459 			       le64_to_cpu(stats_msg.tx_broadcast);
2460 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2461 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2462 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2463 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2464 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2465 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2466 
2467 	vport->port_stats.vport_stats = stats_msg;
2468 
2469 	spin_unlock_bh(&np->stats_lock);
2470 
2471 	return 0;
2472 }
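
/* Note: xn_params.recv_buf aliases xn_params.send_buf above, so stats_msg is
 * overwritten in place with the device's reply; only vport_id needs to be
 * filled in before the transaction. The reply is then folded into both
 * np->netstats and vport->port_stats under the stats spinlock.
 */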
2473 
2474 /**
2475  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2476  * @vport: virtual port data structure
2477  * @get: flag to set or get rss look up table
2478  *
2479  * Returns 0 on success, negative on failure.
2480  */
2481 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2482 {
2483 	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
2484 	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
2485 	struct idpf_vc_xn_params xn_params = {};
2486 	struct idpf_rss_data *rss_data;
2487 	int buf_size, lut_buf_size;
2488 	ssize_t reply_sz;
2489 	int i;
2490 
2491 	rss_data =
2492 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2493 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2494 	rl = kzalloc(buf_size, GFP_KERNEL);
2495 	if (!rl)
2496 		return -ENOMEM;
2497 
2498 	rl->vport_id = cpu_to_le32(vport->vport_id);
2499 
2500 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2501 	xn_params.send_buf.iov_base = rl;
2502 	xn_params.send_buf.iov_len = buf_size;
2503 
2504 	if (get) {
2505 		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2506 		if (!recv_rl)
2507 			return -ENOMEM;
2508 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
2509 		xn_params.recv_buf.iov_base = recv_rl;
2510 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2511 	} else {
2512 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2513 		for (i = 0; i < rss_data->rss_lut_size; i++)
2514 			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
2515 
2516 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
2517 	}
2518 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2519 	if (reply_sz < 0)
2520 		return reply_sz;
2521 	if (!get)
2522 		return 0;
2523 	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
2524 		return -EIO;
2525 
2526 	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
2527 	if (reply_sz < lut_buf_size)
2528 		return -EIO;
2529 
2530 	/* size didn't change, we can reuse existing lut buf */
2531 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2532 		goto do_memcpy;
2533 
2534 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2535 	kfree(rss_data->rss_lut);
2536 
2537 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2538 	if (!rss_data->rss_lut) {
2539 		rss_data->rss_lut_size = 0;
2540 		return -ENOMEM;
2541 	}
2542 
2543 do_memcpy:
2544 	memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
2545 
2546 	return 0;
2547 }
2548 
2549 /**
2550  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
2551  * @vport: virtual port data structure
2552  * @get: flag to set or get RSS hash key
2553  *
2554  * Returns 0 on success, negative on failure
2555  */
2556 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2557 {
2558 	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
2559 	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
2560 	struct idpf_vc_xn_params xn_params = {};
2561 	struct idpf_rss_data *rss_data;
2562 	ssize_t reply_sz;
2563 	int i, buf_size;
2564 	u16 key_size;
2565 
2566 	rss_data =
2567 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2568 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2569 	rk = kzalloc(buf_size, GFP_KERNEL);
2570 	if (!rk)
2571 		return -ENOMEM;
2572 
2573 	rk->vport_id = cpu_to_le32(vport->vport_id);
2574 	xn_params.send_buf.iov_base = rk;
2575 	xn_params.send_buf.iov_len = buf_size;
2576 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2577 	if (get) {
2578 		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2579 		if (!recv_rk)
2580 			return -ENOMEM;
2581 
2582 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
2583 		xn_params.recv_buf.iov_base = recv_rk;
2584 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2585 	} else {
2586 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2587 		for (i = 0; i < rss_data->rss_key_size; i++)
2588 			rk->key_flex[i] = rss_data->rss_key[i];
2589 
2590 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
2591 	}
2592 
2593 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2594 	if (reply_sz < 0)
2595 		return reply_sz;
2596 	if (!get)
2597 		return 0;
2598 	if (reply_sz < sizeof(struct virtchnl2_rss_key))
2599 		return -EIO;
2600 
2601 	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
2602 			 le16_to_cpu(recv_rk->key_len));
2603 	if (reply_sz < key_size)
2604 		return -EIO;
2605 
2606 	/* key len didn't change, reuse existing buf */
2607 	if (rss_data->rss_key_size == key_size)
2608 		goto do_memcpy;
2609 
2610 	rss_data->rss_key_size = key_size;
2611 	kfree(rss_data->rss_key);
2612 	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
2613 	if (!rss_data->rss_key) {
2614 		rss_data->rss_key_size = 0;
2615 		return -ENOMEM;
2616 	}
2617 
2618 do_memcpy:
2619 	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
2620 
2621 	return 0;
2622 }
2623 
2624 /**
2625  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2626  * @ptype: ptype lookup table
2627  * @pstate: state machine for ptype lookup table
2628  * @ipv4: ipv4 or ipv6
2629  * @frag: fragmentation allowed
2630  *
2631  */
2632 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
2633 				   struct idpf_ptype_state *pstate,
2634 				   bool ipv4, bool frag)
2635 {
2636 	if (!pstate->outer_ip || !pstate->outer_frag) {
2637 		pstate->outer_ip = true;
2638 
2639 		if (ipv4)
2640 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
2641 		else
2642 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
2643 
2644 		if (frag) {
2645 			ptype->outer_frag = LIBETH_RX_PT_FRAG;
2646 			pstate->outer_frag = true;
2647 		}
2648 	} else {
2649 		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
2650 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
2651 
2652 		if (ipv4)
2653 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
2654 		else
2655 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
2656 
2657 		if (frag)
2658 			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
2659 	}
2660 }
2661 
2662 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
2663 {
2664 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2665 	    ptype->inner_prot)
2666 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
2667 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2668 		 ptype->outer_ip)
2669 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
2670 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
2671 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
2672 	else
2673 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
2674 
2675 	libeth_rx_pt_gen_hash_type(ptype);
2676 }
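
/* Worked example (illustrative only): a ptype whose protocol list contains
 * MAC + IPV4 + TCP + PAY is parsed below as
 *
 *	outer_ip      = LIBETH_RX_PT_OUTER_IPV4   (idpf_fill_ptype_lookup)
 *	inner_prot    = LIBETH_RX_PT_INNER_TCP
 *	payload_layer = LIBETH_RX_PT_PAYLOAD_L2   (VIRTCHNL2_PROTO_HDR_PAY)
 *
 * and idpf_finalize_ptype_lookup() then promotes payload_layer to
 * LIBETH_RX_PT_PAYLOAD_L4 because inner_prot is set, before generating the
 * hash type via libeth_rx_pt_gen_hash_type().
 */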
2677 
2678 /**
2679  * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
2680  * @vport: virtual port data structure
2681  *
2682  * Returns 0 on success, negative on failure.
2683  */
2684 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
2685 {
2686 	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
2687 	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
2688 	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
2689 	int max_ptype, ptypes_recvd = 0, ptype_offset;
2690 	struct idpf_adapter *adapter = vport->adapter;
2691 	struct idpf_vc_xn_params xn_params = {};
2692 	u16 next_ptype_id = 0;
2693 	ssize_t reply_sz;
2694 	int i, j, k;
2695 
2696 	if (vport->rx_ptype_lkup)
2697 		return 0;
2698 
2699 	if (idpf_is_queue_model_split(vport->rxq_model))
2700 		max_ptype = IDPF_RX_MAX_PTYPE;
2701 	else
2702 		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
2703 
2704 	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
2705 	if (!ptype_lkup)
2706 		return -ENOMEM;
2707 
2708 	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
2709 	if (!get_ptype_info)
2710 		return -ENOMEM;
2711 
2712 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2713 	if (!ptype_info)
2714 		return -ENOMEM;
2715 
2716 	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
2717 	xn_params.send_buf.iov_base = get_ptype_info;
2718 	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
2719 	xn_params.recv_buf.iov_base = ptype_info;
2720 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2721 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2722 
2723 	while (next_ptype_id < max_ptype) {
2724 		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
2725 
2726 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2727 			get_ptype_info->num_ptypes =
2728 				cpu_to_le16(max_ptype - next_ptype_id);
2729 		else
2730 			get_ptype_info->num_ptypes =
2731 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2732 
2733 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2734 		if (reply_sz < 0)
2735 			return reply_sz;
2736 
2737 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2738 		if (ptypes_recvd > max_ptype)
2739 			return -EINVAL;
2740 
2741 		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
2742 				le16_to_cpu(get_ptype_info->num_ptypes);
2743 
2744 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2745 
2746 		for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2747 			struct idpf_ptype_state pstate = { };
2748 			struct virtchnl2_ptype *ptype;
2749 			u16 id;
2750 
2751 			ptype = (struct virtchnl2_ptype *)
2752 					((u8 *)ptype_info + ptype_offset);
2753 
2754 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2755 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
2756 				return -EINVAL;
2757 
2758 			/* 0xFFFF indicates end of ptypes */
2759 			if (le16_to_cpu(ptype->ptype_id_10) ==
2760 							IDPF_INVALID_PTYPE_ID)
2761 				goto out;
2762 
2763 			if (idpf_is_queue_model_split(vport->rxq_model))
2764 				k = le16_to_cpu(ptype->ptype_id_10);
2765 			else
2766 				k = ptype->ptype_id_8;
2767 
2768 			for (j = 0; j < ptype->proto_id_count; j++) {
2769 				id = le16_to_cpu(ptype->proto_id[j]);
2770 				switch (id) {
2771 				case VIRTCHNL2_PROTO_HDR_GRE:
2772 					if (pstate.tunnel_state ==
2773 							IDPF_PTYPE_TUNNEL_IP) {
2774 						ptype_lkup[k].tunnel_type =
2775 						LIBETH_RX_PT_TUNNEL_IP_GRENAT;
2776 						pstate.tunnel_state |=
2777 						IDPF_PTYPE_TUNNEL_IP_GRENAT;
2778 					}
2779 					break;
2780 				case VIRTCHNL2_PROTO_HDR_MAC:
2781 					ptype_lkup[k].outer_ip =
2782 						LIBETH_RX_PT_OUTER_L2;
2783 					if (pstate.tunnel_state ==
2784 							IDPF_TUN_IP_GRE) {
2785 						ptype_lkup[k].tunnel_type =
2786 						LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
2787 						pstate.tunnel_state |=
2788 						IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2789 					}
2790 					break;
2791 				case VIRTCHNL2_PROTO_HDR_IPV4:
2792 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2793 							       &pstate, true,
2794 							       false);
2795 					break;
2796 				case VIRTCHNL2_PROTO_HDR_IPV6:
2797 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2798 							       &pstate, false,
2799 							       false);
2800 					break;
2801 				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2802 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2803 							       &pstate, true,
2804 							       true);
2805 					break;
2806 				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2807 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2808 							       &pstate, false,
2809 							       true);
2810 					break;
2811 				case VIRTCHNL2_PROTO_HDR_UDP:
2812 					ptype_lkup[k].inner_prot =
2813 					LIBETH_RX_PT_INNER_UDP;
2814 					break;
2815 				case VIRTCHNL2_PROTO_HDR_TCP:
2816 					ptype_lkup[k].inner_prot =
2817 					LIBETH_RX_PT_INNER_TCP;
2818 					break;
2819 				case VIRTCHNL2_PROTO_HDR_SCTP:
2820 					ptype_lkup[k].inner_prot =
2821 					LIBETH_RX_PT_INNER_SCTP;
2822 					break;
2823 				case VIRTCHNL2_PROTO_HDR_ICMP:
2824 					ptype_lkup[k].inner_prot =
2825 					LIBETH_RX_PT_INNER_ICMP;
2826 					break;
2827 				case VIRTCHNL2_PROTO_HDR_PAY:
2828 					ptype_lkup[k].payload_layer =
2829 						LIBETH_RX_PT_PAYLOAD_L2;
2830 					break;
2831 				case VIRTCHNL2_PROTO_HDR_ICMPV6:
2832 				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2833 				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2834 				case VIRTCHNL2_PROTO_HDR_POST_MAC:
2835 				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2836 				case VIRTCHNL2_PROTO_HDR_SVLAN:
2837 				case VIRTCHNL2_PROTO_HDR_CVLAN:
2838 				case VIRTCHNL2_PROTO_HDR_MPLS:
2839 				case VIRTCHNL2_PROTO_HDR_MMPLS:
2840 				case VIRTCHNL2_PROTO_HDR_PTP:
2841 				case VIRTCHNL2_PROTO_HDR_CTRL:
2842 				case VIRTCHNL2_PROTO_HDR_LLDP:
2843 				case VIRTCHNL2_PROTO_HDR_ARP:
2844 				case VIRTCHNL2_PROTO_HDR_ECP:
2845 				case VIRTCHNL2_PROTO_HDR_EAPOL:
2846 				case VIRTCHNL2_PROTO_HDR_PPPOD:
2847 				case VIRTCHNL2_PROTO_HDR_PPPOE:
2848 				case VIRTCHNL2_PROTO_HDR_IGMP:
2849 				case VIRTCHNL2_PROTO_HDR_AH:
2850 				case VIRTCHNL2_PROTO_HDR_ESP:
2851 				case VIRTCHNL2_PROTO_HDR_IKE:
2852 				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2853 				case VIRTCHNL2_PROTO_HDR_L2TPV2:
2854 				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2855 				case VIRTCHNL2_PROTO_HDR_L2TPV3:
2856 				case VIRTCHNL2_PROTO_HDR_GTP:
2857 				case VIRTCHNL2_PROTO_HDR_GTP_EH:
2858 				case VIRTCHNL2_PROTO_HDR_GTPCV2:
2859 				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2860 				case VIRTCHNL2_PROTO_HDR_GTPU:
2861 				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2862 				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2863 				case VIRTCHNL2_PROTO_HDR_ECPRI:
2864 				case VIRTCHNL2_PROTO_HDR_VRRP:
2865 				case VIRTCHNL2_PROTO_HDR_OSPF:
2866 				case VIRTCHNL2_PROTO_HDR_TUN:
2867 				case VIRTCHNL2_PROTO_HDR_NVGRE:
2868 				case VIRTCHNL2_PROTO_HDR_VXLAN:
2869 				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2870 				case VIRTCHNL2_PROTO_HDR_GENEVE:
2871 				case VIRTCHNL2_PROTO_HDR_NSH:
2872 				case VIRTCHNL2_PROTO_HDR_QUIC:
2873 				case VIRTCHNL2_PROTO_HDR_PFCP:
2874 				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2875 				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2876 				case VIRTCHNL2_PROTO_HDR_RTP:
2877 				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2878 					break;
2879 				default:
2880 					break;
2881 				}
2882 			}
2883 
2884 			idpf_finalize_ptype_lookup(&ptype_lkup[k]);
2885 		}
2886 	}
2887 
2888 out:
2889 	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
2890 
2891 	return 0;
2892 }
2893 
2894 /**
2895  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2896  *				    message
2897  * @vport: virtual port data structure
2898  *
2899  * Returns 0 on success, negative on failure.
2900  */
2901 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2902 {
2903 	struct idpf_vc_xn_params xn_params = {};
2904 	struct virtchnl2_loopback loopback;
2905 	ssize_t reply_sz;
2906 
2907 	loopback.vport_id = cpu_to_le32(vport->vport_id);
2908 	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2909 
2910 	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
2911 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2912 	xn_params.send_buf.iov_base = &loopback;
2913 	xn_params.send_buf.iov_len = sizeof(loopback);
2914 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2915 
2916 	return reply_sz < 0 ? reply_sz : 0;
2917 }
2918 
2919 /**
2920  * idpf_find_ctlq - Given a type and id, find ctlq info
2921  * @hw: hardware struct
2922  * @type: type of ctlq to find
2923  * @id: ctlq id to find
2924  *
2925  * Returns pointer to found ctlq info struct, NULL otherwise.
2926  */
2927 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2928 					     enum idpf_ctlq_type type, int id)
2929 {
2930 	struct idpf_ctlq_info *cq, *tmp;
2931 
2932 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
2933 		if (cq->q_id == id && cq->cq_type == type)
2934 			return cq;
2935 
2936 	return NULL;
2937 }
2938 
2939 /**
2940  * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
2941  * @adapter: adapter info struct
2942  *
2943  * Returns 0 on success, negative otherwise
2944  */
2945 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2946 {
2947 	struct idpf_ctlq_create_info ctlq_info[] = {
2948 		{
2949 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2950 			.id = IDPF_DFLT_MBX_ID,
2951 			.len = IDPF_DFLT_MBX_Q_LEN,
2952 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2953 		},
2954 		{
2955 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2956 			.id = IDPF_DFLT_MBX_ID,
2957 			.len = IDPF_DFLT_MBX_Q_LEN,
2958 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2959 		}
2960 	};
2961 	struct idpf_hw *hw = &adapter->hw;
2962 	int err;
2963 
2964 	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
2965 
2966 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2967 	if (err)
2968 		return err;
2969 
2970 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2971 				 IDPF_DFLT_MBX_ID);
2972 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2973 				 IDPF_DFLT_MBX_ID);
2974 
2975 	if (!hw->asq || !hw->arq) {
2976 		idpf_ctlq_deinit(hw);
2977 
2978 		return -ENOENT;
2979 	}
2980 
2981 	adapter->state = __IDPF_VER_CHECK;
2982 
2983 	return 0;
2984 }
2985 
2986 /**
2987  * idpf_deinit_dflt_mbx - Free up ctlqs setup
2988  * @adapter: Driver specific private data structure
2989  */
2990 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
2991 {
2992 	if (adapter->hw.arq && adapter->hw.asq) {
2993 		idpf_mb_clean(adapter);
2994 		idpf_ctlq_deinit(&adapter->hw);
2995 	}
2996 	adapter->hw.arq = NULL;
2997 	adapter->hw.asq = NULL;
2998 }
2999 
3000 /**
3001  * idpf_vport_params_buf_rel - Release memory for MailBox resources
3002  * @adapter: Driver specific private data structure
3003  *
3004  * Will release memory to hold the vport parameters received on MailBox
3005  */
3006 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
3007 {
3008 	kfree(adapter->vport_params_recvd);
3009 	adapter->vport_params_recvd = NULL;
3010 	kfree(adapter->vport_params_reqd);
3011 	adapter->vport_params_reqd = NULL;
3012 	kfree(adapter->vport_ids);
3013 	adapter->vport_ids = NULL;
3014 }
3015 
3016 /**
3017  * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
3018  * @adapter: Driver specific private data structure
3019  *
3020  * Will alloc memory to hold the vport parameters received on MailBox
3021  */
3022 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
3023 {
3024 	u16 num_max_vports = idpf_get_max_vports(adapter);
3025 
3026 	adapter->vport_params_reqd = kcalloc(num_max_vports,
3027 					     sizeof(*adapter->vport_params_reqd),
3028 					     GFP_KERNEL);
3029 	if (!adapter->vport_params_reqd)
3030 		return -ENOMEM;
3031 
3032 	adapter->vport_params_recvd = kcalloc(num_max_vports,
3033 					      sizeof(*adapter->vport_params_recvd),
3034 					      GFP_KERNEL);
3035 	if (!adapter->vport_params_recvd)
3036 		goto err_mem;
3037 
3038 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3039 	if (!adapter->vport_ids)
3040 		goto err_mem;
3041 
3042 	if (adapter->vport_config)
3043 		return 0;
3044 
3045 	adapter->vport_config = kcalloc(num_max_vports,
3046 					sizeof(*adapter->vport_config),
3047 					GFP_KERNEL);
3048 	if (!adapter->vport_config)
3049 		goto err_mem;
3050 
3051 	return 0;
3052 
3053 err_mem:
3054 	idpf_vport_params_buf_rel(adapter);
3055 
3056 	return -ENOMEM;
3057 }
3058 
3059 /**
3060  * idpf_vc_core_init - Initialize state machine and get driver specific
3061  * resources
3062  * @adapter: Driver specific private structure
3063  *
3064  * This function will initialize the state machine and request all necessary
3065  * resources required by the device driver. Once the state machine is
3066  * initialized, allocate memory to store vport specific information and also
3067  * requests required interrupts.
3068  *
3069  * Returns 0 on success, -EAGAIN function will get called again,
3070  * otherwise negative on failure.
3071  */
3072 int idpf_vc_core_init(struct idpf_adapter *adapter)
3073 {
3074 	int task_delay = 30;
3075 	u16 num_max_vports;
3076 	int err = 0;
3077 
3078 	if (!adapter->vcxn_mngr) {
3079 		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
3080 		if (!adapter->vcxn_mngr) {
3081 			err = -ENOMEM;
3082 			goto init_failed;
3083 		}
3084 	}
3085 	idpf_vc_xn_init(adapter->vcxn_mngr);
3086 
3087 	while (adapter->state != __IDPF_INIT_SW) {
3088 		switch (adapter->state) {
3089 		case __IDPF_VER_CHECK:
3090 			err = idpf_send_ver_msg(adapter);
3091 			switch (err) {
3092 			case 0:
3093 				/* success, move state machine forward */
3094 				adapter->state = __IDPF_GET_CAPS;
3095 				fallthrough;
3096 			case -EAGAIN:
3097 				goto restart;
3098 			default:
3099 				/* Something bad happened, try again but only a
3100 				 * few times.
3101 				 */
3102 				goto init_failed;
3103 			}
3104 		case __IDPF_GET_CAPS:
3105 			err = idpf_send_get_caps_msg(adapter);
3106 			if (err)
3107 				goto init_failed;
3108 			adapter->state = __IDPF_INIT_SW;
3109 			break;
3110 		default:
3111 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
3112 				adapter->state);
3113 			err = -EINVAL;
3114 			goto init_failed;
3115 		}
3116 		break;
3117 restart:
3118 		/* Give enough time before proceeding further with
3119 		 * state machine
3120 		 */
3121 		msleep(task_delay);
3122 	}
3123 
3124 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
3125 		err = idpf_send_get_lan_memory_regions(adapter);
3126 		if (err) {
3127 			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
3128 				err);
3129 			return -EINVAL;
3130 		}
3131 	} else {
3132 		/* Fallback to mapping the remaining regions of the entire BAR */
3133 		err = idpf_calc_remaining_mmio_regs(adapter);
3134 		if (err) {
3135 			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
3136 				err);
3137 			return -ENOMEM;
3138 		}
3139 	}
3140 
3141 	err = idpf_map_lan_mmio_regs(adapter);
3142 	if (err) {
3143 		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
3144 			err);
3145 		return -ENOMEM;
3146 	}
3147 
3148 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
3149 	num_max_vports = idpf_get_max_vports(adapter);
3150 	adapter->max_vports = num_max_vports;
3151 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
3152 				  GFP_KERNEL);
3153 	if (!adapter->vports)
3154 		return -ENOMEM;
3155 
3156 	if (!adapter->netdevs) {
3157 		adapter->netdevs = kcalloc(num_max_vports,
3158 					   sizeof(struct net_device *),
3159 					   GFP_KERNEL);
3160 		if (!adapter->netdevs) {
3161 			err = -ENOMEM;
3162 			goto err_netdev_alloc;
3163 		}
3164 	}
3165 
3166 	err = idpf_vport_params_buf_alloc(adapter);
3167 	if (err) {
3168 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3169 			err);
3170 		goto err_netdev_alloc;
3171 	}
3172 
3173 	/* Start the mailbox task before requesting vectors. This will ensure
3174 	 * vector information response from mailbox is handled
3175 	 */
3176 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3177 
3178 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3179 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3180 
3181 	err = idpf_intr_req(adapter);
3182 	if (err) {
3183 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3184 			err);
3185 		goto err_intr_req;
3186 	}
3187 
3188 	err = idpf_ptp_init(adapter);
3189 	if (err)
3190 		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
3191 			ERR_PTR(err));
3192 
3193 	idpf_init_avail_queues(adapter);
3194 
3195 	/* Skew the delay for init tasks for each function based on fn number
3196 	 * to prevent every function from making the same call simultaneously.
3197 	 */
3198 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3199 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3200 
3201 	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3202 
3203 	return 0;
3204 
3205 err_intr_req:
3206 	cancel_delayed_work_sync(&adapter->serv_task);
3207 	cancel_delayed_work_sync(&adapter->mbx_task);
3208 	idpf_vport_params_buf_rel(adapter);
3209 err_netdev_alloc:
3210 	kfree(adapter->vports);
3211 	adapter->vports = NULL;
3212 	return err;
3213 
3214 init_failed:
3215 	/* Don't retry if we're trying to go down, just bail. */
3216 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3217 		return err;
3218 
3219 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3220 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3221 
3222 		return -EFAULT;
3223 	}
3224 	/* If it reached here, it is possible that mailbox queue initialization
3225 	 * register writes might not have taken effect. Retry to initialize
3226 	 * the mailbox again
3227 	 */
3228 	adapter->state = __IDPF_VER_CHECK;
3229 	if (adapter->vcxn_mngr)
3230 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3231 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3232 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3233 			   msecs_to_jiffies(task_delay));
3234 
3235 	return -EAGAIN;
3236 }
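
/* Init state machine summary (as implemented above): __IDPF_VER_CHECK sends
 * the version message and, on success, advances to __IDPF_GET_CAPS, which
 * requests capabilities and moves to __IDPF_INIT_SW. -EAGAIN from the version
 * exchange restarts the loop after task_delay; any other error takes the
 * init_failed path, which resets the state to __IDPF_VER_CHECK and
 * reschedules vc_event_task, giving up with -EFAULT once mb_wait_count
 * exceeds IDPF_MB_MAX_ERR.
 */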
3237 
3238 /**
3239  * idpf_vc_core_deinit - Device deinit routine
3240  * @adapter: Driver specific private structure
3241  *
3242  */
3243 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3244 {
3245 	bool remove_in_prog;
3246 
3247 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3248 		return;
3249 
3250 	/* Avoid transaction timeouts when called during reset */
3251 	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
3252 	if (!remove_in_prog)
3253 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3254 
3255 	idpf_ptp_release(adapter);
3256 	idpf_deinit_task(adapter);
3257 	idpf_idc_deinit_core_aux_device(adapter->cdev_info);
3258 	idpf_intr_rel(adapter);
3259 
3260 	if (remove_in_prog)
3261 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3262 
3263 	cancel_delayed_work_sync(&adapter->serv_task);
3264 	cancel_delayed_work_sync(&adapter->mbx_task);
3265 
3266 	idpf_vport_params_buf_rel(adapter);
3267 
3268 	kfree(adapter->vports);
3269 	adapter->vports = NULL;
3270 
3271 	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3272 }
3273 
3274 /**
3275  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3276  * @vport: virtual port data struct
3277  *
3278  * This function requests the vector information required for the vport and
3279  * stores the vector indexes received from the 'global vector distribution'
3280  * in the vport's queue vectors array.
3281  *
3282  * Return 0 on success, error on failure
3283  */
3284 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3285 {
3286 	struct idpf_vector_info vec_info;
3287 	int num_alloc_vecs;
3288 
3289 	vec_info.num_curr_vecs = vport->num_q_vectors;
3290 	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
3291 	vec_info.default_vport = vport->default_vport;
3292 	vec_info.index = vport->idx;
3293 
3294 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3295 						     vport->q_vector_idxs,
3296 						     &vec_info);
3297 	if (num_alloc_vecs <= 0) {
3298 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3299 			num_alloc_vecs);
3300 		return -EINVAL;
3301 	}
3302 
3303 	vport->num_q_vectors = num_alloc_vecs;
3304 
3305 	return 0;
3306 }
3307 
3308 /**
3309  * idpf_vport_init - Initialize virtual port
3310  * @vport: virtual port to be initialized
3311  * @max_q: vport max queue info
3312  *
3313  * Will initialize vport with the info received through MB earlier
3314  */
3315 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3316 {
3317 	struct idpf_adapter *adapter = vport->adapter;
3318 	struct virtchnl2_create_vport *vport_msg;
3319 	struct idpf_vport_config *vport_config;
3320 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3321 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3322 	struct idpf_rss_data *rss_data;
3323 	u16 idx = vport->idx;
3324 	int err;
3325 
3326 	vport_config = adapter->vport_config[idx];
3327 	rss_data = &vport_config->user_config.rss_data;
3328 	vport_msg = adapter->vport_params_recvd[idx];
3329 
3330 	vport_config->max_q.max_txq = max_q->max_txq;
3331 	vport_config->max_q.max_rxq = max_q->max_rxq;
3332 	vport_config->max_q.max_complq = max_q->max_complq;
3333 	vport_config->max_q.max_bufq = max_q->max_bufq;
3334 
3335 	vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3336 	vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3337 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3338 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3339 
3340 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3341 				       le16_to_cpu(vport_msg->rss_key_size));
3342 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3343 
3344 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3345 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3346 
3347 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3348 	memcpy(vport->rx_itr_profile, rx_itr, sizeof(rx_itr));
3349 	memcpy(vport->tx_itr_profile, tx_itr, sizeof(tx_itr));
3350 
3351 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3352 
3353 	idpf_vport_init_num_qs(vport, vport_msg);
3354 	idpf_vport_calc_num_q_desc(vport);
3355 	idpf_vport_calc_num_q_groups(vport);
3356 	idpf_vport_alloc_vec_indexes(vport);
3357 
3358 	vport->crc_enable = adapter->crc_enable;
3359 
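	/* Only the uplink vport negotiates Tx timestamping capabilities;
	 * nothing more to do for other vports.
	 */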
3360 	if (!(vport_msg->vport_flags &
3361 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3362 		return;
3363 
3364 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3365 	if (err) {
3366 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3367 		return;
3368 	}
3369 
3370 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3371 }
3372 
3373 /**
3374  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3375  * @adapter: adapter structure to get the mailbox vector id
3376  * @vecids: Array of vector ids
3377  * @num_vecids: number of vector ids
3378  * @chunks: vector ids received over mailbox
3379  *
3380  * Will initialize the mailbox vector id, which is received in the
3381  * get capabilities response, and the data queue vector ids with ids
3382  * received as mailbox parameters.
3383  * Returns number of ids filled
3384  */
3385 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3386 		     u16 *vecids, int num_vecids,
3387 		     struct virtchnl2_vector_chunks *chunks)
3388 {
3389 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3390 	int num_vecid_filled = 0;
3391 	int i, j;
3392 
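	/* Entry 0 is always the mailbox interrupt vector; the data queue
	 * vectors from the chunks follow it.
	 */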
3393 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3394 	num_vecid_filled++;
3395 
3396 	for (j = 0; j < num_chunks; j++) {
3397 		struct virtchnl2_vector_chunk *chunk;
3398 		u16 start_vecid, num_vec;
3399 
3400 		chunk = &chunks->vchunks[j];
3401 		num_vec = le16_to_cpu(chunk->num_vectors);
3402 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3403 
3404 		for (i = 0; i < num_vec; i++) {
3405 			if ((num_vecid_filled + i) < num_vecids) {
3406 				vecids[num_vecid_filled + i] = start_vecid;
3407 				start_vecid++;
3408 			} else {
3409 				break;
3410 			}
3411 		}
3412 		num_vecid_filled = num_vecid_filled + i;
3413 	}
3414 
3415 	return num_vecid_filled;
3416 }
3417 
3418 /**
3419  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3420  * @qids: Array of queue ids
3421  * @num_qids: number of queue ids
3422  * @q_type: type of queue
3423  * @chunks: queue ids received over mailbox
3424  *
3425  * Will initialize all queue ids with ids received as mailbox parameters
3426  * Returns number of ids filled
3427  */
3428 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3429 				    struct virtchnl2_queue_reg_chunks *chunks)
3430 {
3431 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3432 	u32 num_q_id_filled = 0, i;
3433 	u32 start_q_id, num_q;
3434 
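	/* Walk the chunks back to front, copying out ids only from chunks of
	 * the requested queue type until the caller's array is full.
	 */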
3435 	while (num_chunks--) {
3436 		struct virtchnl2_queue_reg_chunk *chunk;
3437 
3438 		chunk = &chunks->chunks[num_chunks];
3439 		if (le32_to_cpu(chunk->type) != q_type)
3440 			continue;
3441 
3442 		num_q = le32_to_cpu(chunk->num_queues);
3443 		start_q_id = le32_to_cpu(chunk->start_queue_id);
3444 
3445 		for (i = 0; i < num_q; i++) {
3446 			if ((num_q_id_filled + i) < num_qids) {
3447 				qids[num_q_id_filled + i] = start_q_id;
3448 				start_q_id++;
3449 			} else {
3450 				break;
3451 			}
3452 		}
3453 		num_q_id_filled = num_q_id_filled + i;
3454 	}
3455 
3456 	return num_q_id_filled;
3457 }
3458 
3459 /**
3460  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3461  * @vport: virtual port for which the queue ids are initialized
3462  * @qids: queue ids
3463  * @num_qids: number of queue ids
3464  * @q_type: type of queue
3465  *
3466  * Will initialize all queue ids with ids received as mailbox
3467  * parameters. Returns number of queue ids initialized.
3468  */
3469 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3470 				       const u32 *qids,
3471 				       int num_qids,
3472 				       u32 q_type)
3473 {
3474 	int i, j, k = 0;
3475 
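	/* Hand out ids to queues of the requested type in group order; k
	 * counts how many ids were consumed and is returned so the caller
	 * can validate it.
	 */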
3476 	switch (q_type) {
3477 	case VIRTCHNL2_QUEUE_TYPE_TX:
3478 		for (i = 0; i < vport->num_txq_grp; i++) {
3479 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3480 
3481 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3482 				tx_qgrp->txqs[j]->q_id = qids[k];
3483 		}
3484 		break;
3485 	case VIRTCHNL2_QUEUE_TYPE_RX:
3486 		for (i = 0; i < vport->num_rxq_grp; i++) {
3487 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3488 			u16 num_rxq;
3489 
3490 			if (idpf_is_queue_model_split(vport->rxq_model))
3491 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3492 			else
3493 				num_rxq = rx_qgrp->singleq.num_rxq;
3494 
3495 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3496 				struct idpf_rx_queue *q;
3497 
3498 				if (idpf_is_queue_model_split(vport->rxq_model))
3499 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3500 				else
3501 					q = rx_qgrp->singleq.rxqs[j];
3502 				q->q_id = qids[k];
3503 			}
3504 		}
3505 		break;
3506 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3507 		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3508 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3509 
3510 			tx_qgrp->complq->q_id = qids[k];
3511 		}
3512 		break;
3513 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3514 		for (i = 0; i < vport->num_rxq_grp; i++) {
3515 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3516 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
3517 
3518 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3519 				struct idpf_buf_queue *q;
3520 
3521 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3522 				q->q_id = qids[k];
3523 			}
3524 		}
3525 		break;
3526 	default:
3527 		break;
3528 	}
3529 
3530 	return k;
3531 }
3532 
3533 /**
3534  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3535  * @vport: virtual port for which the queue ids are initialized
3536  *
3537  * Will initialize all queue ids with ids received as mailbox parameters.
3538  * Returns 0 on success, negative if all the queues are not initialized.
3539  */
3540 int idpf_vport_queue_ids_init(struct idpf_vport *vport)
3541 {
3542 	struct virtchnl2_create_vport *vport_params;
3543 	struct virtchnl2_queue_reg_chunks *chunks;
3544 	struct idpf_vport_config *vport_config;
3545 	u16 vport_idx = vport->idx;
3546 	int num_ids, err = 0;
3547 	u16 q_type;
3548 	u32 *qids;
3549 
3550 	vport_config = vport->adapter->vport_config[vport_idx];
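	/* Prefer queue chunks saved from an earlier add queues request;
	 * otherwise fall back to the chunks from the create vport response.
	 */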
3551 	if (vport_config->req_qs_chunks) {
3552 		struct virtchnl2_add_queues *vc_aq =
3553 			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
3554 		chunks = &vc_aq->chunks;
3555 	} else {
3556 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
3557 		chunks = &vport_params->chunks;
3558 	}
3559 
3560 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3561 	if (!qids)
3562 		return -ENOMEM;
3563 
3564 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3565 					   VIRTCHNL2_QUEUE_TYPE_TX,
3566 					   chunks);
3567 	if (num_ids < vport->num_txq) {
3568 		err = -EINVAL;
3569 		goto mem_rel;
3570 	}
3571 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3572 					      VIRTCHNL2_QUEUE_TYPE_TX);
3573 	if (num_ids < vport->num_txq) {
3574 		err = -EINVAL;
3575 		goto mem_rel;
3576 	}
3577 
3578 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3579 					   VIRTCHNL2_QUEUE_TYPE_RX,
3580 					   chunks);
3581 	if (num_ids < vport->num_rxq) {
3582 		err = -EINVAL;
3583 		goto mem_rel;
3584 	}
3585 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3586 					      VIRTCHNL2_QUEUE_TYPE_RX);
3587 	if (num_ids < vport->num_rxq) {
3588 		err = -EINVAL;
3589 		goto mem_rel;
3590 	}
3591 
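	/* Completion and buffer queues exist only in the split queue model;
	 * skip them otherwise.
	 */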
3592 	if (!idpf_is_queue_model_split(vport->txq_model))
3593 		goto check_rxq;
3594 
3595 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3596 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3597 	if (num_ids < vport->num_complq) {
3598 		err = -EINVAL;
3599 		goto mem_rel;
3600 	}
3601 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3602 	if (num_ids < vport->num_complq) {
3603 		err = -EINVAL;
3604 		goto mem_rel;
3605 	}
3606 
3607 check_rxq:
3608 	if (!idpf_is_queue_model_split(vport->rxq_model))
3609 		goto mem_rel;
3610 
3611 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3612 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3613 	if (num_ids < vport->num_bufq) {
3614 		err = -EINVAL;
3615 		goto mem_rel;
3616 	}
3617 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3618 	if (num_ids < vport->num_bufq)
3619 		err = -EINVAL;
3620 
3621 mem_rel:
3622 	kfree(qids);
3623 
3624 	return err;
3625 }
3626 
3627 /**
3628  * idpf_vport_adjust_qs - Adjust to new requested queues
3629  * @vport: virtual port data struct
3630  *
3631  * Renegotiate queues.  Returns 0 on success, negative on failure.
3632  */
3633 int idpf_vport_adjust_qs(struct idpf_vport *vport)
3634 {
3635 	struct virtchnl2_create_vport vport_msg;
3636 	int err;
3637 
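	/* Build a minimal create vport message carrying just the queue
	 * models so the total queue counts can be recalculated.
	 */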
3638 	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
3639 	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
3640 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
3641 				       NULL);
3642 	if (err)
3643 		return err;
3644 
3645 	idpf_vport_init_num_qs(vport, &vport_msg);
3646 	idpf_vport_calc_num_q_groups(vport);
3647 
3648 	return 0;
3649 }
3650 
3651 /**
3652  * idpf_is_capability_ena - Default implementation of capability checking
3653  * @adapter: Private data struct
3654  * @all: whether all flag bits must be set or any one of them suffices
3655  * @field: caps field to check for flags
3656  * @flag: flag to check
3657  *
3658  * Return true if the requested capabilities are supported, false otherwise
3659  */
3660 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
3661 			    enum idpf_cap_field field, u64 flag)
3662 {
3663 	u8 *caps = (u8 *)&adapter->caps;
3664 	u32 *cap_field;
3665 
3666 	if (!caps)
3667 		return false;
3668 
3669 	if (field == IDPF_BASE_CAPS)
3670 		return false;
3671 
3672 	cap_field = (u32 *)(caps + field);
3673 
3674 	if (all)
3675 		return (*cap_field & flag) == flag;
3676 	else
3677 		return !!(*cap_field & flag);
3678 }
3679 
3680 /**
3681  * idpf_vport_is_cap_ena - Check if vport capability is enabled
3682  * @vport: Private data struct
3683  * @flag: flag(s) to check
3684  *
3685  * Return: true if the capability is supported, false otherwise
3686  */
3687 bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
3688 {
3689 	struct virtchnl2_create_vport *vport_msg;
3690 
3691 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3692 
3693 	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
3694 }
3695 
3696 /**
3697  * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
3698  * @vport: Private data struct
3699  * @flow_type: flow type to check (from ethtool.h)
3700  *
3701  * Return: true if sideband filters are allowed for @flow_type, false otherwise
3702  */
3703 bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
3704 {
3705 	struct virtchnl2_create_vport *vport_msg;
3706 	__le64 caps;
3707 
3708 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3709 	caps = vport_msg->sideband_flow_caps;
3710 
3711 	switch (flow_type) {
3712 	case TCP_V4_FLOW:
3713 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
3714 	case UDP_V4_FLOW:
3715 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
3716 	default:
3717 		return false;
3718 	}
3719 }
3720 
3721 /**
3722  * idpf_sideband_action_ena - Check if steering is enabled for action
3723  * @vport: Private data struct
3724  * @fsp: flow spec
3725  *
3726  * Return: true if sideband filters are allowed for @fsp, false otherwise
3727  */
3728 bool idpf_sideband_action_ena(struct idpf_vport *vport,
3729 			      struct ethtool_rx_flow_spec *fsp)
3730 {
3731 	struct virtchnl2_create_vport *vport_msg;
3732 	unsigned int supp_actions;
3733 
3734 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3735 	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
3736 
3737 	/* Actions Drop/Wake are not supported */
3738 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
3739 	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
3740 		return false;
3741 
3742 	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
3743 }
3744 
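/**
 * idpf_fsteer_max_rules - Get maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: maximum number of flow steering rules reported in the create vport
 * response.
 */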
3745 unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
3746 {
3747 	struct virtchnl2_create_vport *vport_msg;
3748 
3749 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3750 	return le32_to_cpu(vport_msg->flow_steer_max_rules);
3751 }
3752 
3753 /**
3754  * idpf_get_vport_id - Get vport id
3755  * @vport: virtual port structure
3756  *
3757  * Return vport id from the adapter persistent data
3758  */
3759 u32 idpf_get_vport_id(struct idpf_vport *vport)
3760 {
3761 	struct virtchnl2_create_vport *vport_msg;
3762 
3763 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3764 
3765 	return le32_to_cpu(vport_msg->vport_id);
3766 }
3767 
3768 /**
3769  * idpf_mac_filter_async_handler - Async callback for mac filters
3770  * @adapter: private data struct
3771  * @xn: transaction for message
3772  * @ctlq_msg: received message
3773  *
3774  * In some scenarios the driver can't sleep and wait for a reply (e.g. the
3775  * stack is holding rtnl_lock) when adding a new mac filter, which makes it
3776  * difficult to handle errors returned in the reply. The best we can
3777  * ultimately do is remove the filter from our list of mac filters and
3778  * report the error.
3779  */
3780 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
3781 					 struct idpf_vc_xn *xn,
3782 					 const struct idpf_ctlq_msg *ctlq_msg)
3783 {
3784 	struct virtchnl2_mac_addr_list *ma_list;
3785 	struct idpf_vport_config *vport_config;
3786 	struct virtchnl2_mac_addr *mac_addr;
3787 	struct idpf_mac_filter *f, *tmp;
3788 	struct list_head *ma_list_head;
3789 	struct idpf_vport *vport;
3790 	u16 num_entries;
3791 	int i;
3792 
3793 	/* if success we're done, we're only here if something bad happened */
3794 	if (!ctlq_msg->cookie.mbx.chnl_retval)
3795 		return 0;
3796 
3797 	/* make sure at least struct is there */
3798 	if (xn->reply_sz < sizeof(*ma_list))
3799 		goto invalid_payload;
3800 
3801 	ma_list = ctlq_msg->ctx.indirect.payload->va;
3802 	mac_addr = ma_list->mac_addr_list;
3803 	num_entries = le16_to_cpu(ma_list->num_mac_addr);
3804 	/* we should have received a buffer at least this big */
3805 	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
3806 		goto invalid_payload;
3807 
3808 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
3809 	if (!vport)
3810 		goto invalid_payload;
3811 
3812 	vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
3813 	ma_list_head = &vport_config->user_config.mac_filter_list;
3814 
3815 	/* We can't do much to reconcile bad filters at this point, however we
3816 	 * should at least remove them from our list one way or the other so we
3817 	 * have some idea what good filters we have.
3818 	 */
3819 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3820 	list_for_each_entry_safe(f, tmp, ma_list_head, list)
3821 		for (i = 0; i < num_entries; i++)
3822 			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
3823 				list_del(&f->list);
3824 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3825 	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
3826 			    xn->vc_op);
3827 
3828 	return 0;
3829 
3830 invalid_payload:
3831 	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
3832 			    xn->vc_op, xn->reply_sz);
3833 
3834 	return -EINVAL;
3835 }
3836 
3837 /**
3838  * idpf_add_del_mac_filters - Add/del mac filters
3839  * @vport: Virtual port data structure
3840  * @np: Netdev private structure
3841  * @add: Add or delete flag
3842  * @async: Don't wait for return message
3843  *
3844  * Returns 0 on success, error on failure.
3845  */
3846 int idpf_add_del_mac_filters(struct idpf_vport *vport,
3847 			     struct idpf_netdev_priv *np,
3848 			     bool add, bool async)
3849 {
3850 	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
3851 	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
3852 	struct idpf_adapter *adapter = np->adapter;
3853 	struct idpf_vc_xn_params xn_params = {};
3854 	struct idpf_vport_config *vport_config;
3855 	u32 num_msgs, total_filters = 0;
3856 	struct idpf_mac_filter *f;
3857 	ssize_t reply_sz;
3858 	int i = 0, k;
3859 
3860 	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
3861 				VIRTCHNL2_OP_DEL_MAC_ADDR;
3862 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3863 	xn_params.async = async;
3864 	xn_params.async_handler = idpf_mac_filter_async_handler;
3865 
3866 	vport_config = adapter->vport_config[np->vport_idx];
3867 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3868 
3869 	/* Find the number of newly added filters */
3870 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3871 			    list) {
3872 		if (add && f->add)
3873 			total_filters++;
3874 		else if (!add && f->remove)
3875 			total_filters++;
3876 	}
3877 
3878 	if (!total_filters) {
3879 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3880 
3881 		return 0;
3882 	}
3883 
3884 	/* Fill all the new filters into virtchannel message */
3885 	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
3886 			   GFP_ATOMIC);
3887 	if (!mac_addr) {
3888 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3889 
3890 		return -ENOMEM;
3891 	}
3892 
3893 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3894 			    list) {
3895 		if (add && f->add) {
3896 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3897 			i++;
3898 			f->add = false;
3899 			if (i == total_filters)
3900 				break;
3901 		}
3902 		if (!add && f->remove) {
3903 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3904 			i++;
3905 			f->remove = false;
3906 			if (i == total_filters)
3907 				break;
3908 		}
3909 	}
3910 
3911 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3912 
3913 	/* Chunk up the filters into multiple messages to avoid
3914 	 * sending a control queue message buffer that is too large
3915 	 */
3916 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
3917 
3918 	for (i = 0, k = 0; i < num_msgs; i++) {
3919 		u32 entries_size, buf_size, num_entries;
3920 
3921 		num_entries = min_t(u32, total_filters,
3922 				    IDPF_NUM_FILTERS_PER_MSG);
3923 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
3924 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
3925 
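		/* Allocate a new buffer on the first pass or when the chunk
		 * size changes (the final chunk may be smaller); otherwise
		 * clear and reuse the previous buffer.
		 */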
3926 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
3927 			kfree(ma_list);
3928 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
3929 			if (!ma_list)
3930 				return -ENOMEM;
3931 		} else {
3932 			memset(ma_list, 0, buf_size);
3933 		}
3934 
3935 		ma_list->vport_id = cpu_to_le32(np->vport_id);
3936 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
3937 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
3938 
3939 		xn_params.send_buf.iov_base = ma_list;
3940 		xn_params.send_buf.iov_len = buf_size;
3941 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3942 		if (reply_sz < 0)
3943 			return reply_sz;
3944 
3945 		k += num_entries;
3946 		total_filters -= num_entries;
3947 	}
3948 
3949 	return 0;
3950 }
3951 
3952 /**
3953  * idpf_set_promiscuous - set promiscuous and send message to mailbox
3954  * @adapter: Driver specific private structure
3955  * @config_data: Vport specific config data
3956  * @vport_id: Vport identifier
3957  *
3958  * Request to enable promiscuous mode for the vport. Message is sent
3959  * asynchronously and won't wait for a response. Returns 0 on success,
3960  * negative on failure.
3961  */
3962 int idpf_set_promiscuous(struct idpf_adapter *adapter,
3963 			 struct idpf_vport_user_config_data *config_data,
3964 			 u32 vport_id)
3965 {
3966 	struct idpf_vc_xn_params xn_params = {};
3967 	struct virtchnl2_promisc_info vpi;
3968 	ssize_t reply_sz;
3969 	u16 flags = 0;
3970 
3971 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
3972 		flags |= VIRTCHNL2_UNICAST_PROMISC;
3973 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
3974 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
3975 
3976 	vpi.vport_id = cpu_to_le32(vport_id);
3977 	vpi.flags = cpu_to_le16(flags);
3978 
3979 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
3980 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3981 	xn_params.send_buf.iov_base = &vpi;
3982 	xn_params.send_buf.iov_len = sizeof(vpi);
3983 	/* setting promiscuous is only ever done asynchronously */
3984 	xn_params.async = true;
3985 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3986 
3987 	return reply_sz < 0 ? reply_sz : 0;
3988 }
3989 
3990 /**
3991  * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
3992  * @cdev_info: IDC core device info pointer
3993  * @send_msg: message to send
3994  * @msg_size: size of message to send
3995  * @recv_msg: message to populate on reception of response
3996  * @recv_len: length of message copied into recv_msg or 0 on error
3997  *
3998  * Return: 0 on success or error code on failure.
3999  */
4000 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
4001 			       u8 *send_msg, u16 msg_size,
4002 			       u8 *recv_msg, u16 *recv_len)
4003 {
4004 	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
4005 	struct idpf_vc_xn_params xn_params = { };
4006 	ssize_t reply_sz;
4007 	u16 recv_size;
4008 
4009 	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
4010 		return -EINVAL;
4011 
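	/* Clamp the reply buffer to the control queue limit and report a
	 * received length of 0 unless a reply actually arrives.
	 */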
4012 	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
4013 	*recv_len = 0;
4014 	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
4015 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4016 	xn_params.send_buf.iov_base = send_msg;
4017 	xn_params.send_buf.iov_len = msg_size;
4018 	xn_params.recv_buf.iov_base = recv_msg;
4019 	xn_params.recv_buf.iov_len = recv_size;
4020 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4021 	if (reply_sz < 0)
4022 		return reply_sz;
4023 	*recv_len = reply_sz;
4024 
4025 	return 0;
4026 }
4027 EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
4028