// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <linux/export.h>
#include <net/libeth/rx.h>

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make the cookie unique for every message
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
	u16 num_max_vports = idpf_get_max_vports(adapter);
	int i;

	for (i = 0; i < num_max_vports; i++)
		if (adapter->vport_ids[i] == v_id)
			return adapter->vports[i];

	return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
				   const struct virtchnl2_event *v2e)
{
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
	if (!vport) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
				    le32_to_cpu(v2e->vport_id));
		return;
	}
	np = netdev_priv(vport->netdev);

	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);

	if (vport->link_up == v2e->link_status)
		return;

	vport->link_up = v2e->link_status;

	if (np->state != __IDPF_VPORT_UP)
		return;

	if (vport->link_up) {
		netif_tx_start_all_queues(vport->netdev);
		netif_carrier_on(vport->netdev);
	} else {
		netif_tx_stop_all_queues(vport->netdev);
		netif_carrier_off(vport->netdev);
	}
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
				struct idpf_ctlq_msg *ctlq_msg)
{
	int payload_size = ctlq_msg->ctx.indirect.payload->size;
	struct virtchnl2_event *v2e;
	u32 event;

	if (payload_size < sizeof(*v2e)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    payload_size);
		return;
	}

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		idpf_handle_event_link(adapter, v2e);
		return;
	default:
		dev_err(&adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages.
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * idpf_ptp_is_mb_msg - Check if the message is PTP-related
 * @op: virtchnl opcode
 *
 * Return: true if msg is PTP-related, false otherwise.
 */
static bool idpf_ptp_is_mb_msg(u32 op)
{
	switch (op) {
	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
		return true;
	default:
		return false;
	}
}

/**
 * idpf_prepare_ptp_mb_msg - Prepare PTP related message
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @ctlq_msg: Corresponding control queue message
 */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{
	/* If the message is PTP-related and the secondary mailbox is
	 * available, send the message through the secondary mailbox.
	 */
	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
		return;

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
}
#else /* !CONFIG_PTP_1588_CLOCK */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{ }
#endif /* CONFIG_PTP_1588_CLOCK */

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiate the send API
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and is expected to
	 * be corrected with a new run either by user or driver
	 * flows after reset.
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;

	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);

	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate synchro
 * as well. For now, this API is only used from within a workqueue context;
 * raw_spin_lock() is enough.
 */

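/* Illustrative sketch (not compiled): a typical synchronous transaction.
 * A caller fills struct idpf_vc_xn_params with a virtchnl opcode plus kvecs
 * for the send and (optional) receive buffers, then calls idpf_vc_xn_exec(),
 * which returns the reply size on success or a negative errno. The pattern
 * below mirrors idpf_send_enable_vport_msg() later in this file; v_id is a
 * caller-provided struct virtchnl2_vport:
 *
 *	struct idpf_vc_xn_params xn_params = {
 *		.vc_op = VIRTCHNL2_OP_ENABLE_VPORT,
 *		.send_buf = { .iov_base = &v_id, .iov_len = sizeof(v_id) },
 *		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
 *	};
 *	ssize_t reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *
 *	if (reply_sz < 0)
 *		return reply_sz;
 */
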
/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn) \
	raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn) \
	raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
	xn->reply.iov_base = NULL;
	xn->reply.iov_len = 0;

	if (xn->state != IDPF_VC_XN_SHUTDOWN)
		xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_init(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		xn->state = IDPF_VC_XN_IDLE;
		xn->idx = i;
		idpf_vc_xn_release_bufs(xn);
		init_completion(&xn->completed);
	}

	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}

/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken-up and their transaction aborted. Further
 * operations on that object will fail.
 */
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
		complete_all(&xn->completed);
	}
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
	struct idpf_vc_xn *xn = NULL;
	unsigned long free_idx;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	if (free_idx == IDPF_VC_XN_RING_LEN)
		goto do_unlock;

	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
	xn = &vcxn_mngr->ring[free_idx];
	xn->salt = vcxn_mngr->salt++;

do_unlock:
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}

/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *   - vc_op: virtchnl operation to send
 *   - send_buf: kvec iov for send buf and len
 *   - recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *   - timeout_ms: timeout waiting for a reply (milliseconds)
 *   - async: don't wait for message reply, will lose caller context
 *   - async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able
		 * to send any messages. At least this way we make an attempt
		 * to remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * timeout than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be a non-issue
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		dev_notice_ratelimited(&adapter->pdev->dev,
				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
				       params->vc_op, cookie, xn->vc_op,
				       xn->salt, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}

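/* Illustrative sketch (not compiled): how the 16-bit SW cookie round-trips.
 * The sender packs the ring index and salt with FIELD_PREP(); the receive
 * path recovers both with FIELD_GET() and rejects stale replies whose salt
 * no longer matches the transaction slot. Assuming, e.g., idx = 5 and
 * salt = 0xa1:
 *
 *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, 0xa1) |
 *		 FIELD_PREP(IDPF_VC_XN_IDX_M, 5);
 *	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);	// 5 again
 *	salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);	// 0xa1 again
 */
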
/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll
 * just check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	int err = 0;

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    xn->vc_op);
		xn->reply_sz = 0;
		err = -EINVAL;
		goto release_bufs;
	}

	if (xn->async_handler) {
		err = xn->async_handler(adapter, xn, ctlq_msg);
		goto release_bufs;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
	}

release_bufs:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

	return err;
}

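/* Illustrative sketch (not compiled): shape of an async reply handler. The
 * callback runs in the mailbox-receive context instead of the caller's, so
 * it can only inspect the ctlq message; the transaction is pushed back to
 * the free list by idpf_vc_xn_forward_async() afterwards. The handler name
 * and body below are hypothetical:
 *
 *	static int example_async_handler(struct idpf_adapter *adapter,
 *					 struct idpf_vc_xn *xn,
 *					 const struct idpf_ctlq_msg *ctlq_msg)
 *	{
 *		if (ctlq_msg->cookie.mbx.chnl_retval)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 * It would be wired up through idpf_vc_xn_params with .async = true and
 * .async_handler = example_async_handler.
 */
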
/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	idpf_vc_xn_lock(xn);
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
				    xn->vc_op, xn->salt, xn->state,
				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
		idpf_vc_xn_unlock(xn);
		return -EINVAL;
	}

	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * to know if it should stop trying to clean the ring if we
		 * lost the virtchnl. We need to stop playing with registers
		 * and yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->data_len;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* we _cannot_ hold lock while calling complete */
	complete(&xn->completed);

	return err;
}

/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: Driver specific private structure
 *
 * Will receive control queue message and posts the receive buffer. Returns 0
 * on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will get at most num_recv messages and write back to
		 * num_recv how many were actually received.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
						   adapter->hw.arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dma_free_coherent(&adapter->pdev->dev,
						  dma_mem->size, dma_mem->va,
						  dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}

/**
 * struct idpf_chunked_msg_params - parameters for sending a chunked message
 * @prepare_msg: fills a single message buffer with @num chunks taken from
 *		 @pos and returns the resulting message size
 * @chunks: array of chunks to be passed in the messages
 * @num_chunks: number of chunks in @chunks
 * @chunk_sz: size of a single chunk in bytes
 * @config_sz: size of the fixed message header in bytes
 * @vc_op: virtchnl opcode to use
 */
struct idpf_chunked_msg_params {
	u32 (*prepare_msg)(const struct idpf_vport *vport,
			   void *buf, const void *pos,
			   u32 num);

	const void *chunks;
	u32 num_chunks;

	u32 chunk_sz;
	u32 config_sz;

	u32 vc_op;
};

/**
 * idpf_alloc_queue_set - allocate a queue set descriptor for a vport
 * @vport: virtual port the queues belong to
 * @num: number of queue entries to reserve
 *
 * Return: pointer to the allocated set or NULL on allocation failure.
 */
struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
{
	struct idpf_queue_set *qp;

	qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->vport = vport;
	qp->num = num;

	return qp;
}

/**
 * idpf_send_chunked_msg - send a VC message consisting of chunks
 * @vport: virtual port data structure
 * @params: message params
 *
 * Helper function that splits the chunk array from @params into one or more
 * mailbox messages, each carrying as many chunks as fit into a single
 * mailbox buffer, and sends them one by one.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_chunked_msg(struct idpf_vport *vport,
				 const struct idpf_chunked_msg_params *params)
{
	struct idpf_vc_xn_params xn_params = {
		.vc_op = params->vc_op,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	const void *pos = params->chunks;
	u32 num_chunks, num_msgs, buf_sz;
	void *buf __free(kfree) = NULL;
	u32 totqs = params->num_chunks;

	num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
						 params->chunk_sz), totqs);
	num_msgs = DIV_ROUND_UP(totqs, num_chunks);

	buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	buf = kzalloc(buf_sz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	xn_params.send_buf.iov_base = buf;

	for (u32 i = 0; i < num_msgs; i++) {
		ssize_t reply_sz;

		memset(buf, 0, buf_sz);
		xn_params.send_buf.iov_len = buf_sz;

		if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
			return -EINVAL;

		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		pos += num_chunks * params->chunk_sz;
		totqs -= num_chunks;

		num_chunks = min(num_chunks, totqs);
		buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	}

	return 0;
}

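/* Illustrative sketch (not compiled): how a chunk array is split across
 * messages. Assuming a mailbox buffer that fits 8 chunks after the fixed
 * header, sending 21 chunks takes DIV_ROUND_UP(21, 8) = 3 messages carrying
 * 8, 8 and 5 chunks; before each message, num_chunks is clamped to the
 * remaining total:
 *
 *	num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
 *			 totqs);			// 8
 *	num_msgs = DIV_ROUND_UP(totqs, num_chunks);	// 3
 *	...
 *	totqs -= num_chunks;		// 21 -> 13 -> 5 -> 0
 *	num_chunks = min(num_chunks, totqs);
 */
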
/**
 * idpf_wait_for_marker_event_set - wait for software marker response for
 * selected Tx queues
 * @qs: set of the Tx queues
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
	struct idpf_tx_queue *txq;
	bool markers_rcvd = true;

	for (u32 i = 0; i < qs->num; i++) {
		switch (qs->qs[i].type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			txq = qs->qs[i].txq;

			idpf_queue_set(SW_MARKER, txq);
			idpf_wait_for_sw_marker_completion(txq);
			markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
			break;
		default:
			break;
		}
	}

	if (!markers_rcvd) {
		netdev_warn(qs->vport->netdev,
			    "Failed to receive marker packets\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Return: 0 on success, negative on failure.
 **/
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;

	qs = idpf_alloc_queue_set(vport, vport->num_txq);
	if (!qs)
		return -ENOMEM;

	for (u32 i = 0; i < qs->num; i++) {
		qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
		qs->qs[i].txq = vport->txqs[i];
	}

	return idpf_wait_for_marker_event_set(qs);
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message. Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}

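/* Illustrative sketch: the version handshake above. On the first pass the
 * driver offers IDPF_VIRTCHNL_VERSION_MAJOR/MINOR; the device answers with
 * the (possibly lower) version it will actually speak. If the reply differs
 * from the offer, -EAGAIN asks the caller to resend using the stored
 * adapter->virt_ver_maj/min so both ends agree. E.g., assuming the driver
 * supports 2.0 and the device only 1.1:
 *
 *	offer 2.0  ->  reply 1.1  ->  err = -EAGAIN, store 1.1
 *	offer 1.1  ->  reply 1.1  ->  err = 0, negotiation done
 */
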
/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
			    VIRTCHNL2_CAP_SEG_IPV4_UDP |
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP |
			    VIRTCHNL2_CAP_SEG_IPV6_TCP |
			    VIRTCHNL2_CAP_SEG_IPV6_UDP |
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP |
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
			    VIRTCHNL2_FLOW_IPV4_UDP |
			    VIRTCHNL2_FLOW_IPV4_SCTP |
			    VIRTCHNL2_FLOW_IPV4_OTHER |
			    VIRTCHNL2_FLOW_IPV6_TCP |
			    VIRTCHNL2_FLOW_IPV6_UDP |
			    VIRTCHNL2_FLOW_IPV6_SCTP |
			    VIRTCHNL2_FLOW_IPV6_OTHER);

	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
			    VIRTCHNL2_CAP_RDMA |
			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
			    VIRTCHNL2_CAP_MACFILTER |
			    VIRTCHNL2_CAP_SPLITQ_QSCHED |
			    VIRTCHNL2_CAP_PROMISC |
			    VIRTCHNL2_CAP_LOOPBACK |
			    VIRTCHNL2_CAP_PTP);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}

/**
 * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
 * @adapter: Driver specific private struct
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int num_regions, size;
	struct idpf_hw *hw;
	ssize_t reply_sz;
	int err = 0;

	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!rcvd_regions)
		return -ENOMEM;

	xn_params.recv_buf.iov_base = rcvd_regions;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
	size = struct_size(rcvd_regions, mem_reg, num_regions);
	if (reply_sz < size)
		return -EIO;

	if (size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	hw = &adapter->hw;
	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
	if (!hw->lan_regs)
		return -ENOMEM;

	for (int i = 0; i < num_regions; i++) {
		hw->lan_regs[i].addr_len =
			le64_to_cpu(rcvd_regions->mem_reg[i].size);
		hw->lan_regs[i].addr_start =
			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
	}
	hw->num_lan_regs = num_regions;

	return err;
}

/**
 * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
 * @adapter: Driver specific private structure
 *
 * Called when idpf_send_get_lan_memory_regions() is not supported. This will
 * calculate the offsets and sizes for the regions before, in between, and
 * after the mailbox and rstat MMIO mappings.
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
{
	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
	struct idpf_hw *hw = &adapter->hw;

	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
			       GFP_KERNEL);
	if (!hw->lan_regs)
		return -ENOMEM;

	/* Region preceding the mailbox */
	hw->lan_regs[0].addr_start = 0;
	hw->lan_regs[0].addr_len = mbx_reg->start;
	/* Region between the mailbox and rstat */
	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
	hw->lan_regs[1].addr_len = rstat_reg->start -
				   hw->lan_regs[1].addr_start;
	/* Region after rstat */
	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
				   hw->lan_regs[2].addr_start;

	return 0;
}

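/* Illustrative sketch: BAR0 layout assumed by the fallback above. The three
 * computed regions cover everything in BAR0 except the statically mapped
 * mailbox and rstat windows:
 *
 *	0        mbx.start mbx.end+1   rstat.start rstat.end+1     BAR0 end
 *	|- lan_regs[0] -|-- mbx --|- lan_regs[1] -|-- rstat --|- lan_regs[2] -|
 */
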
/**
 * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
 * @adapter: Driver specific private structure
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct idpf_hw *hw = &adapter->hw;
	resource_size_t res_start;

	res_start = pci_resource_start(pdev, 0);

	for (int i = 0; i < hw->num_lan_regs; i++) {
		resource_size_t start;
		long len;

		len = hw->lan_regs[i].addr_len;
		if (!len)
			continue;
		start = hw->lan_regs[i].addr_start + res_start;

		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
		if (!hw->lan_regs[i].vaddr) {
			pci_err(pdev, "failed to allocate BAR0 region\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
 * @adapter: adapter info struct
 * @rule: Flow steering rule to add/delete
 * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
 *	    VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
 *
 * Send ADD/DELETE flow steering virtchnl message and receive the result.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
				struct virtchnl2_flow_rule_add_del *rule,
				enum virtchnl2_op opcode)
{
	int rule_count = le32_to_cpu(rule->count);
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
		return -EINVAL;

	xn_params.vc_op = opcode;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = false;
	xn_params.send_buf.iov_base = rule;
	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
	xn_params.recv_buf.iov_base = rule;
	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	return reply_sz < 0 ? reply_sz : 0;
}

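/* Illustrative sketch (not compiled): adding one flow steering rule. The
 * rule buffer doubles as the receive buffer, so the device's reply (e.g.
 * the per-rule status) lands back in the same structure for the caller to
 * inspect. Everything other than @count and rule_info is hypothetical here:
 *
 *	struct virtchnl2_flow_rule_add_del *rule;
 *
 *	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->count = cpu_to_le32(1);
 *	// ... fill rule->rule_info[0] with the match criteria and action ...
 *	err = idpf_add_del_fsteer_filters(adapter, rule,
 *					  VIRTCHNL2_OP_ADD_FLOW_RULE);
 */
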
/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;
	u16 default_vports = idpf_get_default_vports(adapter);
	u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;

	mutex_lock(&adapter->queue_lock);

	/* Caps are device-wide. Give each vport an equal piece */
	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
	max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
	max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;

	if (adapter->num_alloc_vports >= default_vports) {
		max_rx_q = IDPF_MIN_Q;
		max_tx_q = IDPF_MIN_Q;
	}

	/*
	 * Harmonize the numbers. The current implementation always creates
	 * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
	 * one completion queue for each Tx queue for best performance.
	 * If fewer buffer or completion queues are available, cap the number
	 * of the corresponding Rx/Tx queues: e.g., with a budget of 16 buffer
	 * queues and two buffer queues per Rx queue, at most 8 Rx queues can
	 * be used.
	 */
	max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
	max_tx_q = min(max_tx_q, max_compl_q);

	max_q->max_rxq = max_rx_q;
	max_q->max_txq = max_tx_q;
	max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
	max_q->max_complq = max_tx_q;

	if (avail_queues->avail_rxq < max_q->max_rxq ||
	    avail_queues->avail_txq < max_q->max_txq ||
	    avail_queues->avail_bufq < max_q->max_bufq ||
	    avail_queues->avail_complq < max_q->max_complq) {
		mutex_unlock(&adapter->queue_lock);

		return -EINVAL;
	}

	avail_queues->avail_rxq -= max_q->max_rxq;
	avail_queues->avail_txq -= max_q->max_txq;
	avail_queues->avail_bufq -= max_q->max_bufq;
	avail_queues->avail_complq -= max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);

	return 0;
}

/**
 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues;

	mutex_lock(&adapter->queue_lock);
	avail_queues = &adapter->avail_queues;

	avail_queues->avail_rxq += max_q->max_rxq;
	avail_queues->avail_txq += max_q->max_txq;
	avail_queues->avail_bufq += max_q->max_bufq;
	avail_queues->avail_complq += max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);
}

/**
 * idpf_init_avail_queues - Initialize available queues on the device
 * @adapter: Driver specific private structure
 */
static void idpf_init_avail_queues(struct idpf_adapter *adapter)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;

	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}

/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @vport: virtual port structure
 * @reg_vals: Register offsets to store in
 *
 * Returns number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
			   struct idpf_vec_regs *reg_vals)
{
	struct virtchnl2_vector_chunks *chunks;
	struct idpf_vec_regs reg_val;
	u16 num_vchunks, num_vec;
	int num_regs = 0, i, j;

	chunks = &vport->adapter->req_vec_chunks->vchunks;
	num_vchunks = le16_to_cpu(chunks->num_vchunks);

	for (j = 0; j < num_vchunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u32 dynctl_reg_spacing;
		u32 itrn_reg_spacing;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
		reg_val.itrn_index_spacing =
			le32_to_cpu(chunk->itrn_index_spacing);

		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);

		for (i = 0; i < num_vec; i++) {
			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
			reg_vals[num_regs].itrn_index_spacing =
				reg_val.itrn_index_spacing;

			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
			reg_val.itrn_reg += itrn_reg_spacing;
			num_regs++;
		}
	}

	return num_regs;
}

/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information, with a specific queue type, and stores it into the
 * array passed as an argument. It returns the actual number of queue
 * registers that are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
				struct virtchnl2_queue_reg_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
	int reg_filled = 0, i;
	u32 reg_val;

	while (num_chunks--) {
		struct virtchnl2_queue_reg_chunk *chunk;
		u16 num_q;

		chunk = &chunks->chunks[num_chunks];
		if (le32_to_cpu(chunk->type) != q_type)
			continue;

		num_q = le32_to_cpu(chunk->num_queues);
		reg_val = le64_to_cpu(chunk->qtail_reg_start);
		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
			reg_vals[reg_filled++] = reg_val;
			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
		}
	}

	return reg_filled;
}

/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs;
			     j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter,
							  reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}

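/* Illustrative sketch: how a queue register chunk expands. Assuming a chunk
 * with qtail_reg_start = 0x2000, qtail_reg_spacing = 0x1000 and
 * num_queues = 3, idpf_vport_get_q_reg() emits the tail offsets
 *
 *	reg_vals[] = { 0x2000, 0x3000, 0x4000 };
 *
 * which __idpf_queue_reg_init() then maps with idpf_get_reg_addr() and
 * assigns to the queues' ->tail pointers in ring order.
 */
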
/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 *
 * Return 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_regs, ret = 0;
	u32 *reg_vals;

	/* We may never deal with more than 256 same type of queues */
	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
	if (!reg_vals)
		return -ENOMEM;

	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	/* Initialize Tx queue tail register address */
	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
					VIRTCHNL2_QUEUE_TYPE_TX,
					chunks);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
					 VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	/* Initialize Rx/buffer queue tail register address based on Rx queue
	 * model
	 */
	if (idpf_is_queue_model_split(vport->rxq_model)) {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
						chunks);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	} else {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX,
						chunks);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	}

free_reg_vals:
	kfree(reg_vals);

	return ret;
}

/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
	ssize_t reply_sz;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
							  GFP_KERNEL);
		if (!adapter->vport_params_reqd[idx])
			return -ENOMEM;
	}

	vport_msg = adapter->vport_params_reqd[idx];
	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	vport_msg->vport_index = cpu_to_le16(idx);

	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
	if (err) {
		dev_err(&adapter->pdev->dev, "Enough queues are not available\n");

		return err;
	}

	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
			goto free_vport_params;
		}
	}

	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
	xn_params.send_buf.iov_base = vport_msg;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto free_vport_params;
	}

	return 0;

free_vport_params:
	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	return err;
}

/**
 * idpf_check_supported_desc_ids - Verify we have required descriptor support
 * @vport: virtual port structure
 *
 * Return 0 on success, error on failure
 */
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport *vport_msg;
	u64 rx_desc_ids, tx_desc_ids;

	vport_msg = adapter->vport_params_recvd[vport->idx];

	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
		return -EOPNOTSUPP;
	}

	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);

	if (idpf_is_queue_model_split(vport->rxq_model)) {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
		}
	} else {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
			vport->base_rxd = true;
	}

	if (!idpf_is_queue_model_split(vport->txq_model))
		return 0;

	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
	}

	return 0;
}

/**
 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
 * @vport: virtual port data structure
 *
 * Send virtchnl destroy vport message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
 * @vport: virtual port data structure
 *
 * Send enable vport virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
 * @vport: virtual port data structure
 *
 * Send disable vport virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
 * @vport: virtual port data structure
 * @q: Tx queue to be inserted into VC chunk
 * @qi: pointer to the buffer containing the VC chunk
 */
static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
				       const struct idpf_tx_queue *q,
				       struct virtchnl2_txq_info *qi)
{
	u32 val;

	qi->queue_id = cpu_to_le32(q->q_id);
	qi->model = cpu_to_le16(vport->txq_model);
	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
	qi->ring_len = cpu_to_le16(q->desc_count);
	qi->dma_ring_addr = cpu_to_le64(q->dma);
	qi->relative_queue_id = cpu_to_le16(q->rel_q_id);

	if (!idpf_is_queue_model_split(vport->txq_model)) {
		qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
		return;
	}

	if (idpf_queue_has(XDP, q))
		val = q->complq->q_id;
	else
		val = q->txq_grp->complq->q_id;

	qi->tx_compl_queue_id = cpu_to_le16(val);

	if (idpf_queue_has(FLOW_SCH_EN, q))
		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
	else
		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;

	qi->sched_mode = cpu_to_le16(val);
}

/**
 * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
 * @vport: virtual port data structure
 * @q: completion queue to be inserted into VC chunk
 * @qi: pointer to the buffer containing the VC chunk
 */
static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
					  const struct idpf_compl_queue *q,
					  struct virtchnl2_txq_info *qi)
{
	u32 val;

	qi->queue_id = cpu_to_le32(q->q_id);
	qi->model = cpu_to_le16(vport->txq_model);
	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
	qi->ring_len = cpu_to_le16(q->desc_count);
	qi->dma_ring_addr = cpu_to_le64(q->dma);

	if (idpf_queue_has(FLOW_SCH_EN, q))
		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
	else
		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;

	qi->sched_mode = cpu_to_le16(val);
}

/**
 * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
 * @vport: virtual port data structure
 * @buf: buffer containing the message
 * @pos: pointer to the first chunk describing the Tx queue
 * @num_chunks: number of chunks in the message
 *
 * Helper function for preparing the message describing configuration of
 * Tx queues.
 *
 * Return: the total size of the prepared message.
 */
static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
				     void *buf, const void *pos,
				     u32 num_chunks)
{
	struct virtchnl2_config_tx_queues *ctq = buf;

	ctq->vport_id = cpu_to_le32(vport->vport_id);
	ctq->num_qinfo = cpu_to_le16(num_chunks);
	memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));

	return struct_size(ctq, qinfo, num_chunks);
}

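/* Illustrative sketch: layout of one CONFIG_TX_QUEUES mailbox message as
 * assembled by the prepare_msg callback above. A fixed header is followed by
 * num_chunks copies of the per-queue chunk, and the returned size must equal
 * config_sz + num_chunks * chunk_sz, which idpf_send_chunked_msg() verifies:
 *
 *	+------------------------------------+ <- virtchnl2_config_tx_queues
 *	| vport_id | num_qinfo | ...         |    (config_sz bytes)
 *	+------------------------------------+
 *	| qinfo[0] | qinfo[1] | ... | qinfo[N-1]  (num_chunks * chunk_sz)
 *	+------------------------------------+
 */
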
/**
 * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
 * message for selected queues
 * @qs: set of the Tx queues to configure
 *
 * Send config queues virtchnl message for queues contained in the @qs array.
 * The @qs array can contain Tx queues (or completion queues) only.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
	struct idpf_chunked_msg_params params = {
		.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
		.prepare_msg = idpf_prepare_cfg_txqs_msg,
		.config_sz = sizeof(struct virtchnl2_config_tx_queues),
		.chunk_sz = sizeof(*qi),
	};

	qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
	if (!qi)
		return -ENOMEM;

	params.chunks = qi;

	for (u32 i = 0; i < qs->num; i++) {
		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
			idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
						   &qi[params.num_chunks++]);
		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
			idpf_fill_complq_config_chunk(qs->vport,
						      qs->qs[i].complq,
						      &qi[params.num_chunks++]);
	}

	return idpf_send_chunked_msg(qs->vport, &params);
}

/**
 * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
 * @vport: virtual port data structure
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;
	u32 totqs = vport->num_txq + vport->num_complq;
	u32 k = 0;

	qs = idpf_alloc_queue_set(vport, totqs);
	if (!qs)
		return -ENOMEM;

	/* Populate the queue info buffer with all queue context info */
	for (u32 i = 0; i < vport->num_txq_grp; i++) {
		const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[k++].txq = tx_qgrp->txqs[j];
		}

		if (idpf_is_queue_model_split(vport->txq_model)) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
			qs->qs[k++].complq = tx_qgrp->complq;
		}
	}

	/* Make sure accounting agrees */
	if (k != totqs)
		return -EINVAL;

	return idpf_send_config_tx_queue_set_msg(qs);
}

1890 */ 1891 q->rx_buf_size = sets[0].bufq.rx_buf_size; 1892 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); 1893 1894 qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); 1895 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { 1896 qi->bufq2_ena = IDPF_BUFQ2_ENA; 1897 qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id); 1898 } 1899 1900 q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; 1901 1902 if (idpf_queue_has(HSPLIT_EN, q)) { 1903 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1904 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); 1905 } 1906 1907 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1908 } 1909 1910 /** 1911 * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue 1912 * @vport: virtual port data structure 1913 * @q: buffer queue to be inserted into VC chunk 1914 * @qi: pointer to the buffer containing the VC chunk 1915 */ 1916 static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport, 1917 const struct idpf_buf_queue *q, 1918 struct virtchnl2_rxq_info *qi) 1919 { 1920 qi->queue_id = cpu_to_le32(q->q_id); 1921 qi->model = cpu_to_le16(vport->rxq_model); 1922 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1923 qi->ring_len = cpu_to_le16(q->desc_count); 1924 qi->dma_ring_addr = cpu_to_le64(q->dma); 1925 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); 1926 qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); 1927 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1928 qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE; 1929 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1930 qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1931 1932 if (idpf_queue_has(HSPLIT_EN, q)) { 1933 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1934 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); 1935 } 1936 } 1937 1938 /** 1939 * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues 1940 * @vport: virtual port data structure 1941 * @buf: buffer containing the message 1942 * @pos: pointer to the first chunk describing the rx queue 1943 * @num_chunks: number of chunks in the message 1944 * 1945 * Helper function for preparing the message describing configuration of 1946 * Rx queues. 1947 * 1948 * Return: the total size of the prepared message. 1949 */ 1950 static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport, 1951 void *buf, const void *pos, 1952 u32 num_chunks) 1953 { 1954 struct virtchnl2_config_rx_queues *crq = buf; 1955 1956 crq->vport_id = cpu_to_le32(vport->vport_id); 1957 crq->num_qinfo = cpu_to_le16(num_chunks); 1958 memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo)); 1959 1960 return struct_size(crq, qinfo, num_chunks); 1961 } 1962 1963 /** 1964 * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message 1965 * for selected queues. 1966 * @qs: set of the Rx queues to configure 1967 * 1968 * Send config queues virtchnl message for queues contained in the @qs array. 1969 * The @qs array can contain Rx queues (or buffer queues) only. 1970 * 1971 * Return: 0 on success, -errno on failure. 
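 *
 * Note: in splitq mode, each Rx queue chunk takes its buffer sizes from the
 * first buffer queue set associated with that queue (see
 * idpf_fill_rxq_config_chunk()), so the buffer queues themselves only need
 * chunks of their own when they are being (re)configured as well.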
1972 */ 1973 static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs) 1974 { 1975 struct virtchnl2_rxq_info *qi __free(kfree) = NULL; 1976 struct idpf_chunked_msg_params params = { 1977 .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES, 1978 .prepare_msg = idpf_prepare_cfg_rxqs_msg, 1979 .config_sz = sizeof(struct virtchnl2_config_rx_queues), 1980 .chunk_sz = sizeof(*qi), 1981 }; 1982 1983 qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL); 1984 if (!qi) 1985 return -ENOMEM; 1986 1987 params.chunks = qi; 1988 1989 for (u32 i = 0; i < qs->num; i++) { 1990 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX) 1991 idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq, 1992 &qi[params.num_chunks++]); 1993 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) 1994 idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq, 1995 &qi[params.num_chunks++]); 1996 } 1997 1998 return idpf_send_chunked_msg(qs->vport, &params); 1999 } 2000 2001 /** 2002 * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message 2003 * @vport: virtual port data structure 2004 * 2005 * Return: 0 on success, -errno on failure. 2006 */ 2007 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) 2008 { 2009 bool splitq = idpf_is_queue_model_split(vport->rxq_model); 2010 struct idpf_queue_set *qs __free(kfree) = NULL; 2011 u32 totqs = vport->num_rxq + vport->num_bufq; 2012 u32 k = 0; 2013 2014 qs = idpf_alloc_queue_set(vport, totqs); 2015 if (!qs) 2016 return -ENOMEM; 2017 2018 /* Populate the queue info buffer with all queue context info */ 2019 for (u32 i = 0; i < vport->num_rxq_grp; i++) { 2020 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 2021 u32 num_rxq; 2022 2023 if (!splitq) { 2024 num_rxq = rx_qgrp->singleq.num_rxq; 2025 goto rxq; 2026 } 2027 2028 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { 2029 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 2030 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; 2031 } 2032 2033 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2034 2035 rxq: 2036 for (u32 j = 0; j < num_rxq; j++) { 2037 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2038 2039 if (splitq) 2040 qs->qs[k++].rxq = 2041 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2042 else 2043 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2044 } 2045 } 2046 2047 /* Make sure accounting agrees */ 2048 if (k != totqs) 2049 return -EINVAL; 2050 2051 return idpf_send_config_rx_queue_set_msg(qs); 2052 } 2053 2054 /** 2055 * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected 2056 * queues 2057 * @vport: virtual port data structure 2058 * @buf: buffer containing the message 2059 * @pos: pointer to the first chunk describing the queue 2060 * @num_chunks: number of chunks in the message 2061 * 2062 * Helper function for preparing the message describing queues to be enabled 2063 * or disabled. 2064 * 2065 * Return: the total size of the prepared message.
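 *
 * The buffer filled here is a fixed header followed by the copied chunk
 * array; a sketch of the layout implied by the accesses below (the
 * authoritative definition lives in virtchnl2.h and may carry additional
 * reserved fields):
 *
 *	__le32 vport_id;
 *	struct virtchnl2_queue_chunks chunks;	- le16 num_chunks + chunks[]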
2066 */ 2067 static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport, 2068 void *buf, const void *pos, 2069 u32 num_chunks) 2070 { 2071 struct virtchnl2_del_ena_dis_queues *eq = buf; 2072 2073 eq->vport_id = cpu_to_le32(vport->vport_id); 2074 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2075 memcpy(eq->chunks.chunks, pos, 2076 num_chunks * sizeof(*eq->chunks.chunks)); 2077 2078 return struct_size(eq, chunks.chunks, num_chunks); 2079 } 2080 2081 /** 2082 * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues 2083 * message for selected queues 2084 * @qs: set of the queues to enable or disable 2085 * @en: whether to enable or disable queues 2086 * 2087 * Send enable or disable queues virtchnl message for queues contained 2088 * in the @qs array. 2089 * The @qs array can contain pointers to both Rx and Tx queues. 2090 * 2091 * Return: 0 on success, -errno on failure. 2092 */ 2093 static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs, 2094 bool en) 2095 { 2096 struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; 2097 struct idpf_chunked_msg_params params = { 2098 .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES : 2099 VIRTCHNL2_OP_DISABLE_QUEUES, 2100 .prepare_msg = idpf_prepare_ena_dis_qs_msg, 2101 .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues), 2102 .chunk_sz = sizeof(*qc), 2103 .num_chunks = qs->num, 2104 }; 2105 2106 qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL); 2107 if (!qc) 2108 return -ENOMEM; 2109 2110 params.chunks = qc; 2111 2112 for (u32 i = 0; i < qs->num; i++) { 2113 const struct idpf_queue_ptr *q = &qs->qs[i]; 2114 u32 qid; 2115 2116 qc[i].type = cpu_to_le32(q->type); 2117 qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 2118 2119 switch (q->type) { 2120 case VIRTCHNL2_QUEUE_TYPE_RX: 2121 qid = q->rxq->q_id; 2122 break; 2123 case VIRTCHNL2_QUEUE_TYPE_TX: 2124 qid = q->txq->q_id; 2125 break; 2126 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 2127 qid = q->bufq->q_id; 2128 break; 2129 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: 2130 qid = q->complq->q_id; 2131 break; 2132 default: 2133 return -EINVAL; 2134 } 2135 2136 qc[i].start_queue_id = cpu_to_le32(qid); 2137 } 2138 2139 return idpf_send_chunked_msg(qs->vport, &params); 2140 } 2141 2142 /** 2143 * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues 2144 * message 2145 * @vport: virtual port data structure 2146 * @en: whether to enable or disable queues 2147 * 2148 * Return: 0 on success, -errno on failure.
2149 */ 2150 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en) 2151 { 2152 struct idpf_queue_set *qs __free(kfree) = NULL; 2153 u32 num_txq, num_q, k = 0; 2154 bool split; 2155 2156 num_txq = vport->num_txq + vport->num_complq; 2157 num_q = num_txq + vport->num_rxq + vport->num_bufq; 2158 2159 qs = idpf_alloc_queue_set(vport, num_q); 2160 if (!qs) 2161 return -ENOMEM; 2162 2163 split = idpf_is_queue_model_split(vport->txq_model); 2164 2165 for (u32 i = 0; i < vport->num_txq_grp; i++) { 2166 const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 2167 2168 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { 2169 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; 2170 qs->qs[k++].txq = tx_qgrp->txqs[j]; 2171 } 2172 2173 if (!split) 2174 continue; 2175 2176 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 2177 qs->qs[k++].complq = tx_qgrp->complq; 2178 } 2179 2180 if (k != num_txq) 2181 return -EINVAL; 2182 2183 split = idpf_is_queue_model_split(vport->rxq_model); 2184 2185 for (u32 i = 0; i < vport->num_rxq_grp; i++) { 2186 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 2187 u32 num_rxq; 2188 2189 if (split) 2190 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2191 else 2192 num_rxq = rx_qgrp->singleq.num_rxq; 2193 2194 for (u32 j = 0; j < num_rxq; j++) { 2195 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2196 2197 if (split) 2198 qs->qs[k++].rxq = 2199 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2200 else 2201 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2202 } 2203 2204 if (!split) 2205 continue; 2206 2207 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { 2208 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 2209 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; 2210 } 2211 } 2212 2213 if (k != num_q) 2214 return -EINVAL; 2215 2216 return idpf_send_ena_dis_queue_set_msg(qs, en); 2217 } 2218 2219 /** 2220 * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap 2221 * queue set to the interrupt vector 2222 * @vport: virtual port data structure 2223 * @buf: buffer containing the message 2224 * @pos: pointer to the first chunk describing the vector mapping 2225 * @num_chunks: number of chunks in the message 2226 * 2227 * Helper function for preparing the message describing mapping queues to 2228 * q_vectors. 2229 * 2230 * Return: the total size of the prepared message. 2231 */ 2232 static u32 2233 idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport, 2234 void *buf, const void *pos, 2235 u32 num_chunks) 2236 { 2237 struct virtchnl2_queue_vector_maps *vqvm = buf; 2238 2239 vqvm->vport_id = cpu_to_le32(vport->vport_id); 2240 vqvm->num_qv_maps = cpu_to_le16(num_chunks); 2241 memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps)); 2242 2243 return struct_size(vqvm, qv_maps, num_chunks); 2244 } 2245 2246 /** 2247 * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap 2248 * queue set vector message 2249 * @qs: set of the queues to map or unmap 2250 * @map: true for map and false for unmap 2251 * 2252 * Return: 0 on success, -errno on failure. 2253 */ 2254 static int 2255 idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, 2256 bool map) 2257 { 2258 struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; 2259 struct idpf_chunked_msg_params params = { 2260 .vc_op = map ? 
VIRTCHNL2_OP_MAP_QUEUE_VECTOR : 2261 VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, 2262 .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg, 2263 .config_sz = sizeof(struct virtchnl2_queue_vector_maps), 2264 .chunk_sz = sizeof(*vqv), 2265 .num_chunks = qs->num, 2266 }; 2267 bool split; 2268 2269 vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL); 2270 if (!vqv) 2271 return -ENOMEM; 2272 2273 params.chunks = vqv; 2274 2275 split = idpf_is_queue_model_split(qs->vport->txq_model); 2276 2277 for (u32 i = 0; i < qs->num; i++) { 2278 const struct idpf_queue_ptr *q = &qs->qs[i]; 2279 const struct idpf_q_vector *vec; 2280 u32 qid, v_idx, itr_idx; 2281 2282 vqv[i].queue_type = cpu_to_le32(q->type); 2283 2284 switch (q->type) { 2285 case VIRTCHNL2_QUEUE_TYPE_RX: 2286 qid = q->rxq->q_id; 2287 2288 if (idpf_queue_has(NOIRQ, q->rxq)) 2289 vec = NULL; 2290 else 2291 vec = q->rxq->q_vector; 2292 2293 if (vec) { 2294 v_idx = vec->v_idx; 2295 itr_idx = vec->rx_itr_idx; 2296 } else { 2297 v_idx = qs->vport->noirq_v_idx; 2298 itr_idx = VIRTCHNL2_ITR_IDX_0; 2299 } 2300 break; 2301 case VIRTCHNL2_QUEUE_TYPE_TX: 2302 qid = q->txq->q_id; 2303 2304 if (idpf_queue_has(NOIRQ, q->txq)) 2305 vec = NULL; 2306 else if (idpf_queue_has(XDP, q->txq)) 2307 vec = q->txq->complq->q_vector; 2308 else if (split) 2309 vec = q->txq->txq_grp->complq->q_vector; 2310 else 2311 vec = q->txq->q_vector; 2312 2313 if (vec) { 2314 v_idx = vec->v_idx; 2315 itr_idx = vec->tx_itr_idx; 2316 } else { 2317 v_idx = qs->vport->noirq_v_idx; 2318 itr_idx = VIRTCHNL2_ITR_IDX_1; 2319 } 2320 break; 2321 default: 2322 return -EINVAL; 2323 } 2324 2325 vqv[i].queue_id = cpu_to_le32(qid); 2326 vqv[i].vector_id = cpu_to_le16(v_idx); 2327 vqv[i].itr_idx = cpu_to_le32(itr_idx); 2328 } 2329 2330 return idpf_send_chunked_msg(qs->vport, &params); 2331 } 2332 2333 /** 2334 * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue 2335 * vector message 2336 * @vport: virtual port data structure 2337 * @map: true for map and false for unmap 2338 * 2339 * Return: 0 on success, -errno on failure.
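 *
 * Queues flagged NOIRQ are not bound to a dedicated q_vector; they are
 * mapped to the vport's reserved noirq_v_idx vector with ITR index 0 (Rx)
 * or 1 (Tx), as done in idpf_send_map_unmap_queue_set_vector_msg() above.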
2340 */ 2341 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) 2342 { 2343 struct idpf_queue_set *qs __free(kfree) = NULL; 2344 u32 num_q = vport->num_txq + vport->num_rxq; 2345 u32 k = 0; 2346 2347 qs = idpf_alloc_queue_set(vport, num_q); 2348 if (!qs) 2349 return -ENOMEM; 2350 2351 for (u32 i = 0; i < vport->num_txq_grp; i++) { 2352 const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 2353 2354 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { 2355 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; 2356 qs->qs[k++].txq = tx_qgrp->txqs[j]; 2357 } 2358 } 2359 2360 if (k != vport->num_txq) 2361 return -EINVAL; 2362 2363 for (u32 i = 0; i < vport->num_rxq_grp; i++) { 2364 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 2365 u32 num_rxq; 2366 2367 if (idpf_is_queue_model_split(vport->rxq_model)) 2368 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2369 else 2370 num_rxq = rx_qgrp->singleq.num_rxq; 2371 2372 for (u32 j = 0; j < num_rxq; j++) { 2373 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2374 2375 if (idpf_is_queue_model_split(vport->rxq_model)) 2376 qs->qs[k++].rxq = 2377 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2378 else 2379 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2380 } 2381 } 2382 2383 if (k != num_q) 2384 return -EINVAL; 2385 2386 return idpf_send_map_unmap_queue_set_vector_msg(qs, map); 2387 } 2388 2389 /** 2390 * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for 2391 * selected queues 2392 * @qs: set of the queues 2393 * 2394 * Send enable queues virtchnl message for queues contained in the @qs array. 2395 * 2396 * Return: 0 on success, -errno on failure. 2397 */ 2398 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs) 2399 { 2400 return idpf_send_ena_dis_queue_set_msg(qs, true); 2401 } 2402 2403 /** 2404 * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for 2405 * selected queues 2406 * @qs: set of the queues 2407 * 2408 * Return: 0 on success, -errno on failure. 2409 */ 2410 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs) 2411 { 2412 int err; 2413 2414 err = idpf_send_ena_dis_queue_set_msg(qs, false); 2415 if (err) 2416 return err; 2417 2418 return idpf_wait_for_marker_event_set(qs); 2419 } 2420 2421 /** 2422 * idpf_send_config_queue_set_msg - send virtchnl config queues message for 2423 * selected queues 2424 * @qs: set of the queues 2425 * 2426 * Send config queues virtchnl message for queues contained in the @qs array. 2427 * The @qs array can contain both Rx and Tx queues. 2428 * 2429 * Return: 0 on success, -errno on failure. 2430 */ 2431 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs) 2432 { 2433 int err; 2434 2435 err = idpf_send_config_tx_queue_set_msg(qs); 2436 if (err) 2437 return err; 2438 2439 return idpf_send_config_rx_queue_set_msg(qs); 2440 } 2441 2442 /** 2443 * idpf_send_enable_queues_msg - send enable queues virtchnl message 2444 * @vport: Virtual port private data structure 2445 * 2446 * Will send enable queues virtchnl message. Returns 0 on success, negative on 2447 * failure. 2448 */ 2449 int idpf_send_enable_queues_msg(struct idpf_vport *vport) 2450 { 2451 return idpf_send_ena_dis_queues_msg(vport, true); 2452 } 2453 2454 /** 2455 * idpf_send_disable_queues_msg - send disable queues virtchnl message 2456 * @vport: Virtual port private data structure 2457 * 2458 * Will send disable queues virtchnl message. Returns 0 on success, negative 2459 * on failure.
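 *
 * Once the virtchnl exchange succeeds, this also waits for the Tx marker
 * events indicating the queues have actually been stopped (see
 * idpf_wait_for_marker_event()).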
2460 */ 2461 int idpf_send_disable_queues_msg(struct idpf_vport *vport) 2462 { 2463 int err; 2464 2465 err = idpf_send_ena_dis_queues_msg(vport, false); 2466 if (err) 2467 return err; 2468 2469 return idpf_wait_for_marker_event(vport); 2470 } 2471 2472 /** 2473 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right 2474 * structure 2475 * @dchunks: Destination chunks to store data to 2476 * @schunks: Source chunks to copy data from 2477 * @num_chunks: number of chunks to copy 2478 */ 2479 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, 2480 struct virtchnl2_queue_reg_chunk *schunks, 2481 u16 num_chunks) 2482 { 2483 u16 i; 2484 2485 for (i = 0; i < num_chunks; i++) { 2486 dchunks[i].type = schunks[i].type; 2487 dchunks[i].start_queue_id = schunks[i].start_queue_id; 2488 dchunks[i].num_queues = schunks[i].num_queues; 2489 } 2490 } 2491 2492 /** 2493 * idpf_send_delete_queues_msg - send delete queues virtchnl message 2494 * @vport: Virtual port private data structure 2495 * 2496 * Will send delete queues virtchnl message. Return 0 on success, negative on 2497 * failure. 2498 */ 2499 int idpf_send_delete_queues_msg(struct idpf_vport *vport) 2500 { 2501 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 2502 struct virtchnl2_create_vport *vport_params; 2503 struct virtchnl2_queue_reg_chunks *chunks; 2504 struct idpf_vc_xn_params xn_params = {}; 2505 struct idpf_vport_config *vport_config; 2506 u16 vport_idx = vport->idx; 2507 ssize_t reply_sz; 2508 u16 num_chunks; 2509 int buf_size; 2510 2511 vport_config = vport->adapter->vport_config[vport_idx]; 2512 if (vport_config->req_qs_chunks) { 2513 chunks = &vport_config->req_qs_chunks->chunks; 2514 } else { 2515 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 2516 chunks = &vport_params->chunks; 2517 } 2518 2519 num_chunks = le16_to_cpu(chunks->num_chunks); 2520 buf_size = struct_size(eq, chunks.chunks, num_chunks); 2521 2522 eq = kzalloc(buf_size, GFP_KERNEL); 2523 if (!eq) 2524 return -ENOMEM; 2525 2526 eq->vport_id = cpu_to_le32(vport->vport_id); 2527 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2528 2529 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, 2530 num_chunks); 2531 2532 xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; 2533 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2534 xn_params.send_buf.iov_base = eq; 2535 xn_params.send_buf.iov_len = buf_size; 2536 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2537 2538 return reply_sz < 0 ? reply_sz : 0; 2539 } 2540 2541 /** 2542 * idpf_send_config_queues_msg - Send config queues virtchnl message 2543 * @vport: Virtual port private data structure 2544 * 2545 * Will send config queues virtchnl message. Returns 0 on success, negative on 2546 * failure. 2547 */ 2548 int idpf_send_config_queues_msg(struct idpf_vport *vport) 2549 { 2550 int err; 2551 2552 err = idpf_send_config_tx_queues_msg(vport); 2553 if (err) 2554 return err; 2555 2556 return idpf_send_config_rx_queues_msg(vport); 2557 } 2558 2559 /** 2560 * idpf_send_add_queues_msg - Send virtchnl add queues message 2561 * @vport: Virtual port private data structure 2562 * @num_tx_q: number of transmit queues 2563 * @num_complq: number of transmit completion queues 2564 * @num_rx_q: number of receive queues 2565 * @num_rx_bufq: number of receive buffer queues 2566 * 2567 * Returns 0 on success, negative on failure. vport _MUST_ be const here as 2568 * we should not change any fields within vport itself in this function. 
2569 */ 2570 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, 2571 u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) 2572 { 2573 struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; 2574 struct idpf_vc_xn_params xn_params = {}; 2575 struct idpf_vport_config *vport_config; 2576 struct virtchnl2_add_queues aq = {}; 2577 u16 vport_idx = vport->idx; 2578 ssize_t reply_sz; 2579 int size; 2580 2581 vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2582 if (!vc_msg) 2583 return -ENOMEM; 2584 2585 vport_config = vport->adapter->vport_config[vport_idx]; 2586 kfree(vport_config->req_qs_chunks); 2587 vport_config->req_qs_chunks = NULL; 2588 2589 aq.vport_id = cpu_to_le32(vport->vport_id); 2590 aq.num_tx_q = cpu_to_le16(num_tx_q); 2591 aq.num_tx_complq = cpu_to_le16(num_complq); 2592 aq.num_rx_q = cpu_to_le16(num_rx_q); 2593 aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); 2594 2595 xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; 2596 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2597 xn_params.send_buf.iov_base = &aq; 2598 xn_params.send_buf.iov_len = sizeof(aq); 2599 xn_params.recv_buf.iov_base = vc_msg; 2600 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2601 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2602 if (reply_sz < 0) 2603 return reply_sz; 2604 2605 /* compare vc_msg num queues with vport num queues */ 2606 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || 2607 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || 2608 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || 2609 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) 2610 return -EINVAL; 2611 2612 size = struct_size(vc_msg, chunks.chunks, 2613 le16_to_cpu(vc_msg->chunks.num_chunks)); 2614 if (reply_sz < size) 2615 return -EIO; 2616 2617 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); 2618 if (!vport_config->req_qs_chunks) 2619 return -ENOMEM; 2620 2621 return 0; 2622 } 2623 2624 /** 2625 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message 2626 * @adapter: Driver specific private structure 2627 * @num_vectors: number of vectors to be allocated 2628 * 2629 * Returns 0 on success, negative on failure. 
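 *
 * On success the reply is cached in adapter->req_vec_chunks; a caller sketch
 * (illustrative only, the vector count is a made-up example):
 *
 *	err = idpf_send_alloc_vectors_msg(adapter, 16);
 *	if (!err)
 *		got = le16_to_cpu(adapter->req_vec_chunks->num_vectors);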
2630 */ 2631 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) 2632 { 2633 struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; 2634 struct idpf_vc_xn_params xn_params = {}; 2635 struct virtchnl2_alloc_vectors ac = {}; 2636 ssize_t reply_sz; 2637 u16 num_vchunks; 2638 int size; 2639 2640 ac.num_vectors = cpu_to_le16(num_vectors); 2641 2642 rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2643 if (!rcvd_vec) 2644 return -ENOMEM; 2645 2646 xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; 2647 xn_params.send_buf.iov_base = &ac; 2648 xn_params.send_buf.iov_len = sizeof(ac); 2649 xn_params.recv_buf.iov_base = rcvd_vec; 2650 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2651 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2652 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2653 if (reply_sz < 0) 2654 return reply_sz; 2655 2656 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); 2657 size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); 2658 if (reply_sz < size) 2659 return -EIO; 2660 2661 if (size > IDPF_CTLQ_MAX_BUF_LEN) 2662 return -EINVAL; 2663 2664 kfree(adapter->req_vec_chunks); 2665 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); 2666 if (!adapter->req_vec_chunks) 2667 return -ENOMEM; 2668 2669 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { 2670 kfree(adapter->req_vec_chunks); 2671 adapter->req_vec_chunks = NULL; 2672 return -EINVAL; 2673 } 2674 2675 return 0; 2676 } 2677 2678 /** 2679 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message 2680 * @adapter: Driver specific private structure 2681 * 2682 * Returns 0 on success, negative on failure. 2683 */ 2684 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) 2685 { 2686 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; 2687 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; 2688 struct idpf_vc_xn_params xn_params = {}; 2689 ssize_t reply_sz; 2690 int buf_size; 2691 2692 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); 2693 2694 xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; 2695 xn_params.send_buf.iov_base = vcs; 2696 xn_params.send_buf.iov_len = buf_size; 2697 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2698 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2699 if (reply_sz < 0) 2700 return reply_sz; 2701 2702 kfree(adapter->req_vec_chunks); 2703 adapter->req_vec_chunks = NULL; 2704 2705 return 0; 2706 } 2707 2708 /** 2709 * idpf_get_max_vfs - Get max number of VFs supported 2710 * @adapter: Driver specific private structure 2711 * 2712 * Returns max number of VFs 2713 */ 2714 static int idpf_get_max_vfs(struct idpf_adapter *adapter) 2715 { 2716 return le16_to_cpu(adapter->caps.max_sriov_vfs); 2717 } 2718 2719 /** 2720 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message 2721 * @adapter: Driver specific private structure 2722 * @num_vfs: number of virtual functions to be created 2723 * 2724 * Returns 0 on success, negative on failure.
2725 */ 2726 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) 2727 { 2728 struct virtchnl2_sriov_vfs_info svi = {}; 2729 struct idpf_vc_xn_params xn_params = {}; 2730 ssize_t reply_sz; 2731 2732 svi.num_vfs = cpu_to_le16(num_vfs); 2733 xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; 2734 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2735 xn_params.send_buf.iov_base = &svi; 2736 xn_params.send_buf.iov_len = sizeof(svi); 2737 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2738 2739 return reply_sz < 0 ? reply_sz : 0; 2740 } 2741 2742 /** 2743 * idpf_send_get_stats_msg - Send virtchnl get statistics message 2744 * @vport: vport to get stats for 2745 * 2746 * Returns 0 on success, negative on failure. 2747 */ 2748 int idpf_send_get_stats_msg(struct idpf_vport *vport) 2749 { 2750 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 2751 struct rtnl_link_stats64 *netstats = &np->netstats; 2752 struct virtchnl2_vport_stats stats_msg = {}; 2753 struct idpf_vc_xn_params xn_params = {}; 2754 ssize_t reply_sz; 2755 2756 2757 /* Don't send get_stats message if the link is down */ 2758 if (np->state <= __IDPF_VPORT_DOWN) 2759 return 0; 2760 2761 stats_msg.vport_id = cpu_to_le32(vport->vport_id); 2762 2763 xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; 2764 xn_params.send_buf.iov_base = &stats_msg; 2765 xn_params.send_buf.iov_len = sizeof(stats_msg); 2766 xn_params.recv_buf = xn_params.send_buf; 2767 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2768 2769 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2770 if (reply_sz < 0) 2771 return reply_sz; 2772 if (reply_sz < sizeof(stats_msg)) 2773 return -EIO; 2774 2775 spin_lock_bh(&np->stats_lock); 2776 2777 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + 2778 le64_to_cpu(stats_msg.rx_multicast) + 2779 le64_to_cpu(stats_msg.rx_broadcast); 2780 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + 2781 le64_to_cpu(stats_msg.tx_multicast) + 2782 le64_to_cpu(stats_msg.tx_broadcast); 2783 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); 2784 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); 2785 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); 2786 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); 2787 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); 2788 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); 2789 2790 vport->port_stats.vport_stats = stats_msg; 2791 2792 spin_unlock_bh(&np->stats_lock); 2793 2794 return 0; 2795 } 2796 2797 /** 2798 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message 2799 * @vport: virtual port data structure 2800 * @get: flag to set or get rss look up table 2801 * 2802 * Returns 0 on success, negative on failure. 
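 *
 * With @get true, the device reply refreshes the LUT cached in
 * vport_config->user_config.rss_data; with @get false, the cached LUT is
 * pushed to the device. An illustrative set path (new_qid is hypothetical):
 *
 *	rss_data->rss_lut[i] = new_qid;
 *	err = idpf_send_get_set_rss_lut_msg(vport, false);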
2803 */ 2804 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) 2805 { 2806 struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; 2807 struct virtchnl2_rss_lut *rl __free(kfree) = NULL; 2808 struct idpf_vc_xn_params xn_params = {}; 2809 struct idpf_rss_data *rss_data; 2810 int buf_size, lut_buf_size; 2811 ssize_t reply_sz; 2812 int i; 2813 2814 rss_data = 2815 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2816 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); 2817 rl = kzalloc(buf_size, GFP_KERNEL); 2818 if (!rl) 2819 return -ENOMEM; 2820 2821 rl->vport_id = cpu_to_le32(vport->vport_id); 2822 2823 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2824 xn_params.send_buf.iov_base = rl; 2825 xn_params.send_buf.iov_len = buf_size; 2826 2827 if (get) { 2828 recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2829 if (!recv_rl) 2830 return -ENOMEM; 2831 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; 2832 xn_params.recv_buf.iov_base = recv_rl; 2833 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2834 } else { 2835 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); 2836 for (i = 0; i < rss_data->rss_lut_size; i++) 2837 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); 2838 2839 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; 2840 } 2841 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2842 if (reply_sz < 0) 2843 return reply_sz; 2844 if (!get) 2845 return 0; 2846 if (reply_sz < sizeof(struct virtchnl2_rss_lut)) 2847 return -EIO; 2848 2849 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); 2850 if (reply_sz < lut_buf_size) 2851 return -EIO; 2852 2853 /* size didn't change, we can reuse existing lut buf */ 2854 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) 2855 goto do_memcpy; 2856 2857 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); 2858 kfree(rss_data->rss_lut); 2859 2860 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); 2861 if (!rss_data->rss_lut) { 2862 rss_data->rss_lut_size = 0; 2863 return -ENOMEM; 2864 } 2865 2866 do_memcpy: 2867 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); 2868 2869 return 0; 2870 } 2871 2872 /** 2873 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message 2874 * @vport: virtual port data structure 2875 * @get: flag to set or get RSS key 2876 * 2877 * Returns 0 on success, negative on failure 2878 */ 2879 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) 2880 { 2881 struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; 2882 struct virtchnl2_rss_key *rk __free(kfree) = NULL; 2883 struct idpf_vc_xn_params xn_params = {}; 2884 struct idpf_rss_data *rss_data; 2885 ssize_t reply_sz; 2886 int i, buf_size; 2887 u16 key_size; 2888 2889 rss_data = 2890 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2891 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); 2892 rk = kzalloc(buf_size, GFP_KERNEL); 2893 if (!rk) 2894 return -ENOMEM; 2895 2896 rk->vport_id = cpu_to_le32(vport->vport_id); 2897 xn_params.send_buf.iov_base = rk; 2898 xn_params.send_buf.iov_len = buf_size; 2899 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2900 if (get) { 2901 recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2902 if (!recv_rk) 2903 return -ENOMEM; 2904 2905 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; 2906 xn_params.recv_buf.iov_base = recv_rk; 2907 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2908 } else { 2909 rk->key_len = cpu_to_le16(rss_data->rss_key_size); 2910 for (i = 0; i
< rss_data->rss_key_size; i++) 2911 rk->key_flex[i] = rss_data->rss_key[i]; 2912 2913 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; 2914 } 2915 2916 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2917 if (reply_sz < 0) 2918 return reply_sz; 2919 if (!get) 2920 return 0; 2921 if (reply_sz < sizeof(struct virtchnl2_rss_key)) 2922 return -EIO; 2923 2924 key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 2925 le16_to_cpu(recv_rk->key_len)); 2926 if (reply_sz < key_size) 2927 return -EIO; 2928 2929 /* key len didn't change, reuse existing buf */ 2930 if (rss_data->rss_key_size == key_size) 2931 goto do_memcpy; 2932 2933 rss_data->rss_key_size = key_size; 2934 kfree(rss_data->rss_key); 2935 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); 2936 if (!rss_data->rss_key) { 2937 rss_data->rss_key_size = 0; 2938 return -ENOMEM; 2939 } 2940 2941 do_memcpy: 2942 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); 2943 2944 return 0; 2945 } 2946 2947 /** 2948 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table 2949 * @ptype: ptype lookup table 2950 * @pstate: state machine for ptype lookup table 2951 * @ipv4: ipv4 or ipv6 2952 * @frag: fragmentation allowed 2953 * 2954 */ 2955 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, 2956 struct idpf_ptype_state *pstate, 2957 bool ipv4, bool frag) 2958 { 2959 if (!pstate->outer_ip || !pstate->outer_frag) { 2960 pstate->outer_ip = true; 2961 2962 if (ipv4) 2963 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; 2964 else 2965 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; 2966 2967 if (frag) { 2968 ptype->outer_frag = LIBETH_RX_PT_FRAG; 2969 pstate->outer_frag = true; 2970 } 2971 } else { 2972 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; 2973 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; 2974 2975 if (ipv4) 2976 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; 2977 else 2978 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; 2979 2980 if (frag) 2981 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; 2982 } 2983 } 2984 2985 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) 2986 { 2987 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2988 ptype->inner_prot) 2989 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; 2990 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2991 ptype->outer_ip) 2992 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; 2993 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) 2994 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; 2995 else 2996 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; 2997 2998 libeth_rx_pt_gen_hash_type(ptype); 2999 } 3000 3001 /** 3002 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info 3003 * @vport: virtual port data structure 3004 * 3005 * Returns 0 on success, negative on failure. 
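 *
 * The ptype table is fetched in windows of at most IDPF_RX_MAX_PTYPES_PER_BUF
 * entries per transaction, since the whole table does not fit into a single
 * IDPF_CTLQ_MAX_BUF_LEN mailbox buffer, and parsing stops early at the
 * 0xFFFF "end of ptypes" marker.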
3006 */ 3007 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) 3008 { 3009 struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; 3010 struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; 3011 struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; 3012 int max_ptype, ptypes_recvd = 0, ptype_offset; 3013 struct idpf_adapter *adapter = vport->adapter; 3014 struct idpf_vc_xn_params xn_params = {}; 3015 u16 next_ptype_id = 0; 3016 ssize_t reply_sz; 3017 int i, j, k; 3018 3019 if (vport->rx_ptype_lkup) 3020 return 0; 3021 3022 if (idpf_is_queue_model_split(vport->rxq_model)) 3023 max_ptype = IDPF_RX_MAX_PTYPE; 3024 else 3025 max_ptype = IDPF_RX_MAX_BASE_PTYPE; 3026 3027 ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); 3028 if (!ptype_lkup) 3029 return -ENOMEM; 3030 3031 get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); 3032 if (!get_ptype_info) 3033 return -ENOMEM; 3034 3035 ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 3036 if (!ptype_info) 3037 return -ENOMEM; 3038 3039 xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; 3040 xn_params.send_buf.iov_base = get_ptype_info; 3041 xn_params.send_buf.iov_len = sizeof(*get_ptype_info); 3042 xn_params.recv_buf.iov_base = ptype_info; 3043 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 3044 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3045 3046 while (next_ptype_id < max_ptype) { 3047 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); 3048 3049 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) 3050 get_ptype_info->num_ptypes = 3051 cpu_to_le16(max_ptype - next_ptype_id); 3052 else 3053 get_ptype_info->num_ptypes = 3054 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); 3055 3056 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3057 if (reply_sz < 0) 3058 return reply_sz; 3059 3060 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); 3061 if (ptypes_recvd > max_ptype) 3062 return -EINVAL; 3063 3064 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + 3065 le16_to_cpu(get_ptype_info->num_ptypes); 3066 3067 ptype_offset = IDPF_RX_PTYPE_HDR_SZ; 3068 3069 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { 3070 struct idpf_ptype_state pstate = { }; 3071 struct virtchnl2_ptype *ptype; 3072 u16 id; 3073 3074 ptype = (struct virtchnl2_ptype *) 3075 ((u8 *)ptype_info + ptype_offset); 3076 3077 ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); 3078 if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) 3079 return -EINVAL; 3080 3081 /* 0xFFFF indicates end of ptypes */ 3082 if (le16_to_cpu(ptype->ptype_id_10) == 3083 IDPF_INVALID_PTYPE_ID) 3084 goto out; 3085 3086 if (idpf_is_queue_model_split(vport->rxq_model)) 3087 k = le16_to_cpu(ptype->ptype_id_10); 3088 else 3089 k = ptype->ptype_id_8; 3090 3091 for (j = 0; j < ptype->proto_id_count; j++) { 3092 id = le16_to_cpu(ptype->proto_id[j]); 3093 switch (id) { 3094 case VIRTCHNL2_PROTO_HDR_GRE: 3095 if (pstate.tunnel_state == 3096 IDPF_PTYPE_TUNNEL_IP) { 3097 ptype_lkup[k].tunnel_type = 3098 LIBETH_RX_PT_TUNNEL_IP_GRENAT; 3099 pstate.tunnel_state |= 3100 IDPF_PTYPE_TUNNEL_IP_GRENAT; 3101 } 3102 break; 3103 case VIRTCHNL2_PROTO_HDR_MAC: 3104 ptype_lkup[k].outer_ip = 3105 LIBETH_RX_PT_OUTER_L2; 3106 if (pstate.tunnel_state == 3107 IDPF_TUN_IP_GRE) { 3108 ptype_lkup[k].tunnel_type = 3109 LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; 3110 pstate.tunnel_state |= 3111 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; 3112 } 3113 break; 3114 case VIRTCHNL2_PROTO_HDR_IPV4: 3115 idpf_fill_ptype_lookup(&ptype_lkup[k], 3116 &pstate, true, 3117 false); 3118 
break; 3119 case VIRTCHNL2_PROTO_HDR_IPV6: 3120 idpf_fill_ptype_lookup(&ptype_lkup[k], 3121 &pstate, false, 3122 false); 3123 break; 3124 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: 3125 idpf_fill_ptype_lookup(&ptype_lkup[k], 3126 &pstate, true, 3127 true); 3128 break; 3129 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: 3130 idpf_fill_ptype_lookup(&ptype_lkup[k], 3131 &pstate, false, 3132 true); 3133 break; 3134 case VIRTCHNL2_PROTO_HDR_UDP: 3135 ptype_lkup[k].inner_prot = 3136 LIBETH_RX_PT_INNER_UDP; 3137 break; 3138 case VIRTCHNL2_PROTO_HDR_TCP: 3139 ptype_lkup[k].inner_prot = 3140 LIBETH_RX_PT_INNER_TCP; 3141 break; 3142 case VIRTCHNL2_PROTO_HDR_SCTP: 3143 ptype_lkup[k].inner_prot = 3144 LIBETH_RX_PT_INNER_SCTP; 3145 break; 3146 case VIRTCHNL2_PROTO_HDR_ICMP: 3147 ptype_lkup[k].inner_prot = 3148 LIBETH_RX_PT_INNER_ICMP; 3149 break; 3150 case VIRTCHNL2_PROTO_HDR_PAY: 3151 ptype_lkup[k].payload_layer = 3152 LIBETH_RX_PT_PAYLOAD_L2; 3153 break; 3154 case VIRTCHNL2_PROTO_HDR_ICMPV6: 3155 case VIRTCHNL2_PROTO_HDR_IPV6_EH: 3156 case VIRTCHNL2_PROTO_HDR_PRE_MAC: 3157 case VIRTCHNL2_PROTO_HDR_POST_MAC: 3158 case VIRTCHNL2_PROTO_HDR_ETHERTYPE: 3159 case VIRTCHNL2_PROTO_HDR_SVLAN: 3160 case VIRTCHNL2_PROTO_HDR_CVLAN: 3161 case VIRTCHNL2_PROTO_HDR_MPLS: 3162 case VIRTCHNL2_PROTO_HDR_MMPLS: 3163 case VIRTCHNL2_PROTO_HDR_PTP: 3164 case VIRTCHNL2_PROTO_HDR_CTRL: 3165 case VIRTCHNL2_PROTO_HDR_LLDP: 3166 case VIRTCHNL2_PROTO_HDR_ARP: 3167 case VIRTCHNL2_PROTO_HDR_ECP: 3168 case VIRTCHNL2_PROTO_HDR_EAPOL: 3169 case VIRTCHNL2_PROTO_HDR_PPPOD: 3170 case VIRTCHNL2_PROTO_HDR_PPPOE: 3171 case VIRTCHNL2_PROTO_HDR_IGMP: 3172 case VIRTCHNL2_PROTO_HDR_AH: 3173 case VIRTCHNL2_PROTO_HDR_ESP: 3174 case VIRTCHNL2_PROTO_HDR_IKE: 3175 case VIRTCHNL2_PROTO_HDR_NATT_KEEP: 3176 case VIRTCHNL2_PROTO_HDR_L2TPV2: 3177 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: 3178 case VIRTCHNL2_PROTO_HDR_L2TPV3: 3179 case VIRTCHNL2_PROTO_HDR_GTP: 3180 case VIRTCHNL2_PROTO_HDR_GTP_EH: 3181 case VIRTCHNL2_PROTO_HDR_GTPCV2: 3182 case VIRTCHNL2_PROTO_HDR_GTPC_TEID: 3183 case VIRTCHNL2_PROTO_HDR_GTPU: 3184 case VIRTCHNL2_PROTO_HDR_GTPU_UL: 3185 case VIRTCHNL2_PROTO_HDR_GTPU_DL: 3186 case VIRTCHNL2_PROTO_HDR_ECPRI: 3187 case VIRTCHNL2_PROTO_HDR_VRRP: 3188 case VIRTCHNL2_PROTO_HDR_OSPF: 3189 case VIRTCHNL2_PROTO_HDR_TUN: 3190 case VIRTCHNL2_PROTO_HDR_NVGRE: 3191 case VIRTCHNL2_PROTO_HDR_VXLAN: 3192 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: 3193 case VIRTCHNL2_PROTO_HDR_GENEVE: 3194 case VIRTCHNL2_PROTO_HDR_NSH: 3195 case VIRTCHNL2_PROTO_HDR_QUIC: 3196 case VIRTCHNL2_PROTO_HDR_PFCP: 3197 case VIRTCHNL2_PROTO_HDR_PFCP_NODE: 3198 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: 3199 case VIRTCHNL2_PROTO_HDR_RTP: 3200 case VIRTCHNL2_PROTO_HDR_NO_PROTO: 3201 break; 3202 default: 3203 break; 3204 } 3205 } 3206 3207 idpf_finalize_ptype_lookup(&ptype_lkup[k]); 3208 } 3209 } 3210 3211 out: 3212 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); 3213 3214 return 0; 3215 } 3216 3217 /** 3218 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback 3219 * message 3220 * @vport: virtual port data structure 3221 * 3222 * Returns 0 on success, negative on failure. 
3223 */ 3224 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) 3225 { 3226 struct idpf_vc_xn_params xn_params = {}; 3227 struct virtchnl2_loopback loopback; 3228 ssize_t reply_sz; 3229 3230 loopback.vport_id = cpu_to_le32(vport->vport_id); 3231 loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); 3232 3233 xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; 3234 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3235 xn_params.send_buf.iov_base = &loopback; 3236 xn_params.send_buf.iov_len = sizeof(loopback); 3237 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 3238 3239 return reply_sz < 0 ? reply_sz : 0; 3240 } 3241 3242 /** 3243 * idpf_find_ctlq - Given a type and id, find ctlq info 3244 * @hw: hardware struct 3245 * @type: type of ctrlq to find 3246 * @id: ctlq id to find 3247 * 3248 * Returns pointer to found ctlq info struct, NULL otherwise. 3249 */ 3250 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, 3251 enum idpf_ctlq_type type, int id) 3252 { 3253 struct idpf_ctlq_info *cq, *tmp; 3254 3255 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) 3256 if (cq->q_id == id && cq->cq_type == type) 3257 return cq; 3258 3259 return NULL; 3260 } 3261 3262 /** 3263 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request 3264 * @adapter: adapter info struct 3265 * 3266 * Returns 0 on success, negative otherwise 3267 */ 3268 int idpf_init_dflt_mbx(struct idpf_adapter *adapter) 3269 { 3270 struct idpf_ctlq_create_info ctlq_info[] = { 3271 { 3272 .type = IDPF_CTLQ_TYPE_MAILBOX_TX, 3273 .id = IDPF_DFLT_MBX_ID, 3274 .len = IDPF_DFLT_MBX_Q_LEN, 3275 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 3276 }, 3277 { 3278 .type = IDPF_CTLQ_TYPE_MAILBOX_RX, 3279 .id = IDPF_DFLT_MBX_ID, 3280 .len = IDPF_DFLT_MBX_Q_LEN, 3281 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 3282 } 3283 }; 3284 struct idpf_hw *hw = &adapter->hw; 3285 int err; 3286 3287 adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); 3288 3289 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 3290 if (err) 3291 return err; 3292 3293 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, 3294 IDPF_DFLT_MBX_ID); 3295 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, 3296 IDPF_DFLT_MBX_ID); 3297 3298 if (!hw->asq || !hw->arq) { 3299 idpf_ctlq_deinit(hw); 3300 3301 return -ENOENT; 3302 } 3303 3304 adapter->state = __IDPF_VER_CHECK; 3305 3306 return 0; 3307 } 3308 3309 /** 3310 * idpf_deinit_dflt_mbx - Free up ctlqs setup 3311 * @adapter: Driver specific private data structure 3312 */ 3313 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) 3314 { 3315 if (adapter->hw.arq && adapter->hw.asq) { 3316 idpf_mb_clean(adapter); 3317 idpf_ctlq_deinit(&adapter->hw); 3318 } 3319 adapter->hw.arq = NULL; 3320 adapter->hw.asq = NULL; 3321 } 3322 3323 /** 3324 * idpf_vport_params_buf_rel - Release memory for MailBox resources 3325 * @adapter: Driver specific private data structure 3326 * 3327 * Will release memory to hold the vport parameters received on MailBox 3328 */ 3329 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) 3330 { 3331 kfree(adapter->vport_params_recvd); 3332 adapter->vport_params_recvd = NULL; 3333 kfree(adapter->vport_params_reqd); 3334 adapter->vport_params_reqd = NULL; 3335 kfree(adapter->vport_ids); 3336 adapter->vport_ids = NULL; 3337 } 3338 3339 /** 3340 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources 3341 * @adapter: Driver specific private data structure 3342 * 3343 * Will alloc memory to hold the vport parameters received on 
MailBox 3344 */ 3345 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) 3346 { 3347 u16 num_max_vports = idpf_get_max_vports(adapter); 3348 3349 adapter->vport_params_reqd = kcalloc(num_max_vports, 3350 sizeof(*adapter->vport_params_reqd), 3351 GFP_KERNEL); 3352 if (!adapter->vport_params_reqd) 3353 return -ENOMEM; 3354 3355 adapter->vport_params_recvd = kcalloc(num_max_vports, 3356 sizeof(*adapter->vport_params_recvd), 3357 GFP_KERNEL); 3358 if (!adapter->vport_params_recvd) 3359 goto err_mem; 3360 3361 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); 3362 if (!adapter->vport_ids) 3363 goto err_mem; 3364 3365 if (adapter->vport_config) 3366 return 0; 3367 3368 adapter->vport_config = kcalloc(num_max_vports, 3369 sizeof(*adapter->vport_config), 3370 GFP_KERNEL); 3371 if (!adapter->vport_config) 3372 goto err_mem; 3373 3374 return 0; 3375 3376 err_mem: 3377 idpf_vport_params_buf_rel(adapter); 3378 3379 return -ENOMEM; 3380 } 3381 3382 /** 3383 * idpf_vc_core_init - Initialize state machine and get driver specific 3384 * resources 3385 * @adapter: Driver specific private structure 3386 * 3387 * This function will initialize the state machine and request all necessary 3388 * resources required by the device driver. Once the state machine is 3389 * initialized, allocate memory to store vport specific information and also 3390 * requests required interrupts. 3391 * 3392 * Returns 0 on success, -EAGAIN function will get called again, 3393 * otherwise negative on failure. 3394 */ 3395 int idpf_vc_core_init(struct idpf_adapter *adapter) 3396 { 3397 int task_delay = 30; 3398 u16 num_max_vports; 3399 int err = 0; 3400 3401 if (!adapter->vcxn_mngr) { 3402 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); 3403 if (!adapter->vcxn_mngr) { 3404 err = -ENOMEM; 3405 goto init_failed; 3406 } 3407 } 3408 idpf_vc_xn_init(adapter->vcxn_mngr); 3409 3410 while (adapter->state != __IDPF_INIT_SW) { 3411 switch (adapter->state) { 3412 case __IDPF_VER_CHECK: 3413 err = idpf_send_ver_msg(adapter); 3414 switch (err) { 3415 case 0: 3416 /* success, move state machine forward */ 3417 adapter->state = __IDPF_GET_CAPS; 3418 fallthrough; 3419 case -EAGAIN: 3420 goto restart; 3421 default: 3422 /* Something bad happened, try again but only a 3423 * few times. 
3424 */ 3425 goto init_failed; 3426 } 3427 case __IDPF_GET_CAPS: 3428 err = idpf_send_get_caps_msg(adapter); 3429 if (err) 3430 goto init_failed; 3431 adapter->state = __IDPF_INIT_SW; 3432 break; 3433 default: 3434 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", 3435 adapter->state); 3436 err = -EINVAL; 3437 goto init_failed; 3438 } 3439 break; 3440 restart: 3441 /* Give enough time before proceeding further with 3442 * state machine 3443 */ 3444 msleep(task_delay); 3445 } 3446 3447 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) { 3448 err = idpf_send_get_lan_memory_regions(adapter); 3449 if (err) { 3450 dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", 3451 err); 3452 return -EINVAL; 3453 } 3454 } else { 3455 /* Fallback to mapping the remaining regions of the entire BAR */ 3456 err = idpf_calc_remaining_mmio_regs(adapter); 3457 if (err) { 3458 dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", 3459 err); 3460 return -ENOMEM; 3461 } 3462 } 3463 3464 err = idpf_map_lan_mmio_regs(adapter); 3465 if (err) { 3466 dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", 3467 err); 3468 return -ENOMEM; 3469 } 3470 3471 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 3472 num_max_vports = idpf_get_max_vports(adapter); 3473 adapter->max_vports = num_max_vports; 3474 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), 3475 GFP_KERNEL); 3476 if (!adapter->vports) 3477 return -ENOMEM; 3478 3479 if (!adapter->netdevs) { 3480 adapter->netdevs = kcalloc(num_max_vports, 3481 sizeof(struct net_device *), 3482 GFP_KERNEL); 3483 if (!adapter->netdevs) { 3484 err = -ENOMEM; 3485 goto err_netdev_alloc; 3486 } 3487 } 3488 3489 err = idpf_vport_params_buf_alloc(adapter); 3490 if (err) { 3491 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", 3492 err); 3493 goto err_netdev_alloc; 3494 } 3495 3496 /* Start the mailbox task before requesting vectors. This will ensure 3497 * vector information response from mailbox is handled 3498 */ 3499 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 3500 3501 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 3502 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3503 3504 err = idpf_intr_req(adapter); 3505 if (err) { 3506 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", 3507 err); 3508 goto err_intr_req; 3509 } 3510 3511 err = idpf_ptp_init(adapter); 3512 if (err) 3513 pci_err(adapter->pdev, "PTP init failed, err=%pe\n", 3514 ERR_PTR(err)); 3515 3516 idpf_init_avail_queues(adapter); 3517 3518 /* Skew the delay for init tasks for each function based on fn number 3519 * to prevent every function from making the same call simultaneously. 3520 */ 3521 queue_delayed_work(adapter->init_wq, &adapter->init_task, 3522 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3523 3524 set_bit(IDPF_VC_CORE_INIT, adapter->flags); 3525 3526 return 0; 3527 3528 err_intr_req: 3529 cancel_delayed_work_sync(&adapter->serv_task); 3530 cancel_delayed_work_sync(&adapter->mbx_task); 3531 idpf_vport_params_buf_rel(adapter); 3532 err_netdev_alloc: 3533 kfree(adapter->vports); 3534 adapter->vports = NULL; 3535 return err; 3536 3537 init_failed: 3538 /* Don't retry if we're trying to go down, just bail. 
*/ 3539 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 3540 return err; 3541 3542 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { 3543 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); 3544 3545 return -EFAULT; 3546 } 3547 /* If it reached here, it is possible that mailbox queue initialization 3548 * register writes might not have taken effect. Retry to initialize 3549 * the mailbox again 3550 */ 3551 adapter->state = __IDPF_VER_CHECK; 3552 if (adapter->vcxn_mngr) 3553 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3554 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); 3555 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, 3556 msecs_to_jiffies(task_delay)); 3557 3558 return -EAGAIN; 3559 } 3560 3561 /** 3562 * idpf_vc_core_deinit - Device deinit routine 3563 * @adapter: Driver specific private structure 3564 * 3565 */ 3566 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3567 { 3568 bool remove_in_prog; 3569 3570 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) 3571 return; 3572 3573 /* Avoid transaction timeouts when called during reset */ 3574 remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); 3575 if (!remove_in_prog) 3576 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3577 3578 idpf_ptp_release(adapter); 3579 idpf_deinit_task(adapter); 3580 idpf_idc_deinit_core_aux_device(adapter->cdev_info); 3581 idpf_intr_rel(adapter); 3582 3583 if (remove_in_prog) 3584 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3585 3586 cancel_delayed_work_sync(&adapter->serv_task); 3587 cancel_delayed_work_sync(&adapter->mbx_task); 3588 3589 idpf_vport_params_buf_rel(adapter); 3590 3591 kfree(adapter->vports); 3592 adapter->vports = NULL; 3593 3594 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); 3595 } 3596 3597 /** 3598 * idpf_vport_alloc_vec_indexes - Get relative vector indexes 3599 * @vport: virtual port data struct 3600 * 3601 * This function requests the vector information required for the vport and 3602 * stores the vector indexes received from the 'global vector distribution' 3603 * in the vport's queue vectors array. 
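 * The request size is max(num_txq - num_xdp_txq, num_rxq) plus
 * IDPF_RESERVED_VECS for the NOIRQ vector. As a made-up example, a vport
 * with 16 Tx queues (4 of them XDP) and 16 Rx queues would request
 * max(12, 16) + IDPF_RESERVED_VECS vector indexes.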
3604 * 3605 * Return 0 on success, error on failure 3606 */ 3607 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) 3608 { 3609 struct idpf_vector_info vec_info; 3610 int num_alloc_vecs; 3611 u32 req; 3612 3613 vec_info.num_curr_vecs = vport->num_q_vectors; 3614 if (vec_info.num_curr_vecs) 3615 vec_info.num_curr_vecs += IDPF_RESERVED_VECS; 3616 3617 /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */ 3618 req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) + 3619 IDPF_RESERVED_VECS; 3620 vec_info.num_req_vecs = req; 3621 3622 vec_info.default_vport = vport->default_vport; 3623 vec_info.index = vport->idx; 3624 3625 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, 3626 vport->q_vector_idxs, 3627 &vec_info); 3628 if (num_alloc_vecs <= 0) { 3629 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", 3630 num_alloc_vecs); 3631 return -EINVAL; 3632 } 3633 3634 vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS; 3635 3636 return 0; 3637 } 3638 3639 /** 3640 * idpf_vport_init - Initialize virtual port 3641 * @vport: virtual port to be initialized 3642 * @max_q: vport max queue info 3643 * 3644 * Will initialize vport with the info received through MB earlier 3645 */ 3646 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) 3647 { 3648 struct idpf_adapter *adapter = vport->adapter; 3649 struct virtchnl2_create_vport *vport_msg; 3650 struct idpf_vport_config *vport_config; 3651 u16 tx_itr[] = {2, 8, 64, 128, 256}; 3652 u16 rx_itr[] = {2, 8, 32, 96, 128}; 3653 struct idpf_rss_data *rss_data; 3654 u16 idx = vport->idx; 3655 int err; 3656 3657 vport_config = adapter->vport_config[idx]; 3658 rss_data = &vport_config->user_config.rss_data; 3659 vport_msg = adapter->vport_params_recvd[idx]; 3660 3661 vport_config->max_q.max_txq = max_q->max_txq; 3662 vport_config->max_q.max_rxq = max_q->max_rxq; 3663 vport_config->max_q.max_complq = max_q->max_complq; 3664 vport_config->max_q.max_bufq = max_q->max_bufq; 3665 3666 vport->txq_model = le16_to_cpu(vport_msg->txq_model); 3667 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); 3668 vport->vport_type = le16_to_cpu(vport_msg->vport_type); 3669 vport->vport_id = le32_to_cpu(vport_msg->vport_id); 3670 3671 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 3672 le16_to_cpu(vport_msg->rss_key_size)); 3673 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); 3674 3675 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); 3676 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; 3677 3678 /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ 3679 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); 3680 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); 3681 3682 idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); 3683 3684 idpf_vport_init_num_qs(vport, vport_msg); 3685 idpf_vport_calc_num_q_desc(vport); 3686 idpf_vport_calc_num_q_groups(vport); 3687 idpf_vport_alloc_vec_indexes(vport); 3688 3689 vport->crc_enable = adapter->crc_enable; 3690 3691 if (!(vport_msg->vport_flags & 3692 cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT))) 3693 return; 3694 3695 err = idpf_ptp_get_vport_tstamps_caps(vport); 3696 if (err) { 3697 pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n"); 3698 return; 3699 } 3700 3701 INIT_WORK(&vport->tstamp_task, idpf_tstamp_task); 3702 } 3703 3704 /** 3705 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters 3706 * @adapter: adapter 
structure to get the mailbox vector id 3707 * @vecids: Array of vector ids 3708 * @num_vecids: number of vector ids 3709 * @chunks: vector ids received over mailbox 3710 * 3711 * Will initialize the mailbox vector id which is received from the 3712 * get capabilities and data queue vector ids with ids received as 3713 * mailbox parameters. 3714 * Returns number of ids filled 3715 */ 3716 int idpf_get_vec_ids(struct idpf_adapter *adapter, 3717 u16 *vecids, int num_vecids, 3718 struct virtchnl2_vector_chunks *chunks) 3719 { 3720 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); 3721 int num_vecid_filled = 0; 3722 int i, j; 3723 3724 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; 3725 num_vecid_filled++; 3726 3727 for (j = 0; j < num_chunks; j++) { 3728 struct virtchnl2_vector_chunk *chunk; 3729 u16 start_vecid, num_vec; 3730 3731 chunk = &chunks->vchunks[j]; 3732 num_vec = le16_to_cpu(chunk->num_vectors); 3733 start_vecid = le16_to_cpu(chunk->start_vector_id); 3734 3735 for (i = 0; i < num_vec; i++) { 3736 if ((num_vecid_filled + i) < num_vecids) { 3737 vecids[num_vecid_filled + i] = start_vecid; 3738 start_vecid++; 3739 } else { 3740 break; 3741 } 3742 } 3743 num_vecid_filled = num_vecid_filled + i; 3744 } 3745 3746 return num_vecid_filled; 3747 } 3748 3749 /** 3750 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters 3751 * @qids: Array of queue ids 3752 * @num_qids: number of queue ids 3753 * @q_type: type of queue 3754 * @chunks: queue ids received over mailbox 3755 * 3756 * Will initialize all queue ids with ids received as mailbox parameters 3757 * Returns number of ids filled 3758 */ 3759 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, 3760 struct virtchnl2_queue_reg_chunks *chunks) 3761 { 3762 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 3763 u32 num_q_id_filled = 0, i; 3764 u32 start_q_id, num_q; 3765 3766 while (num_chunks--) { 3767 struct virtchnl2_queue_reg_chunk *chunk; 3768 3769 chunk = &chunks->chunks[num_chunks]; 3770 if (le32_to_cpu(chunk->type) != q_type) 3771 continue; 3772 3773 num_q = le32_to_cpu(chunk->num_queues); 3774 start_q_id = le32_to_cpu(chunk->start_queue_id); 3775 3776 for (i = 0; i < num_q; i++) { 3777 if ((num_q_id_filled + i) < num_qids) { 3778 qids[num_q_id_filled + i] = start_q_id; 3779 start_q_id++; 3780 } else { 3781 break; 3782 } 3783 } 3784 num_q_id_filled = num_q_id_filled + i; 3785 } 3786 3787 return num_q_id_filled; 3788 } 3789 3790 /** 3791 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3792 * @vport: virtual port for which the queue ids are initialized 3793 * @qids: queue ids 3794 * @num_qids: number of queue ids 3795 * @q_type: type of queue 3796 * 3797 * Will initialize all queue ids with ids received as mailbox 3798 * parameters. Returns number of queue ids initialized.
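 *
 * Ids are consumed in order, at most @num_qids of them; the returned count
 * may therefore be smaller than the number of queues of @q_type the vport
 * owns, which callers treat as an error.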
/**
 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 * @qids: queue ids
 * @num_qids: number of queue ids
 * @q_type: type of queue
 *
 * Will initialize all queue ids with ids received as mailbox
 * parameters. Returns number of queue ids initialized.
 */
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
				       const u32 *qids,
				       int num_qids,
				       u32 q_type)
{
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
				tx_qgrp->txqs[j]->q_id = qids[k];
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq;

			if (idpf_is_queue_model_split(vport->rxq_model))
				num_rxq = rx_qgrp->splitq.num_rxq_sets;
			else
				num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
				struct idpf_rx_queue *q;

				if (idpf_is_queue_model_split(vport->rxq_model))
					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
				else
					q = rx_qgrp->singleq.rxqs[j];
				q->q_id = qids[k];
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			tx_qgrp->complq->q_id = qids[k];
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->q_id = qids[k];
			}
		}
		break;
	default:
		break;
	}

	return k;
}
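
/* Illustrative only: the return value above acts as a consistency check for
 * the caller. If a vport has, say, 4 Tx queues spread across 2 queue groups,
 * the helper must consume at least 4 entries from qids[]; a smaller count
 * means the device handed back fewer queue ids than queues were configured,
 * which idpf_vport_queue_ids_init() below treats as -EINVAL.
 */
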
/**
 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 * Returns 0 on success, negative if all the queues are not initialized.
 */
int idpf_vport_queue_ids_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_ids, err = 0;
	u16 q_type;
	u32 *qids;

	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
	if (!qids)
		return -ENOMEM;

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_TX,
					   chunks);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_RX,
					   chunks);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_RX);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto check_rxq;

	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}

check_rxq:
	if (!idpf_is_queue_model_split(vport->rxq_model))
		goto mem_rel;

	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_bufq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_bufq)
		err = -EINVAL;

mem_rel:
	kfree(qids);

	return err;
}
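
/* Illustrative only: the chunk source selection above is what makes queue
 * renegotiation work. On first initialization the ids come from the
 * CREATE_VPORT response cached in vport_params_recvd; once a later
 * VIRTCHNL2_OP_ADD_QUEUES exchange has run, the new ids live in
 * vport_config->req_qs_chunks and take precedence.
 */
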
/**
 * idpf_vport_adjust_qs - Adjust to new requested queues
 * @vport: virtual port data struct
 *
 * Renegotiate queues. Returns 0 on success, negative on failure.
 */
int idpf_vport_adjust_qs(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport vport_msg;
	int err;

	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
				       NULL);
	if (err)
		return err;

	idpf_vport_init_num_qs(vport, &vport_msg);
	idpf_vport_calc_num_q_groups(vport);

	return 0;
}

/**
 * idpf_is_capability_ena - Default implementation of capability checking
 * @adapter: Private data struct
 * @all: all or one flag
 * @field: caps field to check for flags
 * @flag: flag to check
 *
 * Return: true if the requested flags are enabled (all of them when @all is
 * set, at least one of them otherwise), false otherwise
 */
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag)
{
	u8 *caps = (u8 *)&adapter->caps;
	u32 *cap_field;

	if (!caps)
		return false;

	if (field == IDPF_BASE_CAPS)
		return false;

	cap_field = (u32 *)(caps + field);

	if (all)
		return (*cap_field & flag) == flag;
	else
		return !!(*cap_field & flag);
}

/**
 * idpf_vport_is_cap_ena - Check if vport capability is enabled
 * @vport: Private data struct
 * @flag: flag(s) to check
 *
 * Return: true if the capability is supported, false otherwise
 */
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
}

/**
 * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
 * @vport: Private data struct
 * @flow_type: flow type to check (from ethtool.h)
 *
 * Return: true if sideband filters are allowed for @flow_type, false otherwise
 */
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
{
	struct virtchnl2_create_vport *vport_msg;
	__le64 caps;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
	caps = vport_msg->sideband_flow_caps;

	switch (flow_type) {
	case TCP_V4_FLOW:
		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
	case UDP_V4_FLOW:
		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
	default:
		return false;
	}
}

/**
 * idpf_sideband_action_ena - Check if steering is enabled for action
 * @vport: Private data struct
 * @fsp: flow spec
 *
 * Return: true if sideband filters are allowed for @fsp, false otherwise
 */
bool idpf_sideband_action_ena(struct idpf_vport *vport,
			      struct ethtool_rx_flow_spec *fsp)
{
	struct virtchnl2_create_vport *vport_msg;
	unsigned int supp_actions;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);

	/* Actions Drop/Wake are not supported */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
		return false;

	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
}
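
/* Illustrative only, and the macro names here are assumptions: idpf.h is
 * expected to provide thin wrappers around idpf_is_capability_ena(), so a
 * hypothetical caller gating an RSS path might write
 *
 *	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
 *		return -EOPNOTSUPP;
 *
 * which requires every flag in the mask to be set in the selected caps
 * field, while the non-"_all" variant accepts any one of them.
 */
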
/**
 * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: maximum number of flow steering rules supported by this vport
 */
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->flow_steer_max_rules);
}

/**
 * idpf_get_vport_id - Get vport id
 * @vport: virtual port structure
 *
 * Return vport id from the adapter persistent data
 */
u32 idpf_get_vport_id(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->vport_id);
}

/**
 * idpf_set_mac_type - Classify a filter address as primary or extra
 * @vport: virtual port structure
 * @mac_addr: virtchnl MAC address entry to tag
 *
 * Marks @mac_addr as the primary address when it matches the vport's default
 * MAC address, otherwise as an extra address.
 */
static void idpf_set_mac_type(struct idpf_vport *vport,
			      struct virtchnl2_mac_addr *mac_addr)
{
	bool is_primary;

	is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
				      VIRTCHNL2_MAC_ADDR_EXTRA;
}
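
/* Illustrative only: when a MAC filter message is sent with
 * xn_params.async = true, idpf_vc_xn_exec() returns without waiting for the
 * device. The mailbox receive path later matches the reply cookie to the
 * pending transaction and invokes the handler below, which is where a
 * device-reported failure for such a message gets observed and reconciled.
 */
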
/**
 * idpf_mac_filter_async_handler - Async callback for mac filters
 * @adapter: private data struct
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * In some scenarios the driver can't sleep and wait for a reply (e.g. the
 * stack is holding rtnl_lock) when adding a new mac filter, which makes it
 * difficult to deal with errors returned on the reply. The best we can
 * ultimately do is remove the filter from our list of mac filters and report
 * the error.
 *
 * Return: 0 on success (including a device-reported filter error that was
 * reconciled locally), -EINVAL on an invalid payload.
 */
static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
					 struct idpf_vc_xn *xn,
					 const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_mac_addr_list *ma_list;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_mac_addr *mac_addr;
	struct idpf_mac_filter *f, *tmp;
	struct list_head *ma_list_head;
	struct idpf_vport *vport;
	u16 num_entries;
	int i;

	/* if success we're done, we're only here if something bad happened */
	if (!ctlq_msg->cookie.mbx.chnl_retval)
		return 0;

	/* make sure at least struct is there */
	if (xn->reply_sz < sizeof(*ma_list))
		goto invalid_payload;

	ma_list = ctlq_msg->ctx.indirect.payload->va;
	mac_addr = ma_list->mac_addr_list;
	num_entries = le16_to_cpu(ma_list->num_mac_addr);
	/* we should have received a buffer at least this big */
	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
		goto invalid_payload;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
	if (!vport)
		goto invalid_payload;

	/* vport_config is indexed by vport index, not by the device-assigned
	 * vport id
	 */
	vport_config = adapter->vport_config[vport->idx];
	ma_list_head = &vport_config->user_config.mac_filter_list;

	/* We can't do much to reconcile bad filters at this point, however we
	 * should at least remove them from our list one way or the other so we
	 * have some idea what good filters we have.
	 */
	spin_lock_bh(&vport_config->mac_filter_list_lock);
	list_for_each_entry_safe(f, tmp, ma_list_head, list)
		for (i = 0; i < num_entries; i++)
			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
				list_del(&f->list);
	spin_unlock_bh(&vport_config->mac_filter_list_lock);
	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
			    xn->vc_op);

	return 0;

invalid_payload:
	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
			    xn->vc_op, xn->reply_sz);

	return -EINVAL;
}

/**
 * idpf_add_del_mac_filters - Add/del mac filters
 * @vport: Virtual port data structure
 * @np: Netdev private structure
 * @add: Add or delete flag
 * @async: Don't wait for return message
 *
 * Returns 0 on success, error on failure.
 */
int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async)
{
	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
	struct idpf_adapter *adapter = np->adapter;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	u32 num_msgs, total_filters = 0;
	struct idpf_mac_filter *f;
	ssize_t reply_sz;
	int i = 0, k;

	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
				VIRTCHNL2_OP_DEL_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = async;
	xn_params.async_handler = idpf_mac_filter_async_handler;

	vport_config = adapter->vport_config[np->vport_idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	/* Find the number of newly added filters */
	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add)
			total_filters++;
		else if (!add && f->remove)
			total_filters++;
	}

	if (!total_filters) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

	/* Fill all the new filters into virtchannel message */
	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
			   GFP_ATOMIC);
	if (!mac_addr) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			idpf_set_mac_type(vport, &mac_addr[i]);
			i++;
			f->add = false;
			if (i == total_filters)
				break;
		}
		if (!add && f->remove) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			idpf_set_mac_type(vport, &mac_addr[i]);
			i++;
			f->remove = false;
			if (i == total_filters)
				break;
		}
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	/* Chunk up the filters into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);

	for (i = 0, k = 0; i < num_msgs; i++) {
		u32 entries_size, buf_size, num_entries;

		num_entries = min_t(u32, total_filters,
				    IDPF_NUM_FILTERS_PER_MSG);
		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
		buf_size = struct_size(ma_list, mac_addr_list, num_entries);

		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
			kfree(ma_list);
			ma_list = kzalloc(buf_size, GFP_ATOMIC);
			if (!ma_list)
				return -ENOMEM;
		} else {
			memset(ma_list, 0, buf_size);
		}

		ma_list->vport_id = cpu_to_le32(np->vport_id);
		ma_list->num_mac_addr = cpu_to_le16(num_entries);
		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);

		xn_params.send_buf.iov_base = ma_list;
		xn_params.send_buf.iov_len = buf_size;
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_entries;
		total_filters -= num_entries;
	}

	return 0;
}
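
/* Illustrative only: with IDPF_NUM_FILTERS_PER_MSG filters per message and,
 * say, two and a half times that many pending filters, DIV_ROUND_UP() above
 * yields three messages: two full ones and a short tail. Only the tail
 * reallocates ma_list, since its buffer size differs from the full-sized
 * messages; full-sized iterations just memset() and reuse the buffer.
 */
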
/**
 * idpf_set_promiscuous - set promiscuous and send message to mailbox
 * @adapter: Driver specific private structure
 * @config_data: Vport specific config data
 * @vport_id: Vport identifier
 *
 * Request to enable promiscuous mode for the vport. Message is sent
 * asynchronously and won't wait for response. Returns 0 on success, negative
 * on failure.
 */
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_promisc_info vpi;
	ssize_t reply_sz;
	u16 flags = 0;

	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
		flags |= VIRTCHNL2_UNICAST_PROMISC;
	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
		flags |= VIRTCHNL2_MULTICAST_PROMISC;

	vpi.vport_id = cpu_to_le32(vport_id);
	vpi.flags = cpu_to_le16(flags);

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &vpi;
	xn_params.send_buf.iov_len = sizeof(vpi);
	/* setting promiscuous is only ever done asynchronously */
	xn_params.async = true;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
 * @cdev_info: IDC core device info pointer
 * @send_msg: message to send
 * @msg_size: size of message to send
 * @recv_msg: message to populate on reception of response
 * @recv_len: length of message copied into recv_msg or 0 on error
 *
 * Return: 0 on success or error code on failure.
 */
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
			       u8 *send_msg, u16 msg_size,
			       u8 *recv_msg, u16 *recv_len)
{
	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
	struct idpf_vc_xn_params xn_params = { };
	ssize_t reply_sz;
	u16 recv_size;

	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
	*recv_len = 0;
	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = send_msg;
	xn_params.send_buf.iov_len = msg_size;
	xn_params.recv_buf.iov_base = recv_msg;
	xn_params.recv_buf.iov_len = recv_size;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	*recv_len = reply_sz;

	return 0;
}
EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
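
/* Illustrative only: a hypothetical IDC-registered RDMA driver would call
 * the exported helper above roughly as
 *
 *	u16 recv_len = sizeof(resp);
 *	int err;
 *
 *	err = idpf_idc_rdma_vc_send_sync(cdev_info, (u8 *)&req, sizeof(req),
 *					 (u8 *)&resp, &recv_len);
 *
 * where req/resp are opaque RDMA virtchnl payloads. On success, recv_len is
 * updated to the number of reply bytes copied into resp; it is left at 0 on
 * failure.
 */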