// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <linux/export.h>
#include <net/libeth/rx.h>

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make the cookie unique for every message
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
	u16 num_max_vports = idpf_get_max_vports(adapter);
	int i;

	for (i = 0; i < num_max_vports; i++)
		if (adapter->vport_ids[i] == v_id)
			return adapter->vports[i];

	return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
				   const struct virtchnl2_event *v2e)
{
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
	if (!vport) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
				    le32_to_cpu(v2e->vport_id));
		return;
	}
	np = netdev_priv(vport->netdev);

	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);

	if (vport->link_up == v2e->link_status)
		return;

	vport->link_up = v2e->link_status;

	if (!test_bit(IDPF_VPORT_UP, np->state))
		return;

	if (vport->link_up) {
		netif_tx_start_all_queues(vport->netdev);
		netif_carrier_on(vport->netdev);
	} else {
		netif_tx_stop_all_queues(vport->netdev);
		netif_carrier_off(vport->netdev);
	}
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
				struct idpf_ctlq_msg *ctlq_msg)
{
	int payload_size = ctlq_msg->ctx.indirect.payload->size;
	struct virtchnl2_event *v2e;
	u32 event;

	if (payload_size < sizeof(*v2e)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    payload_size);
		return;
	}

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		idpf_handle_event_link(adapter, v2e);
		return;
	default:
		dev_err(&adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}
/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: driver specific private structure
 * @asq: send control queue info
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter,
			 struct idpf_ctlq_info *asq)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kzalloc_objs(struct idpf_ctlq_msg *, num_q_msg, GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * idpf_ptp_is_mb_msg - Check if the message is PTP-related
 * @op: virtchnl opcode
 *
 * Return: true if msg is PTP-related, false otherwise.
 */
static bool idpf_ptp_is_mb_msg(u32 op)
{
	switch (op) {
	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
		return true;
	default:
		return false;
	}
}

/**
 * idpf_prepare_ptp_mb_msg - Prepare PTP related message
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @ctlq_msg: Corresponding control queue message
 */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{
	/* If the message is PTP-related and the secondary mailbox is
	 * available, send the message through the secondary mailbox.
	 */
	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
		return;

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
}
#else /* !CONFIG_PTP_1588_CLOCK */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{ }
#endif /* CONFIG_PTP_1588_CLOCK */
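/* Buffers for outgoing mailbox messages are not freed on the send path:
 * idpf_send_mb_msg() below hands each descriptor a freshly allocated
 * idpf_ctlq_msg plus DMA buffer, and those are reclaimed only after the
 * device has consumed them, by the idpf_mb_clean() call at the top of
 * the next send.
 */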
/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: driver specific private structure
 * @asq: control queue to send message to
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiate the send API
 *
 * Return: 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
		     u32 op, u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and is expected to
	 * be corrected with a new run, either by user or driver
	 * flows, after reset.
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter, asq);
	if (err)
		return err;

	ctlq_msg = kzalloc_obj(*ctlq_msg, GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc_obj(*dma_mem, GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;

	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);

	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate synchro
 * as well. For now, this API is only used from within a workqueue context;
 * raw_spin_lock() is enough.
 */

/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn) \
	raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn) \
	raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
	xn->reply.iov_base = NULL;
	xn->reply.iov_len = 0;

	if (xn->state != IDPF_VC_XN_SHUTDOWN)
		xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_init(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		xn->state = IDPF_VC_XN_IDLE;
		xn->idx = i;
		idpf_vc_xn_release_bufs(xn);
		init_completion(&xn->completed);
	}

	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken up and their transactions aborted.
 * Further operations on that object will fail.
 */
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
		complete_all(&xn->completed);
	}
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
	struct idpf_vc_xn *xn = NULL;
	unsigned long free_idx;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	if (free_idx == IDPF_VC_XN_RING_LEN)
		goto do_unlock;

	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
	xn = &vcxn_mngr->ring[free_idx];
	xn->salt = vcxn_mngr->salt++;

do_unlock:
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}
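/* Transaction lifecycle, as implemented below:
 *
 *   idpf_vc_xn_pop_free()      IDLE -> claimed (free bit cleared, salt bumped)
 *   idpf_vc_xn_exec()          claimed -> WAITING (sync) or ASYNC
 *   idpf_vc_xn_forward_reply() WAITING -> COMPLETED_SUCCESS/COMPLETED_FAILED
 *   idpf_vc_xn_push_free()     back to IDLE (free bit set, buffers released)
 *
 * SHUTDOWN is sticky: once idpf_vc_xn_shutdown() marks a transaction,
 * idpf_vc_xn_release_bufs() no longer resets it to IDLE.
 */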
/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 * -vc_op: virtchnl operation to send
 * -send_buf: kvec iov for send buf and len
 * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 * -timeout_ms: timeout waiting for a reply (milliseconds)
 * -async: don't wait for message reply, will lose caller context
 * -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able to
		 * send any messages. At least this way we make an attempt to
		 * remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * timeout than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be a non-issue,
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		dev_notice_ratelimited(&adapter->pdev->dev,
				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
				       params->vc_op, cookie, xn->vc_op,
				       xn->salt, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}
/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll just
 * check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	int err = 0;

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		err = -EINVAL;
		goto release_bufs;
	}

	if (xn->async_handler) {
		err = xn->async_handler(adapter, xn, ctlq_msg);
		goto release_bufs;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
	}

release_bufs:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

	return err;
}
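/* Replies are matched back to their transaction via the 16-bit SW cookie
 * echoed by the device: IDPF_VC_XN_IDX_M extracts the ring index and
 * IDPF_VC_XN_SALT_M the salt. The salt changes every time a slot is
 * claimed, so a stale or corrupted reply fails the salt check below and
 * is dropped instead of completing an unrelated transaction.
 */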
/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	idpf_vc_xn_lock(xn);
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
				    xn->vc_op, xn->salt, xn->state,
				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
		idpf_vc_xn_unlock(xn);
		return -EINVAL;
	}

	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * to know if it should stop trying to clean the ring if we
		 * lost the virtchnl. We need to stop playing with registers
		 * and yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->data_len;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* We _cannot_ hold the lock while calling complete(): the xn lock
	 * is the completion's own wait.lock, which complete() takes again.
	 */
	complete(&xn->completed);

	return err;
}
/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: driver specific private structure
 * @arq: control queue to receive message from
 *
 * Will receive control queue message and posts the receive buffer.
 *
 * Return: 0 on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will get <= num_recv messages and output how many
		 * were actually received in num_recv.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dma_free_coherent(&adapter->pdev->dev,
						  dma_mem->size, dma_mem->va,
						  dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}

struct idpf_chunked_msg_params {
	u32 (*prepare_msg)(u32 vport_id, void *buf,
			   const void *pos, u32 num);

	const void *chunks;
	u32 num_chunks;

	u32 chunk_sz;
	u32 config_sz;

	u32 vc_op;
	u32 vport_id;
};

struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
					    struct idpf_q_vec_rsrc *qv_rsrc,
					    u32 vport_id, u32 num)
{
	struct idpf_queue_set *qp;

	qp = kzalloc_flex(*qp, qs, num, GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;
	qp->qv_rsrc = qv_rsrc;
	qp->vport_id = vport_id;
	qp->num = num;

	return qp;
}

/**
 * idpf_send_chunked_msg - send VC message consisting of chunks
 * @adapter: Driver specific private structure
 * @params: message params
 *
 * Helper function for preparing and sending messages describing queues to be
 * enabled, disabled, or configured, split into chunks that fit into a single
 * mailbox buffer.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
				 const struct idpf_chunked_msg_params *params)
{
	struct idpf_vc_xn_params xn_params = {
		.vc_op = params->vc_op,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	const void *pos = params->chunks;
	u32 num_chunks, num_msgs, buf_sz;
	void *buf __free(kfree) = NULL;
	u32 totqs = params->num_chunks;
	u32 vid = params->vport_id;

	num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
						 params->chunk_sz), totqs);
	num_msgs = DIV_ROUND_UP(totqs, num_chunks);

	buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	buf = kzalloc(buf_sz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	xn_params.send_buf.iov_base = buf;

	for (u32 i = 0; i < num_msgs; i++) {
		ssize_t reply_sz;

		memset(buf, 0, buf_sz);
		xn_params.send_buf.iov_len = buf_sz;

		if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
			return -EINVAL;

		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		pos += num_chunks * params->chunk_sz;
		totqs -= num_chunks;

		num_chunks = min(num_chunks, totqs);
		buf_sz = params->config_sz + num_chunks * params->chunk_sz;
	}

	return 0;
}
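/* Worked example of the chunking arithmetic above, with hypothetical
 * sizes for illustration: assuming a 4096-byte mailbox buffer, a
 * 32-byte fixed header (config_sz) and 56-byte per-queue chunks
 * (chunk_sz), IDPF_NUM_CHUNKS_PER_MSG() would yield
 * (4096 - 32) / 56 = 72 chunks per message, so 100 queues are sent as
 * two messages carrying 72 and 28 chunks respectively.
 */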
/**
 * idpf_wait_for_marker_event_set - wait for software marker response for
 *				    selected Tx queues
 * @qs: set of the Tx queues
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
	struct net_device *netdev;
	struct idpf_tx_queue *txq;
	bool markers_rcvd = true;

	for (u32 i = 0; i < qs->num; i++) {
		switch (qs->qs[i].type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			txq = qs->qs[i].txq;

			netdev = txq->netdev;

			idpf_queue_set(SW_MARKER, txq);
			idpf_wait_for_sw_marker_completion(txq);
			markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
			break;
		default:
			break;
		}
	}

	if (!markers_rcvd) {
		netdev_warn(netdev,
			    "Failed to receive marker packets\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Return: 0 on success, negative on failure.
 */
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;

	qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
				  vport->vport_id, vport->num_txq);
	if (!qs)
		return -ENOMEM;

	for (u32 i = 0; i < qs->num; i++) {
		qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
		qs->qs[i].txq = vport->txqs[i];
	}

	return idpf_wait_for_marker_event_set(qs);
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message. Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}
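/* On the first negotiation attempt, -EAGAIN from idpf_send_ver_msg()
 * tells the caller to send VERSION once more: the device replied with a
 * different version, which is now cached in virt_ver_maj/virt_ver_min
 * and will be echoed back so both sides agree on what to use.
 */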
/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
			    VIRTCHNL2_CAP_SEG_IPV4_UDP |
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP |
			    VIRTCHNL2_CAP_SEG_IPV6_TCP |
			    VIRTCHNL2_CAP_SEG_IPV6_UDP |
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP |
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
			    VIRTCHNL2_FLOW_IPV4_UDP |
			    VIRTCHNL2_FLOW_IPV4_SCTP |
			    VIRTCHNL2_FLOW_IPV4_OTHER |
			    VIRTCHNL2_FLOW_IPV6_TCP |
			    VIRTCHNL2_FLOW_IPV6_UDP |
			    VIRTCHNL2_FLOW_IPV6_SCTP |
			    VIRTCHNL2_FLOW_IPV6_OTHER);

	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
			    VIRTCHNL2_CAP_RDMA |
			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
			    VIRTCHNL2_CAP_MACFILTER |
			    VIRTCHNL2_CAP_SPLITQ_QSCHED |
			    VIRTCHNL2_CAP_PROMISC |
			    VIRTCHNL2_CAP_LOOPBACK |
			    VIRTCHNL2_CAP_PTP);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}
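/* The capabilities reply lands directly in adapter->caps (see recv_buf
 * above) and later drives device-wide queue accounting through
 * idpf_init_avail_queues() and idpf_vport_alloc_max_qs().
 */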
/**
 * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
 * @adapter: Driver specific private struct
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
		.send_buf.iov_len =
			sizeof(struct virtchnl2_get_lan_memory_regions) +
			sizeof(struct virtchnl2_mem_region),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int num_regions, size;
	struct idpf_hw *hw;
	ssize_t reply_sz;
	int err = 0;

	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!rcvd_regions)
		return -ENOMEM;

	xn_params.recv_buf.iov_base = rcvd_regions;
	rcvd_regions->num_memory_regions = cpu_to_le16(1);
	xn_params.send_buf.iov_base = rcvd_regions;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
	size = struct_size(rcvd_regions, mem_reg, num_regions);
	if (reply_sz < size)
		return -EIO;

	if (size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	hw = &adapter->hw;
	hw->lan_regs = kzalloc_objs(*hw->lan_regs, num_regions, GFP_KERNEL);
	if (!hw->lan_regs)
		return -ENOMEM;

	for (int i = 0; i < num_regions; i++) {
		hw->lan_regs[i].addr_len =
			le64_to_cpu(rcvd_regions->mem_reg[i].size);
		hw->lan_regs[i].addr_start =
			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
	}
	hw->num_lan_regs = num_regions;

	return err;
}

/**
 * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
 * @adapter: Driver specific private structure
 *
 * Called when idpf_send_get_lan_memory_regions is not supported. This will
 * calculate the offsets and sizes for the regions before, in between, and
 * after the mailbox and rstat MMIO mappings.
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
{
	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
	struct idpf_hw *hw = &adapter->hw;

	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
	hw->lan_regs = kzalloc_objs(*hw->lan_regs, hw->num_lan_regs,
				    GFP_KERNEL);
	if (!hw->lan_regs)
		return -ENOMEM;

	/* Region preceding mailbox */
	hw->lan_regs[0].addr_start = 0;
	hw->lan_regs[0].addr_len = mbx_reg->start;
	/* Region between mailbox and rstat */
	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
	hw->lan_regs[1].addr_len = rstat_reg->start -
				   hw->lan_regs[1].addr_start;
	/* Region after rstat */
	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
				   hw->lan_regs[2].addr_start;

	return 0;
}
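/* Resulting fallback layout of BAR0 (offsets within the BAR):
 *
 *   [0 ................... mbx start)  -> lan_regs[0]
 *   [mbx end + 1 ....... rstat start)  -> lan_regs[1]
 *   [rstat end + 1 ........ BAR0 end)  -> lan_regs[2]
 *
 * The mailbox and rstat windows themselves are expected to be mapped
 * separately, based on dev_ops.static_reg_info.
 */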
/**
 * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
 * @adapter: Driver specific private structure
 *
 * Return: 0 on success or error code on failure.
 */
static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct idpf_hw *hw = &adapter->hw;
	resource_size_t res_start;

	res_start = pci_resource_start(pdev, 0);

	for (int i = 0; i < hw->num_lan_regs; i++) {
		resource_size_t start;
		long len;

		len = hw->lan_regs[i].addr_len;
		if (!len)
			continue;
		start = hw->lan_regs[i].addr_start + res_start;

		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
		if (!hw->lan_regs[i].vaddr) {
			pci_err(pdev, "failed to allocate BAR0 region\n");
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
 * @adapter: adapter info struct
 * @rule: Flow steering rule to add/delete
 * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
 *	    VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
 *
 * Send ADD/DELETE flow steering virtchnl message and receive the result.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
				struct virtchnl2_flow_rule_add_del *rule,
				enum virtchnl2_op opcode)
{
	int rule_count = le32_to_cpu(rule->count);
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
		return -EINVAL;

	xn_params.vc_op = opcode;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = false;
	xn_params.send_buf.iov_base = rule;
	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
	xn_params.recv_buf.iov_base = rule;
	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	return reply_sz < 0 ? reply_sz : 0;
}
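/* Note that the send and receive buffers above alias the same @rule
 * memory, so the device's reply overwrites the request in place and is
 * visible to the caller on return.
 */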
/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;
	u16 default_vports = idpf_get_default_vports(adapter);
	u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;

	mutex_lock(&adapter->queue_lock);

	/* Caps are device-wide. Give each vport an equal piece */
	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
	max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
	max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;

	if (adapter->num_alloc_vports >= default_vports) {
		max_rx_q = IDPF_MIN_Q;
		max_tx_q = IDPF_MIN_Q;
	}

	/* Harmonize the numbers. The current implementation always creates
	 * IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues for each Rx queue and
	 * one completion queue for each Tx queue for best performance.
	 * If fewer buffer or completion queues are available, cap the number
	 * of the corresponding Rx/Tx queues.
	 */
	max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
	max_tx_q = min(max_tx_q, max_compl_q);

	max_q->max_rxq = max_rx_q;
	max_q->max_txq = max_tx_q;
	max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
	max_q->max_complq = max_tx_q;

	if (avail_queues->avail_rxq < max_q->max_rxq ||
	    avail_queues->avail_txq < max_q->max_txq ||
	    avail_queues->avail_bufq < max_q->max_bufq ||
	    avail_queues->avail_complq < max_q->max_complq) {
		mutex_unlock(&adapter->queue_lock);

		return -EINVAL;
	}

	avail_queues->avail_rxq -= max_q->max_rxq;
	avail_queues->avail_txq -= max_q->max_txq;
	avail_queues->avail_bufq -= max_q->max_bufq;
	avail_queues->avail_complq -= max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);

	return 0;
}

/**
 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues;

	mutex_lock(&adapter->queue_lock);
	avail_queues = &adapter->avail_queues;

	avail_queues->avail_rxq += max_q->max_rxq;
	avail_queues->avail_txq += max_q->max_txq;
	avail_queues->avail_bufq += max_q->max_bufq;
	avail_queues->avail_complq += max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);
}

/**
 * idpf_init_avail_queues - Initialize available queues on the device
 * @adapter: Driver specific private structure
 */
static void idpf_init_avail_queues(struct idpf_adapter *adapter)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;

	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}
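/* Device-wide queue accounting: idpf_init_avail_queues() seeds the
 * counters from the capabilities reply, idpf_vport_alloc_max_qs()
 * debits them under queue_lock (e.g. with hypothetical caps of
 * max_rx_q = 16, max_rx_bufq = 16, two default vports and
 * IDPF_MAX_BUFQS_PER_RXQ_GRP = 2, a vport gets min(8, 8 / 2) = 4 Rx
 * queues backed by 8 buffer queues), and idpf_vport_dealloc_max_qs()
 * credits them back.
 */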
/**
 * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
 * @vport_config: persistent vport structure to store the queue register info
 * @schunks: source chunks to copy data from
 *
 * Return: 0 on success, negative on failure.
 */
static int
idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
				 struct virtchnl2_queue_reg_chunks *schunks)
{
	struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
	u16 num_chunks = le16_to_cpu(schunks->num_chunks);

	kfree(q_info->queue_chunks);

	q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks,
					    GFP_KERNEL);
	if (!q_info->queue_chunks) {
		q_info->num_chunks = 0;
		return -ENOMEM;
	}

	q_info->num_chunks = num_chunks;

	for (u16 i = 0; i < num_chunks; i++) {
		struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
		struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];

		dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
		dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
		dchunk->type = le32_to_cpu(schunk->type);
		dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
		dchunk->num_queues = le32_to_cpu(schunk->num_queues);
	}

	return 0;
}

/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @adapter: adapter structure to get the vector chunks
 * @reg_vals: Register offsets to store in
 *
 * Return: number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
			   struct idpf_vec_regs *reg_vals)
{
	struct virtchnl2_vector_chunks *chunks;
	struct idpf_vec_regs reg_val;
	u16 num_vchunks, num_vec;
	int num_regs = 0, i, j;

	chunks = &adapter->req_vec_chunks->vchunks;
	num_vchunks = le16_to_cpu(chunks->num_vchunks);

	for (j = 0; j < num_vchunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u32 dynctl_reg_spacing;
		u32 itrn_reg_spacing;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);

		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);

		for (i = 0; i < num_vec; i++) {
			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
			reg_vals[num_regs].itrn_index_spacing =
				reg_val.itrn_index_spacing;

			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
			reg_val.itrn_reg += itrn_reg_spacing;
			num_regs++;
		}
	}

	return num_regs;
}
/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information, with a specific queue type, and stores them into the
 * array passed as an argument.
 *
 * Return: the actual number of queue registers that are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
				struct idpf_queue_id_reg_info *chunks)
{
	u16 num_chunks = chunks->num_chunks;
	int reg_filled = 0, i;
	u32 reg_val;

	while (num_chunks--) {
		struct idpf_queue_id_reg_chunk *chunk;
		u16 num_q;

		chunk = &chunks->queue_chunks[num_chunks];
		if (chunk->type != q_type)
			continue;

		num_q = chunk->num_queues;
		reg_val = chunk->qtail_reg_start;
		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
			reg_vals[reg_filled++] = reg_val;
			reg_val += chunk->qtail_reg_spacing;
		}
	}

	return reg_filled;
}

/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @rsrc: pointer to queue and vector resources
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return: number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport,
				 struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < rsrc->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter, reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}
/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @rsrc: pointer to queue and vector resources
 * @chunks: queue registers received over mailbox
 *
 * Return: 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport,
			struct idpf_q_vec_rsrc *rsrc,
			struct idpf_queue_id_reg_info *chunks)
{
	int num_regs, ret = 0;
	u32 *reg_vals;

	/* We may never deal with more than 256 queues of the same type */
	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
	if (!reg_vals)
		return -ENOMEM;

	/* Initialize Tx queue tail register address */
	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
					VIRTCHNL2_QUEUE_TYPE_TX,
					chunks);
	if (num_regs < rsrc->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
					 VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_regs < rsrc->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	/* Initialize Rx/buffer queue tail register address based on Rx queue
	 * model
	 */
	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
						chunks);
		if (num_regs < rsrc->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals,
						 num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
		if (num_regs < rsrc->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	} else {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX,
						chunks);
		if (num_regs < rsrc->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals,
						 num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX);
		if (num_regs < rsrc->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	}

free_reg_vals:
	kfree(reg_vals);

	return ret;
}
/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
	ssize_t reply_sz;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
							  GFP_KERNEL);
		if (!adapter->vport_params_reqd[idx])
			return -ENOMEM;
	}

	vport_msg = adapter->vport_params_reqd[idx];
	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	vport_msg->vport_index = cpu_to_le16(idx);

	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
	if (err) {
		dev_err(&adapter->pdev->dev, "Not enough queues are available");

		return err;
	}

	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
			goto free_vport_params;
		}
	}

	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
	xn_params.send_buf.iov_base = vport_msg;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto free_vport_params;
	}

	return 0;

free_vport_params:
	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	return err;
}

/**
 * idpf_check_supported_desc_ids - Verify we have required descriptor support
 * @vport: virtual port structure
 *
 * Return: 0 on success, error on failure
 */
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport *vport_msg;
	u64 rx_desc_ids, tx_desc_ids;

	vport_msg = adapter->vport_params_recvd[vport->idx];

	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
		return -EOPNOTSUPP;
	}

	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);

	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
		}
	} else {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
			rsrc->base_rxd = true;
	}

	if (!idpf_is_queue_model_split(rsrc->txq_model))
		return 0;

	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
	}

	return 0;
}
/**
 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
 * @adapter: adapter pointer used to send virtchnl message
 * @vport_id: vport identifier used while preparing the virtchnl message
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
 * @adapter: adapter pointer used to send virtchnl message
 * @vport_id: vport identifier used while preparing the virtchnl message
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
 * @adapter: adapter pointer used to send virtchnl message
 * @vport_id: vport identifier used while preparing the virtchnl message
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
 * @rsrc: pointer to queue and vector resources
 * @q: Tx queue to be inserted into VC chunk
 * @qi: pointer to the buffer containing the VC chunk
 */
static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
				       const struct idpf_tx_queue *q,
				       struct virtchnl2_txq_info *qi)
{
	u32 val;

	qi->queue_id = cpu_to_le32(q->q_id);
	qi->model = cpu_to_le16(rsrc->txq_model);
	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
	qi->ring_len = cpu_to_le16(q->desc_count);
	qi->dma_ring_addr = cpu_to_le64(q->dma);
	qi->relative_queue_id = cpu_to_le16(q->rel_q_id);

	if (!idpf_is_queue_model_split(rsrc->txq_model)) {
		qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
		return;
	}

	if (idpf_queue_has(XDP, q))
		val = q->complq->q_id;
	else
		val = q->txq_grp->complq->q_id;

	qi->tx_compl_queue_id = cpu_to_le16(val);

	if (idpf_queue_has(FLOW_SCH_EN, q))
		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
	else
		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;

	qi->sched_mode = cpu_to_le16(val);
}

/**
 * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
 * @rsrc: pointer to queue and vector resources
 * @q: completion queue to be inserted into VC chunk
 * @qi: pointer to the buffer containing the VC chunk
 */
static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
					  const struct idpf_compl_queue *q,
					  struct virtchnl2_txq_info *qi)
{
	u32 val;

	qi->queue_id = cpu_to_le32(q->q_id);
	qi->model = cpu_to_le16(rsrc->txq_model);
	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
	qi->ring_len = cpu_to_le16(q->desc_count);
	qi->dma_ring_addr = cpu_to_le64(q->dma);

	if (idpf_queue_has(FLOW_SCH_EN, q))
		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
	else
		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;

	qi->sched_mode = cpu_to_le16(val);
}
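/* On the wire, each chunked config message laid out by the prepare
 * callbacks below is a fixed header followed by an array of chunks,
 * e.g. for a hypothetical 3-queue Tx config message:
 *
 *   struct virtchnl2_config_tx_queues hdr;  (vport_id, num_qinfo = 3)
 *   struct virtchnl2_txq_info qinfo[3];     (one chunk per queue)
 *
 * which is what struct_size(ctq, qinfo, num_chunks) accounts for.
 */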
/**
 * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
 * @vport_id: ID of virtual port queues are associated with
 * @buf: buffer containing the message
 * @pos: pointer to the first chunk describing the Tx queue
 * @num_chunks: number of chunks in the message
 *
 * Helper function for preparing the message describing configuration of
 * Tx queues.
 *
 * Return: the total size of the prepared message.
 */
static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
				     u32 num_chunks)
{
	struct virtchnl2_config_tx_queues *ctq = buf;

	ctq->vport_id = cpu_to_le32(vport_id);
	ctq->num_qinfo = cpu_to_le16(num_chunks);
	memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));

	return struct_size(ctq, qinfo, num_chunks);
}

/**
 * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
 *				       message for selected queues
 * @qs: set of the Tx queues to configure
 *
 * Send config queues virtchnl message for queues contained in the @qs array.
 * The @qs array can contain Tx queues (or completion queues) only.
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
	struct idpf_chunked_msg_params params = {
		.vport_id = qs->vport_id,
		.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
		.prepare_msg = idpf_prepare_cfg_txqs_msg,
		.config_sz = sizeof(struct virtchnl2_config_tx_queues),
		.chunk_sz = sizeof(*qi),
	};

	qi = kzalloc_objs(*qi, qs->num, GFP_KERNEL);
	if (!qi)
		return -ENOMEM;

	params.chunks = qi;

	for (u32 i = 0; i < qs->num; i++) {
		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
			idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
						   &qi[params.num_chunks++]);
		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
			idpf_fill_complq_config_chunk(qs->qv_rsrc,
						      qs->qs[i].complq,
						      &qi[params.num_chunks++]);
	}

	return idpf_send_chunked_msg(qs->adapter, &params);
}
1872 */ 1873 static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter, 1874 struct idpf_q_vec_rsrc *rsrc, 1875 u32 vport_id) 1876 { 1877 struct idpf_queue_set *qs __free(kfree) = NULL; 1878 u32 totqs = rsrc->num_txq + rsrc->num_complq; 1879 u32 k = 0; 1880 1881 qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs); 1882 if (!qs) 1883 return -ENOMEM; 1884 1885 /* Populate the queue info buffer with all queue context info */ 1886 for (u32 i = 0; i < rsrc->num_txq_grp; i++) { 1887 const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; 1888 1889 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { 1890 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; 1891 qs->qs[k++].txq = tx_qgrp->txqs[j]; 1892 } 1893 1894 if (idpf_is_queue_model_split(rsrc->txq_model)) { 1895 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 1896 qs->qs[k++].complq = tx_qgrp->complq; 1897 } 1898 } 1899 1900 /* Make sure accounting agrees */ 1901 if (k != totqs) 1902 return -EINVAL; 1903 1904 return idpf_send_config_tx_queue_set_msg(qs); 1905 } 1906 1907 /** 1908 * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue 1909 * @rsrc: pointer to queue and vector resources 1910 * @q: Rx queue to be inserted into VC chunk 1911 * @qi: pointer to the buffer containing the VC chunk 1912 */ 1913 static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, 1914 struct idpf_rx_queue *q, 1915 struct virtchnl2_rxq_info *qi) 1916 { 1917 const struct idpf_bufq_set *sets; 1918 1919 qi->queue_id = cpu_to_le32(q->q_id); 1920 qi->model = cpu_to_le16(rsrc->rxq_model); 1921 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1922 qi->ring_len = cpu_to_le16(q->desc_count); 1923 qi->dma_ring_addr = cpu_to_le64(q->dma); 1924 qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size); 1925 qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); 1926 qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); 1927 if (idpf_queue_has(RSC_EN, q)) 1928 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1929 1930 if (!idpf_is_queue_model_split(rsrc->rxq_model)) { 1931 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); 1932 qi->desc_ids = cpu_to_le64(q->rxdids); 1933 1934 return; 1935 } 1936 1937 sets = q->bufq_sets; 1938 1939 /* 1940 * In splitq mode, RxQ buffer size should be set to that of the first 1941 * buffer queue associated with this RxQ. 
1942 */ 1943 q->rx_buf_size = sets[0].bufq.rx_buf_size; 1944 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); 1945 1946 qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); 1947 if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { 1948 qi->bufq2_ena = IDPF_BUFQ2_ENA; 1949 qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id); 1950 } 1951 1952 q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; 1953 1954 if (idpf_queue_has(HSPLIT_EN, q)) { 1955 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1956 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); 1957 } 1958 1959 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1960 } 1961 1962 /** 1963 * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue 1964 * @rsrc: pointer to queue and vector resources 1965 * @q: buffer queue to be inserted into VC chunk 1966 * @qi: pointer to the buffer containing the VC chunk 1967 */ 1968 static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, 1969 const struct idpf_buf_queue *q, 1970 struct virtchnl2_rxq_info *qi) 1971 { 1972 qi->queue_id = cpu_to_le32(q->q_id); 1973 qi->model = cpu_to_le16(rsrc->rxq_model); 1974 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1975 qi->ring_len = cpu_to_le16(q->desc_count); 1976 qi->dma_ring_addr = cpu_to_le64(q->dma); 1977 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); 1978 qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); 1979 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1980 qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE; 1981 if (idpf_queue_has(RSC_EN, q)) 1982 qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1983 1984 if (idpf_queue_has(HSPLIT_EN, q)) { 1985 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1986 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); 1987 } 1988 } 1989 1990 /** 1991 * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues 1992 * @vport_id: ID of virtual port queues are associated with 1993 * @buf: buffer containing the message 1994 * @pos: pointer to the first chunk describing the rx queue 1995 * @num_chunks: number of chunks in the message 1996 * 1997 * Helper function for preparing the message describing configuration of 1998 * Rx queues. 1999 * 2000 * Return: the total size of the prepared message. 2001 */ 2002 static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos, 2003 u32 num_chunks) 2004 { 2005 struct virtchnl2_config_rx_queues *crq = buf; 2006 2007 crq->vport_id = cpu_to_le32(vport_id); 2008 crq->num_qinfo = cpu_to_le16(num_chunks); 2009 memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo)); 2010 2011 return struct_size(crq, qinfo, num_chunks); 2012 } 2013 2014 /** 2015 * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message 2016 * for selected queues. 2017 * @qs: set of the Rx queues to configure 2018 * 2019 * Send config queues virtchnl message for queues contained in the @qs array. 2020 * The @qs array can contain Rx queues (or buffer queues) only. 2021 * 2022 * Return: 0 on success, -errno on failure. 
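 *
 * Illustrative sketch (hypothetical caller, assuming rxq and bufq point at an
 * existing Rx queue and its buffer queue): reconfiguring just that pair could
 * look like
 *
 *	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, 2);
 *	if (!qs)
 *		return -ENOMEM;
 *
 *	qs->qs[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
 *	qs->qs[0].rxq = rxq;
 *	qs->qs[1].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
 *	qs->qs[1].bufq = bufq;
 *
 *	err = idpf_send_config_rx_queue_set_msg(qs);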
2023 */ 2024 static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs) 2025 { 2026 struct virtchnl2_rxq_info *qi __free(kfree) = NULL; 2027 struct idpf_chunked_msg_params params = { 2028 .vport_id = qs->vport_id, 2029 .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES, 2030 .prepare_msg = idpf_prepare_cfg_rxqs_msg, 2031 .config_sz = sizeof(struct virtchnl2_config_rx_queues), 2032 .chunk_sz = sizeof(*qi), 2033 }; 2034 2035 qi = kzalloc_objs(*qi, qs->num); 2036 if (!qi) 2037 return -ENOMEM; 2038 2039 params.chunks = qi; 2040 2041 for (u32 i = 0; i < qs->num; i++) { 2042 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX) 2043 idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq, 2044 &qi[params.num_chunks++]); 2045 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) 2046 idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq, 2047 &qi[params.num_chunks++]); 2048 } 2049 2050 return idpf_send_chunked_msg(qs->adapter, &params); 2051 } 2052 2053 /** 2054 * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message 2055 * @adapter: adapter pointer used to send virtchnl message 2056 * @rsrc: pointer to queue and vector resources 2057 * @vport_id: vport identifier used while preparing the virtchnl message 2058 * 2059 * Return: 0 on success, -errno on failure. 2060 */ 2061 static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter, 2062 struct idpf_q_vec_rsrc *rsrc, 2063 u32 vport_id) 2064 { 2065 bool splitq = idpf_is_queue_model_split(rsrc->rxq_model); 2066 struct idpf_queue_set *qs __free(kfree) = NULL; 2067 u32 totqs = rsrc->num_rxq + rsrc->num_bufq; 2068 u32 k = 0; 2069 2070 qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs); 2071 if (!qs) 2072 return -ENOMEM; 2073 2074 /* Populate the queue info buffer with all queue context info */ 2075 for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { 2076 const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; 2077 u32 num_rxq; 2078 2079 if (!splitq) { 2080 num_rxq = rx_qgrp->singleq.num_rxq; 2081 goto rxq; 2082 } 2083 2084 for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { 2085 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 2086 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; 2087 } 2088 2089 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2090 2091 rxq: 2092 for (u32 j = 0; j < num_rxq; j++) { 2093 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2094 2095 if (splitq) 2096 qs->qs[k++].rxq = 2097 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2098 else 2099 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2100 } 2101 } 2102 2103 /* Make sure accounting agrees */ 2104 if (k != totqs) 2105 return -EINVAL; 2106 2107 return idpf_send_config_rx_queue_set_msg(qs); 2108 } 2109 2110 /** 2111 * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected 2112 * queues 2113 * @vport_id: ID of virtual port queues are associated with 2114 * @buf: buffer containing the message 2115 * @pos: pointer to the first chunk describing the queue 2116 * @num_chunks: number of chunks in the message 2117 * 2118 * Helper function for preparing the message describing queues to be enabled 2119 * or disabled. 2120 * 2121 * Return: the total size of the prepared message.
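 *
 * Worked example (hypothetical values): for @num_chunks == 3 the helper
 * copies three struct virtchnl2_queue_chunk entries into @buf and returns
 *
 *	struct_size(eq, chunks.chunks, 3)
 *
 * i.e. the fixed part of struct virtchnl2_del_ena_dis_queues plus three
 * trailing chunk entries.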
2122 */ 2123 static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos, 2124 u32 num_chunks) 2125 { 2126 struct virtchnl2_del_ena_dis_queues *eq = buf; 2127 2128 eq->vport_id = cpu_to_le32(vport_id); 2129 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2130 memcpy(eq->chunks.chunks, pos, 2131 num_chunks * sizeof(*eq->chunks.chunks)); 2132 2133 return struct_size(eq, chunks.chunks, num_chunks); 2134 } 2135 2136 /** 2137 * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues 2138 * message for selected queues 2139 * @qs: set of the queues to enable or disable 2140 * @en: whether to enable or disable queues 2141 * 2142 * Send enable or disable queues virtchnl message for queues contained 2143 * in the @qs array. 2144 * The @qs array can contain pointers to both Rx and Tx queues. 2145 * 2146 * Return: 0 on success, -errno on failure. 2147 */ 2148 static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs, 2149 bool en) 2150 { 2151 struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; 2152 struct idpf_chunked_msg_params params = { 2153 .vport_id = qs->vport_id, 2154 .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES : 2155 VIRTCHNL2_OP_DISABLE_QUEUES, 2156 .prepare_msg = idpf_prepare_ena_dis_qs_msg, 2157 .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues), 2158 .chunk_sz = sizeof(*qc), 2159 .num_chunks = qs->num, 2160 }; 2161 2162 qc = kzalloc_objs(*qc, qs->num); 2163 if (!qc) 2164 return -ENOMEM; 2165 2166 params.chunks = qc; 2167 2168 for (u32 i = 0; i < qs->num; i++) { 2169 const struct idpf_queue_ptr *q = &qs->qs[i]; 2170 u32 qid; 2171 2172 qc[i].type = cpu_to_le32(q->type); 2173 qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 2174 2175 switch (q->type) { 2176 case VIRTCHNL2_QUEUE_TYPE_RX: 2177 qid = q->rxq->q_id; 2178 break; 2179 case VIRTCHNL2_QUEUE_TYPE_TX: 2180 qid = q->txq->q_id; 2181 break; 2182 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 2183 qid = q->bufq->q_id; 2184 break; 2185 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: 2186 qid = q->complq->q_id; 2187 break; 2188 default: 2189 return -EINVAL; 2190 } 2191 2192 qc[i].start_queue_id = cpu_to_le32(qid); 2193 } 2194 2195 return idpf_send_chunked_msg(qs->adapter, ¶ms); 2196 } 2197 2198 /** 2199 * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues 2200 * message 2201 * @adapter: adapter pointer used to send virtchnl message 2202 * @rsrc: pointer to queue and vector resources 2203 * @vport_id: vport identifier used while preparing the virtchnl message 2204 * @en: whether to enable or disable queues 2205 * 2206 * Return: 0 on success, -errno on failure. 
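 *
 * Illustrative call site: the enable path further below
 * (idpf_send_enable_queues_msg()) drives this as
 *
 *	err = idpf_send_ena_dis_queues_msg(vport->adapter,
 *					   &vport->dflt_qv_rsrc,
 *					   vport->vport_id, true);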
2207 */ 2208 static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter, 2209 struct idpf_q_vec_rsrc *rsrc, 2210 u32 vport_id, bool en) 2211 { 2212 struct idpf_queue_set *qs __free(kfree) = NULL; 2213 u32 num_txq, num_q, k = 0; 2214 bool split; 2215 2216 num_txq = rsrc->num_txq + rsrc->num_complq; 2217 num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq; 2218 2219 qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q); 2220 if (!qs) 2221 return -ENOMEM; 2222 2223 split = idpf_is_queue_model_split(rsrc->txq_model); 2224 2225 for (u32 i = 0; i < rsrc->num_txq_grp; i++) { 2226 const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; 2227 2228 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { 2229 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; 2230 qs->qs[k++].txq = tx_qgrp->txqs[j]; 2231 } 2232 2233 if (!split) 2234 continue; 2235 2236 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 2237 qs->qs[k++].complq = tx_qgrp->complq; 2238 } 2239 2240 if (k != num_txq) 2241 return -EINVAL; 2242 2243 split = idpf_is_queue_model_split(rsrc->rxq_model); 2244 2245 for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { 2246 const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; 2247 u32 num_rxq; 2248 2249 if (split) 2250 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2251 else 2252 num_rxq = rx_qgrp->singleq.num_rxq; 2253 2254 for (u32 j = 0; j < num_rxq; j++) { 2255 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2256 2257 if (split) 2258 qs->qs[k++].rxq = 2259 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2260 else 2261 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2262 } 2263 2264 if (!split) 2265 continue; 2266 2267 for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { 2268 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 2269 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; 2270 } 2271 } 2272 2273 if (k != num_q) 2274 return -EINVAL; 2275 2276 return idpf_send_ena_dis_queue_set_msg(qs, en); 2277 } 2278 2279 /** 2280 * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap 2281 * queue set to the interrupt vector 2282 * @vport_id: ID of virtual port queues are associated with 2283 * @buf: buffer containing the message 2284 * @pos: pointer to the first chunk describing the vector mapping 2285 * @num_chunks: number of chunks in the message 2286 * 2287 * Helper function for preparing the message describing mapping queues to 2288 * q_vectors. 2289 * 2290 * Return: the total size of the prepared message. 2291 */ 2292 static u32 2293 idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf, 2294 const void *pos, u32 num_chunks) 2295 { 2296 struct virtchnl2_queue_vector_maps *vqvm = buf; 2297 2298 vqvm->vport_id = cpu_to_le32(vport_id); 2299 vqvm->num_qv_maps = cpu_to_le16(num_chunks); 2300 memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps)); 2301 2302 return struct_size(vqvm, qv_maps, num_chunks); 2303 } 2304 2305 /** 2306 * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap 2307 * queue set vector message 2308 * @qs: set of the queues to map or unmap 2309 * @map: true for map and false for unmap 2310 * 2311 * Return: 0 on success, -errno on failure. 2312 */ 2313 static int 2314 idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, 2315 bool map) 2316 { 2317 struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; 2318 struct idpf_chunked_msg_params params = { 2319 .vport_id = qs->vport_id, 2320 .vc_op = map ? 
VIRTCHNL2_OP_MAP_QUEUE_VECTOR : 2321 VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, 2322 .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg, 2323 .config_sz = sizeof(struct virtchnl2_queue_vector_maps), 2324 .chunk_sz = sizeof(*vqv), 2325 .num_chunks = qs->num, 2326 }; 2327 bool split; 2328 2329 vqv = kzalloc_objs(*vqv, qs->num); 2330 if (!vqv) 2331 return -ENOMEM; 2332 2333 params.chunks = vqv; 2334 2335 split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model); 2336 2337 for (u32 i = 0; i < qs->num; i++) { 2338 const struct idpf_queue_ptr *q = &qs->qs[i]; 2339 const struct idpf_q_vector *vec; 2340 u32 qid, v_idx, itr_idx; 2341 2342 vqv[i].queue_type = cpu_to_le32(q->type); 2343 2344 switch (q->type) { 2345 case VIRTCHNL2_QUEUE_TYPE_RX: 2346 qid = q->rxq->q_id; 2347 2348 if (idpf_queue_has(NOIRQ, q->rxq)) 2349 vec = NULL; 2350 else 2351 vec = q->rxq->q_vector; 2352 2353 if (vec) { 2354 v_idx = vec->v_idx; 2355 itr_idx = vec->rx_itr_idx; 2356 } else { 2357 v_idx = qs->qv_rsrc->noirq_v_idx; 2358 itr_idx = VIRTCHNL2_ITR_IDX_0; 2359 } 2360 break; 2361 case VIRTCHNL2_QUEUE_TYPE_TX: 2362 qid = q->txq->q_id; 2363 2364 if (idpf_queue_has(NOIRQ, q->txq)) 2365 vec = NULL; 2366 else if (idpf_queue_has(XDP, q->txq)) 2367 vec = q->txq->complq->q_vector; 2368 else if (split) 2369 vec = q->txq->txq_grp->complq->q_vector; 2370 else 2371 vec = q->txq->q_vector; 2372 2373 if (vec) { 2374 v_idx = vec->v_idx; 2375 itr_idx = vec->tx_itr_idx; 2376 } else { 2377 v_idx = qs->qv_rsrc->noirq_v_idx; 2378 itr_idx = VIRTCHNL2_ITR_IDX_1; 2379 } 2380 break; 2381 default: 2382 return -EINVAL; 2383 } 2384 2385 vqv[i].queue_id = cpu_to_le32(qid); 2386 vqv[i].vector_id = cpu_to_le16(v_idx); 2387 vqv[i].itr_idx = cpu_to_le32(itr_idx); 2388 } 2389 2390 return idpf_send_chunked_msg(qs->adapter, &params); 2391 } 2392 2393 /** 2394 * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue 2395 * vector message 2396 * @adapter: adapter pointer used to send virtchnl message 2397 * @rsrc: pointer to queue and vector resources 2398 * @vport_id: vport identifier used while preparing the virtchnl message 2399 * @map: true for map and false for unmap 2400 * 2401 * Return: 0 on success, -errno on failure.
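 *
 * Illustrative ordering sketch (an assumption, not mandated by this file):
 * vectors are mapped before queues are enabled, and unmapped only after the
 * queues have been disabled, e.g.
 *
 *	err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc,
 *						   vport_id, true);
 *	if (!err)
 *		err = idpf_send_enable_queues_msg(vport);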
2402 */ 2403 int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter, 2404 struct idpf_q_vec_rsrc *rsrc, 2405 u32 vport_id, bool map) 2406 { 2407 struct idpf_queue_set *qs __free(kfree) = NULL; 2408 u32 num_q = rsrc->num_txq + rsrc->num_rxq; 2409 u32 k = 0; 2410 2411 qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q); 2412 if (!qs) 2413 return -ENOMEM; 2414 2415 for (u32 i = 0; i < rsrc->num_txq_grp; i++) { 2416 const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; 2417 2418 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { 2419 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; 2420 qs->qs[k++].txq = tx_qgrp->txqs[j]; 2421 } 2422 } 2423 2424 if (k != rsrc->num_txq) 2425 return -EINVAL; 2426 2427 for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { 2428 const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; 2429 u32 num_rxq; 2430 2431 if (idpf_is_queue_model_split(rsrc->rxq_model)) 2432 num_rxq = rx_qgrp->splitq.num_rxq_sets; 2433 else 2434 num_rxq = rx_qgrp->singleq.num_rxq; 2435 2436 for (u32 j = 0; j < num_rxq; j++) { 2437 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; 2438 2439 if (idpf_is_queue_model_split(rsrc->rxq_model)) 2440 qs->qs[k++].rxq = 2441 &rx_qgrp->splitq.rxq_sets[j]->rxq; 2442 else 2443 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; 2444 } 2445 } 2446 2447 if (k != num_q) 2448 return -EINVAL; 2449 2450 return idpf_send_map_unmap_queue_set_vector_msg(qs, map); 2451 } 2452 2453 /** 2454 * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for 2455 * selected queues 2456 * @qs: set of the queues 2457 * 2458 * Send enable queues virtchnl message for queues contained in the @qs array. 2459 * 2460 * Return: 0 on success, -errno on failure. 2461 */ 2462 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs) 2463 { 2464 return idpf_send_ena_dis_queue_set_msg(qs, true); 2465 } 2466 2467 /** 2468 * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for 2469 * selected queues 2470 * @qs: set of the queues 2471 * 2472 * Return: 0 on success, -errno on failure. 2473 */ 2474 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs) 2475 { 2476 int err; 2477 2478 err = idpf_send_ena_dis_queue_set_msg(qs, false); 2479 if (err) 2480 return err; 2481 2482 return idpf_wait_for_marker_event_set(qs); 2483 } 2484 2485 /** 2486 * idpf_send_config_queue_set_msg - send virtchnl config queues message for 2487 * selected queues 2488 * @qs: set of the queues 2489 * 2490 * Send config queues virtchnl message for queues contained in the @qs array. 2491 * The @qs array can contain both Rx and Tx queues. 2492 * 2493 * Return: 0 on success, -errno on failure. 2494 */ 2495 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs) 2496 { 2497 int err; 2498 2499 err = idpf_send_config_tx_queue_set_msg(qs); 2500 if (err) 2501 return err; 2502 2503 return idpf_send_config_rx_queue_set_msg(qs); 2504 } 2505 2506 /** 2507 * idpf_send_enable_queues_msg - send enable queues virtchnl message 2508 * @vport: Virtual port private data structure 2509 * 2510 * Will send enable queues virtchnl message. Returns 0 on success, negative on 2511 * failure. 2512 */ 2513 int idpf_send_enable_queues_msg(struct idpf_vport *vport) 2514 { 2515 return idpf_send_ena_dis_queues_msg(vport->adapter, 2516 &vport->dflt_qv_rsrc, 2517 vport->vport_id, true); 2518 } 2519 2520 /** 2521 * idpf_send_disable_queues_msg - send disable queues virtchnl message 2522 * @vport: Virtual port private data structure 2523 * 2524 * Will send disable queues virtchnl message.
Returns 0 on success, negative 2525 * on failure. 2526 */ 2527 int idpf_send_disable_queues_msg(struct idpf_vport *vport) 2528 { 2529 int err; 2530 2531 err = idpf_send_ena_dis_queues_msg(vport->adapter, 2532 &vport->dflt_qv_rsrc, 2533 vport->vport_id, false); 2534 if (err) 2535 return err; 2536 2537 return idpf_wait_for_marker_event(vport); 2538 } 2539 2540 /** 2541 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right 2542 * structure 2543 * @dchunks: Destination chunks to store data to 2544 * @schunks: Source chunks to copy data from 2545 * @num_chunks: number of chunks to copy 2546 */ 2547 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, 2548 struct idpf_queue_id_reg_chunk *schunks, 2549 u16 num_chunks) 2550 { 2551 u16 i; 2552 2553 for (i = 0; i < num_chunks; i++) { 2554 dchunks[i].type = cpu_to_le32(schunks[i].type); 2555 dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id); 2556 dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues); 2557 } 2558 } 2559 2560 /** 2561 * idpf_send_delete_queues_msg - send delete queues virtchnl message 2562 * @adapter: adapter pointer used to send virtchnl message 2563 * @chunks: queue ids received over mailbox 2564 * @vport_id: vport identifier used while preparing the virtchnl message 2565 * 2566 * Return: 0 on success, negative on failure. 2567 */ 2568 int idpf_send_delete_queues_msg(struct idpf_adapter *adapter, 2569 struct idpf_queue_id_reg_info *chunks, 2570 u32 vport_id) 2571 { 2572 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 2573 struct idpf_vc_xn_params xn_params = {}; 2574 ssize_t reply_sz; 2575 u16 num_chunks; 2576 int buf_size; 2577 2578 num_chunks = chunks->num_chunks; 2579 buf_size = struct_size(eq, chunks.chunks, num_chunks); 2580 2581 eq = kzalloc(buf_size, GFP_KERNEL); 2582 if (!eq) 2583 return -ENOMEM; 2584 2585 eq->vport_id = cpu_to_le32(vport_id); 2586 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2587 2588 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks, 2589 num_chunks); 2590 2591 xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; 2592 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2593 xn_params.send_buf.iov_base = eq; 2594 xn_params.send_buf.iov_len = buf_size; 2595 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2596 2597 return reply_sz < 0 ? reply_sz : 0; 2598 } 2599 2600 /** 2601 * idpf_send_config_queues_msg - Send config queues virtchnl message 2602 * @adapter: adapter pointer used to send virtchnl message 2603 * @rsrc: pointer to queue and vector resources 2604 * @vport_id: vport identifier used while preparing the virtchnl message 2605 * 2606 * Return: 0 on success, negative on failure. 2607 */ 2608 int idpf_send_config_queues_msg(struct idpf_adapter *adapter, 2609 struct idpf_q_vec_rsrc *rsrc, 2610 u32 vport_id) 2611 { 2612 int err; 2613 2614 err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id); 2615 if (err) 2616 return err; 2617 2618 return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id); 2619 } 2620 2621 /** 2622 * idpf_send_add_queues_msg - Send virtchnl add queues message 2623 * @adapter: adapter pointer used to send virtchnl message 2624 * @vport_config: vport persistent structure to store the queue chunk info 2625 * @rsrc: pointer to queue and vector resources 2626 * @vport_id: vport identifier used while preparing the virtchnl message 2627 * 2628 * Return: 0 on success, negative on failure. 
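 *
 * Illustrative call site (hypothetical queue reallocation path): request the
 * counts currently stored in @rsrc and let the reply's queue chunks be cached
 * in @vport_config:
 *
 *	err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
 *				       vport->vport_id);
 *	if (err)
 *		return err;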
2629 */ 2630 int idpf_send_add_queues_msg(struct idpf_adapter *adapter, 2631 struct idpf_vport_config *vport_config, 2632 struct idpf_q_vec_rsrc *rsrc, 2633 u32 vport_id) 2634 { 2635 struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; 2636 struct idpf_vc_xn_params xn_params = {}; 2637 struct virtchnl2_add_queues aq = {}; 2638 ssize_t reply_sz; 2639 int size; 2640 2641 vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2642 if (!vc_msg) 2643 return -ENOMEM; 2644 2645 aq.vport_id = cpu_to_le32(vport_id); 2646 aq.num_tx_q = cpu_to_le16(rsrc->num_txq); 2647 aq.num_tx_complq = cpu_to_le16(rsrc->num_complq); 2648 aq.num_rx_q = cpu_to_le16(rsrc->num_rxq); 2649 aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq); 2650 2651 xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; 2652 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2653 xn_params.send_buf.iov_base = &aq; 2654 xn_params.send_buf.iov_len = sizeof(aq); 2655 xn_params.recv_buf.iov_base = vc_msg; 2656 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2657 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2658 if (reply_sz < 0) 2659 return reply_sz; 2660 2661 /* compare vc_msg num queues with vport num queues */ 2662 if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq || 2663 le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq || 2664 le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq || 2665 le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq) 2666 return -EINVAL; 2667 2668 size = struct_size(vc_msg, chunks.chunks, 2669 le16_to_cpu(vc_msg->chunks.num_chunks)); 2670 if (reply_sz < size) 2671 return -EIO; 2672 2673 return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks); 2674 } 2675 2676 /** 2677 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message 2678 * @adapter: Driver specific private structure 2679 * @num_vectors: number of vectors to be allocated 2680 * 2681 * Returns 0 on success, negative on failure. 
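 *
 * Illustrative call site (hypothetical sizing, num_q_vecs being a made-up
 * local): request enough vectors for the queue vectors plus the reserved
 * ones:
 *
 *	err = idpf_send_alloc_vectors_msg(adapter,
 *					  num_q_vecs + IDPF_RESERVED_VECS);
 *	if (err)
 *		return err;
 *
 * On success the granted chunks are cached in adapter->req_vec_chunks.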
2682 */ 2683 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) 2684 { 2685 struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; 2686 struct idpf_vc_xn_params xn_params = {}; 2687 struct virtchnl2_alloc_vectors ac = {}; 2688 ssize_t reply_sz; 2689 u16 num_vchunks; 2690 int size; 2691 2692 ac.num_vectors = cpu_to_le16(num_vectors); 2693 2694 rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2695 if (!rcvd_vec) 2696 return -ENOMEM; 2697 2698 xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; 2699 xn_params.send_buf.iov_base = &ac; 2700 xn_params.send_buf.iov_len = sizeof(ac); 2701 xn_params.recv_buf.iov_base = rcvd_vec; 2702 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2703 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2704 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2705 if (reply_sz < 0) 2706 return reply_sz; 2707 2708 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); 2709 size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); 2710 if (reply_sz < size) 2711 return -EIO; 2712 2713 if (size > IDPF_CTLQ_MAX_BUF_LEN) 2714 return -EINVAL; 2715 2716 kfree(adapter->req_vec_chunks); 2717 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); 2718 if (!adapter->req_vec_chunks) 2719 return -ENOMEM; 2720 2721 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { 2722 kfree(adapter->req_vec_chunks); 2723 adapter->req_vec_chunks = NULL; 2724 return -EINVAL; 2725 } 2726 2727 return 0; 2728 } 2729 2730 /** 2731 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message 2732 * @adapter: Driver specific private structure 2733 * 2734 * Returns 0 on success, negative on failure. 2735 */ 2736 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) 2737 { 2738 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; 2739 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; 2740 struct idpf_vc_xn_params xn_params = {}; 2741 ssize_t reply_sz; 2742 int buf_size; 2743 2744 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); 2745 2746 xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; 2747 xn_params.send_buf.iov_base = vcs; 2748 xn_params.send_buf.iov_len = buf_size; 2749 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2750 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2751 if (reply_sz < 0) 2752 return reply_sz; 2753 2754 kfree(adapter->req_vec_chunks); 2755 adapter->req_vec_chunks = NULL; 2756 2757 return 0; 2758 } 2759 2760 /** 2761 * idpf_get_max_vfs - Get max number of vfs supported 2762 * @adapter: Driver specific private structure 2763 * 2764 * Returns max number of VFs 2765 */ 2766 static int idpf_get_max_vfs(struct idpf_adapter *adapter) 2767 { 2768 return le16_to_cpu(adapter->caps.max_sriov_vfs); 2769 } 2770 2771 /** 2772 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message 2773 * @adapter: Driver specific private structure 2774 * @num_vfs: number of virtual functions to be created 2775 * 2776 * Returns 0 on success, negative on failure.
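 *
 * Illustrative sketch (a hypothetical .sriov_configure callback, not this
 * driver's actual one):
 *
 *	static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		struct idpf_adapter *adapter = pci_get_drvdata(pdev);
 *		int err;
 *
 *		err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
 *		if (err)
 *			return err;
 *
 *		return num_vfs;
 *	}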
2777 */ 2778 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) 2779 { 2780 struct virtchnl2_sriov_vfs_info svi = {}; 2781 struct idpf_vc_xn_params xn_params = {}; 2782 ssize_t reply_sz; 2783 2784 svi.num_vfs = cpu_to_le16(num_vfs); 2785 xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; 2786 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2787 xn_params.send_buf.iov_base = &svi; 2788 xn_params.send_buf.iov_len = sizeof(svi); 2789 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2790 2791 return reply_sz < 0 ? reply_sz : 0; 2792 } 2793 2794 /** 2795 * idpf_send_get_stats_msg - Send virtchnl get statistics message 2796 * @np: netdev private structure 2797 * @port_stats: structure to store the vport statistics 2798 * 2799 * Return: 0 on success, negative on failure. 2800 */ 2801 int idpf_send_get_stats_msg(struct idpf_netdev_priv *np, 2802 struct idpf_port_stats *port_stats) 2803 { 2804 struct rtnl_link_stats64 *netstats = &np->netstats; 2805 struct virtchnl2_vport_stats stats_msg = {}; 2806 struct idpf_vc_xn_params xn_params = {}; 2807 ssize_t reply_sz; 2808 2809 2810 /* Don't send get_stats message if the link is down */ 2811 if (!test_bit(IDPF_VPORT_UP, np->state)) 2812 return 0; 2813 2814 stats_msg.vport_id = cpu_to_le32(np->vport_id); 2815 2816 xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; 2817 xn_params.send_buf.iov_base = &stats_msg; 2818 xn_params.send_buf.iov_len = sizeof(stats_msg); 2819 xn_params.recv_buf = xn_params.send_buf; 2820 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2821 2822 reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params); 2823 if (reply_sz < 0) 2824 return reply_sz; 2825 if (reply_sz < sizeof(stats_msg)) 2826 return -EIO; 2827 2828 spin_lock_bh(&np->stats_lock); 2829 2830 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + 2831 le64_to_cpu(stats_msg.rx_multicast) + 2832 le64_to_cpu(stats_msg.rx_broadcast); 2833 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + 2834 le64_to_cpu(stats_msg.tx_multicast) + 2835 le64_to_cpu(stats_msg.tx_broadcast); 2836 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); 2837 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); 2838 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); 2839 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); 2840 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); 2841 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); 2842 2843 port_stats->vport_stats = stats_msg; 2844 2845 spin_unlock_bh(&np->stats_lock); 2846 2847 return 0; 2848 } 2849 2850 /** 2851 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message 2852 * @adapter: adapter pointer used to send virtchnl message 2853 * @rss_data: pointer to RSS key and lut info 2854 * @vport_id: vport identifier used while preparing the virtchnl message 2855 * @get: flag to set or get RSS look up table 2856 * 2857 * When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash 2858 * is enabled, the LUT values stored in driver's soft copy will be used to setup 2859 * the HW. 2860 * 2861 * Return: 0 on success, negative on failure. 
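 *
 * Illustrative usage (hypothetical ethtool path): push the driver's soft copy
 * to the device after updating the indirection table:
 *
 *	err = idpf_send_get_set_rss_lut_msg(adapter, rss_data,
 *					    vport->vport_id, false);
 *
 * Passing get == true instead refreshes the soft copy from the device.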
2862 */ 2863 int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter, 2864 struct idpf_rss_data *rss_data, 2865 u32 vport_id, bool get) 2866 { 2867 struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; 2868 struct virtchnl2_rss_lut *rl __free(kfree) = NULL; 2869 struct idpf_vc_xn_params xn_params = {}; 2870 int buf_size, lut_buf_size; 2871 struct idpf_vport *vport; 2872 ssize_t reply_sz; 2873 bool rxhash_ena; 2874 int i; 2875 2876 vport = idpf_vid_to_vport(adapter, vport_id); 2877 if (!vport) 2878 return -EINVAL; 2879 2880 rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); 2881 2882 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); 2883 rl = kzalloc(buf_size, GFP_KERNEL); 2884 if (!rl) 2885 return -ENOMEM; 2886 2887 rl->vport_id = cpu_to_le32(vport_id); 2888 2889 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2890 xn_params.send_buf.iov_base = rl; 2891 xn_params.send_buf.iov_len = buf_size; 2892 2893 if (get) { 2894 recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2895 if (!recv_rl) 2896 return -ENOMEM; 2897 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; 2898 xn_params.recv_buf.iov_base = recv_rl; 2899 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2900 } else { 2901 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); 2902 for (i = 0; i < rss_data->rss_lut_size; i++) 2903 rl->lut[i] = rxhash_ena ? 2904 cpu_to_le32(rss_data->rss_lut[i]) : 0; 2905 2906 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; 2907 } 2908 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2909 if (reply_sz < 0) 2910 return reply_sz; 2911 if (!get) 2912 return 0; 2913 if (reply_sz < sizeof(struct virtchnl2_rss_lut)) 2914 return -EIO; 2915 2916 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); 2917 if (reply_sz < lut_buf_size) 2918 return -EIO; 2919 2920 /* size didn't change, we can reuse existing lut buf */ 2921 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) 2922 goto do_memcpy; 2923 2924 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); 2925 kfree(rss_data->rss_lut); 2926 2927 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); 2928 if (!rss_data->rss_lut) { 2929 rss_data->rss_lut_size = 0; 2930 return -ENOMEM; 2931 } 2932 2933 do_memcpy: 2934 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); 2935 2936 return 0; 2937 } 2938 2939 /** 2940 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message 2941 * @adapter: adapter pointer used to send virtchnl message 2942 * @rss_data: pointer to RSS key and lut info 2943 * @vport_id: vport identifier used while preparing the virtchnl message 2944 * @get: flag to set or get RSS key 2945 * 2946 * Return: 0 on success, negative on failure 2947 */ 2948 int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter, 2949 struct idpf_rss_data *rss_data, 2950 u32 vport_id, bool get) 2951 { 2952 struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; 2953 struct virtchnl2_rss_key *rk __free(kfree) = NULL; 2954 struct idpf_vc_xn_params xn_params = {}; 2955 ssize_t reply_sz; 2956 int i, buf_size; 2957 u16 key_size; 2958 2959 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); 2960 rk = kzalloc(buf_size, GFP_KERNEL); 2961 if (!rk) 2962 return -ENOMEM; 2963 2964 rk->vport_id = cpu_to_le32(vport_id); 2965 xn_params.send_buf.iov_base = rk; 2966 xn_params.send_buf.iov_len = buf_size; 2967 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2968 if (get) { 2969 recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2970 if (!recv_rk) 2971 return -ENOMEM; 2972
2973 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; 2974 xn_params.recv_buf.iov_base = recv_rk; 2975 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2976 } else { 2977 rk->key_len = cpu_to_le16(rss_data->rss_key_size); 2978 for (i = 0; i < rss_data->rss_key_size; i++) 2979 rk->key_flex[i] = rss_data->rss_key[i]; 2980 2981 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; 2982 } 2983 2984 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2985 if (reply_sz < 0) 2986 return reply_sz; 2987 if (!get) 2988 return 0; 2989 if (reply_sz < sizeof(struct virtchnl2_rss_key)) 2990 return -EIO; 2991 2992 key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 2993 le16_to_cpu(recv_rk->key_len)); 2994 if (reply_sz < key_size) 2995 return -EIO; 2996 2997 /* key len didn't change, reuse existing buf */ 2998 if (rss_data->rss_key_size == key_size) 2999 goto do_memcpy; 3000 3001 rss_data->rss_key_size = key_size; 3002 kfree(rss_data->rss_key); 3003 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); 3004 if (!rss_data->rss_key) { 3005 rss_data->rss_key_size = 0; 3006 return -ENOMEM; 3007 } 3008 3009 do_memcpy: 3010 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); 3011 3012 return 0; 3013 } 3014 3015 /** 3016 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table 3017 * @ptype: ptype lookup table 3018 * @pstate: state machine for ptype lookup table 3019 * @ipv4: ipv4 or ipv6 3020 * @frag: fragmentation allowed 3021 * 3022 */ 3023 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, 3024 struct idpf_ptype_state *pstate, 3025 bool ipv4, bool frag) 3026 { 3027 if (!pstate->outer_ip || !pstate->outer_frag) { 3028 pstate->outer_ip = true; 3029 3030 if (ipv4) 3031 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; 3032 else 3033 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; 3034 3035 if (frag) { 3036 ptype->outer_frag = LIBETH_RX_PT_FRAG; 3037 pstate->outer_frag = true; 3038 } 3039 } else { 3040 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; 3041 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; 3042 3043 if (ipv4) 3044 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; 3045 else 3046 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; 3047 3048 if (frag) 3049 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; 3050 } 3051 } 3052 3053 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) 3054 { 3055 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 3056 ptype->inner_prot) 3057 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; 3058 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 3059 ptype->outer_ip) 3060 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; 3061 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) 3062 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; 3063 else 3064 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; 3065 3066 libeth_rx_pt_gen_hash_type(ptype); 3067 } 3068 3069 /** 3070 * idpf_parse_protocol_ids - parse protocol IDs for a given packet type 3071 * @ptype: packet type to parse 3072 * @rx_pt: store the parsed packet type info into 3073 */ 3074 static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype, 3075 struct libeth_rx_pt *rx_pt) 3076 { 3077 struct idpf_ptype_state pstate = {}; 3078 3079 for (u32 j = 0; j < ptype->proto_id_count; j++) { 3080 u16 id = le16_to_cpu(ptype->proto_id[j]); 3081 3082 switch (id) { 3083 case VIRTCHNL2_PROTO_HDR_GRE: 3084 if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) { 3085 rx_pt->tunnel_type = 3086 LIBETH_RX_PT_TUNNEL_IP_GRENAT; 3087 pstate.tunnel_state |= 3088 IDPF_PTYPE_TUNNEL_IP_GRENAT; 3089 } 3090 break; 3091 case 
VIRTCHNL2_PROTO_HDR_MAC: 3092 rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2; 3093 if (pstate.tunnel_state == IDPF_TUN_IP_GRE) { 3094 rx_pt->tunnel_type = 3095 LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; 3096 pstate.tunnel_state |= 3097 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; 3098 } 3099 break; 3100 case VIRTCHNL2_PROTO_HDR_IPV4: 3101 idpf_fill_ptype_lookup(rx_pt, &pstate, true, false); 3102 break; 3103 case VIRTCHNL2_PROTO_HDR_IPV6: 3104 idpf_fill_ptype_lookup(rx_pt, &pstate, false, false); 3105 break; 3106 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: 3107 idpf_fill_ptype_lookup(rx_pt, &pstate, true, true); 3108 break; 3109 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: 3110 idpf_fill_ptype_lookup(rx_pt, &pstate, false, true); 3111 break; 3112 case VIRTCHNL2_PROTO_HDR_UDP: 3113 rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP; 3114 break; 3115 case VIRTCHNL2_PROTO_HDR_TCP: 3116 rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP; 3117 break; 3118 case VIRTCHNL2_PROTO_HDR_SCTP: 3119 rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP; 3120 break; 3121 case VIRTCHNL2_PROTO_HDR_ICMP: 3122 rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP; 3123 break; 3124 case VIRTCHNL2_PROTO_HDR_PAY: 3125 rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; 3126 break; 3127 case VIRTCHNL2_PROTO_HDR_ICMPV6: 3128 case VIRTCHNL2_PROTO_HDR_IPV6_EH: 3129 case VIRTCHNL2_PROTO_HDR_PRE_MAC: 3130 case VIRTCHNL2_PROTO_HDR_POST_MAC: 3131 case VIRTCHNL2_PROTO_HDR_ETHERTYPE: 3132 case VIRTCHNL2_PROTO_HDR_SVLAN: 3133 case VIRTCHNL2_PROTO_HDR_CVLAN: 3134 case VIRTCHNL2_PROTO_HDR_MPLS: 3135 case VIRTCHNL2_PROTO_HDR_MMPLS: 3136 case VIRTCHNL2_PROTO_HDR_PTP: 3137 case VIRTCHNL2_PROTO_HDR_CTRL: 3138 case VIRTCHNL2_PROTO_HDR_LLDP: 3139 case VIRTCHNL2_PROTO_HDR_ARP: 3140 case VIRTCHNL2_PROTO_HDR_ECP: 3141 case VIRTCHNL2_PROTO_HDR_EAPOL: 3142 case VIRTCHNL2_PROTO_HDR_PPPOD: 3143 case VIRTCHNL2_PROTO_HDR_PPPOE: 3144 case VIRTCHNL2_PROTO_HDR_IGMP: 3145 case VIRTCHNL2_PROTO_HDR_AH: 3146 case VIRTCHNL2_PROTO_HDR_ESP: 3147 case VIRTCHNL2_PROTO_HDR_IKE: 3148 case VIRTCHNL2_PROTO_HDR_NATT_KEEP: 3149 case VIRTCHNL2_PROTO_HDR_L2TPV2: 3150 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: 3151 case VIRTCHNL2_PROTO_HDR_L2TPV3: 3152 case VIRTCHNL2_PROTO_HDR_GTP: 3153 case VIRTCHNL2_PROTO_HDR_GTP_EH: 3154 case VIRTCHNL2_PROTO_HDR_GTPCV2: 3155 case VIRTCHNL2_PROTO_HDR_GTPC_TEID: 3156 case VIRTCHNL2_PROTO_HDR_GTPU: 3157 case VIRTCHNL2_PROTO_HDR_GTPU_UL: 3158 case VIRTCHNL2_PROTO_HDR_GTPU_DL: 3159 case VIRTCHNL2_PROTO_HDR_ECPRI: 3160 case VIRTCHNL2_PROTO_HDR_VRRP: 3161 case VIRTCHNL2_PROTO_HDR_OSPF: 3162 case VIRTCHNL2_PROTO_HDR_TUN: 3163 case VIRTCHNL2_PROTO_HDR_NVGRE: 3164 case VIRTCHNL2_PROTO_HDR_VXLAN: 3165 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: 3166 case VIRTCHNL2_PROTO_HDR_GENEVE: 3167 case VIRTCHNL2_PROTO_HDR_NSH: 3168 case VIRTCHNL2_PROTO_HDR_QUIC: 3169 case VIRTCHNL2_PROTO_HDR_PFCP: 3170 case VIRTCHNL2_PROTO_HDR_PFCP_NODE: 3171 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: 3172 case VIRTCHNL2_PROTO_HDR_RTP: 3173 case VIRTCHNL2_PROTO_HDR_NO_PROTO: 3174 break; 3175 default: 3176 break; 3177 } 3178 } 3179 } 3180 3181 /** 3182 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info 3183 * @adapter: driver specific private structure 3184 * 3185 * Return: 0 on success, negative on failure. 
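 *
 * Illustrative consumer sketch: once this call has succeeded, the Rx hot
 * path can translate a hardware ptype index in O(1), e.g.
 *
 *	struct libeth_rx_pt pt = adapter->splitq_pt_lkup[ptype_id_10];
 *
 * with singleq using adapter->singleq_pt_lkup[ptype_id_8] the same way
 * (ptype_id_10 and ptype_id_8 being hypothetical values parsed from a
 * descriptor).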
3186 */ 3187 static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter) 3188 { 3189 struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; 3190 struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; 3191 struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL; 3192 struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL; 3193 struct idpf_vc_xn_params xn_params = {}; 3194 int ptypes_recvd = 0, ptype_offset; 3195 u32 max_ptype = IDPF_RX_MAX_PTYPE; 3196 u16 next_ptype_id = 0; 3197 ssize_t reply_sz; 3198 3199 singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE); 3200 if (!singleq_pt_lkup) 3201 return -ENOMEM; 3202 3203 splitq_pt_lkup = kzalloc_objs(*splitq_pt_lkup, max_ptype); 3204 if (!splitq_pt_lkup) 3205 return -ENOMEM; 3206 3207 get_ptype_info = kzalloc_obj(*get_ptype_info); 3208 if (!get_ptype_info) 3209 return -ENOMEM; 3210 3211 ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 3212 if (!ptype_info) 3213 return -ENOMEM; 3214 3215 xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; 3216 xn_params.send_buf.iov_base = get_ptype_info; 3217 xn_params.send_buf.iov_len = sizeof(*get_ptype_info); 3218 xn_params.recv_buf.iov_base = ptype_info; 3219 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 3220 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3221 3222 while (next_ptype_id < max_ptype) { 3223 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); 3224 3225 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) 3226 get_ptype_info->num_ptypes = 3227 cpu_to_le16(max_ptype - next_ptype_id); 3228 else 3229 get_ptype_info->num_ptypes = 3230 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); 3231 3232 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3233 if (reply_sz < 0) 3234 return reply_sz; 3235 3236 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); 3237 if (ptypes_recvd > max_ptype) 3238 return -EINVAL; 3239 3240 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + 3241 le16_to_cpu(get_ptype_info->num_ptypes); 3242 3243 ptype_offset = IDPF_RX_PTYPE_HDR_SZ; 3244 3245 for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { 3246 struct libeth_rx_pt rx_pt = {}; 3247 struct virtchnl2_ptype *ptype; 3248 u16 pt_10, pt_8; 3249 3250 ptype = (struct virtchnl2_ptype *) 3251 ((u8 *)ptype_info + ptype_offset); 3252 3253 pt_10 = le16_to_cpu(ptype->ptype_id_10); 3254 pt_8 = ptype->ptype_id_8; 3255 3256 ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); 3257 if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) 3258 return -EINVAL; 3259 3260 /* 0xFFFF indicates end of ptypes */ 3261 if (pt_10 == IDPF_INVALID_PTYPE_ID) 3262 goto out; 3263 if (pt_10 >= max_ptype) 3264 return -EINVAL; 3265 3266 idpf_parse_protocol_ids(ptype, &rx_pt); 3267 idpf_finalize_ptype_lookup(&rx_pt); 3268 3269 /* For a given protocol ID stack, the ptype value might 3270 * vary between ptype_id_10 and ptype_id_8. So store 3271 * them separately for splitq and singleq. Also skip 3272 * the repeated ptypes in case of singleq. 
3273 */ 3274 splitq_pt_lkup[pt_10] = rx_pt; 3275 if (!singleq_pt_lkup[pt_8].outer_ip) 3276 singleq_pt_lkup[pt_8] = rx_pt; 3277 } 3278 } 3279 3280 out: 3281 adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup); 3282 adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup); 3283 3284 return 0; 3285 } 3286 3287 /** 3288 * idpf_rel_rx_pt_lkup - release RX ptype lookup table 3289 * @adapter: adapter pointer to get the lookup table 3290 */ 3291 static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter) 3292 { 3293 kfree(adapter->splitq_pt_lkup); 3294 adapter->splitq_pt_lkup = NULL; 3295 3296 kfree(adapter->singleq_pt_lkup); 3297 adapter->singleq_pt_lkup = NULL; 3298 } 3299 3300 /** 3301 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback 3302 * message 3303 * @adapter: adapter pointer used to send virtchnl message 3304 * @vport_id: vport identifier used while preparing the virtchnl message 3305 * @loopback_ena: flag to enable or disable loopback 3306 * 3307 * Return: 0 on success, negative on failure. 3308 */ 3309 int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id, 3310 bool loopback_ena) 3311 { 3312 struct idpf_vc_xn_params xn_params = {}; 3313 struct virtchnl2_loopback loopback; 3314 ssize_t reply_sz; 3315 3316 loopback.vport_id = cpu_to_le32(vport_id); 3317 loopback.enable = loopback_ena; 3318 3319 xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; 3320 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3321 xn_params.send_buf.iov_base = &loopback; 3322 xn_params.send_buf.iov_len = sizeof(loopback); 3323 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3324 3325 return reply_sz < 0 ? reply_sz : 0; 3326 } 3327 3328 /** 3329 * idpf_find_ctlq - Given a type and id, find ctlq info 3330 * @hw: hardware struct 3331 * @type: type of ctrlq to find 3332 * @id: ctlq id to find 3333 * 3334 * Returns pointer to found ctlq info struct, NULL otherwise. 
3335 */ 3336 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, 3337 enum idpf_ctlq_type type, int id) 3338 { 3339 struct idpf_ctlq_info *cq, *tmp; 3340 3341 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) 3342 if (cq->q_id == id && cq->cq_type == type) 3343 return cq; 3344 3345 return NULL; 3346 } 3347 3348 /** 3349 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request 3350 * @adapter: adapter info struct 3351 * 3352 * Returns 0 on success, negative otherwise 3353 */ 3354 int idpf_init_dflt_mbx(struct idpf_adapter *adapter) 3355 { 3356 struct idpf_ctlq_create_info ctlq_info[] = { 3357 { 3358 .type = IDPF_CTLQ_TYPE_MAILBOX_TX, 3359 .id = IDPF_DFLT_MBX_ID, 3360 .len = IDPF_DFLT_MBX_Q_LEN, 3361 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 3362 }, 3363 { 3364 .type = IDPF_CTLQ_TYPE_MAILBOX_RX, 3365 .id = IDPF_DFLT_MBX_ID, 3366 .len = IDPF_DFLT_MBX_Q_LEN, 3367 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 3368 } 3369 }; 3370 struct idpf_hw *hw = &adapter->hw; 3371 int err; 3372 3373 adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); 3374 3375 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 3376 if (err) 3377 return err; 3378 3379 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, 3380 IDPF_DFLT_MBX_ID); 3381 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, 3382 IDPF_DFLT_MBX_ID); 3383 3384 if (!hw->asq || !hw->arq) { 3385 idpf_ctlq_deinit(hw); 3386 3387 return -ENOENT; 3388 } 3389 3390 adapter->state = __IDPF_VER_CHECK; 3391 3392 return 0; 3393 } 3394 3395 /** 3396 * idpf_deinit_dflt_mbx - Free up ctlqs setup 3397 * @adapter: Driver specific private data structure 3398 */ 3399 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) 3400 { 3401 if (adapter->hw.arq && adapter->hw.asq) { 3402 idpf_mb_clean(adapter, adapter->hw.asq); 3403 idpf_ctlq_deinit(&adapter->hw); 3404 } 3405 adapter->hw.arq = NULL; 3406 adapter->hw.asq = NULL; 3407 } 3408 3409 /** 3410 * idpf_vport_params_buf_rel - Release memory for MailBox resources 3411 * @adapter: Driver specific private data structure 3412 * 3413 * Will release memory to hold the vport parameters received on MailBox 3414 */ 3415 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) 3416 { 3417 kfree(adapter->vport_params_recvd); 3418 adapter->vport_params_recvd = NULL; 3419 kfree(adapter->vport_params_reqd); 3420 adapter->vport_params_reqd = NULL; 3421 kfree(adapter->vport_ids); 3422 adapter->vport_ids = NULL; 3423 } 3424 3425 /** 3426 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources 3427 * @adapter: Driver specific private data structure 3428 * 3429 * Will alloc memory to hold the vport parameters received on MailBox 3430 */ 3431 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) 3432 { 3433 u16 num_max_vports = idpf_get_max_vports(adapter); 3434 3435 adapter->vport_params_reqd = kzalloc_objs(*adapter->vport_params_reqd, 3436 num_max_vports); 3437 if (!adapter->vport_params_reqd) 3438 return -ENOMEM; 3439 3440 adapter->vport_params_recvd = kzalloc_objs(*adapter->vport_params_recvd, 3441 num_max_vports); 3442 if (!adapter->vport_params_recvd) 3443 goto err_mem; 3444 3445 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); 3446 if (!adapter->vport_ids) 3447 goto err_mem; 3448 3449 if (adapter->vport_config) 3450 return 0; 3451 3452 adapter->vport_config = kzalloc_objs(*adapter->vport_config, 3453 num_max_vports); 3454 if (!adapter->vport_config) 3455 goto err_mem; 3456 3457 return 0; 3458 3459 err_mem: 3460 
idpf_vport_params_buf_rel(adapter); 3461 3462 return -ENOMEM; 3463 } 3464 3465 /** 3466 * idpf_vc_core_init - Initialize state machine and get driver specific 3467 * resources 3468 * @adapter: Driver specific private structure 3469 * 3470 * This function will initialize the state machine and request all necessary 3471 * resources required by the device driver. Once the state machine is 3472 * initialized, allocate memory to store vport specific information and also 3473 * requests required interrupts. 3474 * 3475 * Returns 0 on success, -EAGAIN function will get called again, 3476 * otherwise negative on failure. 3477 */ 3478 int idpf_vc_core_init(struct idpf_adapter *adapter) 3479 { 3480 int task_delay = 30; 3481 u16 num_max_vports; 3482 int err = 0; 3483 3484 if (!adapter->vcxn_mngr) { 3485 adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr); 3486 if (!adapter->vcxn_mngr) { 3487 err = -ENOMEM; 3488 goto init_failed; 3489 } 3490 } 3491 idpf_vc_xn_init(adapter->vcxn_mngr); 3492 3493 while (adapter->state != __IDPF_INIT_SW) { 3494 switch (adapter->state) { 3495 case __IDPF_VER_CHECK: 3496 err = idpf_send_ver_msg(adapter); 3497 switch (err) { 3498 case 0: 3499 /* success, move state machine forward */ 3500 adapter->state = __IDPF_GET_CAPS; 3501 fallthrough; 3502 case -EAGAIN: 3503 goto restart; 3504 default: 3505 /* Something bad happened, try again but only a 3506 * few times. 3507 */ 3508 goto init_failed; 3509 } 3510 case __IDPF_GET_CAPS: 3511 err = idpf_send_get_caps_msg(adapter); 3512 if (err) 3513 goto init_failed; 3514 adapter->state = __IDPF_INIT_SW; 3515 break; 3516 default: 3517 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", 3518 adapter->state); 3519 err = -EINVAL; 3520 goto init_failed; 3521 } 3522 break; 3523 restart: 3524 /* Give enough time before proceeding further with 3525 * state machine 3526 */ 3527 msleep(task_delay); 3528 } 3529 3530 if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) { 3531 err = idpf_send_get_lan_memory_regions(adapter); 3532 if (err) { 3533 dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", 3534 err); 3535 return -EINVAL; 3536 } 3537 } else { 3538 /* Fallback to mapping the remaining regions of the entire BAR */ 3539 err = idpf_calc_remaining_mmio_regs(adapter); 3540 if (err) { 3541 dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", 3542 err); 3543 return -ENOMEM; 3544 } 3545 } 3546 3547 err = idpf_map_lan_mmio_regs(adapter); 3548 if (err) { 3549 dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", 3550 err); 3551 return -ENOMEM; 3552 } 3553 3554 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 3555 num_max_vports = idpf_get_max_vports(adapter); 3556 adapter->max_vports = num_max_vports; 3557 adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports); 3558 if (!adapter->vports) 3559 return -ENOMEM; 3560 3561 if (!adapter->netdevs) { 3562 adapter->netdevs = kzalloc_objs(struct net_device *, 3563 num_max_vports); 3564 if (!adapter->netdevs) { 3565 err = -ENOMEM; 3566 goto err_netdev_alloc; 3567 } 3568 } 3569 3570 err = idpf_vport_params_buf_alloc(adapter); 3571 if (err) { 3572 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", 3573 err); 3574 goto err_netdev_alloc; 3575 } 3576 3577 /* Start the mailbox task before requesting vectors. 
This will ensure 3578 * vector information response from mailbox is handled 3579 */ 3580 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 3581 3582 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 3583 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3584 3585 err = idpf_intr_req(adapter); 3586 if (err) { 3587 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", 3588 err); 3589 goto err_intr_req; 3590 } 3591 3592 err = idpf_send_get_rx_ptype_msg(adapter); 3593 if (err) { 3594 dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n", 3595 err); 3596 goto intr_rel; 3597 } 3598 3599 err = idpf_ptp_init(adapter); 3600 if (err) 3601 pci_err(adapter->pdev, "PTP init failed, err=%pe\n", 3602 ERR_PTR(err)); 3603 3604 idpf_init_avail_queues(adapter); 3605 3606 /* Skew the delay for init tasks for each function based on fn number 3607 * to prevent every function from making the same call simultaneously. 3608 */ 3609 queue_delayed_work(adapter->init_wq, &adapter->init_task, 3610 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3611 3612 set_bit(IDPF_VC_CORE_INIT, adapter->flags); 3613 3614 return 0; 3615 3616 intr_rel: 3617 idpf_intr_rel(adapter); 3618 err_intr_req: 3619 cancel_delayed_work_sync(&adapter->serv_task); 3620 cancel_delayed_work_sync(&adapter->mbx_task); 3621 idpf_vport_params_buf_rel(adapter); 3622 err_netdev_alloc: 3623 kfree(adapter->vports); 3624 adapter->vports = NULL; 3625 return err; 3626 3627 init_failed: 3628 /* Don't retry if we're trying to go down, just bail. */ 3629 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 3630 return err; 3631 3632 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { 3633 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); 3634 3635 return -EFAULT; 3636 } 3637 /* If it reached here, it is possible that mailbox queue initialization 3638 * register writes might not have taken effect. 
Retry to initialize 3639 * the mailbox again 3640 */ 3641 adapter->state = __IDPF_VER_CHECK; 3642 if (adapter->vcxn_mngr) 3643 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3644 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); 3645 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, 3646 msecs_to_jiffies(task_delay)); 3647 3648 return -EAGAIN; 3649 } 3650 3651 /** 3652 * idpf_vc_core_deinit - Device deinit routine 3653 * @adapter: Driver specific private structure 3654 * 3655 */ 3656 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3657 { 3658 struct idpf_hw *hw = &adapter->hw; 3659 bool remove_in_prog; 3660 3661 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) 3662 return; 3663 3664 /* Avoid transaction timeouts when called during reset */ 3665 remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); 3666 if (!remove_in_prog) 3667 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3668 3669 idpf_ptp_release(adapter); 3670 idpf_deinit_task(adapter); 3671 idpf_idc_deinit_core_aux_device(adapter->cdev_info); 3672 idpf_rel_rx_pt_lkup(adapter); 3673 idpf_intr_rel(adapter); 3674 3675 if (remove_in_prog) 3676 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3677 3678 cancel_delayed_work_sync(&adapter->serv_task); 3679 cancel_delayed_work_sync(&adapter->mbx_task); 3680 3681 idpf_vport_params_buf_rel(adapter); 3682 3683 kfree(hw->lan_regs); 3684 hw->lan_regs = NULL; 3685 3686 kfree(adapter->vports); 3687 adapter->vports = NULL; 3688 3689 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); 3690 } 3691 3692 /** 3693 * idpf_vport_alloc_vec_indexes - Get relative vector indexes 3694 * @vport: virtual port data struct 3695 * @rsrc: pointer to queue and vector resources 3696 * 3697 * This function requests the vector information required for the vport and 3698 * stores the vector indexes received from the 'global vector distribution' 3699 * in the vport's queue vectors array. 3700 * 3701 * Return: 0 on success, error on failure 3702 */ 3703 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport, 3704 struct idpf_q_vec_rsrc *rsrc) 3705 { 3706 struct idpf_vector_info vec_info; 3707 int num_alloc_vecs; 3708 u32 req; 3709 3710 vec_info.num_curr_vecs = rsrc->num_q_vectors; 3711 if (vec_info.num_curr_vecs) 3712 vec_info.num_curr_vecs += IDPF_RESERVED_VECS; 3713 3714 /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */ 3715 req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) + 3716 IDPF_RESERVED_VECS; 3717 vec_info.num_req_vecs = req; 3718 3719 vec_info.default_vport = vport->default_vport; 3720 vec_info.index = vport->idx; 3721 3722 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, 3723 rsrc->q_vector_idxs, 3724 &vec_info); 3725 if (num_alloc_vecs <= 0) { 3726 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", 3727 num_alloc_vecs); 3728 return -EINVAL; 3729 } 3730 3731 rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS; 3732 3733 return 0; 3734 } 3735 3736 /** 3737 * idpf_vport_init - Initialize virtual port 3738 * @vport: virtual port to be initialized 3739 * @max_q: vport max queue info 3740 * 3741 * Will initialize vport with the info received through MB earlier 3742 * 3743 * Return: 0 on success, negative on failure. 
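 *
 * Illustrative call site (hypothetical init task): once the create-vport
 * reply has been cached in adapter->vport_params_recvd[idx]:
 *
 *	err = idpf_vport_init(vport, &max_q);
 *	if (err)
 *		goto free_vport;
 *
 * where free_vport is a made-up cleanup label.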
/**
 * idpf_vport_init - Initialize virtual port
 * @vport: virtual port to be initialized
 * @max_q: vport max queue info
 *
 * Initialize the vport with the create_vport parameters received earlier
 * over the mailbox.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vport_config *vport_config;
	u16 tx_itr[] = {2, 8, 64, 128, 256};
	u16 rx_itr[] = {2, 8, 32, 96, 128};
	struct idpf_rss_data *rss_data;
	u16 idx = vport->idx;
	int err;

	vport_config = adapter->vport_config[idx];
	rss_data = &vport_config->user_config.rss_data;
	vport_msg = adapter->vport_params_recvd[idx];

	err = idpf_vport_init_queue_reg_chunks(vport_config,
					       &vport_msg->chunks);
	if (err)
		return err;

	vport_config->max_q.max_txq = max_q->max_txq;
	vport_config->max_q.max_rxq = max_q->max_rxq;
	vport_config->max_q.max_complq = max_q->max_complq;
	vport_config->max_q.max_bufq = max_q->max_bufq;

	rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
	rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
	vport->vport_id = le32_to_cpu(vport_msg->vport_id);

	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
				       le16_to_cpu(vport_msg->rss_key_size));
	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);

	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;

	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation.
	 * Copy the full arrays, not IDPF_DIM_PROFILE_SLOTS bytes: the
	 * profiles are u16, so the byte count is twice the slot count.
	 */
	memcpy(vport->rx_itr_profile, rx_itr, sizeof(rx_itr));
	memcpy(vport->tx_itr_profile, tx_itr, sizeof(tx_itr));

	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	idpf_vport_init_num_qs(vport, vport_msg, rsrc);
	idpf_vport_calc_num_q_desc(vport, rsrc);
	idpf_vport_calc_num_q_groups(rsrc);

	err = idpf_vport_alloc_vec_indexes(vport, rsrc);
	if (err)
		return err;

	vport->crc_enable = adapter->crc_enable;

	if (!(vport_msg->vport_flags &
	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
		return 0;

	err = idpf_ptp_get_vport_tstamps_caps(vport);
	if (err) {
		/* Do not error on timestamp failure */
		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
		return 0;
	}

	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);

	return 0;
}
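/* Background (sketch, assuming the usual net_dim() flow): the rx_itr and
 * tx_itr tables above are escalating interrupt-throttling profiles in
 * usecs.  DIM selects a profile slot based on observed traffic, and the
 * slot's value is what ends up programmed into the vector's ITR register.
 */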
/**
 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
 * @adapter: adapter structure to get the mailbox vector id
 * @vecids: Array of vector ids
 * @num_vecids: number of vector ids
 * @chunks: vector ids received over mailbox
 *
 * Fills @vecids with the mailbox vector id (learned from the get
 * capabilities exchange) followed by the data queue vector ids received
 * as mailbox parameters.
 *
 * Return: number of ids filled
 */
int idpf_get_vec_ids(struct idpf_adapter *adapter,
		     u16 *vecids, int num_vecids,
		     struct virtchnl2_vector_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
	int num_vecid_filled = 0;
	int i, j;

	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
	num_vecid_filled++;

	for (j = 0; j < num_chunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u16 start_vecid, num_vec;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		start_vecid = le16_to_cpu(chunk->start_vector_id);

		for (i = 0; i < num_vec; i++) {
			if ((num_vecid_filled + i) < num_vecids) {
				vecids[num_vecid_filled + i] = start_vecid;
				start_vecid++;
			} else {
				break;
			}
		}
		num_vecid_filled += i;
	}

	return num_vecid_filled;
}

/**
 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
 * @qids: Array of queue ids
 * @num_qids: number of queue ids
 * @q_type: type of queue to extract ids for
 * @chunks: queue ids received over mailbox
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 *
 * Return: number of ids filled
 */
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
				    struct idpf_queue_id_reg_info *chunks)
{
	u16 num_chunks = chunks->num_chunks;
	u32 num_q_id_filled = 0, i;
	u32 start_q_id, num_q;

	while (num_chunks--) {
		struct idpf_queue_id_reg_chunk *chunk;

		chunk = &chunks->queue_chunks[num_chunks];
		if (chunk->type != q_type)
			continue;

		num_q = chunk->num_queues;
		start_q_id = chunk->start_queue_id;

		for (i = 0; i < num_q; i++) {
			if ((num_q_id_filled + i) < num_qids) {
				qids[num_q_id_filled + i] = start_q_id;
				start_q_id++;
			} else {
				break;
			}
		}
		num_q_id_filled += i;
	}

	return num_q_id_filled;
}
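/* Worked example (illustrative): with vector chunks {start 4, num 3} and
 * {start 10, num 2}, idpf_get_vec_ids() produces
 * vecids = { mb_vector, 4, 5, 6, 10, 11 } and returns 6.
 * idpf_vport_get_queue_ids() flattens queue chunks the same way, except
 * it walks the chunks back to front and skips chunks whose type doesn't
 * match @q_type.
 */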
/**
 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 * @rsrc: pointer to queue and vector resources
 * @qids: queue ids
 * @num_qids: number of queue ids
 * @q_type: type of queue
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 *
 * Return: number of queue ids initialized
 */
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
				       struct idpf_q_vec_rsrc *rsrc,
				       const u32 *qids,
				       int num_qids,
				       u32 q_type)
{
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < rsrc->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
				tx_qgrp->txqs[j]->q_id = qids[k];
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u16 num_rxq;

			if (idpf_is_queue_model_split(rsrc->rxq_model))
				num_rxq = rx_qgrp->splitq.num_rxq_sets;
			else
				num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
				struct idpf_rx_queue *q;

				if (idpf_is_queue_model_split(rsrc->rxq_model))
					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
				else
					q = rx_qgrp->singleq.rxqs[j];
				q->q_id = qids[k];
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];

			tx_qgrp->complq->q_id = qids[k];
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < rsrc->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->q_id = qids[k];
			}
		}
		break;
	default:
		break;
	}

	return k;
}
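/* Note: ids are handed out in qids[] order, queue group by queue group;
 * k counts how many were consumed, which the caller compares against the
 * expected per-type queue count.  In the split queue model the Rx groups
 * carry rxq_sets plus separate buffer queues, so RX and RX_BUFFER ids are
 * assigned in independent passes; the single queue model only walks the
 * plain rxqs array.
 */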
/**
 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 * @rsrc: pointer to queue and vector resources
 * @chunks: queue ids received over mailbox
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 *
 * Return: 0 on success, negative if not all queue ids could be initialized.
 */
int idpf_vport_queue_ids_init(struct idpf_vport *vport,
			      struct idpf_q_vec_rsrc *rsrc,
			      struct idpf_queue_id_reg_info *chunks)
{
	int num_ids, err = 0;
	u16 q_type;
	u32 *qids;

	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
	if (!qids)
		return -ENOMEM;

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_TX,
					   chunks);
	if (num_ids < rsrc->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_ids < rsrc->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_RX,
					   chunks);
	if (num_ids < rsrc->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_RX);
	if (num_ids < rsrc->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}

	if (!idpf_is_queue_model_split(rsrc->txq_model))
		goto check_rxq;

	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < rsrc->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
					      num_ids, q_type);
	if (num_ids < rsrc->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}

check_rxq:
	if (!idpf_is_queue_model_split(rsrc->rxq_model))
		goto mem_rel;

	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < rsrc->num_bufq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
					      num_ids, q_type);
	if (num_ids < rsrc->num_bufq)
		err = -EINVAL;

mem_rel:
	kfree(qids);

	return err;
}
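/* Each queue type is resolved in two steps: gather the ids advertised in
 * the chunks, then distribute them to the vport's queues.  If the device
 * advertises fewer ids than the vport negotiated queues (num_ids <
 * rsrc->num_*q) at either step, init fails with -EINVAL rather than
 * leaving part of the queue set without hardware ids.
 */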
/**
 * idpf_vport_adjust_qs - Adjust to new requested queues
 * @vport: virtual port data struct
 * @rsrc: pointer to queue and vector resources
 *
 * Renegotiate queues.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
	struct virtchnl2_create_vport vport_msg;
	int err;

	vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
	vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
				       NULL);
	if (err)
		return err;

	idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
	idpf_vport_calc_num_q_groups(rsrc);

	return 0;
}

/**
 * idpf_is_capability_ena - Default implementation of capability checking
 * @adapter: Private data struct
 * @all: all or one flag
 * @field: caps field to check for flags
 * @flag: flag(s) to check
 *
 * Return: if @all is set, true only when all bits in @flag are set;
 * otherwise true when any bit in @flag is set.
 */
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag)
{
	u8 *caps = (u8 *)&adapter->caps;
	u32 *cap_field;

	if (!caps)
		return false;

	if (field == IDPF_BASE_CAPS)
		return false;

	cap_field = (u32 *)(caps + field);

	if (all)
		return (*cap_field & flag) == flag;

	return !!(*cap_field & flag);
}

/**
 * idpf_vport_is_cap_ena - Check if vport capability is enabled
 * @vport: Private data struct
 * @flag: flag(s) to check
 *
 * Return: true if the capability is supported, false otherwise
 */
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
}

/**
 * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
 * @vport: Private data struct
 * @flow_type: flow type to check (from ethtool.h)
 *
 * Return: true if sideband filters are allowed for @flow_type, false otherwise
 */
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
{
	struct virtchnl2_create_vport *vport_msg;
	__le64 caps;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
	caps = vport_msg->sideband_flow_caps;

	switch (flow_type) {
	case TCP_V4_FLOW:
		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
	case UDP_V4_FLOW:
		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
	default:
		return false;
	}
}

/**
 * idpf_sideband_action_ena - Check if steering is enabled for action
 * @vport: Private data struct
 * @fsp: flow spec
 *
 * Return: true if sideband filters are allowed for @fsp, false otherwise
 */
bool idpf_sideband_action_ena(struct idpf_vport *vport,
			      struct ethtool_rx_flow_spec *fsp)
{
	struct virtchnl2_create_vport *vport_msg;
	unsigned int supp_actions;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);

	/* Actions Drop/Wake are not supported */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
		return false;

	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
}
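/* Usage sketch (illustrative; assumes the IDPF_RSS_CAPS field and
 * VIRTCHNL2_CAP_RSS_* flags from virtchnl2.h):
 *
 *	all == true  -> AND semantics, every requested flag must be set
 *	all == false -> OR semantics, any requested flag is enough
 *
 *	ena = idpf_is_capability_ena(adapter, true, IDPF_RSS_CAPS,
 *				     VIRTCHNL2_CAP_RSS_IPV4_TCP |
 *				     VIRTCHNL2_CAP_RSS_IPV6_TCP);
 */

/**
 * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: maximum flow steering rules, as reported by the device in the
 * create_vport parameters.
 */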
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->flow_steer_max_rules);
}

/**
 * idpf_get_vport_id - Get vport id
 * @vport: virtual port structure
 *
 * Return: vport id from the adapter persistent data
 */
u32 idpf_get_vport_id(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->vport_id);
}
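/**
 * idpf_set_mac_type - Classify a MAC address as primary or extra
 * @default_mac_addr: the vport's default MAC address
 * @mac_addr: virtchnl MAC address entry to tag
 *
 * Tag @mac_addr as VIRTCHNL2_MAC_ADDR_PRIMARY when it matches the vport's
 * default address and as VIRTCHNL2_MAC_ADDR_EXTRA otherwise.
 */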
static void idpf_set_mac_type(const u8 *default_mac_addr,
			      struct virtchnl2_mac_addr *mac_addr)
{
	bool is_primary;

	is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
				      VIRTCHNL2_MAC_ADDR_EXTRA;
}

/**
 * idpf_mac_filter_async_handler - Async callback for mac filters
 * @adapter: private data struct
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * In some scenarios the driver can't sleep and wait for a reply (e.g. the
 * stack is holding rtnl_lock) when adding a new MAC filter, which makes
 * handling errors in the reply awkward. The best we can ultimately do is
 * remove the rejected filters from our list of MAC filters and report the
 * error.
 *
 * Return: 0 on success or a handled error, -EINVAL on an invalid payload.
 */
static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
					 struct idpf_vc_xn *xn,
					 const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_mac_addr_list *ma_list;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_mac_addr *mac_addr;
	struct idpf_mac_filter *f, *tmp;
	struct list_head *ma_list_head;
	struct idpf_vport *vport;
	u16 num_entries;
	int i;

	/* if success we're done, we're only here if something bad happened */
	if (!ctlq_msg->cookie.mbx.chnl_retval)
		return 0;

	/* make sure at least the struct header is there */
	if (xn->reply_sz < sizeof(*ma_list))
		goto invalid_payload;

	ma_list = ctlq_msg->ctx.indirect.payload->va;
	mac_addr = ma_list->mac_addr_list;
	num_entries = le16_to_cpu(ma_list->num_mac_addr);
	/* we should have received a buffer at least this big */
	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
		goto invalid_payload;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
	if (!vport)
		goto invalid_payload;

	/* index the config by vport index, not by the device's vport id */
	vport_config = adapter->vport_config[vport->idx];
	ma_list_head = &vport_config->user_config.mac_filter_list;

	/* We can't do much to reconcile bad filters at this point, however we
	 * should at least remove them from our list one way or the other so we
	 * have some idea what good filters we have.
	 */
	spin_lock_bh(&vport_config->mac_filter_list_lock);
	list_for_each_entry_safe(f, tmp, ma_list_head, list)
		for (i = 0; i < num_entries; i++)
			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
				list_del(&f->list);
	spin_unlock_bh(&vport_config->mac_filter_list_lock);
	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
			    xn->vc_op);

	return 0;

invalid_payload:
	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
			    xn->vc_op, xn->reply_sz);

	return -EINVAL;
}
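/* Async pattern sketch: a sender marks the transaction asynchronous and
 * attaches this handler, e.g.
 *
 *	xn_params.async = true;
 *	xn_params.async_handler = idpf_mac_filter_async_handler;
 *
 * idpf_vc_xn_exec() then returns once the message is posted, and the
 * handler runs later from mailbox task context when the reply arrives.
 */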
/**
 * idpf_add_del_mac_filters - Add/del mac filters
 * @adapter: adapter pointer used to send virtchnl message
 * @vport_config: persistent vport structure to get the MAC filter list
 * @default_mac_addr: default MAC address to compare with
 * @vport_id: vport identifier used while preparing the virtchnl message
 * @add: Add or delete flag
 * @async: Don't wait for return message
 *
 * Return: 0 on success, error on failure.
 */
int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
			     struct idpf_vport_config *vport_config,
			     const u8 *default_mac_addr, u32 vport_id,
			     bool add, bool async)
{
	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	u32 num_msgs, total_filters = 0;
	struct idpf_mac_filter *f;
	ssize_t reply_sz;
	int i = 0, k;

	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
				VIRTCHNL2_OP_DEL_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = async;
	xn_params.async_handler = idpf_mac_filter_async_handler;

	spin_lock_bh(&vport_config->mac_filter_list_lock);

	/* Find the number of newly added/removed filters */
	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add)
			total_filters++;
		else if (!add && f->remove)
			total_filters++;
	}

	if (!total_filters) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

	/* Fill all the new filters into the virtchnl message */
	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
			   GFP_ATOMIC);
	if (!mac_addr) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
			i++;
			f->add = false;
			if (i == total_filters)
				break;
		}
		if (!add && f->remove) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
			i++;
			f->remove = false;
			if (i == total_filters)
				break;
		}
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	/* Chunk up the filters into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);

	for (i = 0, k = 0; i < num_msgs; i++) {
		u32 entries_size, buf_size, num_entries;

		num_entries = min_t(u32, total_filters,
				    IDPF_NUM_FILTERS_PER_MSG);
		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
		buf_size = struct_size(ma_list, mac_addr_list, num_entries);

		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
			kfree(ma_list);
			ma_list = kzalloc(buf_size, GFP_ATOMIC);
			if (!ma_list)
				return -ENOMEM;
		} else {
			memset(ma_list, 0, buf_size);
		}

		ma_list->vport_id = cpu_to_le32(vport_id);
		ma_list->num_mac_addr = cpu_to_le16(num_entries);
		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);

		xn_params.send_buf.iov_base = ma_list;
		xn_params.send_buf.iov_len = buf_size;
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_entries;
		total_filters -= num_entries;
	}

	return 0;
}

/**
 * idpf_set_promiscuous - set promiscuous and send message to mailbox
 * @adapter: Driver specific private structure
 * @config_data: Vport specific config data
 * @vport_id: Vport identifier
 *
 * Request the device to configure promiscuous mode for the vport. The
 * message is sent asynchronously and does not wait for a response.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_promisc_info vpi;
	ssize_t reply_sz;
	u16 flags = 0;

	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
		flags |= VIRTCHNL2_UNICAST_PROMISC;
	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
		flags |= VIRTCHNL2_MULTICAST_PROMISC;

	vpi.vport_id = cpu_to_le32(vport_id);
	vpi.flags = cpu_to_le16(flags);

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &vpi;
	xn_params.send_buf.iov_len = sizeof(vpi);
	/* setting promiscuous is only ever done asynchronously */
	xn_params.async = true;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
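/* Worked example (illustrative, assuming IDPF_NUM_FILTERS_PER_MSG == 20):
 * with 45 pending filters idpf_add_del_mac_filters() sends
 * DIV_ROUND_UP(45, 20) = 3 messages carrying 20, 20 and 5 entries; the
 * final, smaller message reallocates ma_list at the reduced buffer size
 * instead of reusing the full-size buffer.
 */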
/**
 * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
 * @cdev_info: IDC core device info pointer
 * @send_msg: message to send
 * @msg_size: size of message to send
 * @recv_msg: message to populate on reception of response
 * @recv_len: length of message copied into recv_msg or 0 on error
 *
 * Return: 0 on success or error code on failure.
 */
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
			       u8 *send_msg, u16 msg_size,
			       u8 *recv_msg, u16 *recv_len)
{
	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
	struct idpf_vc_xn_params xn_params = { };
	ssize_t reply_sz;
	u16 recv_size;

	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
	*recv_len = 0;
	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = send_msg;
	xn_params.send_buf.iov_len = msg_size;
	xn_params.recv_buf.iov_base = recv_msg;
	xn_params.recv_buf.iov_len = recv_size;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	*recv_len = reply_sz;

	return 0;
}
EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
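/* Usage sketch (illustrative; req and resp are hypothetical RDMA-driver
 * structures, not part of this file):
 *
 *	u16 len = sizeof(resp);
 *	int err;
 *
 *	err = idpf_idc_rdma_vc_send_sync(cdev_info, (u8 *)&req, sizeof(req),
 *					 (u8 *)&resp, &len);
 *
 * On success, len holds the number of reply bytes copied into resp.
 */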