1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include <net/libeth/rx.h> 5 6 #include "idpf.h" 7 #include "idpf_virtchnl.h" 8 9 #define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 10 #define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) 11 #define IDPF_VC_XN_IDX_M GENMASK(7, 0) 12 #define IDPF_VC_XN_SALT_M GENMASK(15, 8) 13 #define IDPF_VC_XN_RING_LEN U8_MAX 14 15 /** 16 * enum idpf_vc_xn_state - Virtchnl transaction status 17 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used 18 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received 19 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, 20 * buffer updated 21 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there 22 * was an error, buffer not updated 23 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down 24 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the 25 * return context; a callback may be provided to handle 26 * return 27 */ 28 enum idpf_vc_xn_state { 29 IDPF_VC_XN_IDLE = 1, 30 IDPF_VC_XN_WAITING, 31 IDPF_VC_XN_COMPLETED_SUCCESS, 32 IDPF_VC_XN_COMPLETED_FAILED, 33 IDPF_VC_XN_SHUTDOWN, 34 IDPF_VC_XN_ASYNC, 35 }; 36 37 struct idpf_vc_xn; 38 /* Callback for asynchronous messages */ 39 typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, 40 const struct idpf_ctlq_msg *); 41 42 /** 43 * struct idpf_vc_xn - Data structure representing virtchnl transactions 44 * @completed: virtchnl event loop uses that to signal when a reply is 45 * available, uses kernel completion API 46 * @state: virtchnl event loop stores the data below, protected by the 47 * completion's lock. 48 * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be 49 * truncated on its way to the receiver thread according to 50 * reply_buf.iov_len. 51 * @reply: Reference to the buffer(s) where the reply data should be written 52 * to. May be 0-length (then NULL address permitted) if the reply data 53 * should be ignored. 54 * @async_handler: if sent asynchronously, a callback can be provided to handle 55 * the reply when it's received 56 * @vc_op: corresponding opcode sent with this transaction 57 * @idx: index used as retrieval on reply receive, used for cookie 58 * @salt: changed every message to make unique, used for cookie 59 */ 60 struct idpf_vc_xn { 61 struct completion completed; 62 enum idpf_vc_xn_state state; 63 size_t reply_sz; 64 struct kvec reply; 65 async_vc_cb async_handler; 66 u32 vc_op; 67 u8 idx; 68 u8 salt; 69 }; 70 71 /** 72 * struct idpf_vc_xn_params - Parameters for executing transaction 73 * @send_buf: kvec for send buffer 74 * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length 75 * @timeout_ms: timeout to wait for reply 76 * @async: send message asynchronously, will not wait on completion 77 * @async_handler: If sent asynchronously, optional callback handler. The user 78 * must be careful when using async handlers as the memory for 79 * the recv_buf _cannot_ be on stack if this is async. 80 * @vc_op: virtchnl op to send 81 */ 82 struct idpf_vc_xn_params { 83 struct kvec send_buf; 84 struct kvec recv_buf; 85 int timeout_ms; 86 bool async; 87 async_vc_cb async_handler; 88 u32 vc_op; 89 }; 90 91 /** 92 * struct idpf_vc_xn_manager - Manager for tracking transactions 93 * @ring: backing and lookup for transactions 94 * @free_xn_bm: bitmap for free transactions 95 * @xn_bm_lock: make bitmap access synchronous where necessary 96 * @salt: used to make cookie unique every message 97 */ 98 struct idpf_vc_xn_manager { 99 struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN]; 100 DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN); 101 spinlock_t xn_bm_lock; 102 u8 salt; 103 }; 104 105 /** 106 * idpf_vid_to_vport - Translate vport id to vport pointer 107 * @adapter: private data struct 108 * @v_id: vport id to translate 109 * 110 * Returns vport matching v_id, NULL if not found. 111 */ 112 static 113 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id) 114 { 115 u16 num_max_vports = idpf_get_max_vports(adapter); 116 int i; 117 118 for (i = 0; i < num_max_vports; i++) 119 if (adapter->vport_ids[i] == v_id) 120 return adapter->vports[i]; 121 122 return NULL; 123 } 124 125 /** 126 * idpf_handle_event_link - Handle link event message 127 * @adapter: private data struct 128 * @v2e: virtchnl event message 129 */ 130 static void idpf_handle_event_link(struct idpf_adapter *adapter, 131 const struct virtchnl2_event *v2e) 132 { 133 struct idpf_netdev_priv *np; 134 struct idpf_vport *vport; 135 136 vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id)); 137 if (!vport) { 138 dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n", 139 v2e->vport_id); 140 return; 141 } 142 np = netdev_priv(vport->netdev); 143 144 vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); 145 146 if (vport->link_up == v2e->link_status) 147 return; 148 149 vport->link_up = v2e->link_status; 150 151 if (np->state != __IDPF_VPORT_UP) 152 return; 153 154 if (vport->link_up) { 155 netif_tx_start_all_queues(vport->netdev); 156 netif_carrier_on(vport->netdev); 157 } else { 158 netif_tx_stop_all_queues(vport->netdev); 159 netif_carrier_off(vport->netdev); 160 } 161 } 162 163 /** 164 * idpf_recv_event_msg - Receive virtchnl event message 165 * @adapter: Driver specific private structure 166 * @ctlq_msg: message to copy from 167 * 168 * Receive virtchnl event message 169 */ 170 static void idpf_recv_event_msg(struct idpf_adapter *adapter, 171 struct idpf_ctlq_msg *ctlq_msg) 172 { 173 int payload_size = ctlq_msg->ctx.indirect.payload->size; 174 struct virtchnl2_event *v2e; 175 u32 event; 176 177 if (payload_size < sizeof(*v2e)) { 178 dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n", 179 ctlq_msg->cookie.mbx.chnl_opcode, 180 payload_size); 181 return; 182 } 183 184 v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; 185 event = le32_to_cpu(v2e->event); 186 187 switch (event) { 188 case VIRTCHNL2_EVENT_LINK_CHANGE: 189 idpf_handle_event_link(adapter, v2e); 190 return; 191 default: 192 dev_err(&adapter->pdev->dev, 193 "Unknown event %d from PF\n", event); 194 break; 195 } 196 } 197 198 /** 199 * idpf_mb_clean - Reclaim the send mailbox queue entries 200 * @adapter: Driver specific private structure 201 * 202 * Reclaim the send mailbox queue entries to be used to send further messages 203 * 204 * Returns 0 on success, negative on failure 205 */ 206 static int idpf_mb_clean(struct idpf_adapter *adapter) 207 { 208 u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN; 209 struct idpf_ctlq_msg **q_msg; 210 struct idpf_dma_mem *dma_mem; 211 int err; 212 213 q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC); 214 if (!q_msg) 215 return -ENOMEM; 216 217 err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); 218 if (err) 219 goto err_kfree; 220 221 for (i = 0; i < num_q_msg; i++) { 222 if (!q_msg[i]) 223 continue; 224 dma_mem = q_msg[i]->ctx.indirect.payload; 225 if (dma_mem) 226 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, 227 dma_mem->va, dma_mem->pa); 228 kfree(q_msg[i]); 229 kfree(dma_mem); 230 } 231 232 err_kfree: 233 kfree(q_msg); 234 235 return err; 236 } 237 238 /** 239 * idpf_send_mb_msg - Send message over mailbox 240 * @adapter: Driver specific private structure 241 * @op: virtchnl opcode 242 * @msg_size: size of the payload 243 * @msg: pointer to buffer holding the payload 244 * @cookie: unique SW generated cookie per message 245 * 246 * Will prepare the control queue message and initiates the send api 247 * 248 * Returns 0 on success, negative on failure 249 */ 250 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, 251 u16 msg_size, u8 *msg, u16 cookie) 252 { 253 struct idpf_ctlq_msg *ctlq_msg; 254 struct idpf_dma_mem *dma_mem; 255 int err; 256 257 /* If we are here and a reset is detected nothing much can be 258 * done. This thread should silently abort and expected to 259 * be corrected with a new run either by user or driver 260 * flows after reset 261 */ 262 if (idpf_is_reset_detected(adapter)) 263 return 0; 264 265 err = idpf_mb_clean(adapter); 266 if (err) 267 return err; 268 269 ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC); 270 if (!ctlq_msg) 271 return -ENOMEM; 272 273 dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC); 274 if (!dma_mem) { 275 err = -ENOMEM; 276 goto dma_mem_error; 277 } 278 279 ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; 280 ctlq_msg->func_id = 0; 281 ctlq_msg->data_len = msg_size; 282 ctlq_msg->cookie.mbx.chnl_opcode = op; 283 ctlq_msg->cookie.mbx.chnl_retval = 0; 284 dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN; 285 dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size, 286 &dma_mem->pa, GFP_ATOMIC); 287 if (!dma_mem->va) { 288 err = -ENOMEM; 289 goto dma_alloc_error; 290 } 291 292 /* It's possible we're just sending an opcode but no buffer */ 293 if (msg && msg_size) 294 memcpy(dma_mem->va, msg, msg_size); 295 ctlq_msg->ctx.indirect.payload = dma_mem; 296 ctlq_msg->ctx.sw_cookie.data = cookie; 297 298 err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); 299 if (err) 300 goto send_error; 301 302 return 0; 303 304 send_error: 305 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va, 306 dma_mem->pa); 307 dma_alloc_error: 308 kfree(dma_mem); 309 dma_mem_error: 310 kfree(ctlq_msg); 311 312 return err; 313 } 314 315 /* API for virtchnl "transaction" support ("xn" for short). 316 * 317 * We are reusing the completion lock to serialize the accesses to the 318 * transaction state for simplicity, but it could be its own separate synchro 319 * as well. For now, this API is only used from within a workqueue context; 320 * raw_spin_lock() is enough. 321 */ 322 /** 323 * idpf_vc_xn_lock - Request exclusive access to vc transaction 324 * @xn: struct idpf_vc_xn* to access 325 */ 326 #define idpf_vc_xn_lock(xn) \ 327 raw_spin_lock(&(xn)->completed.wait.lock) 328 329 /** 330 * idpf_vc_xn_unlock - Release exclusive access to vc transaction 331 * @xn: struct idpf_vc_xn* to access 332 */ 333 #define idpf_vc_xn_unlock(xn) \ 334 raw_spin_unlock(&(xn)->completed.wait.lock) 335 336 /** 337 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and 338 * reset the transaction state. 339 * @xn: struct idpf_vc_xn to update 340 */ 341 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn) 342 { 343 xn->reply.iov_base = NULL; 344 xn->reply.iov_len = 0; 345 346 if (xn->state != IDPF_VC_XN_SHUTDOWN) 347 xn->state = IDPF_VC_XN_IDLE; 348 } 349 350 /** 351 * idpf_vc_xn_init - Initialize virtchnl transaction object 352 * @vcxn_mngr: pointer to vc transaction manager struct 353 */ 354 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr) 355 { 356 int i; 357 358 spin_lock_init(&vcxn_mngr->xn_bm_lock); 359 360 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { 361 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; 362 363 xn->state = IDPF_VC_XN_IDLE; 364 xn->idx = i; 365 idpf_vc_xn_release_bufs(xn); 366 init_completion(&xn->completed); 367 } 368 369 bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); 370 } 371 372 /** 373 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object 374 * @vcxn_mngr: pointer to vc transaction manager struct 375 * 376 * All waiting threads will be woken-up and their transaction aborted. Further 377 * operations on that object will fail. 378 */ 379 static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) 380 { 381 int i; 382 383 spin_lock_bh(&vcxn_mngr->xn_bm_lock); 384 bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); 385 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); 386 387 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { 388 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; 389 390 idpf_vc_xn_lock(xn); 391 xn->state = IDPF_VC_XN_SHUTDOWN; 392 idpf_vc_xn_release_bufs(xn); 393 idpf_vc_xn_unlock(xn); 394 complete_all(&xn->completed); 395 } 396 } 397 398 /** 399 * idpf_vc_xn_pop_free - Pop a free transaction from free list 400 * @vcxn_mngr: transaction manager to pop from 401 * 402 * Returns NULL if no free transactions 403 */ 404 static 405 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr) 406 { 407 struct idpf_vc_xn *xn = NULL; 408 unsigned long free_idx; 409 410 spin_lock_bh(&vcxn_mngr->xn_bm_lock); 411 free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); 412 if (free_idx == IDPF_VC_XN_RING_LEN) 413 goto do_unlock; 414 415 clear_bit(free_idx, vcxn_mngr->free_xn_bm); 416 xn = &vcxn_mngr->ring[free_idx]; 417 xn->salt = vcxn_mngr->salt++; 418 419 do_unlock: 420 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); 421 422 return xn; 423 } 424 425 /** 426 * idpf_vc_xn_push_free - Push a free transaction to free list 427 * @vcxn_mngr: transaction manager to push to 428 * @xn: transaction to push 429 */ 430 static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr, 431 struct idpf_vc_xn *xn) 432 { 433 idpf_vc_xn_release_bufs(xn); 434 set_bit(xn->idx, vcxn_mngr->free_xn_bm); 435 } 436 437 /** 438 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction 439 * @adapter: driver specific private structure with vcxn_mngr 440 * @params: parameters for this particular transaction including 441 * -vc_op: virtchannel operation to send 442 * -send_buf: kvec iov for send buf and len 443 * -recv_buf: kvec iov for recv buf and len (ignored if NULL) 444 * -timeout_ms: timeout waiting for a reply (milliseconds) 445 * -async: don't wait for message reply, will lose caller context 446 * -async_handler: callback to handle async replies 447 * 448 * @returns >= 0 for success, the size of the initial reply (may or may not be 449 * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for 450 * error. 451 */ 452 static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, 453 const struct idpf_vc_xn_params *params) 454 { 455 const struct kvec *send_buf = ¶ms->send_buf; 456 struct idpf_vc_xn *xn; 457 ssize_t retval; 458 u16 cookie; 459 460 xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); 461 /* no free transactions available */ 462 if (!xn) 463 return -ENOSPC; 464 465 idpf_vc_xn_lock(xn); 466 if (xn->state == IDPF_VC_XN_SHUTDOWN) { 467 retval = -ENXIO; 468 goto only_unlock; 469 } else if (xn->state != IDPF_VC_XN_IDLE) { 470 /* We're just going to clobber this transaction even though 471 * it's not IDLE. If we don't reuse it we could theoretically 472 * eventually leak all the free transactions and not be able to 473 * send any messages. At least this way we make an attempt to 474 * remain functional even though something really bad is 475 * happening that's corrupting what was supposed to be free 476 * transactions. 477 */ 478 WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n", 479 xn->idx, xn->vc_op); 480 } 481 482 xn->reply = params->recv_buf; 483 xn->reply_sz = 0; 484 xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING; 485 xn->vc_op = params->vc_op; 486 xn->async_handler = params->async_handler; 487 idpf_vc_xn_unlock(xn); 488 489 if (!params->async) 490 reinit_completion(&xn->completed); 491 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) | 492 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx); 493 494 retval = idpf_send_mb_msg(adapter, params->vc_op, 495 send_buf->iov_len, send_buf->iov_base, 496 cookie); 497 if (retval) { 498 idpf_vc_xn_lock(xn); 499 goto release_and_unlock; 500 } 501 502 if (params->async) 503 return 0; 504 505 wait_for_completion_timeout(&xn->completed, 506 msecs_to_jiffies(params->timeout_ms)); 507 508 /* No need to check the return value; we check the final state of the 509 * transaction below. It's possible the transaction actually gets more 510 * timeout than specified if we get preempted here but after 511 * wait_for_completion_timeout returns. This should be non-issue 512 * however. 513 */ 514 idpf_vc_xn_lock(xn); 515 switch (xn->state) { 516 case IDPF_VC_XN_SHUTDOWN: 517 retval = -ENXIO; 518 goto only_unlock; 519 case IDPF_VC_XN_WAITING: 520 dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n", 521 params->vc_op, params->timeout_ms); 522 retval = -ETIME; 523 break; 524 case IDPF_VC_XN_COMPLETED_SUCCESS: 525 retval = xn->reply_sz; 526 break; 527 case IDPF_VC_XN_COMPLETED_FAILED: 528 dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n", 529 params->vc_op); 530 retval = -EIO; 531 break; 532 default: 533 /* Invalid state. */ 534 WARN_ON_ONCE(1); 535 retval = -EIO; 536 break; 537 } 538 539 release_and_unlock: 540 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); 541 /* If we receive a VC reply after here, it will be dropped. */ 542 only_unlock: 543 idpf_vc_xn_unlock(xn); 544 545 return retval; 546 } 547 548 /** 549 * idpf_vc_xn_forward_async - Handle async reply receives 550 * @adapter: private data struct 551 * @xn: transaction to handle 552 * @ctlq_msg: corresponding ctlq_msg 553 * 554 * For async sends we're going to lose the caller's context so, if an 555 * async_handler was provided, it can deal with the reply, otherwise we'll just 556 * check and report if there is an error. 557 */ 558 static int 559 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn, 560 const struct idpf_ctlq_msg *ctlq_msg) 561 { 562 int err = 0; 563 564 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { 565 dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", 566 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); 567 xn->reply_sz = 0; 568 err = -EINVAL; 569 goto release_bufs; 570 } 571 572 if (xn->async_handler) { 573 err = xn->async_handler(adapter, xn, ctlq_msg); 574 goto release_bufs; 575 } 576 577 if (ctlq_msg->cookie.mbx.chnl_retval) { 578 xn->reply_sz = 0; 579 dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n", 580 ctlq_msg->cookie.mbx.chnl_opcode); 581 err = -EINVAL; 582 } 583 584 release_bufs: 585 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); 586 587 return err; 588 } 589 590 /** 591 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread 592 * @adapter: driver specific private structure with vcxn_mngr 593 * @ctlq_msg: controlq message to send back to receiving thread 594 */ 595 static int 596 idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, 597 const struct idpf_ctlq_msg *ctlq_msg) 598 { 599 const void *payload = NULL; 600 size_t payload_size = 0; 601 struct idpf_vc_xn *xn; 602 u16 msg_info; 603 int err = 0; 604 u16 xn_idx; 605 u16 salt; 606 607 msg_info = ctlq_msg->ctx.sw_cookie.data; 608 xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info); 609 if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) { 610 dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n", 611 xn_idx); 612 return -EINVAL; 613 } 614 xn = &adapter->vcxn_mngr->ring[xn_idx]; 615 salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); 616 if (xn->salt != salt) { 617 dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n", 618 xn->salt, salt); 619 return -EINVAL; 620 } 621 622 idpf_vc_xn_lock(xn); 623 switch (xn->state) { 624 case IDPF_VC_XN_WAITING: 625 /* success */ 626 break; 627 case IDPF_VC_XN_IDLE: 628 dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n", 629 ctlq_msg->cookie.mbx.chnl_opcode); 630 err = -EINVAL; 631 goto out_unlock; 632 case IDPF_VC_XN_SHUTDOWN: 633 /* ENXIO is a bit special here as the recv msg loop uses that 634 * know if it should stop trying to clean the ring if we lost 635 * the virtchnl. We need to stop playing with registers and 636 * yield. 637 */ 638 err = -ENXIO; 639 goto out_unlock; 640 case IDPF_VC_XN_ASYNC: 641 err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg); 642 idpf_vc_xn_unlock(xn); 643 return err; 644 default: 645 dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n", 646 ctlq_msg->cookie.mbx.chnl_opcode); 647 err = -EBUSY; 648 goto out_unlock; 649 } 650 651 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { 652 dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", 653 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); 654 xn->reply_sz = 0; 655 xn->state = IDPF_VC_XN_COMPLETED_FAILED; 656 err = -EINVAL; 657 goto out_unlock; 658 } 659 660 if (ctlq_msg->cookie.mbx.chnl_retval) { 661 xn->reply_sz = 0; 662 xn->state = IDPF_VC_XN_COMPLETED_FAILED; 663 err = -EINVAL; 664 goto out_unlock; 665 } 666 667 if (ctlq_msg->data_len) { 668 payload = ctlq_msg->ctx.indirect.payload->va; 669 payload_size = ctlq_msg->data_len; 670 } 671 672 xn->reply_sz = payload_size; 673 xn->state = IDPF_VC_XN_COMPLETED_SUCCESS; 674 675 if (xn->reply.iov_base && xn->reply.iov_len && payload_size) 676 memcpy(xn->reply.iov_base, payload, 677 min_t(size_t, xn->reply.iov_len, payload_size)); 678 679 out_unlock: 680 idpf_vc_xn_unlock(xn); 681 /* we _cannot_ hold lock while calling complete */ 682 complete(&xn->completed); 683 684 return err; 685 } 686 687 /** 688 * idpf_recv_mb_msg - Receive message over mailbox 689 * @adapter: Driver specific private structure 690 * 691 * Will receive control queue message and posts the receive buffer. Returns 0 692 * on success and negative on failure. 693 */ 694 int idpf_recv_mb_msg(struct idpf_adapter *adapter) 695 { 696 struct idpf_ctlq_msg ctlq_msg; 697 struct idpf_dma_mem *dma_mem; 698 int post_err, err; 699 u16 num_recv; 700 701 while (1) { 702 /* This will get <= num_recv messages and output how many 703 * actually received on num_recv. 704 */ 705 num_recv = 1; 706 err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg); 707 if (err || !num_recv) 708 break; 709 710 if (ctlq_msg.data_len) { 711 dma_mem = ctlq_msg.ctx.indirect.payload; 712 } else { 713 dma_mem = NULL; 714 num_recv = 0; 715 } 716 717 if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT) 718 idpf_recv_event_msg(adapter, &ctlq_msg); 719 else 720 err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg); 721 722 post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, 723 adapter->hw.arq, 724 &num_recv, &dma_mem); 725 726 /* If post failed clear the only buffer we supplied */ 727 if (post_err) { 728 if (dma_mem) 729 dmam_free_coherent(&adapter->pdev->dev, 730 dma_mem->size, dma_mem->va, 731 dma_mem->pa); 732 break; 733 } 734 735 /* virtchnl trying to shutdown, stop cleaning */ 736 if (err == -ENXIO) 737 break; 738 } 739 740 return err; 741 } 742 743 /** 744 * idpf_wait_for_marker_event - wait for software marker response 745 * @vport: virtual port data structure 746 * 747 * Returns 0 success, negative on failure. 748 **/ 749 static int idpf_wait_for_marker_event(struct idpf_vport *vport) 750 { 751 int event; 752 int i; 753 754 for (i = 0; i < vport->num_txq; i++) 755 idpf_queue_set(SW_MARKER, vport->txqs[i]); 756 757 event = wait_event_timeout(vport->sw_marker_wq, 758 test_and_clear_bit(IDPF_VPORT_SW_MARKER, 759 vport->flags), 760 msecs_to_jiffies(500)); 761 762 for (i = 0; i < vport->num_txq; i++) 763 idpf_queue_clear(POLL_MODE, vport->txqs[i]); 764 765 if (event) 766 return 0; 767 768 dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n"); 769 770 return -ETIMEDOUT; 771 } 772 773 /** 774 * idpf_send_ver_msg - send virtchnl version message 775 * @adapter: Driver specific private structure 776 * 777 * Send virtchnl version message. Returns 0 on success, negative on failure. 778 */ 779 static int idpf_send_ver_msg(struct idpf_adapter *adapter) 780 { 781 struct idpf_vc_xn_params xn_params = {}; 782 struct virtchnl2_version_info vvi; 783 ssize_t reply_sz; 784 u32 major, minor; 785 int err = 0; 786 787 if (adapter->virt_ver_maj) { 788 vvi.major = cpu_to_le32(adapter->virt_ver_maj); 789 vvi.minor = cpu_to_le32(adapter->virt_ver_min); 790 } else { 791 vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR); 792 vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR); 793 } 794 795 xn_params.vc_op = VIRTCHNL2_OP_VERSION; 796 xn_params.send_buf.iov_base = &vvi; 797 xn_params.send_buf.iov_len = sizeof(vvi); 798 xn_params.recv_buf = xn_params.send_buf; 799 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 800 801 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 802 if (reply_sz < 0) 803 return reply_sz; 804 if (reply_sz < sizeof(vvi)) 805 return -EIO; 806 807 major = le32_to_cpu(vvi.major); 808 minor = le32_to_cpu(vvi.minor); 809 810 if (major > IDPF_VIRTCHNL_VERSION_MAJOR) { 811 dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n"); 812 return -EINVAL; 813 } 814 815 if (major == IDPF_VIRTCHNL_VERSION_MAJOR && 816 minor > IDPF_VIRTCHNL_VERSION_MINOR) 817 dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n"); 818 819 /* If we have a mismatch, resend version to update receiver on what 820 * version we will use. 821 */ 822 if (!adapter->virt_ver_maj && 823 major != IDPF_VIRTCHNL_VERSION_MAJOR && 824 minor != IDPF_VIRTCHNL_VERSION_MINOR) 825 err = -EAGAIN; 826 827 adapter->virt_ver_maj = major; 828 adapter->virt_ver_min = minor; 829 830 return err; 831 } 832 833 /** 834 * idpf_send_get_caps_msg - Send virtchnl get capabilities message 835 * @adapter: Driver specific private structure 836 * 837 * Send virtchl get capabilities message. Returns 0 on success, negative on 838 * failure. 839 */ 840 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) 841 { 842 struct virtchnl2_get_capabilities caps = {}; 843 struct idpf_vc_xn_params xn_params = {}; 844 ssize_t reply_sz; 845 846 caps.csum_caps = 847 cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | 848 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP | 849 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | 850 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP | 851 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | 852 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP | 853 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | 854 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | 855 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP | 856 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | 857 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP | 858 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | 859 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP | 860 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | 861 VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL | 862 VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL | 863 VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL | 864 VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL | 865 VIRTCHNL2_CAP_RX_CSUM_GENERIC); 866 867 caps.seg_caps = 868 cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP | 869 VIRTCHNL2_CAP_SEG_IPV4_UDP | 870 VIRTCHNL2_CAP_SEG_IPV4_SCTP | 871 VIRTCHNL2_CAP_SEG_IPV6_TCP | 872 VIRTCHNL2_CAP_SEG_IPV6_UDP | 873 VIRTCHNL2_CAP_SEG_IPV6_SCTP | 874 VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); 875 876 caps.rss_caps = 877 cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | 878 VIRTCHNL2_CAP_RSS_IPV4_UDP | 879 VIRTCHNL2_CAP_RSS_IPV4_SCTP | 880 VIRTCHNL2_CAP_RSS_IPV4_OTHER | 881 VIRTCHNL2_CAP_RSS_IPV6_TCP | 882 VIRTCHNL2_CAP_RSS_IPV6_UDP | 883 VIRTCHNL2_CAP_RSS_IPV6_SCTP | 884 VIRTCHNL2_CAP_RSS_IPV6_OTHER); 885 886 caps.hsplit_caps = 887 cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | 888 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6); 889 890 caps.rsc_caps = 891 cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP | 892 VIRTCHNL2_CAP_RSC_IPV6_TCP); 893 894 caps.other_caps = 895 cpu_to_le64(VIRTCHNL2_CAP_SRIOV | 896 VIRTCHNL2_CAP_MACFILTER | 897 VIRTCHNL2_CAP_SPLITQ_QSCHED | 898 VIRTCHNL2_CAP_PROMISC | 899 VIRTCHNL2_CAP_LOOPBACK); 900 901 xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS; 902 xn_params.send_buf.iov_base = ∩︀ 903 xn_params.send_buf.iov_len = sizeof(caps); 904 xn_params.recv_buf.iov_base = &adapter->caps; 905 xn_params.recv_buf.iov_len = sizeof(adapter->caps); 906 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 907 908 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 909 if (reply_sz < 0) 910 return reply_sz; 911 if (reply_sz < sizeof(adapter->caps)) 912 return -EIO; 913 914 return 0; 915 } 916 917 /** 918 * idpf_vport_alloc_max_qs - Allocate max queues for a vport 919 * @adapter: Driver specific private structure 920 * @max_q: vport max queue structure 921 */ 922 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, 923 struct idpf_vport_max_q *max_q) 924 { 925 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; 926 struct virtchnl2_get_capabilities *caps = &adapter->caps; 927 u16 default_vports = idpf_get_default_vports(adapter); 928 int max_rx_q, max_tx_q; 929 930 mutex_lock(&adapter->queue_lock); 931 932 max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; 933 max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; 934 if (adapter->num_alloc_vports < default_vports) { 935 max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q); 936 max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q); 937 } else { 938 max_q->max_rxq = IDPF_MIN_Q; 939 max_q->max_txq = IDPF_MIN_Q; 940 } 941 max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP; 942 max_q->max_complq = max_q->max_txq; 943 944 if (avail_queues->avail_rxq < max_q->max_rxq || 945 avail_queues->avail_txq < max_q->max_txq || 946 avail_queues->avail_bufq < max_q->max_bufq || 947 avail_queues->avail_complq < max_q->max_complq) { 948 mutex_unlock(&adapter->queue_lock); 949 950 return -EINVAL; 951 } 952 953 avail_queues->avail_rxq -= max_q->max_rxq; 954 avail_queues->avail_txq -= max_q->max_txq; 955 avail_queues->avail_bufq -= max_q->max_bufq; 956 avail_queues->avail_complq -= max_q->max_complq; 957 958 mutex_unlock(&adapter->queue_lock); 959 960 return 0; 961 } 962 963 /** 964 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport 965 * @adapter: Driver specific private structure 966 * @max_q: vport max queue structure 967 */ 968 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, 969 struct idpf_vport_max_q *max_q) 970 { 971 struct idpf_avail_queue_info *avail_queues; 972 973 mutex_lock(&adapter->queue_lock); 974 avail_queues = &adapter->avail_queues; 975 976 avail_queues->avail_rxq += max_q->max_rxq; 977 avail_queues->avail_txq += max_q->max_txq; 978 avail_queues->avail_bufq += max_q->max_bufq; 979 avail_queues->avail_complq += max_q->max_complq; 980 981 mutex_unlock(&adapter->queue_lock); 982 } 983 984 /** 985 * idpf_init_avail_queues - Initialize available queues on the device 986 * @adapter: Driver specific private structure 987 */ 988 static void idpf_init_avail_queues(struct idpf_adapter *adapter) 989 { 990 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; 991 struct virtchnl2_get_capabilities *caps = &adapter->caps; 992 993 avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); 994 avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); 995 avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); 996 avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); 997 } 998 999 /** 1000 * idpf_get_reg_intr_vecs - Get vector queue register offset 1001 * @vport: virtual port structure 1002 * @reg_vals: Register offsets to store in 1003 * 1004 * Returns number of registers that got populated 1005 */ 1006 int idpf_get_reg_intr_vecs(struct idpf_vport *vport, 1007 struct idpf_vec_regs *reg_vals) 1008 { 1009 struct virtchnl2_vector_chunks *chunks; 1010 struct idpf_vec_regs reg_val; 1011 u16 num_vchunks, num_vec; 1012 int num_regs = 0, i, j; 1013 1014 chunks = &vport->adapter->req_vec_chunks->vchunks; 1015 num_vchunks = le16_to_cpu(chunks->num_vchunks); 1016 1017 for (j = 0; j < num_vchunks; j++) { 1018 struct virtchnl2_vector_chunk *chunk; 1019 u32 dynctl_reg_spacing; 1020 u32 itrn_reg_spacing; 1021 1022 chunk = &chunks->vchunks[j]; 1023 num_vec = le16_to_cpu(chunk->num_vectors); 1024 reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); 1025 reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); 1026 reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); 1027 1028 dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); 1029 itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); 1030 1031 for (i = 0; i < num_vec; i++) { 1032 reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg; 1033 reg_vals[num_regs].itrn_reg = reg_val.itrn_reg; 1034 reg_vals[num_regs].itrn_index_spacing = 1035 reg_val.itrn_index_spacing; 1036 1037 reg_val.dyn_ctl_reg += dynctl_reg_spacing; 1038 reg_val.itrn_reg += itrn_reg_spacing; 1039 num_regs++; 1040 } 1041 } 1042 1043 return num_regs; 1044 } 1045 1046 /** 1047 * idpf_vport_get_q_reg - Get the queue registers for the vport 1048 * @reg_vals: register values needing to be set 1049 * @num_regs: amount we expect to fill 1050 * @q_type: queue model 1051 * @chunks: queue regs received over mailbox 1052 * 1053 * This function parses the queue register offsets from the queue register 1054 * chunk information, with a specific queue type and stores it into the array 1055 * passed as an argument. It returns the actual number of queue registers that 1056 * are filled. 1057 */ 1058 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, 1059 struct virtchnl2_queue_reg_chunks *chunks) 1060 { 1061 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 1062 int reg_filled = 0, i; 1063 u32 reg_val; 1064 1065 while (num_chunks--) { 1066 struct virtchnl2_queue_reg_chunk *chunk; 1067 u16 num_q; 1068 1069 chunk = &chunks->chunks[num_chunks]; 1070 if (le32_to_cpu(chunk->type) != q_type) 1071 continue; 1072 1073 num_q = le32_to_cpu(chunk->num_queues); 1074 reg_val = le64_to_cpu(chunk->qtail_reg_start); 1075 for (i = 0; i < num_q && reg_filled < num_regs ; i++) { 1076 reg_vals[reg_filled++] = reg_val; 1077 reg_val += le32_to_cpu(chunk->qtail_reg_spacing); 1078 } 1079 } 1080 1081 return reg_filled; 1082 } 1083 1084 /** 1085 * __idpf_queue_reg_init - initialize queue registers 1086 * @vport: virtual port structure 1087 * @reg_vals: registers we are initializing 1088 * @num_regs: how many registers there are in total 1089 * @q_type: queue model 1090 * 1091 * Return number of queues that are initialized 1092 */ 1093 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, 1094 int num_regs, u32 q_type) 1095 { 1096 struct idpf_adapter *adapter = vport->adapter; 1097 int i, j, k = 0; 1098 1099 switch (q_type) { 1100 case VIRTCHNL2_QUEUE_TYPE_TX: 1101 for (i = 0; i < vport->num_txq_grp; i++) { 1102 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1103 1104 for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) 1105 tx_qgrp->txqs[j]->tail = 1106 idpf_get_reg_addr(adapter, reg_vals[k]); 1107 } 1108 break; 1109 case VIRTCHNL2_QUEUE_TYPE_RX: 1110 for (i = 0; i < vport->num_rxq_grp; i++) { 1111 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1112 u16 num_rxq = rx_qgrp->singleq.num_rxq; 1113 1114 for (j = 0; j < num_rxq && k < num_regs; j++, k++) { 1115 struct idpf_rx_queue *q; 1116 1117 q = rx_qgrp->singleq.rxqs[j]; 1118 q->tail = idpf_get_reg_addr(adapter, 1119 reg_vals[k]); 1120 } 1121 } 1122 break; 1123 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 1124 for (i = 0; i < vport->num_rxq_grp; i++) { 1125 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1126 u8 num_bufqs = vport->num_bufqs_per_qgrp; 1127 1128 for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { 1129 struct idpf_buf_queue *q; 1130 1131 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1132 q->tail = idpf_get_reg_addr(adapter, 1133 reg_vals[k]); 1134 } 1135 } 1136 break; 1137 default: 1138 break; 1139 } 1140 1141 return k; 1142 } 1143 1144 /** 1145 * idpf_queue_reg_init - initialize queue registers 1146 * @vport: virtual port structure 1147 * 1148 * Return 0 on success, negative on failure 1149 */ 1150 int idpf_queue_reg_init(struct idpf_vport *vport) 1151 { 1152 struct virtchnl2_create_vport *vport_params; 1153 struct virtchnl2_queue_reg_chunks *chunks; 1154 struct idpf_vport_config *vport_config; 1155 u16 vport_idx = vport->idx; 1156 int num_regs, ret = 0; 1157 u32 *reg_vals; 1158 1159 /* We may never deal with more than 256 same type of queues */ 1160 reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL); 1161 if (!reg_vals) 1162 return -ENOMEM; 1163 1164 vport_config = vport->adapter->vport_config[vport_idx]; 1165 if (vport_config->req_qs_chunks) { 1166 struct virtchnl2_add_queues *vc_aq = 1167 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 1168 chunks = &vc_aq->chunks; 1169 } else { 1170 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 1171 chunks = &vport_params->chunks; 1172 } 1173 1174 /* Initialize Tx queue tail register address */ 1175 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1176 VIRTCHNL2_QUEUE_TYPE_TX, 1177 chunks); 1178 if (num_regs < vport->num_txq) { 1179 ret = -EINVAL; 1180 goto free_reg_vals; 1181 } 1182 1183 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1184 VIRTCHNL2_QUEUE_TYPE_TX); 1185 if (num_regs < vport->num_txq) { 1186 ret = -EINVAL; 1187 goto free_reg_vals; 1188 } 1189 1190 /* Initialize Rx/buffer queue tail register address based on Rx queue 1191 * model 1192 */ 1193 if (idpf_is_queue_model_split(vport->rxq_model)) { 1194 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1195 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, 1196 chunks); 1197 if (num_regs < vport->num_bufq) { 1198 ret = -EINVAL; 1199 goto free_reg_vals; 1200 } 1201 1202 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1203 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1204 if (num_regs < vport->num_bufq) { 1205 ret = -EINVAL; 1206 goto free_reg_vals; 1207 } 1208 } else { 1209 num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, 1210 VIRTCHNL2_QUEUE_TYPE_RX, 1211 chunks); 1212 if (num_regs < vport->num_rxq) { 1213 ret = -EINVAL; 1214 goto free_reg_vals; 1215 } 1216 1217 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, 1218 VIRTCHNL2_QUEUE_TYPE_RX); 1219 if (num_regs < vport->num_rxq) { 1220 ret = -EINVAL; 1221 goto free_reg_vals; 1222 } 1223 } 1224 1225 free_reg_vals: 1226 kfree(reg_vals); 1227 1228 return ret; 1229 } 1230 1231 /** 1232 * idpf_send_create_vport_msg - Send virtchnl create vport message 1233 * @adapter: Driver specific private structure 1234 * @max_q: vport max queue info 1235 * 1236 * send virtchnl creae vport message 1237 * 1238 * Returns 0 on success, negative on failure 1239 */ 1240 int idpf_send_create_vport_msg(struct idpf_adapter *adapter, 1241 struct idpf_vport_max_q *max_q) 1242 { 1243 struct virtchnl2_create_vport *vport_msg; 1244 struct idpf_vc_xn_params xn_params = {}; 1245 u16 idx = adapter->next_vport; 1246 int err, buf_size; 1247 ssize_t reply_sz; 1248 1249 buf_size = sizeof(struct virtchnl2_create_vport); 1250 if (!adapter->vport_params_reqd[idx]) { 1251 adapter->vport_params_reqd[idx] = kzalloc(buf_size, 1252 GFP_KERNEL); 1253 if (!adapter->vport_params_reqd[idx]) 1254 return -ENOMEM; 1255 } 1256 1257 vport_msg = adapter->vport_params_reqd[idx]; 1258 vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); 1259 vport_msg->vport_index = cpu_to_le16(idx); 1260 1261 if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) 1262 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1263 else 1264 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1265 1266 if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) 1267 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1268 else 1269 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1270 1271 err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q); 1272 if (err) { 1273 dev_err(&adapter->pdev->dev, "Enough queues are not available"); 1274 1275 return err; 1276 } 1277 1278 if (!adapter->vport_params_recvd[idx]) { 1279 adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, 1280 GFP_KERNEL); 1281 if (!adapter->vport_params_recvd[idx]) { 1282 err = -ENOMEM; 1283 goto free_vport_params; 1284 } 1285 } 1286 1287 xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT; 1288 xn_params.send_buf.iov_base = vport_msg; 1289 xn_params.send_buf.iov_len = buf_size; 1290 xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx]; 1291 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 1292 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1293 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 1294 if (reply_sz < 0) { 1295 err = reply_sz; 1296 goto free_vport_params; 1297 } 1298 1299 return 0; 1300 1301 free_vport_params: 1302 kfree(adapter->vport_params_recvd[idx]); 1303 adapter->vport_params_recvd[idx] = NULL; 1304 kfree(adapter->vport_params_reqd[idx]); 1305 adapter->vport_params_reqd[idx] = NULL; 1306 1307 return err; 1308 } 1309 1310 /** 1311 * idpf_check_supported_desc_ids - Verify we have required descriptor support 1312 * @vport: virtual port structure 1313 * 1314 * Return 0 on success, error on failure 1315 */ 1316 int idpf_check_supported_desc_ids(struct idpf_vport *vport) 1317 { 1318 struct idpf_adapter *adapter = vport->adapter; 1319 struct virtchnl2_create_vport *vport_msg; 1320 u64 rx_desc_ids, tx_desc_ids; 1321 1322 vport_msg = adapter->vport_params_recvd[vport->idx]; 1323 1324 if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) && 1325 (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || 1326 vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { 1327 pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); 1328 return -EOPNOTSUPP; 1329 } 1330 1331 rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); 1332 tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); 1333 1334 if (idpf_is_queue_model_split(vport->rxq_model)) { 1335 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { 1336 dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); 1337 vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1338 } 1339 } else { 1340 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) 1341 vport->base_rxd = true; 1342 } 1343 1344 if (!idpf_is_queue_model_split(vport->txq_model)) 1345 return 0; 1346 1347 if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { 1348 dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); 1349 vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); 1350 } 1351 1352 return 0; 1353 } 1354 1355 /** 1356 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message 1357 * @vport: virtual port data structure 1358 * 1359 * Send virtchnl destroy vport message. Returns 0 on success, negative on 1360 * failure. 1361 */ 1362 int idpf_send_destroy_vport_msg(struct idpf_vport *vport) 1363 { 1364 struct idpf_vc_xn_params xn_params = {}; 1365 struct virtchnl2_vport v_id; 1366 ssize_t reply_sz; 1367 1368 v_id.vport_id = cpu_to_le32(vport->vport_id); 1369 1370 xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT; 1371 xn_params.send_buf.iov_base = &v_id; 1372 xn_params.send_buf.iov_len = sizeof(v_id); 1373 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1374 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1375 1376 return reply_sz < 0 ? reply_sz : 0; 1377 } 1378 1379 /** 1380 * idpf_send_enable_vport_msg - Send virtchnl enable vport message 1381 * @vport: virtual port data structure 1382 * 1383 * Send enable vport virtchnl message. Returns 0 on success, negative on 1384 * failure. 1385 */ 1386 int idpf_send_enable_vport_msg(struct idpf_vport *vport) 1387 { 1388 struct idpf_vc_xn_params xn_params = {}; 1389 struct virtchnl2_vport v_id; 1390 ssize_t reply_sz; 1391 1392 v_id.vport_id = cpu_to_le32(vport->vport_id); 1393 1394 xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT; 1395 xn_params.send_buf.iov_base = &v_id; 1396 xn_params.send_buf.iov_len = sizeof(v_id); 1397 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1398 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1399 1400 return reply_sz < 0 ? reply_sz : 0; 1401 } 1402 1403 /** 1404 * idpf_send_disable_vport_msg - Send virtchnl disable vport message 1405 * @vport: virtual port data structure 1406 * 1407 * Send disable vport virtchnl message. Returns 0 on success, negative on 1408 * failure. 1409 */ 1410 int idpf_send_disable_vport_msg(struct idpf_vport *vport) 1411 { 1412 struct idpf_vc_xn_params xn_params = {}; 1413 struct virtchnl2_vport v_id; 1414 ssize_t reply_sz; 1415 1416 v_id.vport_id = cpu_to_le32(vport->vport_id); 1417 1418 xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT; 1419 xn_params.send_buf.iov_base = &v_id; 1420 xn_params.send_buf.iov_len = sizeof(v_id); 1421 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1422 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1423 1424 return reply_sz < 0 ? reply_sz : 0; 1425 } 1426 1427 /** 1428 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message 1429 * @vport: virtual port data structure 1430 * 1431 * Send config tx queues virtchnl message. Returns 0 on success, negative on 1432 * failure. 1433 */ 1434 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) 1435 { 1436 struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL; 1437 struct virtchnl2_txq_info *qi __free(kfree) = NULL; 1438 struct idpf_vc_xn_params xn_params = {}; 1439 u32 config_sz, chunk_sz, buf_sz; 1440 int totqs, num_msgs, num_chunks; 1441 ssize_t reply_sz; 1442 int i, k = 0; 1443 1444 totqs = vport->num_txq + vport->num_complq; 1445 qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); 1446 if (!qi) 1447 return -ENOMEM; 1448 1449 /* Populate the queue info buffer with all queue context info */ 1450 for (i = 0; i < vport->num_txq_grp; i++) { 1451 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1452 int j, sched_mode; 1453 1454 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1455 qi[k].queue_id = 1456 cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1457 qi[k].model = 1458 cpu_to_le16(vport->txq_model); 1459 qi[k].type = 1460 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1461 qi[k].ring_len = 1462 cpu_to_le16(tx_qgrp->txqs[j]->desc_count); 1463 qi[k].dma_ring_addr = 1464 cpu_to_le64(tx_qgrp->txqs[j]->dma); 1465 if (idpf_is_queue_model_split(vport->txq_model)) { 1466 struct idpf_tx_queue *q = tx_qgrp->txqs[j]; 1467 1468 qi[k].tx_compl_queue_id = 1469 cpu_to_le16(tx_qgrp->complq->q_id); 1470 qi[k].relative_queue_id = cpu_to_le16(j); 1471 1472 if (idpf_queue_has(FLOW_SCH_EN, q)) 1473 qi[k].sched_mode = 1474 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); 1475 else 1476 qi[k].sched_mode = 1477 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1478 } else { 1479 qi[k].sched_mode = 1480 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1481 } 1482 } 1483 1484 if (!idpf_is_queue_model_split(vport->txq_model)) 1485 continue; 1486 1487 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1488 qi[k].model = cpu_to_le16(vport->txq_model); 1489 qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); 1490 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); 1491 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); 1492 1493 if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) 1494 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; 1495 else 1496 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; 1497 qi[k].sched_mode = cpu_to_le16(sched_mode); 1498 1499 k++; 1500 } 1501 1502 /* Make sure accounting agrees */ 1503 if (k != totqs) 1504 return -EINVAL; 1505 1506 /* Chunk up the queue contexts into multiple messages to avoid 1507 * sending a control queue message buffer that is too large 1508 */ 1509 config_sz = sizeof(struct virtchnl2_config_tx_queues); 1510 chunk_sz = sizeof(struct virtchnl2_txq_info); 1511 1512 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1513 totqs); 1514 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1515 1516 buf_sz = struct_size(ctq, qinfo, num_chunks); 1517 ctq = kzalloc(buf_sz, GFP_KERNEL); 1518 if (!ctq) 1519 return -ENOMEM; 1520 1521 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES; 1522 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1523 1524 for (i = 0, k = 0; i < num_msgs; i++) { 1525 memset(ctq, 0, buf_sz); 1526 ctq->vport_id = cpu_to_le32(vport->vport_id); 1527 ctq->num_qinfo = cpu_to_le16(num_chunks); 1528 memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); 1529 1530 xn_params.send_buf.iov_base = ctq; 1531 xn_params.send_buf.iov_len = buf_sz; 1532 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1533 if (reply_sz < 0) 1534 return reply_sz; 1535 1536 k += num_chunks; 1537 totqs -= num_chunks; 1538 num_chunks = min(num_chunks, totqs); 1539 /* Recalculate buffer size */ 1540 buf_sz = struct_size(ctq, qinfo, num_chunks); 1541 } 1542 1543 return 0; 1544 } 1545 1546 /** 1547 * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message 1548 * @vport: virtual port data structure 1549 * 1550 * Send config rx queues virtchnl message. Returns 0 on success, negative on 1551 * failure. 1552 */ 1553 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) 1554 { 1555 struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL; 1556 struct virtchnl2_rxq_info *qi __free(kfree) = NULL; 1557 struct idpf_vc_xn_params xn_params = {}; 1558 u32 config_sz, chunk_sz, buf_sz; 1559 int totqs, num_msgs, num_chunks; 1560 ssize_t reply_sz; 1561 int i, k = 0; 1562 1563 totqs = vport->num_rxq + vport->num_bufq; 1564 qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); 1565 if (!qi) 1566 return -ENOMEM; 1567 1568 /* Populate the queue info buffer with all queue context info */ 1569 for (i = 0; i < vport->num_rxq_grp; i++) { 1570 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1571 u16 num_rxq; 1572 int j; 1573 1574 if (!idpf_is_queue_model_split(vport->rxq_model)) 1575 goto setup_rxqs; 1576 1577 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1578 struct idpf_buf_queue *bufq = 1579 &rx_qgrp->splitq.bufq_sets[j].bufq; 1580 1581 qi[k].queue_id = cpu_to_le32(bufq->q_id); 1582 qi[k].model = cpu_to_le16(vport->rxq_model); 1583 qi[k].type = 1584 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1585 qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1586 qi[k].ring_len = cpu_to_le16(bufq->desc_count); 1587 qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); 1588 qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); 1589 qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE; 1590 qi[k].rx_buffer_low_watermark = 1591 cpu_to_le16(bufq->rx_buffer_low_watermark); 1592 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1593 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1594 } 1595 1596 setup_rxqs: 1597 if (idpf_is_queue_model_split(vport->rxq_model)) 1598 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1599 else 1600 num_rxq = rx_qgrp->singleq.num_rxq; 1601 1602 for (j = 0; j < num_rxq; j++, k++) { 1603 const struct idpf_bufq_set *sets; 1604 struct idpf_rx_queue *rxq; 1605 1606 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1607 rxq = rx_qgrp->singleq.rxqs[j]; 1608 goto common_qi_fields; 1609 } 1610 1611 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1612 sets = rxq->bufq_sets; 1613 1614 /* In splitq mode, RXQ buffer size should be 1615 * set to that of the first buffer queue 1616 * associated with this RXQ. 1617 */ 1618 rxq->rx_buf_size = sets[0].bufq.rx_buf_size; 1619 1620 qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); 1621 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { 1622 qi[k].bufq2_ena = IDPF_BUFQ2_ENA; 1623 qi[k].rx_bufq2_id = 1624 cpu_to_le16(sets[1].bufq.q_id); 1625 } 1626 qi[k].rx_buffer_low_watermark = 1627 cpu_to_le16(rxq->rx_buffer_low_watermark); 1628 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1629 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1630 1631 rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; 1632 1633 if (idpf_queue_has(HSPLIT_EN, rxq)) { 1634 qi[k].qflags |= 1635 cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1636 qi[k].hdr_buffer_size = 1637 cpu_to_le16(rxq->rx_hbuf_size); 1638 } 1639 1640 common_qi_fields: 1641 qi[k].queue_id = cpu_to_le32(rxq->q_id); 1642 qi[k].model = cpu_to_le16(vport->rxq_model); 1643 qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1644 qi[k].ring_len = cpu_to_le16(rxq->desc_count); 1645 qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); 1646 qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); 1647 qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); 1648 qi[k].qflags |= 1649 cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); 1650 qi[k].desc_ids = cpu_to_le64(rxq->rxdids); 1651 } 1652 } 1653 1654 /* Make sure accounting agrees */ 1655 if (k != totqs) 1656 return -EINVAL; 1657 1658 /* Chunk up the queue contexts into multiple messages to avoid 1659 * sending a control queue message buffer that is too large 1660 */ 1661 config_sz = sizeof(struct virtchnl2_config_rx_queues); 1662 chunk_sz = sizeof(struct virtchnl2_rxq_info); 1663 1664 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1665 totqs); 1666 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1667 1668 buf_sz = struct_size(crq, qinfo, num_chunks); 1669 crq = kzalloc(buf_sz, GFP_KERNEL); 1670 if (!crq) 1671 return -ENOMEM; 1672 1673 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES; 1674 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1675 1676 for (i = 0, k = 0; i < num_msgs; i++) { 1677 memset(crq, 0, buf_sz); 1678 crq->vport_id = cpu_to_le32(vport->vport_id); 1679 crq->num_qinfo = cpu_to_le16(num_chunks); 1680 memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); 1681 1682 xn_params.send_buf.iov_base = crq; 1683 xn_params.send_buf.iov_len = buf_sz; 1684 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1685 if (reply_sz < 0) 1686 return reply_sz; 1687 1688 k += num_chunks; 1689 totqs -= num_chunks; 1690 num_chunks = min(num_chunks, totqs); 1691 /* Recalculate buffer size */ 1692 buf_sz = struct_size(crq, qinfo, num_chunks); 1693 } 1694 1695 return 0; 1696 } 1697 1698 /** 1699 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable 1700 * queues message 1701 * @vport: virtual port data structure 1702 * @ena: if true enable, false disable 1703 * 1704 * Send enable or disable queues virtchnl message. Returns 0 on success, 1705 * negative on failure. 1706 */ 1707 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) 1708 { 1709 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 1710 struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; 1711 u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; 1712 struct idpf_vc_xn_params xn_params = {}; 1713 struct virtchnl2_queue_chunks *qcs; 1714 u32 config_sz, chunk_sz, buf_sz; 1715 ssize_t reply_sz; 1716 int i, j, k = 0; 1717 1718 num_txq = vport->num_txq + vport->num_complq; 1719 num_rxq = vport->num_rxq + vport->num_bufq; 1720 num_q = num_txq + num_rxq; 1721 buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; 1722 qc = kzalloc(buf_sz, GFP_KERNEL); 1723 if (!qc) 1724 return -ENOMEM; 1725 1726 for (i = 0; i < vport->num_txq_grp; i++) { 1727 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1728 1729 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1730 qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1731 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1732 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1733 } 1734 } 1735 if (vport->num_txq != k) 1736 return -EINVAL; 1737 1738 if (!idpf_is_queue_model_split(vport->txq_model)) 1739 goto setup_rx; 1740 1741 for (i = 0; i < vport->num_txq_grp; i++, k++) { 1742 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1743 1744 qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); 1745 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1746 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1747 } 1748 if (vport->num_complq != (k - vport->num_txq)) 1749 return -EINVAL; 1750 1751 setup_rx: 1752 for (i = 0; i < vport->num_rxq_grp; i++) { 1753 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1754 1755 if (idpf_is_queue_model_split(vport->rxq_model)) 1756 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1757 else 1758 num_rxq = rx_qgrp->singleq.num_rxq; 1759 1760 for (j = 0; j < num_rxq; j++, k++) { 1761 if (idpf_is_queue_model_split(vport->rxq_model)) { 1762 qc[k].start_queue_id = 1763 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); 1764 qc[k].type = 1765 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1766 } else { 1767 qc[k].start_queue_id = 1768 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); 1769 qc[k].type = 1770 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1771 } 1772 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1773 } 1774 } 1775 if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) 1776 return -EINVAL; 1777 1778 if (!idpf_is_queue_model_split(vport->rxq_model)) 1779 goto send_msg; 1780 1781 for (i = 0; i < vport->num_rxq_grp; i++) { 1782 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1783 1784 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1785 const struct idpf_buf_queue *q; 1786 1787 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1788 qc[k].type = 1789 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); 1790 qc[k].start_queue_id = cpu_to_le32(q->q_id); 1791 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1792 } 1793 } 1794 if (vport->num_bufq != k - (vport->num_txq + 1795 vport->num_complq + 1796 vport->num_rxq)) 1797 return -EINVAL; 1798 1799 send_msg: 1800 /* Chunk up the queue info into multiple messages */ 1801 config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); 1802 chunk_sz = sizeof(struct virtchnl2_queue_chunk); 1803 1804 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1805 num_q); 1806 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 1807 1808 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1809 eq = kzalloc(buf_sz, GFP_KERNEL); 1810 if (!eq) 1811 return -ENOMEM; 1812 1813 if (ena) { 1814 xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES; 1815 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1816 } else { 1817 xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES; 1818 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1819 } 1820 1821 for (i = 0, k = 0; i < num_msgs; i++) { 1822 memset(eq, 0, buf_sz); 1823 eq->vport_id = cpu_to_le32(vport->vport_id); 1824 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 1825 qcs = &eq->chunks; 1826 memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); 1827 1828 xn_params.send_buf.iov_base = eq; 1829 xn_params.send_buf.iov_len = buf_sz; 1830 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1831 if (reply_sz < 0) 1832 return reply_sz; 1833 1834 k += num_chunks; 1835 num_q -= num_chunks; 1836 num_chunks = min(num_chunks, num_q); 1837 /* Recalculate buffer size */ 1838 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1839 } 1840 1841 return 0; 1842 } 1843 1844 /** 1845 * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue 1846 * vector message 1847 * @vport: virtual port data structure 1848 * @map: true for map and false for unmap 1849 * 1850 * Send map or unmap queue vector virtchnl message. Returns 0 on success, 1851 * negative on failure. 1852 */ 1853 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) 1854 { 1855 struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL; 1856 struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; 1857 struct idpf_vc_xn_params xn_params = {}; 1858 u32 config_sz, chunk_sz, buf_sz; 1859 u32 num_msgs, num_chunks, num_q; 1860 ssize_t reply_sz; 1861 int i, j, k = 0; 1862 1863 num_q = vport->num_txq + vport->num_rxq; 1864 1865 buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; 1866 vqv = kzalloc(buf_sz, GFP_KERNEL); 1867 if (!vqv) 1868 return -ENOMEM; 1869 1870 for (i = 0; i < vport->num_txq_grp; i++) { 1871 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1872 1873 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1874 vqv[k].queue_type = 1875 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); 1876 vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1877 1878 if (idpf_is_queue_model_split(vport->txq_model)) { 1879 vqv[k].vector_id = 1880 cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); 1881 vqv[k].itr_idx = 1882 cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); 1883 } else { 1884 vqv[k].vector_id = 1885 cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); 1886 vqv[k].itr_idx = 1887 cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); 1888 } 1889 } 1890 } 1891 1892 if (vport->num_txq != k) 1893 return -EINVAL; 1894 1895 for (i = 0; i < vport->num_rxq_grp; i++) { 1896 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1897 u16 num_rxq; 1898 1899 if (idpf_is_queue_model_split(vport->rxq_model)) 1900 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1901 else 1902 num_rxq = rx_qgrp->singleq.num_rxq; 1903 1904 for (j = 0; j < num_rxq; j++, k++) { 1905 struct idpf_rx_queue *rxq; 1906 1907 if (idpf_is_queue_model_split(vport->rxq_model)) 1908 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1909 else 1910 rxq = rx_qgrp->singleq.rxqs[j]; 1911 1912 vqv[k].queue_type = 1913 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); 1914 vqv[k].queue_id = cpu_to_le32(rxq->q_id); 1915 vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); 1916 vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); 1917 } 1918 } 1919 1920 if (idpf_is_queue_model_split(vport->txq_model)) { 1921 if (vport->num_rxq != k - vport->num_complq) 1922 return -EINVAL; 1923 } else { 1924 if (vport->num_rxq != k - vport->num_txq) 1925 return -EINVAL; 1926 } 1927 1928 /* Chunk up the vector info into multiple messages */ 1929 config_sz = sizeof(struct virtchnl2_queue_vector_maps); 1930 chunk_sz = sizeof(struct virtchnl2_queue_vector); 1931 1932 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1933 num_q); 1934 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 1935 1936 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 1937 vqvm = kzalloc(buf_sz, GFP_KERNEL); 1938 if (!vqvm) 1939 return -ENOMEM; 1940 1941 if (map) { 1942 xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR; 1943 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 1944 } else { 1945 xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR; 1946 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 1947 } 1948 1949 for (i = 0, k = 0; i < num_msgs; i++) { 1950 memset(vqvm, 0, buf_sz); 1951 xn_params.send_buf.iov_base = vqvm; 1952 xn_params.send_buf.iov_len = buf_sz; 1953 vqvm->vport_id = cpu_to_le32(vport->vport_id); 1954 vqvm->num_qv_maps = cpu_to_le16(num_chunks); 1955 memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); 1956 1957 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 1958 if (reply_sz < 0) 1959 return reply_sz; 1960 1961 k += num_chunks; 1962 num_q -= num_chunks; 1963 num_chunks = min(num_chunks, num_q); 1964 /* Recalculate buffer size */ 1965 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 1966 } 1967 1968 return 0; 1969 } 1970 1971 /** 1972 * idpf_send_enable_queues_msg - send enable queues virtchnl message 1973 * @vport: Virtual port private data structure 1974 * 1975 * Will send enable queues virtchnl message. Returns 0 on success, negative on 1976 * failure. 1977 */ 1978 int idpf_send_enable_queues_msg(struct idpf_vport *vport) 1979 { 1980 return idpf_send_ena_dis_queues_msg(vport, true); 1981 } 1982 1983 /** 1984 * idpf_send_disable_queues_msg - send disable queues virtchnl message 1985 * @vport: Virtual port private data structure 1986 * 1987 * Will send disable queues virtchnl message. Returns 0 on success, negative 1988 * on failure. 1989 */ 1990 int idpf_send_disable_queues_msg(struct idpf_vport *vport) 1991 { 1992 int err, i; 1993 1994 err = idpf_send_ena_dis_queues_msg(vport, false); 1995 if (err) 1996 return err; 1997 1998 /* switch to poll mode as interrupts will be disabled after disable 1999 * queues virtchnl message is sent 2000 */ 2001 for (i = 0; i < vport->num_txq; i++) 2002 idpf_queue_set(POLL_MODE, vport->txqs[i]); 2003 2004 /* schedule the napi to receive all the marker packets */ 2005 local_bh_disable(); 2006 for (i = 0; i < vport->num_q_vectors; i++) 2007 napi_schedule(&vport->q_vectors[i].napi); 2008 local_bh_enable(); 2009 2010 return idpf_wait_for_marker_event(vport); 2011 } 2012 2013 /** 2014 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right 2015 * structure 2016 * @dchunks: Destination chunks to store data to 2017 * @schunks: Source chunks to copy data from 2018 * @num_chunks: number of chunks to copy 2019 */ 2020 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, 2021 struct virtchnl2_queue_reg_chunk *schunks, 2022 u16 num_chunks) 2023 { 2024 u16 i; 2025 2026 for (i = 0; i < num_chunks; i++) { 2027 dchunks[i].type = schunks[i].type; 2028 dchunks[i].start_queue_id = schunks[i].start_queue_id; 2029 dchunks[i].num_queues = schunks[i].num_queues; 2030 } 2031 } 2032 2033 /** 2034 * idpf_send_delete_queues_msg - send delete queues virtchnl message 2035 * @vport: Virtual port private data structure 2036 * 2037 * Will send delete queues virtchnl message. Return 0 on success, negative on 2038 * failure. 2039 */ 2040 int idpf_send_delete_queues_msg(struct idpf_vport *vport) 2041 { 2042 struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; 2043 struct virtchnl2_create_vport *vport_params; 2044 struct virtchnl2_queue_reg_chunks *chunks; 2045 struct idpf_vc_xn_params xn_params = {}; 2046 struct idpf_vport_config *vport_config; 2047 u16 vport_idx = vport->idx; 2048 ssize_t reply_sz; 2049 u16 num_chunks; 2050 int buf_size; 2051 2052 vport_config = vport->adapter->vport_config[vport_idx]; 2053 if (vport_config->req_qs_chunks) { 2054 chunks = &vport_config->req_qs_chunks->chunks; 2055 } else { 2056 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 2057 chunks = &vport_params->chunks; 2058 } 2059 2060 num_chunks = le16_to_cpu(chunks->num_chunks); 2061 buf_size = struct_size(eq, chunks.chunks, num_chunks); 2062 2063 eq = kzalloc(buf_size, GFP_KERNEL); 2064 if (!eq) 2065 return -ENOMEM; 2066 2067 eq->vport_id = cpu_to_le32(vport->vport_id); 2068 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2069 2070 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, 2071 num_chunks); 2072 2073 xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; 2074 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 2075 xn_params.send_buf.iov_base = eq; 2076 xn_params.send_buf.iov_len = buf_size; 2077 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2078 2079 return reply_sz < 0 ? reply_sz : 0; 2080 } 2081 2082 /** 2083 * idpf_send_config_queues_msg - Send config queues virtchnl message 2084 * @vport: Virtual port private data structure 2085 * 2086 * Will send config queues virtchnl message. Returns 0 on success, negative on 2087 * failure. 2088 */ 2089 int idpf_send_config_queues_msg(struct idpf_vport *vport) 2090 { 2091 int err; 2092 2093 err = idpf_send_config_tx_queues_msg(vport); 2094 if (err) 2095 return err; 2096 2097 return idpf_send_config_rx_queues_msg(vport); 2098 } 2099 2100 /** 2101 * idpf_send_add_queues_msg - Send virtchnl add queues message 2102 * @vport: Virtual port private data structure 2103 * @num_tx_q: number of transmit queues 2104 * @num_complq: number of transmit completion queues 2105 * @num_rx_q: number of receive queues 2106 * @num_rx_bufq: number of receive buffer queues 2107 * 2108 * Returns 0 on success, negative on failure. vport _MUST_ be const here as 2109 * we should not change any fields within vport itself in this function. 2110 */ 2111 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, 2112 u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) 2113 { 2114 struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; 2115 struct idpf_vc_xn_params xn_params = {}; 2116 struct idpf_vport_config *vport_config; 2117 struct virtchnl2_add_queues aq = {}; 2118 u16 vport_idx = vport->idx; 2119 ssize_t reply_sz; 2120 int size; 2121 2122 vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2123 if (!vc_msg) 2124 return -ENOMEM; 2125 2126 vport_config = vport->adapter->vport_config[vport_idx]; 2127 kfree(vport_config->req_qs_chunks); 2128 vport_config->req_qs_chunks = NULL; 2129 2130 aq.vport_id = cpu_to_le32(vport->vport_id); 2131 aq.num_tx_q = cpu_to_le16(num_tx_q); 2132 aq.num_tx_complq = cpu_to_le16(num_complq); 2133 aq.num_rx_q = cpu_to_le16(num_rx_q); 2134 aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); 2135 2136 xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; 2137 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2138 xn_params.send_buf.iov_base = &aq; 2139 xn_params.send_buf.iov_len = sizeof(aq); 2140 xn_params.recv_buf.iov_base = vc_msg; 2141 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2142 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2143 if (reply_sz < 0) 2144 return reply_sz; 2145 2146 /* compare vc_msg num queues with vport num queues */ 2147 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || 2148 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || 2149 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || 2150 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) 2151 return -EINVAL; 2152 2153 size = struct_size(vc_msg, chunks.chunks, 2154 le16_to_cpu(vc_msg->chunks.num_chunks)); 2155 if (reply_sz < size) 2156 return -EIO; 2157 2158 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); 2159 if (!vport_config->req_qs_chunks) 2160 return -ENOMEM; 2161 2162 return 0; 2163 } 2164 2165 /** 2166 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message 2167 * @adapter: Driver specific private structure 2168 * @num_vectors: number of vectors to be allocated 2169 * 2170 * Returns 0 on success, negative on failure. 2171 */ 2172 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) 2173 { 2174 struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; 2175 struct idpf_vc_xn_params xn_params = {}; 2176 struct virtchnl2_alloc_vectors ac = {}; 2177 ssize_t reply_sz; 2178 u16 num_vchunks; 2179 int size; 2180 2181 ac.num_vectors = cpu_to_le16(num_vectors); 2182 2183 rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2184 if (!rcvd_vec) 2185 return -ENOMEM; 2186 2187 xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; 2188 xn_params.send_buf.iov_base = ∾ 2189 xn_params.send_buf.iov_len = sizeof(ac); 2190 xn_params.recv_buf.iov_base = rcvd_vec; 2191 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2192 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2193 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2194 if (reply_sz < 0) 2195 return reply_sz; 2196 2197 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); 2198 size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); 2199 if (reply_sz < size) 2200 return -EIO; 2201 2202 if (size > IDPF_CTLQ_MAX_BUF_LEN) 2203 return -EINVAL; 2204 2205 kfree(adapter->req_vec_chunks); 2206 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); 2207 if (!adapter->req_vec_chunks) 2208 return -ENOMEM; 2209 2210 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { 2211 kfree(adapter->req_vec_chunks); 2212 adapter->req_vec_chunks = NULL; 2213 return -EINVAL; 2214 } 2215 2216 return 0; 2217 } 2218 2219 /** 2220 * idpf_send_dealloc_vectors_msg - Send virtchnl de allocate vectors message 2221 * @adapter: Driver specific private structure 2222 * 2223 * Returns 0 on success, negative on failure. 2224 */ 2225 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) 2226 { 2227 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; 2228 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; 2229 struct idpf_vc_xn_params xn_params = {}; 2230 ssize_t reply_sz; 2231 int buf_size; 2232 2233 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); 2234 2235 xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; 2236 xn_params.send_buf.iov_base = vcs; 2237 xn_params.send_buf.iov_len = buf_size; 2238 xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; 2239 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2240 if (reply_sz < 0) 2241 return reply_sz; 2242 2243 kfree(adapter->req_vec_chunks); 2244 adapter->req_vec_chunks = NULL; 2245 2246 return 0; 2247 } 2248 2249 /** 2250 * idpf_get_max_vfs - Get max number of vfs supported 2251 * @adapter: Driver specific private structure 2252 * 2253 * Returns max number of VFs 2254 */ 2255 static int idpf_get_max_vfs(struct idpf_adapter *adapter) 2256 { 2257 return le16_to_cpu(adapter->caps.max_sriov_vfs); 2258 } 2259 2260 /** 2261 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message 2262 * @adapter: Driver specific private structure 2263 * @num_vfs: number of virtual functions to be created 2264 * 2265 * Returns 0 on success, negative on failure. 2266 */ 2267 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) 2268 { 2269 struct virtchnl2_sriov_vfs_info svi = {}; 2270 struct idpf_vc_xn_params xn_params = {}; 2271 ssize_t reply_sz; 2272 2273 svi.num_vfs = cpu_to_le16(num_vfs); 2274 xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; 2275 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2276 xn_params.send_buf.iov_base = &svi; 2277 xn_params.send_buf.iov_len = sizeof(svi); 2278 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2279 2280 return reply_sz < 0 ? reply_sz : 0; 2281 } 2282 2283 /** 2284 * idpf_send_get_stats_msg - Send virtchnl get statistics message 2285 * @vport: vport to get stats for 2286 * 2287 * Returns 0 on success, negative on failure. 2288 */ 2289 int idpf_send_get_stats_msg(struct idpf_vport *vport) 2290 { 2291 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 2292 struct rtnl_link_stats64 *netstats = &np->netstats; 2293 struct virtchnl2_vport_stats stats_msg = {}; 2294 struct idpf_vc_xn_params xn_params = {}; 2295 ssize_t reply_sz; 2296 2297 2298 /* Don't send get_stats message if the link is down */ 2299 if (np->state <= __IDPF_VPORT_DOWN) 2300 return 0; 2301 2302 stats_msg.vport_id = cpu_to_le32(vport->vport_id); 2303 2304 xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; 2305 xn_params.send_buf.iov_base = &stats_msg; 2306 xn_params.send_buf.iov_len = sizeof(stats_msg); 2307 xn_params.recv_buf = xn_params.send_buf; 2308 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2309 2310 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2311 if (reply_sz < 0) 2312 return reply_sz; 2313 if (reply_sz < sizeof(stats_msg)) 2314 return -EIO; 2315 2316 spin_lock_bh(&np->stats_lock); 2317 2318 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + 2319 le64_to_cpu(stats_msg.rx_multicast) + 2320 le64_to_cpu(stats_msg.rx_broadcast); 2321 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + 2322 le64_to_cpu(stats_msg.tx_multicast) + 2323 le64_to_cpu(stats_msg.tx_broadcast); 2324 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); 2325 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); 2326 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); 2327 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); 2328 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); 2329 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); 2330 2331 vport->port_stats.vport_stats = stats_msg; 2332 2333 spin_unlock_bh(&np->stats_lock); 2334 2335 return 0; 2336 } 2337 2338 /** 2339 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message 2340 * @vport: virtual port data structure 2341 * @get: flag to set or get rss look up table 2342 * 2343 * Returns 0 on success, negative on failure. 2344 */ 2345 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) 2346 { 2347 struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; 2348 struct virtchnl2_rss_lut *rl __free(kfree) = NULL; 2349 struct idpf_vc_xn_params xn_params = {}; 2350 struct idpf_rss_data *rss_data; 2351 int buf_size, lut_buf_size; 2352 ssize_t reply_sz; 2353 int i; 2354 2355 rss_data = 2356 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2357 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); 2358 rl = kzalloc(buf_size, GFP_KERNEL); 2359 if (!rl) 2360 return -ENOMEM; 2361 2362 rl->vport_id = cpu_to_le32(vport->vport_id); 2363 2364 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2365 xn_params.send_buf.iov_base = rl; 2366 xn_params.send_buf.iov_len = buf_size; 2367 2368 if (get) { 2369 recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2370 if (!recv_rl) 2371 return -ENOMEM; 2372 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; 2373 xn_params.recv_buf.iov_base = recv_rl; 2374 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2375 } else { 2376 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); 2377 for (i = 0; i < rss_data->rss_lut_size; i++) 2378 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); 2379 2380 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; 2381 } 2382 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2383 if (reply_sz < 0) 2384 return reply_sz; 2385 if (!get) 2386 return 0; 2387 if (reply_sz < sizeof(struct virtchnl2_rss_lut)) 2388 return -EIO; 2389 2390 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); 2391 if (reply_sz < lut_buf_size) 2392 return -EIO; 2393 2394 /* size didn't change, we can reuse existing lut buf */ 2395 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) 2396 goto do_memcpy; 2397 2398 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); 2399 kfree(rss_data->rss_lut); 2400 2401 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); 2402 if (!rss_data->rss_lut) { 2403 rss_data->rss_lut_size = 0; 2404 return -ENOMEM; 2405 } 2406 2407 do_memcpy: 2408 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); 2409 2410 return 0; 2411 } 2412 2413 /** 2414 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message 2415 * @vport: virtual port data structure 2416 * @get: flag to set or get rss look up table 2417 * 2418 * Returns 0 on success, negative on failure 2419 */ 2420 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) 2421 { 2422 struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; 2423 struct virtchnl2_rss_key *rk __free(kfree) = NULL; 2424 struct idpf_vc_xn_params xn_params = {}; 2425 struct idpf_rss_data *rss_data; 2426 ssize_t reply_sz; 2427 int i, buf_size; 2428 u16 key_size; 2429 2430 rss_data = 2431 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; 2432 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); 2433 rk = kzalloc(buf_size, GFP_KERNEL); 2434 if (!rk) 2435 return -ENOMEM; 2436 2437 rk->vport_id = cpu_to_le32(vport->vport_id); 2438 xn_params.send_buf.iov_base = rk; 2439 xn_params.send_buf.iov_len = buf_size; 2440 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2441 if (get) { 2442 recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2443 if (!recv_rk) 2444 return -ENOMEM; 2445 2446 xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; 2447 xn_params.recv_buf.iov_base = recv_rk; 2448 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2449 } else { 2450 rk->key_len = cpu_to_le16(rss_data->rss_key_size); 2451 for (i = 0; i < rss_data->rss_key_size; i++) 2452 rk->key_flex[i] = rss_data->rss_key[i]; 2453 2454 xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; 2455 } 2456 2457 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2458 if (reply_sz < 0) 2459 return reply_sz; 2460 if (!get) 2461 return 0; 2462 if (reply_sz < sizeof(struct virtchnl2_rss_key)) 2463 return -EIO; 2464 2465 key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 2466 le16_to_cpu(recv_rk->key_len)); 2467 if (reply_sz < key_size) 2468 return -EIO; 2469 2470 /* key len didn't change, reuse existing buf */ 2471 if (rss_data->rss_key_size == key_size) 2472 goto do_memcpy; 2473 2474 rss_data->rss_key_size = key_size; 2475 kfree(rss_data->rss_key); 2476 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); 2477 if (!rss_data->rss_key) { 2478 rss_data->rss_key_size = 0; 2479 return -ENOMEM; 2480 } 2481 2482 do_memcpy: 2483 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); 2484 2485 return 0; 2486 } 2487 2488 /** 2489 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table 2490 * @ptype: ptype lookup table 2491 * @pstate: state machine for ptype lookup table 2492 * @ipv4: ipv4 or ipv6 2493 * @frag: fragmentation allowed 2494 * 2495 */ 2496 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, 2497 struct idpf_ptype_state *pstate, 2498 bool ipv4, bool frag) 2499 { 2500 if (!pstate->outer_ip || !pstate->outer_frag) { 2501 pstate->outer_ip = true; 2502 2503 if (ipv4) 2504 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; 2505 else 2506 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; 2507 2508 if (frag) { 2509 ptype->outer_frag = LIBETH_RX_PT_FRAG; 2510 pstate->outer_frag = true; 2511 } 2512 } else { 2513 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; 2514 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; 2515 2516 if (ipv4) 2517 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; 2518 else 2519 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; 2520 2521 if (frag) 2522 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; 2523 } 2524 } 2525 2526 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) 2527 { 2528 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2529 ptype->inner_prot) 2530 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; 2531 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && 2532 ptype->outer_ip) 2533 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; 2534 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) 2535 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; 2536 else 2537 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; 2538 2539 libeth_rx_pt_gen_hash_type(ptype); 2540 } 2541 2542 /** 2543 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info 2544 * @vport: virtual port data structure 2545 * 2546 * Returns 0 on success, negative on failure. 2547 */ 2548 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) 2549 { 2550 struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; 2551 struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; 2552 struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; 2553 int max_ptype, ptypes_recvd = 0, ptype_offset; 2554 struct idpf_adapter *adapter = vport->adapter; 2555 struct idpf_vc_xn_params xn_params = {}; 2556 u16 next_ptype_id = 0; 2557 ssize_t reply_sz; 2558 int i, j, k; 2559 2560 if (vport->rx_ptype_lkup) 2561 return 0; 2562 2563 if (idpf_is_queue_model_split(vport->rxq_model)) 2564 max_ptype = IDPF_RX_MAX_PTYPE; 2565 else 2566 max_ptype = IDPF_RX_MAX_BASE_PTYPE; 2567 2568 ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); 2569 if (!ptype_lkup) 2570 return -ENOMEM; 2571 2572 get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); 2573 if (!get_ptype_info) 2574 return -ENOMEM; 2575 2576 ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2577 if (!ptype_info) 2578 return -ENOMEM; 2579 2580 xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; 2581 xn_params.send_buf.iov_base = get_ptype_info; 2582 xn_params.send_buf.iov_len = sizeof(*get_ptype_info); 2583 xn_params.recv_buf.iov_base = ptype_info; 2584 xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; 2585 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2586 2587 while (next_ptype_id < max_ptype) { 2588 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); 2589 2590 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) 2591 get_ptype_info->num_ptypes = 2592 cpu_to_le16(max_ptype - next_ptype_id); 2593 else 2594 get_ptype_info->num_ptypes = 2595 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); 2596 2597 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 2598 if (reply_sz < 0) 2599 return reply_sz; 2600 2601 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); 2602 if (ptypes_recvd > max_ptype) 2603 return -EINVAL; 2604 2605 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + 2606 le16_to_cpu(get_ptype_info->num_ptypes); 2607 2608 ptype_offset = IDPF_RX_PTYPE_HDR_SZ; 2609 2610 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { 2611 struct idpf_ptype_state pstate = { }; 2612 struct virtchnl2_ptype *ptype; 2613 u16 id; 2614 2615 ptype = (struct virtchnl2_ptype *) 2616 ((u8 *)ptype_info + ptype_offset); 2617 2618 ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); 2619 if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) 2620 return -EINVAL; 2621 2622 /* 0xFFFF indicates end of ptypes */ 2623 if (le16_to_cpu(ptype->ptype_id_10) == 2624 IDPF_INVALID_PTYPE_ID) 2625 goto out; 2626 2627 if (idpf_is_queue_model_split(vport->rxq_model)) 2628 k = le16_to_cpu(ptype->ptype_id_10); 2629 else 2630 k = ptype->ptype_id_8; 2631 2632 for (j = 0; j < ptype->proto_id_count; j++) { 2633 id = le16_to_cpu(ptype->proto_id[j]); 2634 switch (id) { 2635 case VIRTCHNL2_PROTO_HDR_GRE: 2636 if (pstate.tunnel_state == 2637 IDPF_PTYPE_TUNNEL_IP) { 2638 ptype_lkup[k].tunnel_type = 2639 LIBETH_RX_PT_TUNNEL_IP_GRENAT; 2640 pstate.tunnel_state |= 2641 IDPF_PTYPE_TUNNEL_IP_GRENAT; 2642 } 2643 break; 2644 case VIRTCHNL2_PROTO_HDR_MAC: 2645 ptype_lkup[k].outer_ip = 2646 LIBETH_RX_PT_OUTER_L2; 2647 if (pstate.tunnel_state == 2648 IDPF_TUN_IP_GRE) { 2649 ptype_lkup[k].tunnel_type = 2650 LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; 2651 pstate.tunnel_state |= 2652 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; 2653 } 2654 break; 2655 case VIRTCHNL2_PROTO_HDR_IPV4: 2656 idpf_fill_ptype_lookup(&ptype_lkup[k], 2657 &pstate, true, 2658 false); 2659 break; 2660 case VIRTCHNL2_PROTO_HDR_IPV6: 2661 idpf_fill_ptype_lookup(&ptype_lkup[k], 2662 &pstate, false, 2663 false); 2664 break; 2665 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: 2666 idpf_fill_ptype_lookup(&ptype_lkup[k], 2667 &pstate, true, 2668 true); 2669 break; 2670 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: 2671 idpf_fill_ptype_lookup(&ptype_lkup[k], 2672 &pstate, false, 2673 true); 2674 break; 2675 case VIRTCHNL2_PROTO_HDR_UDP: 2676 ptype_lkup[k].inner_prot = 2677 LIBETH_RX_PT_INNER_UDP; 2678 break; 2679 case VIRTCHNL2_PROTO_HDR_TCP: 2680 ptype_lkup[k].inner_prot = 2681 LIBETH_RX_PT_INNER_TCP; 2682 break; 2683 case VIRTCHNL2_PROTO_HDR_SCTP: 2684 ptype_lkup[k].inner_prot = 2685 LIBETH_RX_PT_INNER_SCTP; 2686 break; 2687 case VIRTCHNL2_PROTO_HDR_ICMP: 2688 ptype_lkup[k].inner_prot = 2689 LIBETH_RX_PT_INNER_ICMP; 2690 break; 2691 case VIRTCHNL2_PROTO_HDR_PAY: 2692 ptype_lkup[k].payload_layer = 2693 LIBETH_RX_PT_PAYLOAD_L2; 2694 break; 2695 case VIRTCHNL2_PROTO_HDR_ICMPV6: 2696 case VIRTCHNL2_PROTO_HDR_IPV6_EH: 2697 case VIRTCHNL2_PROTO_HDR_PRE_MAC: 2698 case VIRTCHNL2_PROTO_HDR_POST_MAC: 2699 case VIRTCHNL2_PROTO_HDR_ETHERTYPE: 2700 case VIRTCHNL2_PROTO_HDR_SVLAN: 2701 case VIRTCHNL2_PROTO_HDR_CVLAN: 2702 case VIRTCHNL2_PROTO_HDR_MPLS: 2703 case VIRTCHNL2_PROTO_HDR_MMPLS: 2704 case VIRTCHNL2_PROTO_HDR_PTP: 2705 case VIRTCHNL2_PROTO_HDR_CTRL: 2706 case VIRTCHNL2_PROTO_HDR_LLDP: 2707 case VIRTCHNL2_PROTO_HDR_ARP: 2708 case VIRTCHNL2_PROTO_HDR_ECP: 2709 case VIRTCHNL2_PROTO_HDR_EAPOL: 2710 case VIRTCHNL2_PROTO_HDR_PPPOD: 2711 case VIRTCHNL2_PROTO_HDR_PPPOE: 2712 case VIRTCHNL2_PROTO_HDR_IGMP: 2713 case VIRTCHNL2_PROTO_HDR_AH: 2714 case VIRTCHNL2_PROTO_HDR_ESP: 2715 case VIRTCHNL2_PROTO_HDR_IKE: 2716 case VIRTCHNL2_PROTO_HDR_NATT_KEEP: 2717 case VIRTCHNL2_PROTO_HDR_L2TPV2: 2718 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: 2719 case VIRTCHNL2_PROTO_HDR_L2TPV3: 2720 case VIRTCHNL2_PROTO_HDR_GTP: 2721 case VIRTCHNL2_PROTO_HDR_GTP_EH: 2722 case VIRTCHNL2_PROTO_HDR_GTPCV2: 2723 case VIRTCHNL2_PROTO_HDR_GTPC_TEID: 2724 case VIRTCHNL2_PROTO_HDR_GTPU: 2725 case VIRTCHNL2_PROTO_HDR_GTPU_UL: 2726 case VIRTCHNL2_PROTO_HDR_GTPU_DL: 2727 case VIRTCHNL2_PROTO_HDR_ECPRI: 2728 case VIRTCHNL2_PROTO_HDR_VRRP: 2729 case VIRTCHNL2_PROTO_HDR_OSPF: 2730 case VIRTCHNL2_PROTO_HDR_TUN: 2731 case VIRTCHNL2_PROTO_HDR_NVGRE: 2732 case VIRTCHNL2_PROTO_HDR_VXLAN: 2733 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: 2734 case VIRTCHNL2_PROTO_HDR_GENEVE: 2735 case VIRTCHNL2_PROTO_HDR_NSH: 2736 case VIRTCHNL2_PROTO_HDR_QUIC: 2737 case VIRTCHNL2_PROTO_HDR_PFCP: 2738 case VIRTCHNL2_PROTO_HDR_PFCP_NODE: 2739 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: 2740 case VIRTCHNL2_PROTO_HDR_RTP: 2741 case VIRTCHNL2_PROTO_HDR_NO_PROTO: 2742 break; 2743 default: 2744 break; 2745 } 2746 } 2747 2748 idpf_finalize_ptype_lookup(&ptype_lkup[k]); 2749 } 2750 } 2751 2752 out: 2753 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); 2754 2755 return 0; 2756 } 2757 2758 /** 2759 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback 2760 * message 2761 * @vport: virtual port data structure 2762 * 2763 * Returns 0 on success, negative on failure. 2764 */ 2765 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) 2766 { 2767 struct idpf_vc_xn_params xn_params = {}; 2768 struct virtchnl2_loopback loopback; 2769 ssize_t reply_sz; 2770 2771 loopback.vport_id = cpu_to_le32(vport->vport_id); 2772 loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); 2773 2774 xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; 2775 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 2776 xn_params.send_buf.iov_base = &loopback; 2777 xn_params.send_buf.iov_len = sizeof(loopback); 2778 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); 2779 2780 return reply_sz < 0 ? reply_sz : 0; 2781 } 2782 2783 /** 2784 * idpf_find_ctlq - Given a type and id, find ctlq info 2785 * @hw: hardware struct 2786 * @type: type of ctrlq to find 2787 * @id: ctlq id to find 2788 * 2789 * Returns pointer to found ctlq info struct, NULL otherwise. 2790 */ 2791 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, 2792 enum idpf_ctlq_type type, int id) 2793 { 2794 struct idpf_ctlq_info *cq, *tmp; 2795 2796 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) 2797 if (cq->q_id == id && cq->cq_type == type) 2798 return cq; 2799 2800 return NULL; 2801 } 2802 2803 /** 2804 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request 2805 * @adapter: adapter info struct 2806 * 2807 * Returns 0 on success, negative otherwise 2808 */ 2809 int idpf_init_dflt_mbx(struct idpf_adapter *adapter) 2810 { 2811 struct idpf_ctlq_create_info ctlq_info[] = { 2812 { 2813 .type = IDPF_CTLQ_TYPE_MAILBOX_TX, 2814 .id = IDPF_DFLT_MBX_ID, 2815 .len = IDPF_DFLT_MBX_Q_LEN, 2816 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2817 }, 2818 { 2819 .type = IDPF_CTLQ_TYPE_MAILBOX_RX, 2820 .id = IDPF_DFLT_MBX_ID, 2821 .len = IDPF_DFLT_MBX_Q_LEN, 2822 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2823 } 2824 }; 2825 struct idpf_hw *hw = &adapter->hw; 2826 int err; 2827 2828 adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); 2829 2830 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 2831 if (err) 2832 return err; 2833 2834 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, 2835 IDPF_DFLT_MBX_ID); 2836 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, 2837 IDPF_DFLT_MBX_ID); 2838 2839 if (!hw->asq || !hw->arq) { 2840 idpf_ctlq_deinit(hw); 2841 2842 return -ENOENT; 2843 } 2844 2845 adapter->state = __IDPF_VER_CHECK; 2846 2847 return 0; 2848 } 2849 2850 /** 2851 * idpf_deinit_dflt_mbx - Free up ctlqs setup 2852 * @adapter: Driver specific private data structure 2853 */ 2854 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) 2855 { 2856 if (adapter->hw.arq && adapter->hw.asq) { 2857 idpf_mb_clean(adapter); 2858 idpf_ctlq_deinit(&adapter->hw); 2859 } 2860 adapter->hw.arq = NULL; 2861 adapter->hw.asq = NULL; 2862 } 2863 2864 /** 2865 * idpf_vport_params_buf_rel - Release memory for MailBox resources 2866 * @adapter: Driver specific private data structure 2867 * 2868 * Will release memory to hold the vport parameters received on MailBox 2869 */ 2870 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) 2871 { 2872 kfree(adapter->vport_params_recvd); 2873 adapter->vport_params_recvd = NULL; 2874 kfree(adapter->vport_params_reqd); 2875 adapter->vport_params_reqd = NULL; 2876 kfree(adapter->vport_ids); 2877 adapter->vport_ids = NULL; 2878 } 2879 2880 /** 2881 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources 2882 * @adapter: Driver specific private data structure 2883 * 2884 * Will alloc memory to hold the vport parameters received on MailBox 2885 */ 2886 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) 2887 { 2888 u16 num_max_vports = idpf_get_max_vports(adapter); 2889 2890 adapter->vport_params_reqd = kcalloc(num_max_vports, 2891 sizeof(*adapter->vport_params_reqd), 2892 GFP_KERNEL); 2893 if (!adapter->vport_params_reqd) 2894 return -ENOMEM; 2895 2896 adapter->vport_params_recvd = kcalloc(num_max_vports, 2897 sizeof(*adapter->vport_params_recvd), 2898 GFP_KERNEL); 2899 if (!adapter->vport_params_recvd) 2900 goto err_mem; 2901 2902 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); 2903 if (!adapter->vport_ids) 2904 goto err_mem; 2905 2906 if (adapter->vport_config) 2907 return 0; 2908 2909 adapter->vport_config = kcalloc(num_max_vports, 2910 sizeof(*adapter->vport_config), 2911 GFP_KERNEL); 2912 if (!adapter->vport_config) 2913 goto err_mem; 2914 2915 return 0; 2916 2917 err_mem: 2918 idpf_vport_params_buf_rel(adapter); 2919 2920 return -ENOMEM; 2921 } 2922 2923 /** 2924 * idpf_vc_core_init - Initialize state machine and get driver specific 2925 * resources 2926 * @adapter: Driver specific private structure 2927 * 2928 * This function will initialize the state machine and request all necessary 2929 * resources required by the device driver. Once the state machine is 2930 * initialized, allocate memory to store vport specific information and also 2931 * requests required interrupts. 2932 * 2933 * Returns 0 on success, -EAGAIN function will get called again, 2934 * otherwise negative on failure. 2935 */ 2936 int idpf_vc_core_init(struct idpf_adapter *adapter) 2937 { 2938 int task_delay = 30; 2939 u16 num_max_vports; 2940 int err = 0; 2941 2942 if (!adapter->vcxn_mngr) { 2943 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); 2944 if (!adapter->vcxn_mngr) { 2945 err = -ENOMEM; 2946 goto init_failed; 2947 } 2948 } 2949 idpf_vc_xn_init(adapter->vcxn_mngr); 2950 2951 while (adapter->state != __IDPF_INIT_SW) { 2952 switch (adapter->state) { 2953 case __IDPF_VER_CHECK: 2954 err = idpf_send_ver_msg(adapter); 2955 switch (err) { 2956 case 0: 2957 /* success, move state machine forward */ 2958 adapter->state = __IDPF_GET_CAPS; 2959 fallthrough; 2960 case -EAGAIN: 2961 goto restart; 2962 default: 2963 /* Something bad happened, try again but only a 2964 * few times. 2965 */ 2966 goto init_failed; 2967 } 2968 case __IDPF_GET_CAPS: 2969 err = idpf_send_get_caps_msg(adapter); 2970 if (err) 2971 goto init_failed; 2972 adapter->state = __IDPF_INIT_SW; 2973 break; 2974 default: 2975 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", 2976 adapter->state); 2977 err = -EINVAL; 2978 goto init_failed; 2979 } 2980 break; 2981 restart: 2982 /* Give enough time before proceeding further with 2983 * state machine 2984 */ 2985 msleep(task_delay); 2986 } 2987 2988 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 2989 num_max_vports = idpf_get_max_vports(adapter); 2990 adapter->max_vports = num_max_vports; 2991 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), 2992 GFP_KERNEL); 2993 if (!adapter->vports) 2994 return -ENOMEM; 2995 2996 if (!adapter->netdevs) { 2997 adapter->netdevs = kcalloc(num_max_vports, 2998 sizeof(struct net_device *), 2999 GFP_KERNEL); 3000 if (!adapter->netdevs) { 3001 err = -ENOMEM; 3002 goto err_netdev_alloc; 3003 } 3004 } 3005 3006 err = idpf_vport_params_buf_alloc(adapter); 3007 if (err) { 3008 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", 3009 err); 3010 goto err_netdev_alloc; 3011 } 3012 3013 /* Start the mailbox task before requesting vectors. This will ensure 3014 * vector information response from mailbox is handled 3015 */ 3016 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 3017 3018 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 3019 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3020 3021 err = idpf_intr_req(adapter); 3022 if (err) { 3023 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", 3024 err); 3025 goto err_intr_req; 3026 } 3027 3028 idpf_init_avail_queues(adapter); 3029 3030 /* Skew the delay for init tasks for each function based on fn number 3031 * to prevent every function from making the same call simultaneously. 3032 */ 3033 queue_delayed_work(adapter->init_wq, &adapter->init_task, 3034 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3035 3036 set_bit(IDPF_VC_CORE_INIT, adapter->flags); 3037 3038 return 0; 3039 3040 err_intr_req: 3041 cancel_delayed_work_sync(&adapter->serv_task); 3042 cancel_delayed_work_sync(&adapter->mbx_task); 3043 idpf_vport_params_buf_rel(adapter); 3044 err_netdev_alloc: 3045 kfree(adapter->vports); 3046 adapter->vports = NULL; 3047 return err; 3048 3049 init_failed: 3050 /* Don't retry if we're trying to go down, just bail. */ 3051 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 3052 return err; 3053 3054 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { 3055 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); 3056 3057 return -EFAULT; 3058 } 3059 /* If it reached here, it is possible that mailbox queue initialization 3060 * register writes might not have taken effect. Retry to initialize 3061 * the mailbox again 3062 */ 3063 adapter->state = __IDPF_VER_CHECK; 3064 if (adapter->vcxn_mngr) 3065 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3066 idpf_deinit_dflt_mbx(adapter); 3067 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); 3068 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, 3069 msecs_to_jiffies(task_delay)); 3070 3071 return -EAGAIN; 3072 } 3073 3074 /** 3075 * idpf_vc_core_deinit - Device deinit routine 3076 * @adapter: Driver specific private structure 3077 * 3078 */ 3079 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3080 { 3081 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) 3082 return; 3083 3084 idpf_deinit_task(adapter); 3085 idpf_intr_rel(adapter); 3086 idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3087 3088 cancel_delayed_work_sync(&adapter->serv_task); 3089 cancel_delayed_work_sync(&adapter->mbx_task); 3090 3091 idpf_vport_params_buf_rel(adapter); 3092 3093 kfree(adapter->vports); 3094 adapter->vports = NULL; 3095 3096 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); 3097 } 3098 3099 /** 3100 * idpf_vport_alloc_vec_indexes - Get relative vector indexes 3101 * @vport: virtual port data struct 3102 * 3103 * This function requests the vector information required for the vport and 3104 * stores the vector indexes received from the 'global vector distribution' 3105 * in the vport's queue vectors array. 3106 * 3107 * Return 0 on success, error on failure 3108 */ 3109 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) 3110 { 3111 struct idpf_vector_info vec_info; 3112 int num_alloc_vecs; 3113 3114 vec_info.num_curr_vecs = vport->num_q_vectors; 3115 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); 3116 vec_info.default_vport = vport->default_vport; 3117 vec_info.index = vport->idx; 3118 3119 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, 3120 vport->q_vector_idxs, 3121 &vec_info); 3122 if (num_alloc_vecs <= 0) { 3123 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", 3124 num_alloc_vecs); 3125 return -EINVAL; 3126 } 3127 3128 vport->num_q_vectors = num_alloc_vecs; 3129 3130 return 0; 3131 } 3132 3133 /** 3134 * idpf_vport_init - Initialize virtual port 3135 * @vport: virtual port to be initialized 3136 * @max_q: vport max queue info 3137 * 3138 * Will initialize vport with the info received through MB earlier 3139 */ 3140 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) 3141 { 3142 struct idpf_adapter *adapter = vport->adapter; 3143 struct virtchnl2_create_vport *vport_msg; 3144 struct idpf_vport_config *vport_config; 3145 u16 tx_itr[] = {2, 8, 64, 128, 256}; 3146 u16 rx_itr[] = {2, 8, 32, 96, 128}; 3147 struct idpf_rss_data *rss_data; 3148 u16 idx = vport->idx; 3149 3150 vport_config = adapter->vport_config[idx]; 3151 rss_data = &vport_config->user_config.rss_data; 3152 vport_msg = adapter->vport_params_recvd[idx]; 3153 3154 vport_config->max_q.max_txq = max_q->max_txq; 3155 vport_config->max_q.max_rxq = max_q->max_rxq; 3156 vport_config->max_q.max_complq = max_q->max_complq; 3157 vport_config->max_q.max_bufq = max_q->max_bufq; 3158 3159 vport->txq_model = le16_to_cpu(vport_msg->txq_model); 3160 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); 3161 vport->vport_type = le16_to_cpu(vport_msg->vport_type); 3162 vport->vport_id = le32_to_cpu(vport_msg->vport_id); 3163 3164 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 3165 le16_to_cpu(vport_msg->rss_key_size)); 3166 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); 3167 3168 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); 3169 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; 3170 3171 /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ 3172 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); 3173 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); 3174 3175 idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); 3176 3177 idpf_vport_init_num_qs(vport, vport_msg); 3178 idpf_vport_calc_num_q_desc(vport); 3179 idpf_vport_calc_num_q_groups(vport); 3180 idpf_vport_alloc_vec_indexes(vport); 3181 3182 vport->crc_enable = adapter->crc_enable; 3183 } 3184 3185 /** 3186 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters 3187 * @adapter: adapter structure to get the mailbox vector id 3188 * @vecids: Array of vector ids 3189 * @num_vecids: number of vector ids 3190 * @chunks: vector ids received over mailbox 3191 * 3192 * Will initialize the mailbox vector id which is received from the 3193 * get capabilities and data queue vector ids with ids received as 3194 * mailbox parameters. 3195 * Returns number of ids filled 3196 */ 3197 int idpf_get_vec_ids(struct idpf_adapter *adapter, 3198 u16 *vecids, int num_vecids, 3199 struct virtchnl2_vector_chunks *chunks) 3200 { 3201 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); 3202 int num_vecid_filled = 0; 3203 int i, j; 3204 3205 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; 3206 num_vecid_filled++; 3207 3208 for (j = 0; j < num_chunks; j++) { 3209 struct virtchnl2_vector_chunk *chunk; 3210 u16 start_vecid, num_vec; 3211 3212 chunk = &chunks->vchunks[j]; 3213 num_vec = le16_to_cpu(chunk->num_vectors); 3214 start_vecid = le16_to_cpu(chunk->start_vector_id); 3215 3216 for (i = 0; i < num_vec; i++) { 3217 if ((num_vecid_filled + i) < num_vecids) { 3218 vecids[num_vecid_filled + i] = start_vecid; 3219 start_vecid++; 3220 } else { 3221 break; 3222 } 3223 } 3224 num_vecid_filled = num_vecid_filled + i; 3225 } 3226 3227 return num_vecid_filled; 3228 } 3229 3230 /** 3231 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters 3232 * @qids: Array of queue ids 3233 * @num_qids: number of queue ids 3234 * @q_type: queue model 3235 * @chunks: queue ids received over mailbox 3236 * 3237 * Will initialize all queue ids with ids received as mailbox parameters 3238 * Returns number of ids filled 3239 */ 3240 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, 3241 struct virtchnl2_queue_reg_chunks *chunks) 3242 { 3243 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 3244 u32 num_q_id_filled = 0, i; 3245 u32 start_q_id, num_q; 3246 3247 while (num_chunks--) { 3248 struct virtchnl2_queue_reg_chunk *chunk; 3249 3250 chunk = &chunks->chunks[num_chunks]; 3251 if (le32_to_cpu(chunk->type) != q_type) 3252 continue; 3253 3254 num_q = le32_to_cpu(chunk->num_queues); 3255 start_q_id = le32_to_cpu(chunk->start_queue_id); 3256 3257 for (i = 0; i < num_q; i++) { 3258 if ((num_q_id_filled + i) < num_qids) { 3259 qids[num_q_id_filled + i] = start_q_id; 3260 start_q_id++; 3261 } else { 3262 break; 3263 } 3264 } 3265 num_q_id_filled = num_q_id_filled + i; 3266 } 3267 3268 return num_q_id_filled; 3269 } 3270 3271 /** 3272 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3273 * @vport: virtual port for which the queues ids are initialized 3274 * @qids: queue ids 3275 * @num_qids: number of queue ids 3276 * @q_type: type of queue 3277 * 3278 * Will initialize all queue ids with ids received as mailbox 3279 * parameters. Returns number of queue ids initialized. 3280 */ 3281 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, 3282 const u32 *qids, 3283 int num_qids, 3284 u32 q_type) 3285 { 3286 int i, j, k = 0; 3287 3288 switch (q_type) { 3289 case VIRTCHNL2_QUEUE_TYPE_TX: 3290 for (i = 0; i < vport->num_txq_grp; i++) { 3291 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3292 3293 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) 3294 tx_qgrp->txqs[j]->q_id = qids[k]; 3295 } 3296 break; 3297 case VIRTCHNL2_QUEUE_TYPE_RX: 3298 for (i = 0; i < vport->num_rxq_grp; i++) { 3299 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3300 u16 num_rxq; 3301 3302 if (idpf_is_queue_model_split(vport->rxq_model)) 3303 num_rxq = rx_qgrp->splitq.num_rxq_sets; 3304 else 3305 num_rxq = rx_qgrp->singleq.num_rxq; 3306 3307 for (j = 0; j < num_rxq && k < num_qids; j++, k++) { 3308 struct idpf_rx_queue *q; 3309 3310 if (idpf_is_queue_model_split(vport->rxq_model)) 3311 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 3312 else 3313 q = rx_qgrp->singleq.rxqs[j]; 3314 q->q_id = qids[k]; 3315 } 3316 } 3317 break; 3318 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: 3319 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { 3320 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3321 3322 tx_qgrp->complq->q_id = qids[k]; 3323 } 3324 break; 3325 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 3326 for (i = 0; i < vport->num_rxq_grp; i++) { 3327 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3328 u8 num_bufqs = vport->num_bufqs_per_qgrp; 3329 3330 for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { 3331 struct idpf_buf_queue *q; 3332 3333 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 3334 q->q_id = qids[k]; 3335 } 3336 } 3337 break; 3338 default: 3339 break; 3340 } 3341 3342 return k; 3343 } 3344 3345 /** 3346 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3347 * @vport: virtual port for which the queues ids are initialized 3348 * 3349 * Will initialize all queue ids with ids received as mailbox parameters. 3350 * Returns 0 on success, negative if all the queues are not initialized. 3351 */ 3352 int idpf_vport_queue_ids_init(struct idpf_vport *vport) 3353 { 3354 struct virtchnl2_create_vport *vport_params; 3355 struct virtchnl2_queue_reg_chunks *chunks; 3356 struct idpf_vport_config *vport_config; 3357 u16 vport_idx = vport->idx; 3358 int num_ids, err = 0; 3359 u16 q_type; 3360 u32 *qids; 3361 3362 vport_config = vport->adapter->vport_config[vport_idx]; 3363 if (vport_config->req_qs_chunks) { 3364 struct virtchnl2_add_queues *vc_aq = 3365 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 3366 chunks = &vc_aq->chunks; 3367 } else { 3368 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 3369 chunks = &vport_params->chunks; 3370 } 3371 3372 qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL); 3373 if (!qids) 3374 return -ENOMEM; 3375 3376 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3377 VIRTCHNL2_QUEUE_TYPE_TX, 3378 chunks); 3379 if (num_ids < vport->num_txq) { 3380 err = -EINVAL; 3381 goto mem_rel; 3382 } 3383 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3384 VIRTCHNL2_QUEUE_TYPE_TX); 3385 if (num_ids < vport->num_txq) { 3386 err = -EINVAL; 3387 goto mem_rel; 3388 } 3389 3390 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3391 VIRTCHNL2_QUEUE_TYPE_RX, 3392 chunks); 3393 if (num_ids < vport->num_rxq) { 3394 err = -EINVAL; 3395 goto mem_rel; 3396 } 3397 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3398 VIRTCHNL2_QUEUE_TYPE_RX); 3399 if (num_ids < vport->num_rxq) { 3400 err = -EINVAL; 3401 goto mem_rel; 3402 } 3403 3404 if (!idpf_is_queue_model_split(vport->txq_model)) 3405 goto check_rxq; 3406 3407 q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 3408 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3409 if (num_ids < vport->num_complq) { 3410 err = -EINVAL; 3411 goto mem_rel; 3412 } 3413 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3414 if (num_ids < vport->num_complq) { 3415 err = -EINVAL; 3416 goto mem_rel; 3417 } 3418 3419 check_rxq: 3420 if (!idpf_is_queue_model_split(vport->rxq_model)) 3421 goto mem_rel; 3422 3423 q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 3424 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3425 if (num_ids < vport->num_bufq) { 3426 err = -EINVAL; 3427 goto mem_rel; 3428 } 3429 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3430 if (num_ids < vport->num_bufq) 3431 err = -EINVAL; 3432 3433 mem_rel: 3434 kfree(qids); 3435 3436 return err; 3437 } 3438 3439 /** 3440 * idpf_vport_adjust_qs - Adjust to new requested queues 3441 * @vport: virtual port data struct 3442 * 3443 * Renegotiate queues. Returns 0 on success, negative on failure. 3444 */ 3445 int idpf_vport_adjust_qs(struct idpf_vport *vport) 3446 { 3447 struct virtchnl2_create_vport vport_msg; 3448 int err; 3449 3450 vport_msg.txq_model = cpu_to_le16(vport->txq_model); 3451 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); 3452 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, 3453 NULL); 3454 if (err) 3455 return err; 3456 3457 idpf_vport_init_num_qs(vport, &vport_msg); 3458 idpf_vport_calc_num_q_groups(vport); 3459 3460 return 0; 3461 } 3462 3463 /** 3464 * idpf_is_capability_ena - Default implementation of capability checking 3465 * @adapter: Private data struct 3466 * @all: all or one flag 3467 * @field: caps field to check for flags 3468 * @flag: flag to check 3469 * 3470 * Return true if all capabilities are supported, false otherwise 3471 */ 3472 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, 3473 enum idpf_cap_field field, u64 flag) 3474 { 3475 u8 *caps = (u8 *)&adapter->caps; 3476 u32 *cap_field; 3477 3478 if (!caps) 3479 return false; 3480 3481 if (field == IDPF_BASE_CAPS) 3482 return false; 3483 3484 cap_field = (u32 *)(caps + field); 3485 3486 if (all) 3487 return (*cap_field & flag) == flag; 3488 else 3489 return !!(*cap_field & flag); 3490 } 3491 3492 /** 3493 * idpf_get_vport_id: Get vport id 3494 * @vport: virtual port structure 3495 * 3496 * Return vport id from the adapter persistent data 3497 */ 3498 u32 idpf_get_vport_id(struct idpf_vport *vport) 3499 { 3500 struct virtchnl2_create_vport *vport_msg; 3501 3502 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; 3503 3504 return le32_to_cpu(vport_msg->vport_id); 3505 } 3506 3507 /** 3508 * idpf_mac_filter_async_handler - Async callback for mac filters 3509 * @adapter: private data struct 3510 * @xn: transaction for message 3511 * @ctlq_msg: received message 3512 * 3513 * In some scenarios driver can't sleep and wait for a reply (e.g.: stack is 3514 * holding rtnl_lock) when adding a new mac filter. It puts us in a difficult 3515 * situation to deal with errors returned on the reply. The best we can 3516 * ultimately do is remove it from our list of mac filters and report the 3517 * error. 3518 */ 3519 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter, 3520 struct idpf_vc_xn *xn, 3521 const struct idpf_ctlq_msg *ctlq_msg) 3522 { 3523 struct virtchnl2_mac_addr_list *ma_list; 3524 struct idpf_vport_config *vport_config; 3525 struct virtchnl2_mac_addr *mac_addr; 3526 struct idpf_mac_filter *f, *tmp; 3527 struct list_head *ma_list_head; 3528 struct idpf_vport *vport; 3529 u16 num_entries; 3530 int i; 3531 3532 /* if success we're done, we're only here if something bad happened */ 3533 if (!ctlq_msg->cookie.mbx.chnl_retval) 3534 return 0; 3535 3536 /* make sure at least struct is there */ 3537 if (xn->reply_sz < sizeof(*ma_list)) 3538 goto invalid_payload; 3539 3540 ma_list = ctlq_msg->ctx.indirect.payload->va; 3541 mac_addr = ma_list->mac_addr_list; 3542 num_entries = le16_to_cpu(ma_list->num_mac_addr); 3543 /* we should have received a buffer at least this big */ 3544 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) 3545 goto invalid_payload; 3546 3547 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); 3548 if (!vport) 3549 goto invalid_payload; 3550 3551 vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; 3552 ma_list_head = &vport_config->user_config.mac_filter_list; 3553 3554 /* We can't do much to reconcile bad filters at this point, however we 3555 * should at least remove them from our list one way or the other so we 3556 * have some idea what good filters we have. 3557 */ 3558 spin_lock_bh(&vport_config->mac_filter_list_lock); 3559 list_for_each_entry_safe(f, tmp, ma_list_head, list) 3560 for (i = 0; i < num_entries; i++) 3561 if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) 3562 list_del(&f->list); 3563 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3564 dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", 3565 xn->vc_op); 3566 3567 return 0; 3568 3569 invalid_payload: 3570 dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", 3571 xn->vc_op, xn->reply_sz); 3572 3573 return -EINVAL; 3574 } 3575 3576 /** 3577 * idpf_add_del_mac_filters - Add/del mac filters 3578 * @vport: Virtual port data structure 3579 * @np: Netdev private structure 3580 * @add: Add or delete flag 3581 * @async: Don't wait for return message 3582 * 3583 * Returns 0 on success, error on failure. 3584 **/ 3585 int idpf_add_del_mac_filters(struct idpf_vport *vport, 3586 struct idpf_netdev_priv *np, 3587 bool add, bool async) 3588 { 3589 struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL; 3590 struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL; 3591 struct idpf_adapter *adapter = np->adapter; 3592 struct idpf_vc_xn_params xn_params = {}; 3593 struct idpf_vport_config *vport_config; 3594 u32 num_msgs, total_filters = 0; 3595 struct idpf_mac_filter *f; 3596 ssize_t reply_sz; 3597 int i = 0, k; 3598 3599 xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR : 3600 VIRTCHNL2_OP_DEL_MAC_ADDR; 3601 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3602 xn_params.async = async; 3603 xn_params.async_handler = idpf_mac_filter_async_handler; 3604 3605 vport_config = adapter->vport_config[np->vport_idx]; 3606 spin_lock_bh(&vport_config->mac_filter_list_lock); 3607 3608 /* Find the number of newly added filters */ 3609 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, 3610 list) { 3611 if (add && f->add) 3612 total_filters++; 3613 else if (!add && f->remove) 3614 total_filters++; 3615 } 3616 3617 if (!total_filters) { 3618 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3619 3620 return 0; 3621 } 3622 3623 /* Fill all the new filters into virtchannel message */ 3624 mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), 3625 GFP_ATOMIC); 3626 if (!mac_addr) { 3627 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3628 3629 return -ENOMEM; 3630 } 3631 3632 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, 3633 list) { 3634 if (add && f->add) { 3635 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3636 i++; 3637 f->add = false; 3638 if (i == total_filters) 3639 break; 3640 } 3641 if (!add && f->remove) { 3642 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3643 i++; 3644 f->remove = false; 3645 if (i == total_filters) 3646 break; 3647 } 3648 } 3649 3650 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3651 3652 /* Chunk up the filters into multiple messages to avoid 3653 * sending a control queue message buffer that is too large 3654 */ 3655 num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); 3656 3657 for (i = 0, k = 0; i < num_msgs; i++) { 3658 u32 entries_size, buf_size, num_entries; 3659 3660 num_entries = min_t(u32, total_filters, 3661 IDPF_NUM_FILTERS_PER_MSG); 3662 entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries; 3663 buf_size = struct_size(ma_list, mac_addr_list, num_entries); 3664 3665 if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { 3666 kfree(ma_list); 3667 ma_list = kzalloc(buf_size, GFP_ATOMIC); 3668 if (!ma_list) 3669 return -ENOMEM; 3670 } else { 3671 memset(ma_list, 0, buf_size); 3672 } 3673 3674 ma_list->vport_id = cpu_to_le32(np->vport_id); 3675 ma_list->num_mac_addr = cpu_to_le16(num_entries); 3676 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); 3677 3678 xn_params.send_buf.iov_base = ma_list; 3679 xn_params.send_buf.iov_len = buf_size; 3680 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3681 if (reply_sz < 0) 3682 return reply_sz; 3683 3684 k += num_entries; 3685 total_filters -= num_entries; 3686 } 3687 3688 return 0; 3689 } 3690 3691 /** 3692 * idpf_set_promiscuous - set promiscuous and send message to mailbox 3693 * @adapter: Driver specific private structure 3694 * @config_data: Vport specific config data 3695 * @vport_id: Vport identifier 3696 * 3697 * Request to enable promiscuous mode for the vport. Message is sent 3698 * asynchronously and won't wait for response. Returns 0 on success, negative 3699 * on failure; 3700 */ 3701 int idpf_set_promiscuous(struct idpf_adapter *adapter, 3702 struct idpf_vport_user_config_data *config_data, 3703 u32 vport_id) 3704 { 3705 struct idpf_vc_xn_params xn_params = {}; 3706 struct virtchnl2_promisc_info vpi; 3707 ssize_t reply_sz; 3708 u16 flags = 0; 3709 3710 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) 3711 flags |= VIRTCHNL2_UNICAST_PROMISC; 3712 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) 3713 flags |= VIRTCHNL2_MULTICAST_PROMISC; 3714 3715 vpi.vport_id = cpu_to_le32(vport_id); 3716 vpi.flags = cpu_to_le16(flags); 3717 3718 xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE; 3719 xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; 3720 xn_params.send_buf.iov_base = &vpi; 3721 xn_params.send_buf.iov_len = sizeof(vpi); 3722 /* setting promiscuous is only ever done asynchronously */ 3723 xn_params.async = true; 3724 reply_sz = idpf_vc_xn_exec(adapter, &xn_params); 3725 3726 return reply_sz < 0 ? reply_sz : 0; 3727 } 3728