// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @vport: virtual port structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_vport *vport,
				struct idpf_ctlq_msg *ctlq_msg)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct virtchnl2_event *v2e;
	bool link_status;
	u32 event;

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
		link_status = v2e->link_status;

		if (vport->link_up == link_status)
			break;

		vport->link_up = link_status;
		if (np->state == __IDPF_VPORT_UP) {
			if (vport->link_up) {
				netif_carrier_on(vport->netdev);
				netif_tx_start_all_queues(vport->netdev);
			} else {
				netif_tx_stop_all_queues(vport->netdev);
				netif_carrier_off(vport->netdev);
			}
		}
		break;
	default:
		dev_err(&vport->adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 *
 * Will prepare the control queue message and initiate the send
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected, nothing much can be
	 * done. This thread should silently abort and is expected to be
	 * corrected by a new run, either by user or driver flows, after
	 * the reset.
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;
	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}
	memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/**
 * idpf_find_vport - Find vport pointer from control queue message
 * @adapter: driver specific private structure
 * @vport: address of vport pointer to copy the vport from adapter's vport list
 * @ctlq_msg: control queue message
 *
 * Return 0 on success, error value on failure. This function also checks the
 * opcodes that expect to receive a payload and returns an error if that is
 * not the case.
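 *
 * For example, a VIRTCHNL2_OP_ENABLE_QUEUES message carries a
 * struct virtchnl2_del_ena_dis_queues payload; its vport_id field is
 * matched against adapter->vport_ids[] to locate the corresponding vport.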
171 */ 172 static int idpf_find_vport(struct idpf_adapter *adapter, 173 struct idpf_vport **vport, 174 struct idpf_ctlq_msg *ctlq_msg) 175 { 176 bool no_op = false, vid_found = false; 177 int i, err = 0; 178 char *vc_msg; 179 u32 v_id; 180 181 vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL); 182 if (!vc_msg) 183 return -ENOMEM; 184 185 if (ctlq_msg->data_len) { 186 size_t payload_size = ctlq_msg->ctx.indirect.payload->size; 187 188 if (!payload_size) { 189 dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n"); 190 kfree(vc_msg); 191 192 return -EINVAL; 193 } 194 195 memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va, 196 min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN)); 197 } 198 199 switch (ctlq_msg->cookie.mbx.chnl_opcode) { 200 case VIRTCHNL2_OP_VERSION: 201 case VIRTCHNL2_OP_GET_CAPS: 202 case VIRTCHNL2_OP_CREATE_VPORT: 203 case VIRTCHNL2_OP_SET_SRIOV_VFS: 204 case VIRTCHNL2_OP_ALLOC_VECTORS: 205 case VIRTCHNL2_OP_DEALLOC_VECTORS: 206 case VIRTCHNL2_OP_GET_PTYPE_INFO: 207 goto free_vc_msg; 208 case VIRTCHNL2_OP_ENABLE_VPORT: 209 case VIRTCHNL2_OP_DISABLE_VPORT: 210 case VIRTCHNL2_OP_DESTROY_VPORT: 211 v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id); 212 break; 213 case VIRTCHNL2_OP_CONFIG_TX_QUEUES: 214 v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id); 215 break; 216 case VIRTCHNL2_OP_CONFIG_RX_QUEUES: 217 v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id); 218 break; 219 case VIRTCHNL2_OP_ENABLE_QUEUES: 220 case VIRTCHNL2_OP_DISABLE_QUEUES: 221 case VIRTCHNL2_OP_DEL_QUEUES: 222 v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id); 223 break; 224 case VIRTCHNL2_OP_ADD_QUEUES: 225 v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id); 226 break; 227 case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: 228 case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: 229 v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id); 230 break; 231 case VIRTCHNL2_OP_GET_STATS: 232 v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id); 233 break; 234 case VIRTCHNL2_OP_GET_RSS_LUT: 235 case VIRTCHNL2_OP_SET_RSS_LUT: 236 v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id); 237 break; 238 case VIRTCHNL2_OP_GET_RSS_KEY: 239 case VIRTCHNL2_OP_SET_RSS_KEY: 240 v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id); 241 break; 242 case VIRTCHNL2_OP_EVENT: 243 v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id); 244 break; 245 case VIRTCHNL2_OP_LOOPBACK: 246 v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id); 247 break; 248 case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: 249 v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id); 250 break; 251 case VIRTCHNL2_OP_ADD_MAC_ADDR: 252 case VIRTCHNL2_OP_DEL_MAC_ADDR: 253 v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id); 254 break; 255 default: 256 no_op = true; 257 break; 258 } 259 260 if (no_op) 261 goto free_vc_msg; 262 263 for (i = 0; i < idpf_get_max_vports(adapter); i++) { 264 if (adapter->vport_ids[i] == v_id) { 265 vid_found = true; 266 break; 267 } 268 } 269 270 if (vid_found) 271 *vport = adapter->vports[i]; 272 else 273 err = -EINVAL; 274 275 free_vc_msg: 276 kfree(vc_msg); 277 278 return err; 279 } 280 281 /** 282 * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer. 
283 * @adapter: driver specific private structure 284 * @vport: virtual port structure 285 * @ctlq_msg: msg to copy from 286 * @err_enum: err bit to set on error 287 * 288 * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success, 289 * negative on failure. 290 */ 291 static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter, 292 struct idpf_vport *vport, 293 struct idpf_ctlq_msg *ctlq_msg, 294 enum idpf_vport_vc_state err_enum) 295 { 296 if (ctlq_msg->cookie.mbx.chnl_retval) { 297 if (vport) 298 set_bit(err_enum, vport->vc_state); 299 else 300 set_bit(err_enum, adapter->vc_state); 301 302 return -EINVAL; 303 } 304 305 if (vport) 306 memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va, 307 min_t(int, ctlq_msg->ctx.indirect.payload->size, 308 IDPF_CTLQ_MAX_BUF_LEN)); 309 else 310 memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va, 311 min_t(int, ctlq_msg->ctx.indirect.payload->size, 312 IDPF_CTLQ_MAX_BUF_LEN)); 313 314 return 0; 315 } 316 317 /** 318 * idpf_recv_vchnl_op - helper function with common logic when handling the 319 * reception of VIRTCHNL OPs. 320 * @adapter: driver specific private structure 321 * @vport: virtual port structure 322 * @ctlq_msg: msg to copy from 323 * @state: state bit used on timeout check 324 * @err_state: err bit to set on error 325 */ 326 static void idpf_recv_vchnl_op(struct idpf_adapter *adapter, 327 struct idpf_vport *vport, 328 struct idpf_ctlq_msg *ctlq_msg, 329 enum idpf_vport_vc_state state, 330 enum idpf_vport_vc_state err_state) 331 { 332 wait_queue_head_t *vchnl_wq; 333 int err; 334 335 if (vport) 336 vchnl_wq = &vport->vchnl_wq; 337 else 338 vchnl_wq = &adapter->vchnl_wq; 339 340 err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state); 341 if (wq_has_sleeper(vchnl_wq)) { 342 if (vport) 343 set_bit(state, vport->vc_state); 344 else 345 set_bit(state, adapter->vc_state); 346 347 wake_up(vchnl_wq); 348 } else { 349 if (!err) { 350 dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n", 351 ctlq_msg->cookie.mbx.chnl_opcode); 352 } else { 353 /* Clear the errors since there is no sleeper to pass 354 * them on 355 */ 356 if (vport) 357 clear_bit(err_state, vport->vc_state); 358 else 359 clear_bit(err_state, adapter->vc_state); 360 } 361 } 362 } 363 364 /** 365 * idpf_recv_mb_msg - Receive message over mailbox 366 * @adapter: Driver specific private structure 367 * @op: virtchannel operation code 368 * @msg: Received message holding buffer 369 * @msg_size: message size 370 * 371 * Will receive control queue message and posts the receive buffer. Returns 0 372 * on success and negative on failure. 373 */ 374 int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, 375 void *msg, int msg_size) 376 { 377 struct idpf_vport *vport = NULL; 378 struct idpf_ctlq_msg ctlq_msg; 379 struct idpf_dma_mem *dma_mem; 380 bool work_done = false; 381 int num_retry = 2000; 382 u16 num_q_msg; 383 int err; 384 385 while (1) { 386 struct idpf_vport_config *vport_config; 387 int payload_size = 0; 388 389 /* Try to get one message */ 390 num_q_msg = 1; 391 dma_mem = NULL; 392 err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg); 393 /* If no message then decide if we have to retry based on 394 * opcode 395 */ 396 if (err || !num_q_msg) { 397 /* Increasing num_retry to consider the delayed 398 * responses because of large number of VF's mailbox 399 * messages. If the mailbox message is received from 400 * the other side, we come out of the sleep cycle 401 * immediately else we wait for more time. 
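			 *
			 * With num_retry starting at 2000 and a 20 ms sleep
			 * per idle iteration, the worst case spent polling
			 * here is roughly 40 seconds before giving up.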
402 */ 403 if (!op || !num_retry--) 404 break; 405 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { 406 err = -EIO; 407 break; 408 } 409 msleep(20); 410 continue; 411 } 412 413 /* If we are here a message is received. Check if we are looking 414 * for a specific message based on opcode. If it is different 415 * ignore and post buffers 416 */ 417 if (op && ctlq_msg.cookie.mbx.chnl_opcode != op) 418 goto post_buffs; 419 420 err = idpf_find_vport(adapter, &vport, &ctlq_msg); 421 if (err) 422 goto post_buffs; 423 424 if (ctlq_msg.data_len) 425 payload_size = ctlq_msg.ctx.indirect.payload->size; 426 427 /* All conditions are met. Either a message requested is 428 * received or we received a message to be processed 429 */ 430 switch (ctlq_msg.cookie.mbx.chnl_opcode) { 431 case VIRTCHNL2_OP_VERSION: 432 case VIRTCHNL2_OP_GET_CAPS: 433 if (ctlq_msg.cookie.mbx.chnl_retval) { 434 dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n", 435 ctlq_msg.cookie.mbx.chnl_opcode, 436 ctlq_msg.cookie.mbx.chnl_retval); 437 err = -EBADMSG; 438 } else if (msg) { 439 memcpy(msg, ctlq_msg.ctx.indirect.payload->va, 440 min_t(int, payload_size, msg_size)); 441 } 442 work_done = true; 443 break; 444 case VIRTCHNL2_OP_CREATE_VPORT: 445 idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, 446 IDPF_VC_CREATE_VPORT, 447 IDPF_VC_CREATE_VPORT_ERR); 448 break; 449 case VIRTCHNL2_OP_ENABLE_VPORT: 450 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 451 IDPF_VC_ENA_VPORT, 452 IDPF_VC_ENA_VPORT_ERR); 453 break; 454 case VIRTCHNL2_OP_DISABLE_VPORT: 455 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 456 IDPF_VC_DIS_VPORT, 457 IDPF_VC_DIS_VPORT_ERR); 458 break; 459 case VIRTCHNL2_OP_DESTROY_VPORT: 460 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 461 IDPF_VC_DESTROY_VPORT, 462 IDPF_VC_DESTROY_VPORT_ERR); 463 break; 464 case VIRTCHNL2_OP_CONFIG_TX_QUEUES: 465 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 466 IDPF_VC_CONFIG_TXQ, 467 IDPF_VC_CONFIG_TXQ_ERR); 468 break; 469 case VIRTCHNL2_OP_CONFIG_RX_QUEUES: 470 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 471 IDPF_VC_CONFIG_RXQ, 472 IDPF_VC_CONFIG_RXQ_ERR); 473 break; 474 case VIRTCHNL2_OP_ENABLE_QUEUES: 475 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 476 IDPF_VC_ENA_QUEUES, 477 IDPF_VC_ENA_QUEUES_ERR); 478 break; 479 case VIRTCHNL2_OP_DISABLE_QUEUES: 480 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 481 IDPF_VC_DIS_QUEUES, 482 IDPF_VC_DIS_QUEUES_ERR); 483 break; 484 case VIRTCHNL2_OP_ADD_QUEUES: 485 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 486 IDPF_VC_ADD_QUEUES, 487 IDPF_VC_ADD_QUEUES_ERR); 488 break; 489 case VIRTCHNL2_OP_DEL_QUEUES: 490 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 491 IDPF_VC_DEL_QUEUES, 492 IDPF_VC_DEL_QUEUES_ERR); 493 break; 494 case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: 495 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 496 IDPF_VC_MAP_IRQ, 497 IDPF_VC_MAP_IRQ_ERR); 498 break; 499 case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: 500 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 501 IDPF_VC_UNMAP_IRQ, 502 IDPF_VC_UNMAP_IRQ_ERR); 503 break; 504 case VIRTCHNL2_OP_GET_STATS: 505 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 506 IDPF_VC_GET_STATS, 507 IDPF_VC_GET_STATS_ERR); 508 break; 509 case VIRTCHNL2_OP_GET_RSS_LUT: 510 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 511 IDPF_VC_GET_RSS_LUT, 512 IDPF_VC_GET_RSS_LUT_ERR); 513 break; 514 case VIRTCHNL2_OP_SET_RSS_LUT: 515 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 516 IDPF_VC_SET_RSS_LUT, 517 IDPF_VC_SET_RSS_LUT_ERR); 518 break; 519 case VIRTCHNL2_OP_GET_RSS_KEY: 520 idpf_recv_vchnl_op(adapter, vport, 
&ctlq_msg, 521 IDPF_VC_GET_RSS_KEY, 522 IDPF_VC_GET_RSS_KEY_ERR); 523 break; 524 case VIRTCHNL2_OP_SET_RSS_KEY: 525 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 526 IDPF_VC_SET_RSS_KEY, 527 IDPF_VC_SET_RSS_KEY_ERR); 528 break; 529 case VIRTCHNL2_OP_SET_SRIOV_VFS: 530 idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, 531 IDPF_VC_SET_SRIOV_VFS, 532 IDPF_VC_SET_SRIOV_VFS_ERR); 533 break; 534 case VIRTCHNL2_OP_ALLOC_VECTORS: 535 idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, 536 IDPF_VC_ALLOC_VECTORS, 537 IDPF_VC_ALLOC_VECTORS_ERR); 538 break; 539 case VIRTCHNL2_OP_DEALLOC_VECTORS: 540 idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, 541 IDPF_VC_DEALLOC_VECTORS, 542 IDPF_VC_DEALLOC_VECTORS_ERR); 543 break; 544 case VIRTCHNL2_OP_GET_PTYPE_INFO: 545 idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, 546 IDPF_VC_GET_PTYPE_INFO, 547 IDPF_VC_GET_PTYPE_INFO_ERR); 548 break; 549 case VIRTCHNL2_OP_LOOPBACK: 550 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 551 IDPF_VC_LOOPBACK_STATE, 552 IDPF_VC_LOOPBACK_STATE_ERR); 553 break; 554 case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: 555 /* This message can only be sent asynchronously. As 556 * such we'll have lost the context in which it was 557 * called and thus can only really report if it looks 558 * like an error occurred. Don't bother setting ERR bit 559 * or waking chnl_wq since no work queue will be waiting 560 * to read the message. 561 */ 562 if (ctlq_msg.cookie.mbx.chnl_retval) { 563 dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n", 564 ctlq_msg.cookie.mbx.chnl_retval); 565 } 566 break; 567 case VIRTCHNL2_OP_ADD_MAC_ADDR: 568 vport_config = adapter->vport_config[vport->idx]; 569 if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ, 570 vport_config->flags)) { 571 /* Message was sent asynchronously. We don't 572 * normally print errors here, instead 573 * prefer to handle errors in the function 574 * calling wait_for_event. However, if 575 * asynchronous, the context in which the 576 * message was sent is lost. We can't really do 577 * anything about at it this point, but we 578 * should at a minimum indicate that it looks 579 * like something went wrong. Also don't bother 580 * setting ERR bit or waking vchnl_wq since no 581 * one will be waiting to read the async 582 * message. 
583 */ 584 if (ctlq_msg.cookie.mbx.chnl_retval) 585 dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n", 586 ctlq_msg.cookie.mbx.chnl_retval); 587 break; 588 } 589 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 590 IDPF_VC_ADD_MAC_ADDR, 591 IDPF_VC_ADD_MAC_ADDR_ERR); 592 break; 593 case VIRTCHNL2_OP_DEL_MAC_ADDR: 594 vport_config = adapter->vport_config[vport->idx]; 595 if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ, 596 vport_config->flags)) { 597 /* Message was sent asynchronously like the 598 * VIRTCHNL2_OP_ADD_MAC_ADDR 599 */ 600 if (ctlq_msg.cookie.mbx.chnl_retval) 601 dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n", 602 ctlq_msg.cookie.mbx.chnl_retval); 603 break; 604 } 605 idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, 606 IDPF_VC_DEL_MAC_ADDR, 607 IDPF_VC_DEL_MAC_ADDR_ERR); 608 break; 609 case VIRTCHNL2_OP_EVENT: 610 idpf_recv_event_msg(vport, &ctlq_msg); 611 break; 612 default: 613 dev_warn(&adapter->pdev->dev, 614 "Unhandled virtchnl response %d\n", 615 ctlq_msg.cookie.mbx.chnl_opcode); 616 break; 617 } 618 619 post_buffs: 620 if (ctlq_msg.data_len) 621 dma_mem = ctlq_msg.ctx.indirect.payload; 622 else 623 num_q_msg = 0; 624 625 err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq, 626 &num_q_msg, &dma_mem); 627 /* If post failed clear the only buffer we supplied */ 628 if (err && dma_mem) 629 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, 630 dma_mem->va, dma_mem->pa); 631 632 /* Applies only if we are looking for a specific opcode */ 633 if (work_done) 634 break; 635 } 636 637 return err; 638 } 639 640 /** 641 * __idpf_wait_for_event - wrapper function for wait on virtchannel response 642 * @adapter: Driver private data structure 643 * @vport: virtual port structure 644 * @state: check on state upon timeout 645 * @err_check: check if this specific error bit is set 646 * @timeout: Max time to wait 647 * 648 * Checks if state is set upon expiry of timeout. Returns 0 on success, 649 * negative on failure. 650 */ 651 static int __idpf_wait_for_event(struct idpf_adapter *adapter, 652 struct idpf_vport *vport, 653 enum idpf_vport_vc_state state, 654 enum idpf_vport_vc_state err_check, 655 int timeout) 656 { 657 int time_to_wait, num_waits; 658 wait_queue_head_t *vchnl_wq; 659 unsigned long *vc_state; 660 661 time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT); 662 num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT); 663 664 if (vport) { 665 vchnl_wq = &vport->vchnl_wq; 666 vc_state = vport->vc_state; 667 } else { 668 vchnl_wq = &adapter->vchnl_wq; 669 vc_state = adapter->vc_state; 670 } 671 672 while (num_waits) { 673 int event; 674 675 /* If we are here and a reset is detected do not wait but 676 * return. Reset timing is out of drivers control. 
		 * So, while we are cleaning resources as part of a reset, if
		 * the underlying HW mailbox is gone, waiting on mailbox
		 * messages is not meaningful.
		 */
		if (idpf_is_reset_detected(adapter))
			return 0;

		event = wait_event_timeout(*vchnl_wq,
					   test_and_clear_bit(state, vc_state),
					   msecs_to_jiffies(time_to_wait));
		if (event) {
			if (test_and_clear_bit(err_check, vc_state)) {
				dev_err(&adapter->pdev->dev, "VC response error %s\n",
					idpf_vport_vc_state_str[err_check]);

				return -EINVAL;
			}

			return 0;
		}
		num_waits--;
	}

	/* Timeout occurred */
	dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
		idpf_vport_vc_state_str[state]);

	return -ETIMEDOUT;
}

/**
 * idpf_min_wait_for_event - wait for virtchannel response
 * @adapter: Driver private data structure
 * @vport: virtual port structure
 * @state: check on state upon timeout
 * @err_check: check if this specific error bit is set
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
				   struct idpf_vport *vport,
				   enum idpf_vport_vc_state state,
				   enum idpf_vport_vc_state err_check)
{
	return __idpf_wait_for_event(adapter, vport, state, err_check,
				     IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
}

/**
 * idpf_wait_for_event - wait for virtchannel response
 * @adapter: Driver private data structure
 * @vport: virtual port structure
 * @state: check on state upon timeout after 500ms
 * @err_check: check if this specific error bit is set
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_wait_for_event(struct idpf_adapter *adapter,
			       struct idpf_vport *vport,
			       enum idpf_vport_vc_state state,
			       enum idpf_vport_vc_state err_check)
{
	/* Increasing the timeout in __IDPF_INIT_SW flow to consider large
	 * number of VF's mailbox message responses. When a message is received
	 * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
	 * the timeout expires. Only in the error case, i.e. if no message is
	 * received on mailbox, do we wait for the complete timeout, which is
	 * less likely to happen.
	 */
	return __idpf_wait_for_event(adapter, vport, state, err_check,
				     IDPF_WAIT_FOR_EVENT_TIMEO);
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 **/
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	int event;
	int i;

	for (i = 0; i < vport->num_txq; i++)
		set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);

	event = wait_event_timeout(vport->sw_marker_wq,
				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
						      vport->flags),
				   msecs_to_jiffies(500));

	for (i = 0; i < vport->num_txq; i++)
		clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);

	if (event)
		return 0;

	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");

	return -ETIMEDOUT;
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message. Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_version_info vvi;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
				(u8 *)&vvi);
}

/**
 * idpf_recv_ver_msg - Receive virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
 * to send version message again, otherwise negative on failure.
 */
static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_version_info vvi;
	u32 major, minor;
	int err;

	err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
			       sizeof(vvi));
	if (err)
		return err;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev,
			 "Virtchnl major version (%d) greater than supported\n",
			 major);

		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev,
			 "Virtchnl minor version (%d) didn't match\n", minor);

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}

/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
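 *
 * The capability flags requested here describe what the driver can handle;
 * the capabilities returned in the response are stored in adapter->caps by
 * idpf_recv_get_caps_msg() and later seed the available queue counts in
 * idpf_init_avail_queues().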
856 */ 857 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) 858 { 859 struct virtchnl2_get_capabilities caps = { }; 860 861 caps.csum_caps = 862 cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | 863 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP | 864 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | 865 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP | 866 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | 867 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP | 868 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | 869 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | 870 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP | 871 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | 872 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP | 873 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | 874 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP | 875 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | 876 VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL | 877 VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL | 878 VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL | 879 VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL | 880 VIRTCHNL2_CAP_RX_CSUM_GENERIC); 881 882 caps.seg_caps = 883 cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP | 884 VIRTCHNL2_CAP_SEG_IPV4_UDP | 885 VIRTCHNL2_CAP_SEG_IPV4_SCTP | 886 VIRTCHNL2_CAP_SEG_IPV6_TCP | 887 VIRTCHNL2_CAP_SEG_IPV6_UDP | 888 VIRTCHNL2_CAP_SEG_IPV6_SCTP | 889 VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); 890 891 caps.rss_caps = 892 cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | 893 VIRTCHNL2_CAP_RSS_IPV4_UDP | 894 VIRTCHNL2_CAP_RSS_IPV4_SCTP | 895 VIRTCHNL2_CAP_RSS_IPV4_OTHER | 896 VIRTCHNL2_CAP_RSS_IPV6_TCP | 897 VIRTCHNL2_CAP_RSS_IPV6_UDP | 898 VIRTCHNL2_CAP_RSS_IPV6_SCTP | 899 VIRTCHNL2_CAP_RSS_IPV6_OTHER); 900 901 caps.hsplit_caps = 902 cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | 903 VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6); 904 905 caps.rsc_caps = 906 cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP | 907 VIRTCHNL2_CAP_RSC_IPV6_TCP); 908 909 caps.other_caps = 910 cpu_to_le64(VIRTCHNL2_CAP_SRIOV | 911 VIRTCHNL2_CAP_MACFILTER | 912 VIRTCHNL2_CAP_SPLITQ_QSCHED | 913 VIRTCHNL2_CAP_PROMISC | 914 VIRTCHNL2_CAP_LOOPBACK); 915 916 return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps), 917 (u8 *)&caps); 918 } 919 920 /** 921 * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message 922 * @adapter: Driver specific private structure 923 * 924 * Receive virtchnl get capabilities message. Returns 0 on success, negative on 925 * failure. 
926 */ 927 static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter) 928 { 929 return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps, 930 sizeof(struct virtchnl2_get_capabilities)); 931 } 932 933 /** 934 * idpf_vport_alloc_max_qs - Allocate max queues for a vport 935 * @adapter: Driver specific private structure 936 * @max_q: vport max queue structure 937 */ 938 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, 939 struct idpf_vport_max_q *max_q) 940 { 941 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; 942 struct virtchnl2_get_capabilities *caps = &adapter->caps; 943 u16 default_vports = idpf_get_default_vports(adapter); 944 int max_rx_q, max_tx_q; 945 946 mutex_lock(&adapter->queue_lock); 947 948 max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; 949 max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; 950 if (adapter->num_alloc_vports < default_vports) { 951 max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q); 952 max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q); 953 } else { 954 max_q->max_rxq = IDPF_MIN_Q; 955 max_q->max_txq = IDPF_MIN_Q; 956 } 957 max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP; 958 max_q->max_complq = max_q->max_txq; 959 960 if (avail_queues->avail_rxq < max_q->max_rxq || 961 avail_queues->avail_txq < max_q->max_txq || 962 avail_queues->avail_bufq < max_q->max_bufq || 963 avail_queues->avail_complq < max_q->max_complq) { 964 mutex_unlock(&adapter->queue_lock); 965 966 return -EINVAL; 967 } 968 969 avail_queues->avail_rxq -= max_q->max_rxq; 970 avail_queues->avail_txq -= max_q->max_txq; 971 avail_queues->avail_bufq -= max_q->max_bufq; 972 avail_queues->avail_complq -= max_q->max_complq; 973 974 mutex_unlock(&adapter->queue_lock); 975 976 return 0; 977 } 978 979 /** 980 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport 981 * @adapter: Driver specific private structure 982 * @max_q: vport max queue structure 983 */ 984 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, 985 struct idpf_vport_max_q *max_q) 986 { 987 struct idpf_avail_queue_info *avail_queues; 988 989 mutex_lock(&adapter->queue_lock); 990 avail_queues = &adapter->avail_queues; 991 992 avail_queues->avail_rxq += max_q->max_rxq; 993 avail_queues->avail_txq += max_q->max_txq; 994 avail_queues->avail_bufq += max_q->max_bufq; 995 avail_queues->avail_complq += max_q->max_complq; 996 997 mutex_unlock(&adapter->queue_lock); 998 } 999 1000 /** 1001 * idpf_init_avail_queues - Initialize available queues on the device 1002 * @adapter: Driver specific private structure 1003 */ 1004 static void idpf_init_avail_queues(struct idpf_adapter *adapter) 1005 { 1006 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; 1007 struct virtchnl2_get_capabilities *caps = &adapter->caps; 1008 1009 avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); 1010 avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); 1011 avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); 1012 avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); 1013 } 1014 1015 /** 1016 * idpf_get_reg_intr_vecs - Get vector queue register offset 1017 * @vport: virtual port structure 1018 * @reg_vals: Register offsets to store in 1019 * 1020 * Returns number of registers that got populated 1021 */ 1022 int idpf_get_reg_intr_vecs(struct idpf_vport *vport, 1023 struct idpf_vec_regs *reg_vals) 1024 { 1025 struct virtchnl2_vector_chunks *chunks; 1026 struct idpf_vec_regs reg_val; 1027 u16 num_vchunks, num_vec; 1028 int num_regs = 0, i, j; 1029 1030 
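	/* Each vector chunk supplies a base dynctl/ITR register offset plus a
	 * fixed spacing; purely as an illustration, a chunk with
	 * dynctl_reg_start 0x1000, dynctl_reg_spacing 0x4 and num_vectors 3
	 * would expand to offsets 0x1000, 0x1004 and 0x1008 below.
	 */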
chunks = &vport->adapter->req_vec_chunks->vchunks; 1031 num_vchunks = le16_to_cpu(chunks->num_vchunks); 1032 1033 for (j = 0; j < num_vchunks; j++) { 1034 struct virtchnl2_vector_chunk *chunk; 1035 u32 dynctl_reg_spacing; 1036 u32 itrn_reg_spacing; 1037 1038 chunk = &chunks->vchunks[j]; 1039 num_vec = le16_to_cpu(chunk->num_vectors); 1040 reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); 1041 reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); 1042 reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); 1043 1044 dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); 1045 itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); 1046 1047 for (i = 0; i < num_vec; i++) { 1048 reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg; 1049 reg_vals[num_regs].itrn_reg = reg_val.itrn_reg; 1050 reg_vals[num_regs].itrn_index_spacing = 1051 reg_val.itrn_index_spacing; 1052 1053 reg_val.dyn_ctl_reg += dynctl_reg_spacing; 1054 reg_val.itrn_reg += itrn_reg_spacing; 1055 num_regs++; 1056 } 1057 } 1058 1059 return num_regs; 1060 } 1061 1062 /** 1063 * idpf_vport_get_q_reg - Get the queue registers for the vport 1064 * @reg_vals: register values needing to be set 1065 * @num_regs: amount we expect to fill 1066 * @q_type: queue model 1067 * @chunks: queue regs received over mailbox 1068 * 1069 * This function parses the queue register offsets from the queue register 1070 * chunk information, with a specific queue type and stores it into the array 1071 * passed as an argument. It returns the actual number of queue registers that 1072 * are filled. 1073 */ 1074 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, 1075 struct virtchnl2_queue_reg_chunks *chunks) 1076 { 1077 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 1078 int reg_filled = 0, i; 1079 u32 reg_val; 1080 1081 while (num_chunks--) { 1082 struct virtchnl2_queue_reg_chunk *chunk; 1083 u16 num_q; 1084 1085 chunk = &chunks->chunks[num_chunks]; 1086 if (le32_to_cpu(chunk->type) != q_type) 1087 continue; 1088 1089 num_q = le32_to_cpu(chunk->num_queues); 1090 reg_val = le64_to_cpu(chunk->qtail_reg_start); 1091 for (i = 0; i < num_q && reg_filled < num_regs ; i++) { 1092 reg_vals[reg_filled++] = reg_val; 1093 reg_val += le32_to_cpu(chunk->qtail_reg_spacing); 1094 } 1095 } 1096 1097 return reg_filled; 1098 } 1099 1100 /** 1101 * __idpf_queue_reg_init - initialize queue registers 1102 * @vport: virtual port structure 1103 * @reg_vals: registers we are initializing 1104 * @num_regs: how many registers there are in total 1105 * @q_type: queue model 1106 * 1107 * Return number of queues that are initialized 1108 */ 1109 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, 1110 int num_regs, u32 q_type) 1111 { 1112 struct idpf_adapter *adapter = vport->adapter; 1113 struct idpf_queue *q; 1114 int i, j, k = 0; 1115 1116 switch (q_type) { 1117 case VIRTCHNL2_QUEUE_TYPE_TX: 1118 for (i = 0; i < vport->num_txq_grp; i++) { 1119 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1120 1121 for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) 1122 tx_qgrp->txqs[j]->tail = 1123 idpf_get_reg_addr(adapter, reg_vals[k]); 1124 } 1125 break; 1126 case VIRTCHNL2_QUEUE_TYPE_RX: 1127 for (i = 0; i < vport->num_rxq_grp; i++) { 1128 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1129 u16 num_rxq = rx_qgrp->singleq.num_rxq; 1130 1131 for (j = 0; j < num_rxq && k < num_regs; j++, k++) { 1132 q = rx_qgrp->singleq.rxqs[j]; 1133 q->tail = idpf_get_reg_addr(adapter, 1134 reg_vals[k]); 1135 } 
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}

/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 *
 * Return 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_regs, ret = 0;
	u32 *reg_vals;

	/* We may never deal with more than 256 queues of the same type */
	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
	if (!reg_vals)
		return -ENOMEM;

	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	/* Initialize Tx queue tail register address */
	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
					VIRTCHNL2_QUEUE_TYPE_TX,
					chunks);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
					 VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	/* Initialize Rx/buffer queue tail register address based on Rx queue
	 * model
	 */
	if (idpf_is_queue_model_split(vport->rxq_model)) {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
						chunks);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	} else {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX,
						chunks);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	}

free_reg_vals:
	kfree(reg_vals);

	return ret;
}

/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	u16 idx = adapter->next_vport;
	int err, buf_size;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if
(!adapter->vport_params_reqd[idx]) { 1262 adapter->vport_params_reqd[idx] = kzalloc(buf_size, 1263 GFP_KERNEL); 1264 if (!adapter->vport_params_reqd[idx]) 1265 return -ENOMEM; 1266 } 1267 1268 vport_msg = adapter->vport_params_reqd[idx]; 1269 vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); 1270 vport_msg->vport_index = cpu_to_le16(idx); 1271 1272 if (adapter->req_tx_splitq) 1273 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1274 else 1275 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1276 1277 if (adapter->req_rx_splitq) 1278 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); 1279 else 1280 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); 1281 1282 err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q); 1283 if (err) { 1284 dev_err(&adapter->pdev->dev, "Enough queues are not available"); 1285 1286 return err; 1287 } 1288 1289 mutex_lock(&adapter->vc_buf_lock); 1290 1291 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size, 1292 (u8 *)vport_msg); 1293 if (err) 1294 goto rel_lock; 1295 1296 err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT, 1297 IDPF_VC_CREATE_VPORT_ERR); 1298 if (err) { 1299 dev_err(&adapter->pdev->dev, "Failed to receive create vport message"); 1300 1301 goto rel_lock; 1302 } 1303 1304 if (!adapter->vport_params_recvd[idx]) { 1305 adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, 1306 GFP_KERNEL); 1307 if (!adapter->vport_params_recvd[idx]) { 1308 err = -ENOMEM; 1309 goto rel_lock; 1310 } 1311 } 1312 1313 vport_msg = adapter->vport_params_recvd[idx]; 1314 memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); 1315 1316 rel_lock: 1317 mutex_unlock(&adapter->vc_buf_lock); 1318 1319 return err; 1320 } 1321 1322 /** 1323 * idpf_check_supported_desc_ids - Verify we have required descriptor support 1324 * @vport: virtual port structure 1325 * 1326 * Return 0 on success, error on failure 1327 */ 1328 int idpf_check_supported_desc_ids(struct idpf_vport *vport) 1329 { 1330 struct idpf_adapter *adapter = vport->adapter; 1331 struct virtchnl2_create_vport *vport_msg; 1332 u64 rx_desc_ids, tx_desc_ids; 1333 1334 vport_msg = adapter->vport_params_recvd[vport->idx]; 1335 1336 rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); 1337 tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); 1338 1339 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 1340 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { 1341 dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); 1342 vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1343 } 1344 } else { 1345 if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) 1346 vport->base_rxd = true; 1347 } 1348 1349 if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) 1350 return 0; 1351 1352 if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { 1353 dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); 1354 vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); 1355 } 1356 1357 return 0; 1358 } 1359 1360 /** 1361 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message 1362 * @vport: virtual port data structure 1363 * 1364 * Send virtchnl destroy vport message. Returns 0 on success, negative on 1365 * failure. 
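 *
 * Note that this waits with idpf_min_wait_for_event(), i.e. the shorter
 * IDPF_WAIT_FOR_EVENT_TIMEO_MIN timeout, presumably because vport teardown
 * can run in reset/remove paths where a long stall is undesirable.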
1366 */ 1367 int idpf_send_destroy_vport_msg(struct idpf_vport *vport) 1368 { 1369 struct idpf_adapter *adapter = vport->adapter; 1370 struct virtchnl2_vport v_id; 1371 int err; 1372 1373 v_id.vport_id = cpu_to_le32(vport->vport_id); 1374 1375 mutex_lock(&vport->vc_buf_lock); 1376 1377 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT, 1378 sizeof(v_id), (u8 *)&v_id); 1379 if (err) 1380 goto rel_lock; 1381 1382 err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT, 1383 IDPF_VC_DESTROY_VPORT_ERR); 1384 1385 rel_lock: 1386 mutex_unlock(&vport->vc_buf_lock); 1387 1388 return err; 1389 } 1390 1391 /** 1392 * idpf_send_enable_vport_msg - Send virtchnl enable vport message 1393 * @vport: virtual port data structure 1394 * 1395 * Send enable vport virtchnl message. Returns 0 on success, negative on 1396 * failure. 1397 */ 1398 int idpf_send_enable_vport_msg(struct idpf_vport *vport) 1399 { 1400 struct idpf_adapter *adapter = vport->adapter; 1401 struct virtchnl2_vport v_id; 1402 int err; 1403 1404 v_id.vport_id = cpu_to_le32(vport->vport_id); 1405 1406 mutex_lock(&vport->vc_buf_lock); 1407 1408 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT, 1409 sizeof(v_id), (u8 *)&v_id); 1410 if (err) 1411 goto rel_lock; 1412 1413 err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT, 1414 IDPF_VC_ENA_VPORT_ERR); 1415 1416 rel_lock: 1417 mutex_unlock(&vport->vc_buf_lock); 1418 1419 return err; 1420 } 1421 1422 /** 1423 * idpf_send_disable_vport_msg - Send virtchnl disable vport message 1424 * @vport: virtual port data structure 1425 * 1426 * Send disable vport virtchnl message. Returns 0 on success, negative on 1427 * failure. 1428 */ 1429 int idpf_send_disable_vport_msg(struct idpf_vport *vport) 1430 { 1431 struct idpf_adapter *adapter = vport->adapter; 1432 struct virtchnl2_vport v_id; 1433 int err; 1434 1435 v_id.vport_id = cpu_to_le32(vport->vport_id); 1436 1437 mutex_lock(&vport->vc_buf_lock); 1438 1439 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT, 1440 sizeof(v_id), (u8 *)&v_id); 1441 if (err) 1442 goto rel_lock; 1443 1444 err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT, 1445 IDPF_VC_DIS_VPORT_ERR); 1446 1447 rel_lock: 1448 mutex_unlock(&vport->vc_buf_lock); 1449 1450 return err; 1451 } 1452 1453 /** 1454 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message 1455 * @vport: virtual port data structure 1456 * 1457 * Send config tx queues virtchnl message. Returns 0 on success, negative on 1458 * failure. 
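 *
 * The per-queue contexts may not all fit in one IDPF_CTLQ_MAX_BUF_LEN
 * mailbox buffer, so they are sent in chunks of at most
 * IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz) entries each, waiting for a
 * completion after every message.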
1459 */ 1460 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) 1461 { 1462 struct virtchnl2_config_tx_queues *ctq; 1463 u32 config_sz, chunk_sz, buf_sz; 1464 int totqs, num_msgs, num_chunks; 1465 struct virtchnl2_txq_info *qi; 1466 int err = 0, i, k = 0; 1467 1468 totqs = vport->num_txq + vport->num_complq; 1469 qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); 1470 if (!qi) 1471 return -ENOMEM; 1472 1473 /* Populate the queue info buffer with all queue context info */ 1474 for (i = 0; i < vport->num_txq_grp; i++) { 1475 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1476 int j, sched_mode; 1477 1478 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1479 qi[k].queue_id = 1480 cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1481 qi[k].model = 1482 cpu_to_le16(vport->txq_model); 1483 qi[k].type = 1484 cpu_to_le32(tx_qgrp->txqs[j]->q_type); 1485 qi[k].ring_len = 1486 cpu_to_le16(tx_qgrp->txqs[j]->desc_count); 1487 qi[k].dma_ring_addr = 1488 cpu_to_le64(tx_qgrp->txqs[j]->dma); 1489 if (idpf_is_queue_model_split(vport->txq_model)) { 1490 struct idpf_queue *q = tx_qgrp->txqs[j]; 1491 1492 qi[k].tx_compl_queue_id = 1493 cpu_to_le16(tx_qgrp->complq->q_id); 1494 qi[k].relative_queue_id = cpu_to_le16(j); 1495 1496 if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) 1497 qi[k].sched_mode = 1498 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); 1499 else 1500 qi[k].sched_mode = 1501 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1502 } else { 1503 qi[k].sched_mode = 1504 cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); 1505 } 1506 } 1507 1508 if (!idpf_is_queue_model_split(vport->txq_model)) 1509 continue; 1510 1511 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1512 qi[k].model = cpu_to_le16(vport->txq_model); 1513 qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); 1514 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); 1515 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); 1516 1517 if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) 1518 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; 1519 else 1520 sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; 1521 qi[k].sched_mode = cpu_to_le16(sched_mode); 1522 1523 k++; 1524 } 1525 1526 /* Make sure accounting agrees */ 1527 if (k != totqs) { 1528 err = -EINVAL; 1529 goto error; 1530 } 1531 1532 /* Chunk up the queue contexts into multiple messages to avoid 1533 * sending a control queue message buffer that is too large 1534 */ 1535 config_sz = sizeof(struct virtchnl2_config_tx_queues); 1536 chunk_sz = sizeof(struct virtchnl2_txq_info); 1537 1538 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1539 totqs); 1540 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1541 1542 buf_sz = struct_size(ctq, qinfo, num_chunks); 1543 ctq = kzalloc(buf_sz, GFP_KERNEL); 1544 if (!ctq) { 1545 err = -ENOMEM; 1546 goto error; 1547 } 1548 1549 mutex_lock(&vport->vc_buf_lock); 1550 1551 for (i = 0, k = 0; i < num_msgs; i++) { 1552 memset(ctq, 0, buf_sz); 1553 ctq->vport_id = cpu_to_le32(vport->vport_id); 1554 ctq->num_qinfo = cpu_to_le16(num_chunks); 1555 memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); 1556 1557 err = idpf_send_mb_msg(vport->adapter, 1558 VIRTCHNL2_OP_CONFIG_TX_QUEUES, 1559 buf_sz, (u8 *)ctq); 1560 if (err) 1561 goto mbx_error; 1562 1563 err = idpf_wait_for_event(vport->adapter, vport, 1564 IDPF_VC_CONFIG_TXQ, 1565 IDPF_VC_CONFIG_TXQ_ERR); 1566 if (err) 1567 goto mbx_error; 1568 1569 k += num_chunks; 1570 totqs -= num_chunks; 1571 num_chunks = min(num_chunks, totqs); 1572 /* Recalculate buffer size */ 1573 buf_sz 
= struct_size(ctq, qinfo, num_chunks); 1574 } 1575 1576 mbx_error: 1577 mutex_unlock(&vport->vc_buf_lock); 1578 kfree(ctq); 1579 error: 1580 kfree(qi); 1581 1582 return err; 1583 } 1584 1585 /** 1586 * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message 1587 * @vport: virtual port data structure 1588 * 1589 * Send config rx queues virtchnl message. Returns 0 on success, negative on 1590 * failure. 1591 */ 1592 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) 1593 { 1594 struct virtchnl2_config_rx_queues *crq; 1595 u32 config_sz, chunk_sz, buf_sz; 1596 int totqs, num_msgs, num_chunks; 1597 struct virtchnl2_rxq_info *qi; 1598 int err = 0, i, k = 0; 1599 1600 totqs = vport->num_rxq + vport->num_bufq; 1601 qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); 1602 if (!qi) 1603 return -ENOMEM; 1604 1605 /* Populate the queue info buffer with all queue context info */ 1606 for (i = 0; i < vport->num_rxq_grp; i++) { 1607 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1608 u16 num_rxq; 1609 int j; 1610 1611 if (!idpf_is_queue_model_split(vport->rxq_model)) 1612 goto setup_rxqs; 1613 1614 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1615 struct idpf_queue *bufq = 1616 &rx_qgrp->splitq.bufq_sets[j].bufq; 1617 1618 qi[k].queue_id = cpu_to_le32(bufq->q_id); 1619 qi[k].model = cpu_to_le16(vport->rxq_model); 1620 qi[k].type = cpu_to_le32(bufq->q_type); 1621 qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); 1622 qi[k].ring_len = cpu_to_le16(bufq->desc_count); 1623 qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); 1624 qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); 1625 qi[k].buffer_notif_stride = bufq->rx_buf_stride; 1626 qi[k].rx_buffer_low_watermark = 1627 cpu_to_le16(bufq->rx_buffer_low_watermark); 1628 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1629 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1630 } 1631 1632 setup_rxqs: 1633 if (idpf_is_queue_model_split(vport->rxq_model)) 1634 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1635 else 1636 num_rxq = rx_qgrp->singleq.num_rxq; 1637 1638 for (j = 0; j < num_rxq; j++, k++) { 1639 struct idpf_queue *rxq; 1640 1641 if (!idpf_is_queue_model_split(vport->rxq_model)) { 1642 rxq = rx_qgrp->singleq.rxqs[j]; 1643 goto common_qi_fields; 1644 } 1645 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1646 qi[k].rx_bufq1_id = 1647 cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); 1648 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { 1649 qi[k].bufq2_ena = IDPF_BUFQ2_ENA; 1650 qi[k].rx_bufq2_id = 1651 cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); 1652 } 1653 qi[k].rx_buffer_low_watermark = 1654 cpu_to_le16(rxq->rx_buffer_low_watermark); 1655 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) 1656 qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); 1657 1658 common_qi_fields: 1659 if (rxq->rx_hsplit_en) { 1660 qi[k].qflags |= 1661 cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); 1662 qi[k].hdr_buffer_size = 1663 cpu_to_le16(rxq->rx_hbuf_size); 1664 } 1665 qi[k].queue_id = cpu_to_le32(rxq->q_id); 1666 qi[k].model = cpu_to_le16(vport->rxq_model); 1667 qi[k].type = cpu_to_le32(rxq->q_type); 1668 qi[k].ring_len = cpu_to_le16(rxq->desc_count); 1669 qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); 1670 qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); 1671 qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); 1672 qi[k].qflags |= 1673 cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); 1674 qi[k].desc_ids = cpu_to_le64(rxq->rxdids); 1675 } 1676 } 1677 1678 /* Make sure accounting 
agrees */ 1679 if (k != totqs) { 1680 err = -EINVAL; 1681 goto error; 1682 } 1683 1684 /* Chunk up the queue contexts into multiple messages to avoid 1685 * sending a control queue message buffer that is too large 1686 */ 1687 config_sz = sizeof(struct virtchnl2_config_rx_queues); 1688 chunk_sz = sizeof(struct virtchnl2_rxq_info); 1689 1690 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1691 totqs); 1692 num_msgs = DIV_ROUND_UP(totqs, num_chunks); 1693 1694 buf_sz = struct_size(crq, qinfo, num_chunks); 1695 crq = kzalloc(buf_sz, GFP_KERNEL); 1696 if (!crq) { 1697 err = -ENOMEM; 1698 goto error; 1699 } 1700 1701 mutex_lock(&vport->vc_buf_lock); 1702 1703 for (i = 0, k = 0; i < num_msgs; i++) { 1704 memset(crq, 0, buf_sz); 1705 crq->vport_id = cpu_to_le32(vport->vport_id); 1706 crq->num_qinfo = cpu_to_le16(num_chunks); 1707 memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); 1708 1709 err = idpf_send_mb_msg(vport->adapter, 1710 VIRTCHNL2_OP_CONFIG_RX_QUEUES, 1711 buf_sz, (u8 *)crq); 1712 if (err) 1713 goto mbx_error; 1714 1715 err = idpf_wait_for_event(vport->adapter, vport, 1716 IDPF_VC_CONFIG_RXQ, 1717 IDPF_VC_CONFIG_RXQ_ERR); 1718 if (err) 1719 goto mbx_error; 1720 1721 k += num_chunks; 1722 totqs -= num_chunks; 1723 num_chunks = min(num_chunks, totqs); 1724 /* Recalculate buffer size */ 1725 buf_sz = struct_size(crq, qinfo, num_chunks); 1726 } 1727 1728 mbx_error: 1729 mutex_unlock(&vport->vc_buf_lock); 1730 kfree(crq); 1731 error: 1732 kfree(qi); 1733 1734 return err; 1735 } 1736 1737 /** 1738 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable 1739 * queues message 1740 * @vport: virtual port data structure 1741 * @vc_op: virtchnl op code to send 1742 * 1743 * Send enable or disable queues virtchnl message. Returns 0 on success, 1744 * negative on failure. 
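 *
 * Enable requests are waited for with the regular idpf_wait_for_event()
 * timeout, while disable requests use the shorter idpf_min_wait_for_event(),
 * as done in the send loop below.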
1745 */ 1746 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) 1747 { 1748 u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; 1749 struct idpf_adapter *adapter = vport->adapter; 1750 struct virtchnl2_del_ena_dis_queues *eq; 1751 struct virtchnl2_queue_chunks *qcs; 1752 struct virtchnl2_queue_chunk *qc; 1753 u32 config_sz, chunk_sz, buf_sz; 1754 int i, j, k = 0, err = 0; 1755 1756 /* validate virtchnl op */ 1757 switch (vc_op) { 1758 case VIRTCHNL2_OP_ENABLE_QUEUES: 1759 case VIRTCHNL2_OP_DISABLE_QUEUES: 1760 break; 1761 default: 1762 return -EINVAL; 1763 } 1764 1765 num_txq = vport->num_txq + vport->num_complq; 1766 num_rxq = vport->num_rxq + vport->num_bufq; 1767 num_q = num_txq + num_rxq; 1768 buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; 1769 qc = kzalloc(buf_sz, GFP_KERNEL); 1770 if (!qc) 1771 return -ENOMEM; 1772 1773 for (i = 0; i < vport->num_txq_grp; i++) { 1774 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1775 1776 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1777 qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); 1778 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1779 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1780 } 1781 } 1782 if (vport->num_txq != k) { 1783 err = -EINVAL; 1784 goto error; 1785 } 1786 1787 if (!idpf_is_queue_model_split(vport->txq_model)) 1788 goto setup_rx; 1789 1790 for (i = 0; i < vport->num_txq_grp; i++, k++) { 1791 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1792 1793 qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); 1794 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); 1795 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1796 } 1797 if (vport->num_complq != (k - vport->num_txq)) { 1798 err = -EINVAL; 1799 goto error; 1800 } 1801 1802 setup_rx: 1803 for (i = 0; i < vport->num_rxq_grp; i++) { 1804 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1805 1806 if (idpf_is_queue_model_split(vport->rxq_model)) 1807 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1808 else 1809 num_rxq = rx_qgrp->singleq.num_rxq; 1810 1811 for (j = 0; j < num_rxq; j++, k++) { 1812 if (idpf_is_queue_model_split(vport->rxq_model)) { 1813 qc[k].start_queue_id = 1814 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); 1815 qc[k].type = 1816 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); 1817 } else { 1818 qc[k].start_queue_id = 1819 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); 1820 qc[k].type = 1821 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); 1822 } 1823 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1824 } 1825 } 1826 if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) { 1827 err = -EINVAL; 1828 goto error; 1829 } 1830 1831 if (!idpf_is_queue_model_split(vport->rxq_model)) 1832 goto send_msg; 1833 1834 for (i = 0; i < vport->num_rxq_grp; i++) { 1835 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1836 1837 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { 1838 struct idpf_queue *q; 1839 1840 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 1841 qc[k].type = cpu_to_le32(q->q_type); 1842 qc[k].start_queue_id = cpu_to_le32(q->q_id); 1843 qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); 1844 } 1845 } 1846 if (vport->num_bufq != k - (vport->num_txq + 1847 vport->num_complq + 1848 vport->num_rxq)) { 1849 err = -EINVAL; 1850 goto error; 1851 } 1852 1853 send_msg: 1854 /* Chunk up the queue info into multiple messages */ 1855 config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); 1856 chunk_sz = sizeof(struct virtchnl2_queue_chunk); 1857 1858 num_chunks = 
min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 1859 num_q); 1860 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 1861 1862 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1863 eq = kzalloc(buf_sz, GFP_KERNEL); 1864 if (!eq) { 1865 err = -ENOMEM; 1866 goto error; 1867 } 1868 1869 mutex_lock(&vport->vc_buf_lock); 1870 1871 for (i = 0, k = 0; i < num_msgs; i++) { 1872 memset(eq, 0, buf_sz); 1873 eq->vport_id = cpu_to_le32(vport->vport_id); 1874 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 1875 qcs = &eq->chunks; 1876 memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); 1877 1878 err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq); 1879 if (err) 1880 goto mbx_error; 1881 1882 if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES) 1883 err = idpf_wait_for_event(adapter, vport, 1884 IDPF_VC_ENA_QUEUES, 1885 IDPF_VC_ENA_QUEUES_ERR); 1886 else 1887 err = idpf_min_wait_for_event(adapter, vport, 1888 IDPF_VC_DIS_QUEUES, 1889 IDPF_VC_DIS_QUEUES_ERR); 1890 if (err) 1891 goto mbx_error; 1892 1893 k += num_chunks; 1894 num_q -= num_chunks; 1895 num_chunks = min(num_chunks, num_q); 1896 /* Recalculate buffer size */ 1897 buf_sz = struct_size(eq, chunks.chunks, num_chunks); 1898 } 1899 1900 mbx_error: 1901 mutex_unlock(&vport->vc_buf_lock); 1902 kfree(eq); 1903 error: 1904 kfree(qc); 1905 1906 return err; 1907 } 1908 1909 /** 1910 * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue 1911 * vector message 1912 * @vport: virtual port data structure 1913 * @map: true for map and false for unmap 1914 * 1915 * Send map or unmap queue vector virtchnl message. Returns 0 on success, 1916 * negative on failure. 1917 */ 1918 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) 1919 { 1920 struct idpf_adapter *adapter = vport->adapter; 1921 struct virtchnl2_queue_vector_maps *vqvm; 1922 struct virtchnl2_queue_vector *vqv; 1923 u32 config_sz, chunk_sz, buf_sz; 1924 u32 num_msgs, num_chunks, num_q; 1925 int i, j, k = 0, err = 0; 1926 1927 num_q = vport->num_txq + vport->num_rxq; 1928 1929 buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; 1930 vqv = kzalloc(buf_sz, GFP_KERNEL); 1931 if (!vqv) 1932 return -ENOMEM; 1933 1934 for (i = 0; i < vport->num_txq_grp; i++) { 1935 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 1936 1937 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { 1938 vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); 1939 vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); 1940 1941 if (idpf_is_queue_model_split(vport->txq_model)) { 1942 vqv[k].vector_id = 1943 cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); 1944 vqv[k].itr_idx = 1945 cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); 1946 } else { 1947 vqv[k].vector_id = 1948 cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); 1949 vqv[k].itr_idx = 1950 cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); 1951 } 1952 } 1953 } 1954 1955 if (vport->num_txq != k) { 1956 err = -EINVAL; 1957 goto error; 1958 } 1959 1960 for (i = 0; i < vport->num_rxq_grp; i++) { 1961 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 1962 u16 num_rxq; 1963 1964 if (idpf_is_queue_model_split(vport->rxq_model)) 1965 num_rxq = rx_qgrp->splitq.num_rxq_sets; 1966 else 1967 num_rxq = rx_qgrp->singleq.num_rxq; 1968 1969 for (j = 0; j < num_rxq; j++, k++) { 1970 struct idpf_queue *rxq; 1971 1972 if (idpf_is_queue_model_split(vport->rxq_model)) 1973 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; 1974 else 1975 rxq = rx_qgrp->singleq.rxqs[j]; 1976 1977 vqv[k].queue_type = cpu_to_le32(rxq->q_type); 1978 vqv[k].queue_id = 
cpu_to_le32(rxq->q_id); 1979 vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); 1980 vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); 1981 } 1982 } 1983 1984 if (idpf_is_queue_model_split(vport->txq_model)) { 1985 if (vport->num_rxq != k - vport->num_complq) { 1986 err = -EINVAL; 1987 goto error; 1988 } 1989 } else { 1990 if (vport->num_rxq != k - vport->num_txq) { 1991 err = -EINVAL; 1992 goto error; 1993 } 1994 } 1995 1996 /* Chunk up the vector info into multiple messages */ 1997 config_sz = sizeof(struct virtchnl2_queue_vector_maps); 1998 chunk_sz = sizeof(struct virtchnl2_queue_vector); 1999 2000 num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), 2001 num_q); 2002 num_msgs = DIV_ROUND_UP(num_q, num_chunks); 2003 2004 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 2005 vqvm = kzalloc(buf_sz, GFP_KERNEL); 2006 if (!vqvm) { 2007 err = -ENOMEM; 2008 goto error; 2009 } 2010 2011 mutex_lock(&vport->vc_buf_lock); 2012 2013 for (i = 0, k = 0; i < num_msgs; i++) { 2014 memset(vqvm, 0, buf_sz); 2015 vqvm->vport_id = cpu_to_le32(vport->vport_id); 2016 vqvm->num_qv_maps = cpu_to_le16(num_chunks); 2017 memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); 2018 2019 if (map) { 2020 err = idpf_send_mb_msg(adapter, 2021 VIRTCHNL2_OP_MAP_QUEUE_VECTOR, 2022 buf_sz, (u8 *)vqvm); 2023 if (!err) 2024 err = idpf_wait_for_event(adapter, vport, 2025 IDPF_VC_MAP_IRQ, 2026 IDPF_VC_MAP_IRQ_ERR); 2027 } else { 2028 err = idpf_send_mb_msg(adapter, 2029 VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, 2030 buf_sz, (u8 *)vqvm); 2031 if (!err) 2032 err = 2033 idpf_min_wait_for_event(adapter, vport, 2034 IDPF_VC_UNMAP_IRQ, 2035 IDPF_VC_UNMAP_IRQ_ERR); 2036 } 2037 if (err) 2038 goto mbx_error; 2039 2040 k += num_chunks; 2041 num_q -= num_chunks; 2042 num_chunks = min(num_chunks, num_q); 2043 /* Recalculate buffer size */ 2044 buf_sz = struct_size(vqvm, qv_maps, num_chunks); 2045 } 2046 2047 mbx_error: 2048 mutex_unlock(&vport->vc_buf_lock); 2049 kfree(vqvm); 2050 error: 2051 kfree(vqv); 2052 2053 return err; 2054 } 2055 2056 /** 2057 * idpf_send_enable_queues_msg - send enable queues virtchnl message 2058 * @vport: Virtual port private data structure 2059 * 2060 * Will send enable queues virtchnl message. Returns 0 on success, negative on 2061 * failure. 2062 */ 2063 int idpf_send_enable_queues_msg(struct idpf_vport *vport) 2064 { 2065 return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES); 2066 } 2067 2068 /** 2069 * idpf_send_disable_queues_msg - send disable queues virtchnl message 2070 * @vport: Virtual port private data structure 2071 * 2072 * Will send disable queues virtchnl message. Returns 0 on success, negative 2073 * on failure. 
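 *
 * In outline, mirroring the body below: send VIRTCHNL2_OP_DISABLE_QUEUES,
 * switch every Tx queue into __IDPF_Q_POLL_MODE, kick the NAPI instances so
 * the marker packets can be reaped, then wait for the marker event:
 *
 *	for (i = 0; i < vport->num_q_vectors; i++)
 *		napi_schedule(&vport->q_vectors[i].napi);
 *	return idpf_wait_for_marker_event(vport);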
2074 */ 2075 int idpf_send_disable_queues_msg(struct idpf_vport *vport) 2076 { 2077 int err, i; 2078 2079 err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES); 2080 if (err) 2081 return err; 2082 2083 /* switch to poll mode as interrupts will be disabled after disable 2084 * queues virtchnl message is sent 2085 */ 2086 for (i = 0; i < vport->num_txq; i++) 2087 set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); 2088 2089 /* schedule the napi to receive all the marker packets */ 2090 for (i = 0; i < vport->num_q_vectors; i++) 2091 napi_schedule(&vport->q_vectors[i].napi); 2092 2093 return idpf_wait_for_marker_event(vport); 2094 } 2095 2096 /** 2097 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right 2098 * structure 2099 * @dchunks: Destination chunks to store data to 2100 * @schunks: Source chunks to copy data from 2101 * @num_chunks: number of chunks to copy 2102 */ 2103 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, 2104 struct virtchnl2_queue_reg_chunk *schunks, 2105 u16 num_chunks) 2106 { 2107 u16 i; 2108 2109 for (i = 0; i < num_chunks; i++) { 2110 dchunks[i].type = schunks[i].type; 2111 dchunks[i].start_queue_id = schunks[i].start_queue_id; 2112 dchunks[i].num_queues = schunks[i].num_queues; 2113 } 2114 } 2115 2116 /** 2117 * idpf_send_delete_queues_msg - send delete queues virtchnl message 2118 * @vport: Virtual port private data structure 2119 * 2120 * Will send delete queues virtchnl message. Return 0 on success, negative on 2121 * failure. 2122 */ 2123 int idpf_send_delete_queues_msg(struct idpf_vport *vport) 2124 { 2125 struct idpf_adapter *adapter = vport->adapter; 2126 struct virtchnl2_create_vport *vport_params; 2127 struct virtchnl2_queue_reg_chunks *chunks; 2128 struct virtchnl2_del_ena_dis_queues *eq; 2129 struct idpf_vport_config *vport_config; 2130 u16 vport_idx = vport->idx; 2131 int buf_size, err; 2132 u16 num_chunks; 2133 2134 vport_config = adapter->vport_config[vport_idx]; 2135 if (vport_config->req_qs_chunks) { 2136 struct virtchnl2_add_queues *vc_aq = 2137 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 2138 chunks = &vc_aq->chunks; 2139 } else { 2140 vport_params = adapter->vport_params_recvd[vport_idx]; 2141 chunks = &vport_params->chunks; 2142 } 2143 2144 num_chunks = le16_to_cpu(chunks->num_chunks); 2145 buf_size = struct_size(eq, chunks.chunks, num_chunks); 2146 2147 eq = kzalloc(buf_size, GFP_KERNEL); 2148 if (!eq) 2149 return -ENOMEM; 2150 2151 eq->vport_id = cpu_to_le32(vport->vport_id); 2152 eq->chunks.num_chunks = cpu_to_le16(num_chunks); 2153 2154 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, 2155 num_chunks); 2156 2157 mutex_lock(&vport->vc_buf_lock); 2158 2159 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES, 2160 buf_size, (u8 *)eq); 2161 if (err) 2162 goto rel_lock; 2163 2164 err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES, 2165 IDPF_VC_DEL_QUEUES_ERR); 2166 2167 rel_lock: 2168 mutex_unlock(&vport->vc_buf_lock); 2169 kfree(eq); 2170 2171 return err; 2172 } 2173 2174 /** 2175 * idpf_send_config_queues_msg - Send config queues virtchnl message 2176 * @vport: Virtual port private data structure 2177 * 2178 * Will send config queues virtchnl message. Returns 0 on success, negative on 2179 * failure. 
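 *
 * Tx and completion queues are configured first; an error there is returned
 * immediately and the Rx/buffer queue configuration is skipped.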
2180 */ 2181 int idpf_send_config_queues_msg(struct idpf_vport *vport) 2182 { 2183 int err; 2184 2185 err = idpf_send_config_tx_queues_msg(vport); 2186 if (err) 2187 return err; 2188 2189 return idpf_send_config_rx_queues_msg(vport); 2190 } 2191 2192 /** 2193 * idpf_send_add_queues_msg - Send virtchnl add queues message 2194 * @vport: Virtual port private data structure 2195 * @num_tx_q: number of transmit queues 2196 * @num_complq: number of transmit completion queues 2197 * @num_rx_q: number of receive queues 2198 * @num_rx_bufq: number of receive buffer queues 2199 * 2200 * Returns 0 on success, negative on failure. vport _MUST_ be const here as 2201 * we should not change any fields within vport itself in this function. 2202 */ 2203 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, 2204 u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) 2205 { 2206 struct idpf_adapter *adapter = vport->adapter; 2207 struct idpf_vport_config *vport_config; 2208 struct virtchnl2_add_queues aq = { }; 2209 struct virtchnl2_add_queues *vc_msg; 2210 u16 vport_idx = vport->idx; 2211 int size, err; 2212 2213 vport_config = adapter->vport_config[vport_idx]; 2214 2215 aq.vport_id = cpu_to_le32(vport->vport_id); 2216 aq.num_tx_q = cpu_to_le16(num_tx_q); 2217 aq.num_tx_complq = cpu_to_le16(num_complq); 2218 aq.num_rx_q = cpu_to_le16(num_rx_q); 2219 aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); 2220 2221 mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock); 2222 2223 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES, 2224 sizeof(struct virtchnl2_add_queues), (u8 *)&aq); 2225 if (err) 2226 goto rel_lock; 2227 2228 /* We want vport to be const to prevent incidental code changes making 2229 * changes to the vport config. We're making a special exception here 2230 * to discard const to use the virtchnl. 2231 */ 2232 err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport, 2233 IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR); 2234 if (err) 2235 goto rel_lock; 2236 2237 kfree(vport_config->req_qs_chunks); 2238 vport_config->req_qs_chunks = NULL; 2239 2240 vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg; 2241 /* compare vc_msg num queues with vport num queues */ 2242 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || 2243 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || 2244 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || 2245 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) { 2246 err = -EINVAL; 2247 goto rel_lock; 2248 } 2249 2250 size = struct_size(vc_msg, chunks.chunks, 2251 le16_to_cpu(vc_msg->chunks.num_chunks)); 2252 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); 2253 if (!vport_config->req_qs_chunks) { 2254 err = -ENOMEM; 2255 goto rel_lock; 2256 } 2257 2258 rel_lock: 2259 mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock); 2260 2261 return err; 2262 } 2263 2264 /** 2265 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message 2266 * @adapter: Driver specific private structure 2267 * @num_vectors: number of vectors to be allocated 2268 * 2269 * Returns 0 on success, negative on failure. 
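 *
 * On success the response is cached in adapter->req_vec_chunks for later
 * vector index lookups. A minimal caller sketch (num_vecs and vchunks are
 * illustrative names only):
 *
 *	err = idpf_send_alloc_vectors_msg(adapter, num_vecs);
 *	if (!err)
 *		vchunks = &adapter->req_vec_chunks->vchunks;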
2270 */ 2271 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) 2272 { 2273 struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec; 2274 struct virtchnl2_alloc_vectors ac = { }; 2275 u16 num_vchunks; 2276 int size, err; 2277 2278 ac.num_vectors = cpu_to_le16(num_vectors); 2279 2280 mutex_lock(&adapter->vc_buf_lock); 2281 2282 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS, 2283 sizeof(ac), (u8 *)&ac); 2284 if (err) 2285 goto rel_lock; 2286 2287 err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS, 2288 IDPF_VC_ALLOC_VECTORS_ERR); 2289 if (err) 2290 goto rel_lock; 2291 2292 rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg; 2293 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); 2294 2295 size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); 2296 if (size > sizeof(adapter->vc_msg)) { 2297 err = -EINVAL; 2298 goto rel_lock; 2299 } 2300 2301 kfree(adapter->req_vec_chunks); 2302 adapter->req_vec_chunks = NULL; 2303 adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL); 2304 if (!adapter->req_vec_chunks) { 2305 err = -ENOMEM; 2306 goto rel_lock; 2307 } 2308 2309 alloc_vec = adapter->req_vec_chunks; 2310 if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) { 2311 kfree(adapter->req_vec_chunks); 2312 adapter->req_vec_chunks = NULL; 2313 err = -EINVAL; 2314 } 2315 2316 rel_lock: 2317 mutex_unlock(&adapter->vc_buf_lock); 2318 2319 return err; 2320 } 2321 2322 /** 2323 * idpf_send_dealloc_vectors_msg - Send virtchnl de allocate vectors message 2324 * @adapter: Driver specific private structure 2325 * 2326 * Returns 0 on success, negative on failure. 2327 */ 2328 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) 2329 { 2330 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; 2331 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; 2332 int buf_size, err; 2333 2334 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); 2335 2336 mutex_lock(&adapter->vc_buf_lock); 2337 2338 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size, 2339 (u8 *)vcs); 2340 if (err) 2341 goto rel_lock; 2342 2343 err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS, 2344 IDPF_VC_DEALLOC_VECTORS_ERR); 2345 if (err) 2346 goto rel_lock; 2347 2348 kfree(adapter->req_vec_chunks); 2349 adapter->req_vec_chunks = NULL; 2350 2351 rel_lock: 2352 mutex_unlock(&adapter->vc_buf_lock); 2353 2354 return err; 2355 } 2356 2357 /** 2358 * idpf_get_max_vfs - Get max number of vfs supported 2359 * @adapter: Driver specific private structure 2360 * 2361 * Returns max number of VFs 2362 */ 2363 static int idpf_get_max_vfs(struct idpf_adapter *adapter) 2364 { 2365 return le16_to_cpu(adapter->caps.max_sriov_vfs); 2366 } 2367 2368 /** 2369 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message 2370 * @adapter: Driver specific private structure 2371 * @num_vfs: number of virtual functions to be created 2372 * 2373 * Returns 0 on success, negative on failure. 
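 *
 * The request is synchronous: adapter->vc_buf_lock is held across the
 * mailbox send and the wait for the IDPF_VC_SET_SRIOV_VFS completion.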
2374 */ 2375 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) 2376 { 2377 struct virtchnl2_sriov_vfs_info svi = { }; 2378 int err; 2379 2380 svi.num_vfs = cpu_to_le16(num_vfs); 2381 2382 mutex_lock(&adapter->vc_buf_lock); 2383 2384 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS, 2385 sizeof(svi), (u8 *)&svi); 2386 if (err) 2387 goto rel_lock; 2388 2389 err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS, 2390 IDPF_VC_SET_SRIOV_VFS_ERR); 2391 2392 rel_lock: 2393 mutex_unlock(&adapter->vc_buf_lock); 2394 2395 return err; 2396 } 2397 2398 /** 2399 * idpf_send_get_stats_msg - Send virtchnl get statistics message 2400 * @vport: vport to get stats for 2401 * 2402 * Returns 0 on success, negative on failure. 2403 */ 2404 int idpf_send_get_stats_msg(struct idpf_vport *vport) 2405 { 2406 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); 2407 struct rtnl_link_stats64 *netstats = &np->netstats; 2408 struct idpf_adapter *adapter = vport->adapter; 2409 struct virtchnl2_vport_stats stats_msg = { }; 2410 struct virtchnl2_vport_stats *stats; 2411 int err; 2412 2413 /* Don't send get_stats message if the link is down */ 2414 if (np->state <= __IDPF_VPORT_DOWN) 2415 return 0; 2416 2417 stats_msg.vport_id = cpu_to_le32(vport->vport_id); 2418 2419 mutex_lock(&vport->vc_buf_lock); 2420 2421 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS, 2422 sizeof(struct virtchnl2_vport_stats), 2423 (u8 *)&stats_msg); 2424 if (err) 2425 goto rel_lock; 2426 2427 err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS, 2428 IDPF_VC_GET_STATS_ERR); 2429 if (err) 2430 goto rel_lock; 2431 2432 stats = (struct virtchnl2_vport_stats *)vport->vc_msg; 2433 2434 spin_lock_bh(&np->stats_lock); 2435 2436 netstats->rx_packets = le64_to_cpu(stats->rx_unicast) + 2437 le64_to_cpu(stats->rx_multicast) + 2438 le64_to_cpu(stats->rx_broadcast); 2439 netstats->rx_bytes = le64_to_cpu(stats->rx_bytes); 2440 netstats->rx_dropped = le64_to_cpu(stats->rx_discards); 2441 netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop); 2442 netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length); 2443 2444 netstats->tx_packets = le64_to_cpu(stats->tx_unicast) + 2445 le64_to_cpu(stats->tx_multicast) + 2446 le64_to_cpu(stats->tx_broadcast); 2447 netstats->tx_bytes = le64_to_cpu(stats->tx_bytes); 2448 netstats->tx_errors = le64_to_cpu(stats->tx_errors); 2449 netstats->tx_dropped = le64_to_cpu(stats->tx_discards); 2450 2451 vport->port_stats.vport_stats = *stats; 2452 2453 spin_unlock_bh(&np->stats_lock); 2454 2455 rel_lock: 2456 mutex_unlock(&vport->vc_buf_lock); 2457 2458 return err; 2459 } 2460 2461 /** 2462 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message 2463 * @vport: virtual port data structure 2464 * @get: flag to set or get rss look up table 2465 * 2466 * Returns 0 on success, negative on failure. 
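 *
 * Illustrative usage (the boolean selects the direction):
 *
 *	idpf_send_get_set_rss_lut_msg(vport, true);	/* read LUT from device */
 *	idpf_send_get_set_rss_lut_msg(vport, false);	/* push rss_data->rss_lut */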
2467 */ 2468 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) 2469 { 2470 struct idpf_adapter *adapter = vport->adapter; 2471 struct virtchnl2_rss_lut *recv_rl; 2472 struct idpf_rss_data *rss_data; 2473 struct virtchnl2_rss_lut *rl; 2474 int buf_size, lut_buf_size; 2475 int i, err; 2476 2477 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; 2478 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); 2479 rl = kzalloc(buf_size, GFP_KERNEL); 2480 if (!rl) 2481 return -ENOMEM; 2482 2483 rl->vport_id = cpu_to_le32(vport->vport_id); 2484 mutex_lock(&vport->vc_buf_lock); 2485 2486 if (!get) { 2487 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); 2488 for (i = 0; i < rss_data->rss_lut_size; i++) 2489 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); 2490 2491 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT, 2492 buf_size, (u8 *)rl); 2493 if (err) 2494 goto free_mem; 2495 2496 err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT, 2497 IDPF_VC_SET_RSS_LUT_ERR); 2498 2499 goto free_mem; 2500 } 2501 2502 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT, 2503 buf_size, (u8 *)rl); 2504 if (err) 2505 goto free_mem; 2506 2507 err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT, 2508 IDPF_VC_GET_RSS_LUT_ERR); 2509 if (err) 2510 goto free_mem; 2511 2512 recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg; 2513 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) 2514 goto do_memcpy; 2515 2516 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); 2517 kfree(rss_data->rss_lut); 2518 2519 lut_buf_size = rss_data->rss_lut_size * sizeof(u32); 2520 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); 2521 if (!rss_data->rss_lut) { 2522 rss_data->rss_lut_size = 0; 2523 err = -ENOMEM; 2524 goto free_mem; 2525 } 2526 2527 do_memcpy: 2528 memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size); 2529 free_mem: 2530 mutex_unlock(&vport->vc_buf_lock); 2531 kfree(rl); 2532 2533 return err; 2534 } 2535 2536 /** 2537 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message 2538 * @vport: virtual port data structure 2539 * @get: flag to set or get rss look up table 2540 * 2541 * Returns 0 on success, negative on failure 2542 */ 2543 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) 2544 { 2545 struct idpf_adapter *adapter = vport->adapter; 2546 struct virtchnl2_rss_key *recv_rk; 2547 struct idpf_rss_data *rss_data; 2548 struct virtchnl2_rss_key *rk; 2549 int i, buf_size, err; 2550 2551 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; 2552 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); 2553 rk = kzalloc(buf_size, GFP_KERNEL); 2554 if (!rk) 2555 return -ENOMEM; 2556 2557 rk->vport_id = cpu_to_le32(vport->vport_id); 2558 mutex_lock(&vport->vc_buf_lock); 2559 2560 if (get) { 2561 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY, 2562 buf_size, (u8 *)rk); 2563 if (err) 2564 goto error; 2565 2566 err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY, 2567 IDPF_VC_GET_RSS_KEY_ERR); 2568 if (err) 2569 goto error; 2570 2571 recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg; 2572 if (rss_data->rss_key_size != 2573 le16_to_cpu(recv_rk->key_len)) { 2574 rss_data->rss_key_size = 2575 min_t(u16, NETDEV_RSS_KEY_LEN, 2576 le16_to_cpu(recv_rk->key_len)); 2577 kfree(rss_data->rss_key); 2578 rss_data->rss_key = kzalloc(rss_data->rss_key_size, 2579 GFP_KERNEL); 2580 if (!rss_data->rss_key) { 2581 rss_data->rss_key_size = 0; 2582 err = -ENOMEM; 
2583 goto error; 2584 } 2585 } 2586 memcpy(rss_data->rss_key, recv_rk->key_flex, 2587 rss_data->rss_key_size); 2588 } else { 2589 rk->key_len = cpu_to_le16(rss_data->rss_key_size); 2590 for (i = 0; i < rss_data->rss_key_size; i++) 2591 rk->key_flex[i] = rss_data->rss_key[i]; 2592 2593 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY, 2594 buf_size, (u8 *)rk); 2595 if (err) 2596 goto error; 2597 2598 err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY, 2599 IDPF_VC_SET_RSS_KEY_ERR); 2600 } 2601 2602 error: 2603 mutex_unlock(&vport->vc_buf_lock); 2604 kfree(rk); 2605 2606 return err; 2607 } 2608 2609 /** 2610 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table 2611 * @ptype: ptype lookup table 2612 * @pstate: state machine for ptype lookup table 2613 * @ipv4: ipv4 or ipv6 2614 * @frag: fragmentation allowed 2615 * 2616 */ 2617 static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, 2618 struct idpf_ptype_state *pstate, 2619 bool ipv4, bool frag) 2620 { 2621 if (!pstate->outer_ip || !pstate->outer_frag) { 2622 ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; 2623 pstate->outer_ip = true; 2624 2625 if (ipv4) 2626 ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; 2627 else 2628 ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; 2629 2630 if (frag) { 2631 ptype->outer_frag = IDPF_RX_PTYPE_FRAG; 2632 pstate->outer_frag = true; 2633 } 2634 } else { 2635 ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; 2636 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; 2637 2638 if (ipv4) 2639 ptype->tunnel_end_prot = 2640 IDPF_RX_PTYPE_TUNNEL_END_IPV4; 2641 else 2642 ptype->tunnel_end_prot = 2643 IDPF_RX_PTYPE_TUNNEL_END_IPV6; 2644 2645 if (frag) 2646 ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; 2647 } 2648 } 2649 2650 /** 2651 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info 2652 * @vport: virtual port data structure 2653 * 2654 * Returns 0 on success, negative on failure. 
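 *
 * Ptype IDs are requested in windows of IDPF_RX_MAX_PTYPES_PER_BUF until
 * max_ptype entries have been parsed or the device returns the
 * IDPF_INVALID_PTYPE_ID (0xFFFF) end-of-list marker.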
2655 */ 2656 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) 2657 { 2658 struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; 2659 struct virtchnl2_get_ptype_info get_ptype_info; 2660 int max_ptype, ptypes_recvd = 0, ptype_offset; 2661 struct idpf_adapter *adapter = vport->adapter; 2662 struct virtchnl2_get_ptype_info *ptype_info; 2663 u16 next_ptype_id = 0; 2664 int err = 0, i, j, k; 2665 2666 if (idpf_is_queue_model_split(vport->rxq_model)) 2667 max_ptype = IDPF_RX_MAX_PTYPE; 2668 else 2669 max_ptype = IDPF_RX_MAX_BASE_PTYPE; 2670 2671 memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); 2672 2673 ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); 2674 if (!ptype_info) 2675 return -ENOMEM; 2676 2677 mutex_lock(&adapter->vc_buf_lock); 2678 2679 while (next_ptype_id < max_ptype) { 2680 get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id); 2681 2682 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) 2683 get_ptype_info.num_ptypes = 2684 cpu_to_le16(max_ptype - next_ptype_id); 2685 else 2686 get_ptype_info.num_ptypes = 2687 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); 2688 2689 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO, 2690 sizeof(struct virtchnl2_get_ptype_info), 2691 (u8 *)&get_ptype_info); 2692 if (err) 2693 goto vc_buf_unlock; 2694 2695 err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO, 2696 IDPF_VC_GET_PTYPE_INFO_ERR); 2697 if (err) 2698 goto vc_buf_unlock; 2699 2700 memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); 2701 2702 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); 2703 if (ptypes_recvd > max_ptype) { 2704 err = -EINVAL; 2705 goto vc_buf_unlock; 2706 } 2707 2708 next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) + 2709 le16_to_cpu(get_ptype_info.num_ptypes); 2710 2711 ptype_offset = IDPF_RX_PTYPE_HDR_SZ; 2712 2713 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { 2714 struct idpf_ptype_state pstate = { }; 2715 struct virtchnl2_ptype *ptype; 2716 u16 id; 2717 2718 ptype = (struct virtchnl2_ptype *) 2719 ((u8 *)ptype_info + ptype_offset); 2720 2721 ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); 2722 if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) { 2723 err = -EINVAL; 2724 goto vc_buf_unlock; 2725 } 2726 2727 /* 0xFFFF indicates end of ptypes */ 2728 if (le16_to_cpu(ptype->ptype_id_10) == 2729 IDPF_INVALID_PTYPE_ID) { 2730 err = 0; 2731 goto vc_buf_unlock; 2732 } 2733 2734 if (idpf_is_queue_model_split(vport->rxq_model)) 2735 k = le16_to_cpu(ptype->ptype_id_10); 2736 else 2737 k = ptype->ptype_id_8; 2738 2739 if (ptype->proto_id_count) 2740 ptype_lkup[k].known = 1; 2741 2742 for (j = 0; j < ptype->proto_id_count; j++) { 2743 id = le16_to_cpu(ptype->proto_id[j]); 2744 switch (id) { 2745 case VIRTCHNL2_PROTO_HDR_GRE: 2746 if (pstate.tunnel_state == 2747 IDPF_PTYPE_TUNNEL_IP) { 2748 ptype_lkup[k].tunnel_type = 2749 IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; 2750 pstate.tunnel_state |= 2751 IDPF_PTYPE_TUNNEL_IP_GRENAT; 2752 } 2753 break; 2754 case VIRTCHNL2_PROTO_HDR_MAC: 2755 ptype_lkup[k].outer_ip = 2756 IDPF_RX_PTYPE_OUTER_L2; 2757 if (pstate.tunnel_state == 2758 IDPF_TUN_IP_GRE) { 2759 ptype_lkup[k].tunnel_type = 2760 IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; 2761 pstate.tunnel_state |= 2762 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; 2763 } 2764 break; 2765 case VIRTCHNL2_PROTO_HDR_IPV4: 2766 idpf_fill_ptype_lookup(&ptype_lkup[k], 2767 &pstate, true, 2768 false); 2769 break; 2770 case VIRTCHNL2_PROTO_HDR_IPV6: 2771 idpf_fill_ptype_lookup(&ptype_lkup[k], 2772 &pstate, false, 2773 false); 2774 break; 
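				/* Fragmented IPv4/IPv6 reuse the same lookup
				 * fill as the cases above, with frag set.
				 */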
2775 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: 2776 idpf_fill_ptype_lookup(&ptype_lkup[k], 2777 &pstate, true, 2778 true); 2779 break; 2780 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: 2781 idpf_fill_ptype_lookup(&ptype_lkup[k], 2782 &pstate, false, 2783 true); 2784 break; 2785 case VIRTCHNL2_PROTO_HDR_UDP: 2786 ptype_lkup[k].inner_prot = 2787 IDPF_RX_PTYPE_INNER_PROT_UDP; 2788 break; 2789 case VIRTCHNL2_PROTO_HDR_TCP: 2790 ptype_lkup[k].inner_prot = 2791 IDPF_RX_PTYPE_INNER_PROT_TCP; 2792 break; 2793 case VIRTCHNL2_PROTO_HDR_SCTP: 2794 ptype_lkup[k].inner_prot = 2795 IDPF_RX_PTYPE_INNER_PROT_SCTP; 2796 break; 2797 case VIRTCHNL2_PROTO_HDR_ICMP: 2798 ptype_lkup[k].inner_prot = 2799 IDPF_RX_PTYPE_INNER_PROT_ICMP; 2800 break; 2801 case VIRTCHNL2_PROTO_HDR_PAY: 2802 ptype_lkup[k].payload_layer = 2803 IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; 2804 break; 2805 case VIRTCHNL2_PROTO_HDR_ICMPV6: 2806 case VIRTCHNL2_PROTO_HDR_IPV6_EH: 2807 case VIRTCHNL2_PROTO_HDR_PRE_MAC: 2808 case VIRTCHNL2_PROTO_HDR_POST_MAC: 2809 case VIRTCHNL2_PROTO_HDR_ETHERTYPE: 2810 case VIRTCHNL2_PROTO_HDR_SVLAN: 2811 case VIRTCHNL2_PROTO_HDR_CVLAN: 2812 case VIRTCHNL2_PROTO_HDR_MPLS: 2813 case VIRTCHNL2_PROTO_HDR_MMPLS: 2814 case VIRTCHNL2_PROTO_HDR_PTP: 2815 case VIRTCHNL2_PROTO_HDR_CTRL: 2816 case VIRTCHNL2_PROTO_HDR_LLDP: 2817 case VIRTCHNL2_PROTO_HDR_ARP: 2818 case VIRTCHNL2_PROTO_HDR_ECP: 2819 case VIRTCHNL2_PROTO_HDR_EAPOL: 2820 case VIRTCHNL2_PROTO_HDR_PPPOD: 2821 case VIRTCHNL2_PROTO_HDR_PPPOE: 2822 case VIRTCHNL2_PROTO_HDR_IGMP: 2823 case VIRTCHNL2_PROTO_HDR_AH: 2824 case VIRTCHNL2_PROTO_HDR_ESP: 2825 case VIRTCHNL2_PROTO_HDR_IKE: 2826 case VIRTCHNL2_PROTO_HDR_NATT_KEEP: 2827 case VIRTCHNL2_PROTO_HDR_L2TPV2: 2828 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: 2829 case VIRTCHNL2_PROTO_HDR_L2TPV3: 2830 case VIRTCHNL2_PROTO_HDR_GTP: 2831 case VIRTCHNL2_PROTO_HDR_GTP_EH: 2832 case VIRTCHNL2_PROTO_HDR_GTPCV2: 2833 case VIRTCHNL2_PROTO_HDR_GTPC_TEID: 2834 case VIRTCHNL2_PROTO_HDR_GTPU: 2835 case VIRTCHNL2_PROTO_HDR_GTPU_UL: 2836 case VIRTCHNL2_PROTO_HDR_GTPU_DL: 2837 case VIRTCHNL2_PROTO_HDR_ECPRI: 2838 case VIRTCHNL2_PROTO_HDR_VRRP: 2839 case VIRTCHNL2_PROTO_HDR_OSPF: 2840 case VIRTCHNL2_PROTO_HDR_TUN: 2841 case VIRTCHNL2_PROTO_HDR_NVGRE: 2842 case VIRTCHNL2_PROTO_HDR_VXLAN: 2843 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: 2844 case VIRTCHNL2_PROTO_HDR_GENEVE: 2845 case VIRTCHNL2_PROTO_HDR_NSH: 2846 case VIRTCHNL2_PROTO_HDR_QUIC: 2847 case VIRTCHNL2_PROTO_HDR_PFCP: 2848 case VIRTCHNL2_PROTO_HDR_PFCP_NODE: 2849 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: 2850 case VIRTCHNL2_PROTO_HDR_RTP: 2851 case VIRTCHNL2_PROTO_HDR_NO_PROTO: 2852 break; 2853 default: 2854 break; 2855 } 2856 } 2857 } 2858 } 2859 2860 vc_buf_unlock: 2861 mutex_unlock(&adapter->vc_buf_lock); 2862 kfree(ptype_info); 2863 2864 return err; 2865 } 2866 2867 /** 2868 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback 2869 * message 2870 * @vport: virtual port data structure 2871 * 2872 * Returns 0 on success, negative on failure. 
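 *
 * The state pushed to the device simply mirrors the netdev feature flag,
 * i.e. loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK).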
2873 */ 2874 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) 2875 { 2876 struct virtchnl2_loopback loopback; 2877 int err; 2878 2879 loopback.vport_id = cpu_to_le32(vport->vport_id); 2880 loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); 2881 2882 mutex_lock(&vport->vc_buf_lock); 2883 2884 err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK, 2885 sizeof(loopback), (u8 *)&loopback); 2886 if (err) 2887 goto rel_lock; 2888 2889 err = idpf_wait_for_event(vport->adapter, vport, 2890 IDPF_VC_LOOPBACK_STATE, 2891 IDPF_VC_LOOPBACK_STATE_ERR); 2892 2893 rel_lock: 2894 mutex_unlock(&vport->vc_buf_lock); 2895 2896 return err; 2897 } 2898 2899 /** 2900 * idpf_find_ctlq - Given a type and id, find ctlq info 2901 * @hw: hardware struct 2902 * @type: type of ctrlq to find 2903 * @id: ctlq id to find 2904 * 2905 * Returns pointer to found ctlq info struct, NULL otherwise. 2906 */ 2907 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, 2908 enum idpf_ctlq_type type, int id) 2909 { 2910 struct idpf_ctlq_info *cq, *tmp; 2911 2912 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) 2913 if (cq->q_id == id && cq->cq_type == type) 2914 return cq; 2915 2916 return NULL; 2917 } 2918 2919 /** 2920 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request 2921 * @adapter: adapter info struct 2922 * 2923 * Returns 0 on success, negative otherwise 2924 */ 2925 int idpf_init_dflt_mbx(struct idpf_adapter *adapter) 2926 { 2927 struct idpf_ctlq_create_info ctlq_info[] = { 2928 { 2929 .type = IDPF_CTLQ_TYPE_MAILBOX_TX, 2930 .id = IDPF_DFLT_MBX_ID, 2931 .len = IDPF_DFLT_MBX_Q_LEN, 2932 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2933 }, 2934 { 2935 .type = IDPF_CTLQ_TYPE_MAILBOX_RX, 2936 .id = IDPF_DFLT_MBX_ID, 2937 .len = IDPF_DFLT_MBX_Q_LEN, 2938 .buf_size = IDPF_CTLQ_MAX_BUF_LEN 2939 } 2940 }; 2941 struct idpf_hw *hw = &adapter->hw; 2942 int err; 2943 2944 adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); 2945 2946 err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); 2947 if (err) 2948 return err; 2949 2950 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, 2951 IDPF_DFLT_MBX_ID); 2952 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, 2953 IDPF_DFLT_MBX_ID); 2954 2955 if (!hw->asq || !hw->arq) { 2956 idpf_ctlq_deinit(hw); 2957 2958 return -ENOENT; 2959 } 2960 2961 adapter->state = __IDPF_STARTUP; 2962 2963 return 0; 2964 } 2965 2966 /** 2967 * idpf_deinit_dflt_mbx - Free up ctlqs setup 2968 * @adapter: Driver specific private data structure 2969 */ 2970 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) 2971 { 2972 if (adapter->hw.arq && adapter->hw.asq) { 2973 idpf_mb_clean(adapter); 2974 idpf_ctlq_deinit(&adapter->hw); 2975 } 2976 adapter->hw.arq = NULL; 2977 adapter->hw.asq = NULL; 2978 } 2979 2980 /** 2981 * idpf_vport_params_buf_rel - Release memory for MailBox resources 2982 * @adapter: Driver specific private data structure 2983 * 2984 * Will release memory to hold the vport parameters received on MailBox 2985 */ 2986 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) 2987 { 2988 kfree(adapter->vport_params_recvd); 2989 adapter->vport_params_recvd = NULL; 2990 kfree(adapter->vport_params_reqd); 2991 adapter->vport_params_reqd = NULL; 2992 kfree(adapter->vport_ids); 2993 adapter->vport_ids = NULL; 2994 } 2995 2996 /** 2997 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources 2998 * @adapter: Driver specific private data structure 2999 * 3000 * Will alloc memory to hold the vport parameters 
received on MailBox 3001 */ 3002 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) 3003 { 3004 u16 num_max_vports = idpf_get_max_vports(adapter); 3005 3006 adapter->vport_params_reqd = kcalloc(num_max_vports, 3007 sizeof(*adapter->vport_params_reqd), 3008 GFP_KERNEL); 3009 if (!adapter->vport_params_reqd) 3010 return -ENOMEM; 3011 3012 adapter->vport_params_recvd = kcalloc(num_max_vports, 3013 sizeof(*adapter->vport_params_recvd), 3014 GFP_KERNEL); 3015 if (!adapter->vport_params_recvd) 3016 goto err_mem; 3017 3018 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); 3019 if (!adapter->vport_ids) 3020 goto err_mem; 3021 3022 if (adapter->vport_config) 3023 return 0; 3024 3025 adapter->vport_config = kcalloc(num_max_vports, 3026 sizeof(*adapter->vport_config), 3027 GFP_KERNEL); 3028 if (!adapter->vport_config) 3029 goto err_mem; 3030 3031 return 0; 3032 3033 err_mem: 3034 idpf_vport_params_buf_rel(adapter); 3035 3036 return -ENOMEM; 3037 } 3038 3039 /** 3040 * idpf_vc_core_init - Initialize state machine and get driver specific 3041 * resources 3042 * @adapter: Driver specific private structure 3043 * 3044 * This function will initialize the state machine and request all necessary 3045 * resources required by the device driver. Once the state machine is 3046 * initialized, allocate memory to store vport specific information and also 3047 * requests required interrupts. 3048 * 3049 * Returns 0 on success, -EAGAIN function will get called again, 3050 * otherwise negative on failure. 3051 */ 3052 int idpf_vc_core_init(struct idpf_adapter *adapter) 3053 { 3054 int task_delay = 30; 3055 u16 num_max_vports; 3056 int err = 0; 3057 3058 while (adapter->state != __IDPF_INIT_SW) { 3059 switch (adapter->state) { 3060 case __IDPF_STARTUP: 3061 if (idpf_send_ver_msg(adapter)) 3062 goto init_failed; 3063 adapter->state = __IDPF_VER_CHECK; 3064 goto restart; 3065 case __IDPF_VER_CHECK: 3066 err = idpf_recv_ver_msg(adapter); 3067 if (err == -EIO) { 3068 return err; 3069 } else if (err == -EAGAIN) { 3070 adapter->state = __IDPF_STARTUP; 3071 goto restart; 3072 } else if (err) { 3073 goto init_failed; 3074 } 3075 if (idpf_send_get_caps_msg(adapter)) 3076 goto init_failed; 3077 adapter->state = __IDPF_GET_CAPS; 3078 goto restart; 3079 case __IDPF_GET_CAPS: 3080 if (idpf_recv_get_caps_msg(adapter)) 3081 goto init_failed; 3082 adapter->state = __IDPF_INIT_SW; 3083 break; 3084 default: 3085 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", 3086 adapter->state); 3087 goto init_failed; 3088 } 3089 break; 3090 restart: 3091 /* Give enough time before proceeding further with 3092 * state machine 3093 */ 3094 msleep(task_delay); 3095 } 3096 3097 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); 3098 num_max_vports = idpf_get_max_vports(adapter); 3099 adapter->max_vports = num_max_vports; 3100 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), 3101 GFP_KERNEL); 3102 if (!adapter->vports) 3103 return -ENOMEM; 3104 3105 if (!adapter->netdevs) { 3106 adapter->netdevs = kcalloc(num_max_vports, 3107 sizeof(struct net_device *), 3108 GFP_KERNEL); 3109 if (!adapter->netdevs) { 3110 err = -ENOMEM; 3111 goto err_netdev_alloc; 3112 } 3113 } 3114 3115 err = idpf_vport_params_buf_alloc(adapter); 3116 if (err) { 3117 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", 3118 err); 3119 goto err_netdev_alloc; 3120 } 3121 3122 /* Start the mailbox task before requesting vectors. 
This will ensure 3123 * vector information response from mailbox is handled 3124 */ 3125 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); 3126 3127 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, 3128 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3129 3130 err = idpf_intr_req(adapter); 3131 if (err) { 3132 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", 3133 err); 3134 goto err_intr_req; 3135 } 3136 3137 idpf_init_avail_queues(adapter); 3138 3139 /* Skew the delay for init tasks for each function based on fn number 3140 * to prevent every function from making the same call simultaneously. 3141 */ 3142 queue_delayed_work(adapter->init_wq, &adapter->init_task, 3143 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); 3144 3145 goto no_err; 3146 3147 err_intr_req: 3148 cancel_delayed_work_sync(&adapter->serv_task); 3149 cancel_delayed_work_sync(&adapter->mbx_task); 3150 idpf_vport_params_buf_rel(adapter); 3151 err_netdev_alloc: 3152 kfree(adapter->vports); 3153 adapter->vports = NULL; 3154 no_err: 3155 return err; 3156 3157 init_failed: 3158 /* Don't retry if we're trying to go down, just bail. */ 3159 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 3160 return err; 3161 3162 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { 3163 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); 3164 3165 return -EFAULT; 3166 } 3167 /* If it reached here, it is possible that mailbox queue initialization 3168 * register writes might not have taken effect. Retry to initialize 3169 * the mailbox again 3170 */ 3171 adapter->state = __IDPF_STARTUP; 3172 idpf_deinit_dflt_mbx(adapter); 3173 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); 3174 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, 3175 msecs_to_jiffies(task_delay)); 3176 3177 return -EAGAIN; 3178 } 3179 3180 /** 3181 * idpf_vc_core_deinit - Device deinit routine 3182 * @adapter: Driver specific private structure 3183 * 3184 */ 3185 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3186 { 3187 int i; 3188 3189 idpf_deinit_task(adapter); 3190 idpf_intr_rel(adapter); 3191 /* Set all bits as we dont know on which vc_state the vhnl_wq is 3192 * waiting on and wakeup the virtchnl workqueue even if it is waiting 3193 * for the response as we are going down 3194 */ 3195 for (i = 0; i < IDPF_VC_NBITS; i++) 3196 set_bit(i, adapter->vc_state); 3197 wake_up(&adapter->vchnl_wq); 3198 3199 cancel_delayed_work_sync(&adapter->serv_task); 3200 cancel_delayed_work_sync(&adapter->mbx_task); 3201 3202 idpf_vport_params_buf_rel(adapter); 3203 3204 /* Clear all the bits */ 3205 for (i = 0; i < IDPF_VC_NBITS; i++) 3206 clear_bit(i, adapter->vc_state); 3207 3208 kfree(adapter->vports); 3209 adapter->vports = NULL; 3210 } 3211 3212 /** 3213 * idpf_vport_alloc_vec_indexes - Get relative vector indexes 3214 * @vport: virtual port data struct 3215 * 3216 * This function requests the vector information required for the vport and 3217 * stores the vector indexes received from the 'global vector distribution' 3218 * in the vport's queue vectors array. 
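 *
 * The request size mirrors the body below:
 *
 *	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
 *	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
 *						     vport->q_vector_idxs,
 *						     &vec_info);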
3219 * 3220 * Return 0 on success, error on failure 3221 */ 3222 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) 3223 { 3224 struct idpf_vector_info vec_info; 3225 int num_alloc_vecs; 3226 3227 vec_info.num_curr_vecs = vport->num_q_vectors; 3228 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); 3229 vec_info.default_vport = vport->default_vport; 3230 vec_info.index = vport->idx; 3231 3232 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, 3233 vport->q_vector_idxs, 3234 &vec_info); 3235 if (num_alloc_vecs <= 0) { 3236 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", 3237 num_alloc_vecs); 3238 return -EINVAL; 3239 } 3240 3241 vport->num_q_vectors = num_alloc_vecs; 3242 3243 return 0; 3244 } 3245 3246 /** 3247 * idpf_vport_init - Initialize virtual port 3248 * @vport: virtual port to be initialized 3249 * @max_q: vport max queue info 3250 * 3251 * Will initialize vport with the info received through MB earlier 3252 */ 3253 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) 3254 { 3255 struct idpf_adapter *adapter = vport->adapter; 3256 struct virtchnl2_create_vport *vport_msg; 3257 struct idpf_vport_config *vport_config; 3258 u16 tx_itr[] = {2, 8, 64, 128, 256}; 3259 u16 rx_itr[] = {2, 8, 32, 96, 128}; 3260 struct idpf_rss_data *rss_data; 3261 u16 idx = vport->idx; 3262 3263 vport_config = adapter->vport_config[idx]; 3264 rss_data = &vport_config->user_config.rss_data; 3265 vport_msg = adapter->vport_params_recvd[idx]; 3266 3267 vport_config->max_q.max_txq = max_q->max_txq; 3268 vport_config->max_q.max_rxq = max_q->max_rxq; 3269 vport_config->max_q.max_complq = max_q->max_complq; 3270 vport_config->max_q.max_bufq = max_q->max_bufq; 3271 3272 vport->txq_model = le16_to_cpu(vport_msg->txq_model); 3273 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); 3274 vport->vport_type = le16_to_cpu(vport_msg->vport_type); 3275 vport->vport_id = le32_to_cpu(vport_msg->vport_id); 3276 3277 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, 3278 le16_to_cpu(vport_msg->rss_key_size)); 3279 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); 3280 3281 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); 3282 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; 3283 3284 /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ 3285 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); 3286 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); 3287 3288 idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); 3289 3290 idpf_vport_init_num_qs(vport, vport_msg); 3291 idpf_vport_calc_num_q_desc(vport); 3292 idpf_vport_calc_num_q_groups(vport); 3293 idpf_vport_alloc_vec_indexes(vport); 3294 3295 vport->crc_enable = adapter->crc_enable; 3296 } 3297 3298 /** 3299 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters 3300 * @adapter: adapter structure to get the mailbox vector id 3301 * @vecids: Array of vector ids 3302 * @num_vecids: number of vector ids 3303 * @chunks: vector ids received over mailbox 3304 * 3305 * Will initialize the mailbox vector id which is received from the 3306 * get capabilities and data queue vector ids with ids received as 3307 * mailbox parameters. 
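 *
 * vecids[0] is always the mailbox vector (adapter->mb_vector.v_idx); the
 * data queue vectors from @chunks follow, capped at @num_vecids entries.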
3308 * Returns number of ids filled 3309 */ 3310 int idpf_get_vec_ids(struct idpf_adapter *adapter, 3311 u16 *vecids, int num_vecids, 3312 struct virtchnl2_vector_chunks *chunks) 3313 { 3314 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); 3315 int num_vecid_filled = 0; 3316 int i, j; 3317 3318 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; 3319 num_vecid_filled++; 3320 3321 for (j = 0; j < num_chunks; j++) { 3322 struct virtchnl2_vector_chunk *chunk; 3323 u16 start_vecid, num_vec; 3324 3325 chunk = &chunks->vchunks[j]; 3326 num_vec = le16_to_cpu(chunk->num_vectors); 3327 start_vecid = le16_to_cpu(chunk->start_vector_id); 3328 3329 for (i = 0; i < num_vec; i++) { 3330 if ((num_vecid_filled + i) < num_vecids) { 3331 vecids[num_vecid_filled + i] = start_vecid; 3332 start_vecid++; 3333 } else { 3334 break; 3335 } 3336 } 3337 num_vecid_filled = num_vecid_filled + i; 3338 } 3339 3340 return num_vecid_filled; 3341 } 3342 3343 /** 3344 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters 3345 * @qids: Array of queue ids 3346 * @num_qids: number of queue ids 3347 * @q_type: queue model 3348 * @chunks: queue ids received over mailbox 3349 * 3350 * Will initialize all queue ids with ids received as mailbox parameters 3351 * Returns number of ids filled 3352 */ 3353 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, 3354 struct virtchnl2_queue_reg_chunks *chunks) 3355 { 3356 u16 num_chunks = le16_to_cpu(chunks->num_chunks); 3357 u32 num_q_id_filled = 0, i; 3358 u32 start_q_id, num_q; 3359 3360 while (num_chunks--) { 3361 struct virtchnl2_queue_reg_chunk *chunk; 3362 3363 chunk = &chunks->chunks[num_chunks]; 3364 if (le32_to_cpu(chunk->type) != q_type) 3365 continue; 3366 3367 num_q = le32_to_cpu(chunk->num_queues); 3368 start_q_id = le32_to_cpu(chunk->start_queue_id); 3369 3370 for (i = 0; i < num_q; i++) { 3371 if ((num_q_id_filled + i) < num_qids) { 3372 qids[num_q_id_filled + i] = start_q_id; 3373 start_q_id++; 3374 } else { 3375 break; 3376 } 3377 } 3378 num_q_id_filled = num_q_id_filled + i; 3379 } 3380 3381 return num_q_id_filled; 3382 } 3383 3384 /** 3385 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3386 * @vport: virtual port for which the queues ids are initialized 3387 * @qids: queue ids 3388 * @num_qids: number of queue ids 3389 * @q_type: type of queue 3390 * 3391 * Will initialize all queue ids with ids received as mailbox 3392 * parameters. Returns number of queue ids initialized. 
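 *
 * The ids in @qids are consumed in order for the given @q_type; callers
 * compare the return value against the expected queue count to detect a
 * short assignment.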
3393 */ 3394 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, 3395 const u32 *qids, 3396 int num_qids, 3397 u32 q_type) 3398 { 3399 struct idpf_queue *q; 3400 int i, j, k = 0; 3401 3402 switch (q_type) { 3403 case VIRTCHNL2_QUEUE_TYPE_TX: 3404 for (i = 0; i < vport->num_txq_grp; i++) { 3405 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3406 3407 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { 3408 tx_qgrp->txqs[j]->q_id = qids[k]; 3409 tx_qgrp->txqs[j]->q_type = 3410 VIRTCHNL2_QUEUE_TYPE_TX; 3411 } 3412 } 3413 break; 3414 case VIRTCHNL2_QUEUE_TYPE_RX: 3415 for (i = 0; i < vport->num_rxq_grp; i++) { 3416 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3417 u16 num_rxq; 3418 3419 if (idpf_is_queue_model_split(vport->rxq_model)) 3420 num_rxq = rx_qgrp->splitq.num_rxq_sets; 3421 else 3422 num_rxq = rx_qgrp->singleq.num_rxq; 3423 3424 for (j = 0; j < num_rxq && k < num_qids; j++, k++) { 3425 if (idpf_is_queue_model_split(vport->rxq_model)) 3426 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; 3427 else 3428 q = rx_qgrp->singleq.rxqs[j]; 3429 q->q_id = qids[k]; 3430 q->q_type = VIRTCHNL2_QUEUE_TYPE_RX; 3431 } 3432 } 3433 break; 3434 case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: 3435 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { 3436 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; 3437 3438 tx_qgrp->complq->q_id = qids[k]; 3439 tx_qgrp->complq->q_type = 3440 VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 3441 } 3442 break; 3443 case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: 3444 for (i = 0; i < vport->num_rxq_grp; i++) { 3445 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; 3446 u8 num_bufqs = vport->num_bufqs_per_qgrp; 3447 3448 for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { 3449 q = &rx_qgrp->splitq.bufq_sets[j].bufq; 3450 q->q_id = qids[k]; 3451 q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 3452 } 3453 } 3454 break; 3455 default: 3456 break; 3457 } 3458 3459 return k; 3460 } 3461 3462 /** 3463 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters 3464 * @vport: virtual port for which the queues ids are initialized 3465 * 3466 * Will initialize all queue ids with ids received as mailbox parameters. 3467 * Returns 0 on success, negative if all the queues are not initialized. 
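 *
 * Each queue type follows the same two-step pattern (sketch):
 *
 *	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
 *	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);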
3468 */ 3469 int idpf_vport_queue_ids_init(struct idpf_vport *vport) 3470 { 3471 struct virtchnl2_create_vport *vport_params; 3472 struct virtchnl2_queue_reg_chunks *chunks; 3473 struct idpf_vport_config *vport_config; 3474 u16 vport_idx = vport->idx; 3475 int num_ids, err = 0; 3476 u16 q_type; 3477 u32 *qids; 3478 3479 vport_config = vport->adapter->vport_config[vport_idx]; 3480 if (vport_config->req_qs_chunks) { 3481 struct virtchnl2_add_queues *vc_aq = 3482 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; 3483 chunks = &vc_aq->chunks; 3484 } else { 3485 vport_params = vport->adapter->vport_params_recvd[vport_idx]; 3486 chunks = &vport_params->chunks; 3487 } 3488 3489 qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL); 3490 if (!qids) 3491 return -ENOMEM; 3492 3493 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3494 VIRTCHNL2_QUEUE_TYPE_TX, 3495 chunks); 3496 if (num_ids < vport->num_txq) { 3497 err = -EINVAL; 3498 goto mem_rel; 3499 } 3500 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3501 VIRTCHNL2_QUEUE_TYPE_TX); 3502 if (num_ids < vport->num_txq) { 3503 err = -EINVAL; 3504 goto mem_rel; 3505 } 3506 3507 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, 3508 VIRTCHNL2_QUEUE_TYPE_RX, 3509 chunks); 3510 if (num_ids < vport->num_rxq) { 3511 err = -EINVAL; 3512 goto mem_rel; 3513 } 3514 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, 3515 VIRTCHNL2_QUEUE_TYPE_RX); 3516 if (num_ids < vport->num_rxq) { 3517 err = -EINVAL; 3518 goto mem_rel; 3519 } 3520 3521 if (!idpf_is_queue_model_split(vport->txq_model)) 3522 goto check_rxq; 3523 3524 q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 3525 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3526 if (num_ids < vport->num_complq) { 3527 err = -EINVAL; 3528 goto mem_rel; 3529 } 3530 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3531 if (num_ids < vport->num_complq) { 3532 err = -EINVAL; 3533 goto mem_rel; 3534 } 3535 3536 check_rxq: 3537 if (!idpf_is_queue_model_split(vport->rxq_model)) 3538 goto mem_rel; 3539 3540 q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 3541 num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); 3542 if (num_ids < vport->num_bufq) { 3543 err = -EINVAL; 3544 goto mem_rel; 3545 } 3546 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); 3547 if (num_ids < vport->num_bufq) 3548 err = -EINVAL; 3549 3550 mem_rel: 3551 kfree(qids); 3552 3553 return err; 3554 } 3555 3556 /** 3557 * idpf_vport_adjust_qs - Adjust to new requested queues 3558 * @vport: virtual port data struct 3559 * 3560 * Renegotiate queues. Returns 0 on success, negative on failure. 
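 *
 * A scratch virtchnl2_create_vport message is used only to rerun the local
 * queue calculation helpers; this function itself does not send a mailbox
 * message.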
3561 */ 3562 int idpf_vport_adjust_qs(struct idpf_vport *vport) 3563 { 3564 struct virtchnl2_create_vport vport_msg; 3565 int err; 3566 3567 vport_msg.txq_model = cpu_to_le16(vport->txq_model); 3568 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); 3569 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, 3570 NULL); 3571 if (err) 3572 return err; 3573 3574 idpf_vport_init_num_qs(vport, &vport_msg); 3575 idpf_vport_calc_num_q_groups(vport); 3576 3577 return 0; 3578 } 3579 3580 /** 3581 * idpf_is_capability_ena - Default implementation of capability checking 3582 * @adapter: Private data struct 3583 * @all: all or one flag 3584 * @field: caps field to check for flags 3585 * @flag: flag to check 3586 * 3587 * Return true if all capabilities are supported, false otherwise 3588 */ 3589 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, 3590 enum idpf_cap_field field, u64 flag) 3591 { 3592 u8 *caps = (u8 *)&adapter->caps; 3593 u32 *cap_field; 3594 3595 if (!caps) 3596 return false; 3597 3598 if (field == IDPF_BASE_CAPS) 3599 return false; 3600 3601 cap_field = (u32 *)(caps + field); 3602 3603 if (all) 3604 return (*cap_field & flag) == flag; 3605 else 3606 return !!(*cap_field & flag); 3607 } 3608 3609 /** 3610 * idpf_get_vport_id: Get vport id 3611 * @vport: virtual port structure 3612 * 3613 * Return vport id from the adapter persistent data 3614 */ 3615 u32 idpf_get_vport_id(struct idpf_vport *vport) 3616 { 3617 struct virtchnl2_create_vport *vport_msg; 3618 3619 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; 3620 3621 return le32_to_cpu(vport_msg->vport_id); 3622 } 3623 3624 /** 3625 * idpf_add_del_mac_filters - Add/del mac filters 3626 * @vport: Virtual port data structure 3627 * @np: Netdev private structure 3628 * @add: Add or delete flag 3629 * @async: Don't wait for return message 3630 * 3631 * Returns 0 on success, error on failure. 
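 *
 * Filters are batched at most IDPF_NUM_FILTERS_PER_MSG per mailbox message;
 * when @async is true the corresponding vport config flag is set and the
 * function returns after queueing the message(s) instead of waiting for the
 * virtchnl completion.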
3632 **/ 3633 int idpf_add_del_mac_filters(struct idpf_vport *vport, 3634 struct idpf_netdev_priv *np, 3635 bool add, bool async) 3636 { 3637 struct virtchnl2_mac_addr_list *ma_list = NULL; 3638 struct idpf_adapter *adapter = np->adapter; 3639 struct idpf_vport_config *vport_config; 3640 enum idpf_vport_config_flags mac_flag; 3641 struct pci_dev *pdev = adapter->pdev; 3642 enum idpf_vport_vc_state vc, vc_err; 3643 struct virtchnl2_mac_addr *mac_addr; 3644 struct idpf_mac_filter *f, *tmp; 3645 u32 num_msgs, total_filters = 0; 3646 int i = 0, k, err = 0; 3647 u32 vop; 3648 3649 vport_config = adapter->vport_config[np->vport_idx]; 3650 spin_lock_bh(&vport_config->mac_filter_list_lock); 3651 3652 /* Find the number of newly added filters */ 3653 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, 3654 list) { 3655 if (add && f->add) 3656 total_filters++; 3657 else if (!add && f->remove) 3658 total_filters++; 3659 } 3660 3661 if (!total_filters) { 3662 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3663 3664 return 0; 3665 } 3666 3667 /* Fill all the new filters into virtchannel message */ 3668 mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), 3669 GFP_ATOMIC); 3670 if (!mac_addr) { 3671 err = -ENOMEM; 3672 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3673 goto error; 3674 } 3675 3676 list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list, 3677 list) { 3678 if (add && f->add) { 3679 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3680 i++; 3681 f->add = false; 3682 if (i == total_filters) 3683 break; 3684 } 3685 if (!add && f->remove) { 3686 ether_addr_copy(mac_addr[i].addr, f->macaddr); 3687 i++; 3688 f->remove = false; 3689 if (i == total_filters) 3690 break; 3691 } 3692 } 3693 3694 spin_unlock_bh(&vport_config->mac_filter_list_lock); 3695 3696 if (add) { 3697 vop = VIRTCHNL2_OP_ADD_MAC_ADDR; 3698 vc = IDPF_VC_ADD_MAC_ADDR; 3699 vc_err = IDPF_VC_ADD_MAC_ADDR_ERR; 3700 mac_flag = IDPF_VPORT_ADD_MAC_REQ; 3701 } else { 3702 vop = VIRTCHNL2_OP_DEL_MAC_ADDR; 3703 vc = IDPF_VC_DEL_MAC_ADDR; 3704 vc_err = IDPF_VC_DEL_MAC_ADDR_ERR; 3705 mac_flag = IDPF_VPORT_DEL_MAC_REQ; 3706 } 3707 3708 /* Chunk up the filters into multiple messages to avoid 3709 * sending a control queue message buffer that is too large 3710 */ 3711 num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); 3712 3713 if (!async) 3714 mutex_lock(&vport->vc_buf_lock); 3715 3716 for (i = 0, k = 0; i < num_msgs; i++) { 3717 u32 entries_size, buf_size, num_entries; 3718 3719 num_entries = min_t(u32, total_filters, 3720 IDPF_NUM_FILTERS_PER_MSG); 3721 entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries; 3722 buf_size = struct_size(ma_list, mac_addr_list, num_entries); 3723 3724 if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { 3725 kfree(ma_list); 3726 ma_list = kzalloc(buf_size, GFP_ATOMIC); 3727 if (!ma_list) { 3728 err = -ENOMEM; 3729 goto list_prep_error; 3730 } 3731 } else { 3732 memset(ma_list, 0, buf_size); 3733 } 3734 3735 ma_list->vport_id = cpu_to_le32(np->vport_id); 3736 ma_list->num_mac_addr = cpu_to_le16(num_entries); 3737 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); 3738 3739 if (async) 3740 set_bit(mac_flag, vport_config->flags); 3741 3742 err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list); 3743 if (err) 3744 goto mbx_error; 3745 3746 if (!async) { 3747 err = idpf_wait_for_event(adapter, vport, vc, vc_err); 3748 if (err) 3749 goto mbx_error; 3750 } 3751 3752 k += num_entries; 3753 total_filters -= 
num_entries; 3754 } 3755 3756 mbx_error: 3757 if (!async) 3758 mutex_unlock(&vport->vc_buf_lock); 3759 kfree(ma_list); 3760 list_prep_error: 3761 kfree(mac_addr); 3762 error: 3763 if (err) 3764 dev_err(&pdev->dev, "Failed to add or del mac filters %d\n", err); 3765 3766 return err; 3767 } 3768 3769 /** 3770 * idpf_set_promiscuous - set promiscuous and send message to mailbox 3771 * @adapter: Driver specific private structure 3772 * @config_data: Vport specific config data 3773 * @vport_id: Vport identifier 3774 * 3775 * Request promiscuous mode to be enabled for the vport. The message is sent 3776 * asynchronously and does not wait for a response. Returns 0 on success, negative 3777 * on failure. 3778 */ 3779 int idpf_set_promiscuous(struct idpf_adapter *adapter, 3780 struct idpf_vport_user_config_data *config_data, 3781 u32 vport_id) 3782 { 3783 struct virtchnl2_promisc_info vpi; 3784 u16 flags = 0; 3785 int err; 3786 3787 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) 3788 flags |= VIRTCHNL2_UNICAST_PROMISC; 3789 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) 3790 flags |= VIRTCHNL2_MULTICAST_PROMISC; 3791 3792 vpi.vport_id = cpu_to_le32(vport_id); 3793 vpi.flags = cpu_to_le16(flags); 3794 3795 err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE, 3796 sizeof(struct virtchnl2_promisc_info), 3797 (u8 *)&vpi); 3798 3799 return err; 3800 } 3801