/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

/*
 * Poll the CDRP CRB register until the firmware posts a response word.
 * Sleeps 1 ms per iteration; gives up after QLCNIC_OS_CRB_RETRY_COUNT
 * iterations.  Returns the raw response word, or QLCNIC_CDRP_RSP_TIMEOUT
 * if the firmware never responded.
 */
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

/*
 * Issue a single CDRP command to the firmware and collect its response.
 *
 * The request (cmd->req.cmd/arg1/arg2/arg3) is written to the CRB
 * registers under the API lock, then the response is polled.  The
 * result code ends up in cmd->rsp.cmd:
 *   - QLCNIC_RCODE_TIMEOUT if the lock or the firmware timed out,
 *   - the firmware error code (read back from ARG1) on RSP_FAIL,
 *   - QLCNIC_RCODE_SUCCESS on RSP_OK.
 * Response arguments are read back from the CRB only for those rsp
 * fields the caller pre-set to a non-zero value before calling
 * (rsp.arg2/arg3 only on success; rsp.arg1 on any completed exchange).
 */
void
qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
{
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
		adapter->fw_hal_version);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
		return;
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		/* On failure ARG1 carries the firmware error code */
		cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
				cmd->rsp.cmd);
	} else if (rsp == QLCNIC_CDRP_RSP_OK) {
		cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
		/* Read back only the response args the caller asked for
		 * (signalled by pre-setting them non-zero).
		 */
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = QLCRD32(adapter,
				QLCNIC_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = QLCRD32(adapter,
				QLCNIC_ARG3_CRB_OFFSET);
	}
	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);

	/* Release semaphore */
	qlcnic_api_unlock(adapter);

}

/*
 * Ones'-complement 32-bit checksum over a minidump template buffer of
 * temp_size bytes.  Returns 0 (after ~) only when the buffer checksums
 * to all-ones, i.e. when the template embeds a valid checksum field.
 */
static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
{
	uint64_t sum = 0;
	int count = temp_size / sizeof(uint32_t);
	while (count-- > 0)
		sum += *temp_buffer++;
	/* fold carries back into the low 32 bits */
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~sum;
}

/*
 * Fetch the firmware minidump template: query its size, DMA it from the
 * card, verify its checksum, and cache a host copy (byte-swapped to CPU
 * order) in ahw->fw_dump.tmpl_hdr.  Enables fw_dump on success.
 * Returns 0 on success, -EIO/-ENOMEM on failure.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err, i;
	u16 temp_size;
	void *tmp_addr;
	u32 version, csum, *template, *tmp_buf;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
	dma_addr_t tmp_addr_t = 0;

	ahw = adapter->ahw;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
	/* non-zero rsp fields ask qlcnic_issue_cmd() to read them back */
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
		dev_info(&adapter->pdev->dev,
			"Can't get template size %d\n", cmd.rsp.cmd);
		err = -EIO;
		return err;
	}
	temp_size = cmd.rsp.arg2;	/* template size in bytes */
	version = cmd.rsp.arg3;		/* template version (currently unused) */
	if (!temp_size)
		return -EIO;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
			&tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr) {
		dev_err(&adapter->pdev->dev,
			"Can't get memory for FW dump template\n");
		return -ENOMEM;
	}
	memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
	cmd.req.arg1 = LSD(tmp_addr_t);
	cmd.req.arg2 = MSD(tmp_addr_t);
	cmd.req.arg3 = temp_size;
	qlcnic_issue_cmd(adapter, &cmd);

	err = cmd.rsp.cmd;
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get mini dump template header %d\n", err);
		err = -EIO;
		goto error;
	}
	tmp_tmpl = tmp_addr;
	csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		err = -EIO;
		goto error;
	}
	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
	if (!ahw->fw_dump.tmpl_hdr) {
		/* NOTE(review): allocation failure mapped to -EIO here,
		 * not -ENOMEM — confirm callers don't care.
		 */
		err = -EIO;
		goto error;
	}
	/* copy template to host memory, converting LE words to CPU order */
	tmp_buf = tmp_addr;
	template = (u32 *) ahw->fw_dump.tmpl_hdr;
	for (i = 0; i < temp_size/sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);

	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
	ahw->fw_dump.enable = 1;
error:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
	return err;
}

/*
 * Tell the firmware the new MTU for this function's rx context.
 * Only issued while the rx context is active; otherwise a no-op success.
 * Returns 0 on success, -EIO on firmware failure.
 */
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = mtu;
	cmd.req.arg3 = 0;
	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
		qlcnic_issue_cmd(adapter, &cmd);
		if (cmd.rsp.cmd) {
			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
			return -EIO;
		}
	}

	return 0;
}

/*
 * Build the host rx-context request (RDS + SDS ring descriptions) in a
 * DMA buffer, hand it to the firmware, and on success map the CRB
 * producer/consumer/interrupt-mask register offsets the card returned
 * into the host ring structures.  Returns 0 or a negative errno /
 * firmware error code.
 */
static int
qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
						nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
						nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	/* tell the card where to DMA its response */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
						| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
							 msix_handler);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	/* SDS ring descriptors follow immediately after the RDS ones */
	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32) (phys_addr >> 32);
	cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}


	/* map card-provided CRB offsets into the host ring structures */
	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
		cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}

/*
 * Ask the firmware to tear down the rx context; the host-side state is
 * marked freed regardless of whether the firmware call succeeded.
 */
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}

/*
 * Create the tx (command descriptor) context in firmware.  Resets the
 * host-side producer/consumer indices, builds the request in a DMA
 * buffer, and on success records the returned producer CRB offset and
 * tx context id.  Returns 0 or -ENOMEM/-EIO.
 */
static int
qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_tx_ctx	*prq;
	struct qlcnic_hostrq_cds_ring	*prq_cds;
	struct qlcnic_cardrsp_tx_ctx	*prsp;
	void	*rq_addr, *rsp_addr;
	size_t	rq_size, rsp_size;
	u32	temp;
	struct qlcnic_cmd_args cmd;
	int	err;
	u64	phys_addr;
	dma_addr_t	rq_phys_addr, rsp_phys_addr;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
					QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;

		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
		rsp_phys_addr);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

/*
 * Ask the firmware to tear down the tx context (best effort; failure is
 * only logged).
 */
static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->tx_context_id;
	cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
	qlcnic_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
}

/*
 * Send a raw port-configuration word to the firmware.
 * Returns the firmware response code (0 on success).
 */
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = config;
	cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
	qlcnic_issue_cmd(adapter, &cmd);

	return cmd.rsp.cmd;
}

/*
 * Allocate all coherent-DMA descriptor memory used by the driver: the
 * tx hardware-consumer word, the tx descriptor ring, and one ring each
 * for every RDS and SDS ring.  On any failure everything allocated so
 * far is released via qlcnic_free_hw_resources().
 */
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err;
	int ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
		sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
	if (tx_ring->hw_consumer == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx consumer\n");
		return -ENOMEM;
	}

	/* cmd desc ring */
	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr, GFP_KERNEL);

	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate rds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate sds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}


/*
 * Create both firmware contexts (rx then tx), performing a function-
 * level reset first if one is pending.  On tx failure the rx context is
 * rolled back.  Sets __QLCNIC_FW_ATTACHED on full success.
 */
int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
	int err;

	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
	if (err)
		return err;

	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
	if (err) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		return err;
	}

	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
	return 0;
}

/*
 * Destroy both firmware contexts if they were attached; the
 * test_and_clear makes this idempotent.
 */
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		qlcnic_fw_cmd_destroy_tx_ctx(adapter);

		/* Allow dma queues to drain after context reset */
		msleep(20);
	}
}

/*
 * Free every coherent-DMA buffer allocated by
 * qlcnic_alloc_hw_resources().  Safe to call on a partially allocated
 * state: each pointer is checked and NULLed after freeing.
 */
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	tx_ring = adapter->tx_ring;
	if (tx_ring->hw_consumer != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				sizeof(u32),
				tx_ring->hw_consumer,
				tx_ring->hw_cons_phys_addr);
		tx_ring->hw_consumer = NULL;
	}

	if (tx_ring->desc_head != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				sds_ring->desc_head,
				sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}


/* Get MAC address of a NIC partition */
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err;
	struct qlcnic_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
	cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
	/* request read-back of rsp.arg1/arg2 (they carry the MAC halves) */
	cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS)
		qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
	else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address%d\n", err);
		err = -EIO;
	}

	return err;
}

/* Get info of a NIC partition */
int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
				struct qlcnic_info *npar_info, u8 func_id)
{
	int	err;
	dma_addr_t nic_dma_t;
	struct qlcnic_info *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t	nic_size = sizeof(struct qlcnic_info);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
				&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
	cmd.req.arg1 = MSD(nic_dma_t);
	cmd.req.arg2 = LSD(nic_dma_t);
	cmd.req.arg3 = (func_id << 16 | nic_size);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		/* firmware filled the DMA buffer in little-endian order */
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);

		dev_info(&adapter->pdev->dev,
			"phy port: %d switch_mode: %d,\n"
			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
			"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
			npar_info->phys_port, npar_info->switch_mode,
			npar_info->max_tx_ques, npar_info->max_rx_ques,
			npar_info->min_tx_bw, npar_info->max_tx_bw,
			npar_info->max_mtu, npar_info->capabilities);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}

/* Configure a NIC partition */
int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info);

	/* only the management function may reconfigure partitions */
	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = nic_info_addr;

	/* build the little-endian request image in the DMA buffer */
	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
	cmd.req.arg1 = MSD(nic_dma_t);
	cmd.req.arg2 = LSD(nic_dma_t);
	cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}

/* Get PCI Info of a partition */
int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
				struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
			&pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = pci_info_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
	cmd.req.arg1 = MSD(pci_info_dma_t);
	cmd.req.arg2 = LSD(pci_info_dma_t);
	cmd.req.arg3 = pci_size;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err == QLCNIC_RCODE_SUCCESS) {
		/* copy one record per PCI function into the caller's array;
		 * pci_info must have room for QLCNIC_MAX_PCI_FUNC entries.
		 */
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
		pci_info_dma_t);
	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	/* arg1: eswitch id | enable bit (BIT_4) | pci func in bits 8+ */
	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
	cmd.req.arg1 = arg1;
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring%d on eswitch:%d\n",
			pci_func, id);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch %d for port mirroring:%d\n",
			id, pci_func);
	}

	return err;
}

/*
 * Fetch per-port eSwitch statistics for one PCI function into
 * *esw_stats.  Non-management functions may only query their own
 * function.  Returns 0 on success, -EIO/-ENOMEM otherwise.
 */
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
	struct __qlcnic_esw_statistics *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
	    func != adapter->ahw->pci_func) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	/* arg1: func | version | port-type | rx/tx select | buffer size */
	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
	cmd.req.arg1 = arg1;
	cmd.req.arg2 = MSD(stats_dma_t);
	cmd.req.arg3 = LSD(stats_dma_t);
	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
				le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
				le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
		struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev,
			"%s: Unable to allocate memory.\n", __func__);
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
	cmd.req.arg1 = stats_size << 16;
	cmd.req.arg2 = MSD(stats_dma_t);
	cmd.req.arg3 = LSD(stats_dma_t);

	qlcnic_issue_cmd(adapter, &cmd);
	err = cmd.rsp.cmd;

	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
					le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
					le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
					le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
				le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
				le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
				le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_info(&adapter->pdev->dev,
			"%s: Get mac stats failed =%d.\n", __func__, err);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}

/*
 * Aggregate per-port statistics for every PCI function attached to one
 * eSwitch.  Fields start at QLCNIC_STATS_NOT_AVAIL and are accumulated
 * via QLCNIC_ADD_ESW_STATS.  Returns 0 if at least one port was read,
 * -EIO otherwise.  Management function only.
 */
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	/* NOTE(review): this zeroes only the first 8 bytes of *esw_stats;
	 * sizeof(*esw_stats) looks intended — confirm, though every field
	 * is overwritten explicitly just below.
	 */
	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
						port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
						port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
						port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
						port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
						port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
						port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
						port_stats.numbytes);
		ret = 0;
	}
	return ret;
}

/*
 * Clear firmware statistics for one port (func_esw == QLCNIC_STATS_PORT)
 * or one eSwitch (QLCNIC_STATS_ESWITCH); BIT_14 in arg1 selects "clear".
 * Validates port and rx_tx ranges first.  Management function only.
 */
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
		const u8 port, const u8 rx_tx)
{

	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
	cmd.req.arg1 = arg1;
	qlcnic_issue_cmd(adapter, &cmd);
	return cmd.rsp.cmd;

err_ret:
	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
		"rx_ctx=%d\n", func_esw, port, rx_tx);
	return -EIO;
}

static int 1059 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1060 u32 *arg1, u32 *arg2) 1061 { 1062 int err = -EIO; 1063 struct qlcnic_cmd_args cmd; 1064 u8 pci_func; 1065 pci_func = (*arg1 >> 8); 1066 1067 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG; 1068 cmd.req.arg1 = *arg1; 1069 cmd.rsp.arg1 = cmd.rsp.arg2 = 1; 1070 qlcnic_issue_cmd(adapter, &cmd); 1071 *arg1 = cmd.rsp.arg1; 1072 *arg2 = cmd.rsp.arg2; 1073 err = cmd.rsp.cmd; 1074 1075 if (err == QLCNIC_RCODE_SUCCESS) { 1076 dev_info(&adapter->pdev->dev, 1077 "eSwitch port config for pci func %d\n", pci_func); 1078 } else { 1079 dev_err(&adapter->pdev->dev, 1080 "Failed to get eswitch port config for pci func %d\n", 1081 pci_func); 1082 } 1083 return err; 1084 } 1085 /* Configure eSwitch port 1086 op_mode = 0 for setting default port behavior 1087 op_mode = 1 for setting vlan id 1088 op_mode = 2 for deleting vlan id 1089 op_type = 0 for vlan_id 1090 op_type = 1 for port vlan_id 1091 */ 1092 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, 1093 struct qlcnic_esw_func_cfg *esw_cfg) 1094 { 1095 int err = -EIO; 1096 u32 arg1, arg2 = 0; 1097 struct qlcnic_cmd_args cmd; 1098 u8 pci_func; 1099 1100 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1101 return err; 1102 pci_func = esw_cfg->pci_func; 1103 arg1 = (adapter->npars[pci_func].phy_port & BIT_0); 1104 arg1 |= (pci_func << 8); 1105 1106 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1107 return err; 1108 arg1 &= ~(0x0ff << 8); 1109 arg1 |= (pci_func << 8); 1110 arg1 &= ~(BIT_2 | BIT_3); 1111 switch (esw_cfg->op_mode) { 1112 case QLCNIC_PORT_DEFAULTS: 1113 arg1 |= (BIT_4 | BIT_6 | BIT_7); 1114 arg2 |= (BIT_0 | BIT_1); 1115 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) 1116 arg2 |= (BIT_2 | BIT_3); 1117 if (!(esw_cfg->discard_tagged)) 1118 arg1 &= ~BIT_4; 1119 if (!(esw_cfg->promisc_mode)) 1120 arg1 &= ~BIT_6; 1121 if (!(esw_cfg->mac_override)) 1122 arg1 &= ~BIT_7; 1123 if (!(esw_cfg->mac_anti_spoof)) 1124 
arg2 &= ~BIT_0; 1125 if (!(esw_cfg->offload_flags & BIT_0)) 1126 arg2 &= ~(BIT_1 | BIT_2 | BIT_3); 1127 if (!(esw_cfg->offload_flags & BIT_1)) 1128 arg2 &= ~BIT_2; 1129 if (!(esw_cfg->offload_flags & BIT_2)) 1130 arg2 &= ~BIT_3; 1131 break; 1132 case QLCNIC_ADD_VLAN: 1133 arg1 |= (BIT_2 | BIT_5); 1134 arg1 |= (esw_cfg->vlan_id << 16); 1135 break; 1136 case QLCNIC_DEL_VLAN: 1137 arg1 |= (BIT_3 | BIT_5); 1138 arg1 &= ~(0x0ffff << 16); 1139 break; 1140 default: 1141 return err; 1142 } 1143 1144 memset(&cmd, 0, sizeof(cmd)); 1145 cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH; 1146 cmd.req.arg1 = arg1; 1147 cmd.req.arg2 = arg2; 1148 qlcnic_issue_cmd(adapter, &cmd); 1149 1150 err = cmd.rsp.cmd; 1151 if (err != QLCNIC_RCODE_SUCCESS) { 1152 dev_err(&adapter->pdev->dev, 1153 "Failed to configure eswitch pci func %d\n", pci_func); 1154 } else { 1155 dev_info(&adapter->pdev->dev, 1156 "Configured eSwitch for pci func %d\n", pci_func); 1157 } 1158 1159 return err; 1160 } 1161 1162 int 1163 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1164 struct qlcnic_esw_func_cfg *esw_cfg) 1165 { 1166 u32 arg1, arg2; 1167 u8 phy_port; 1168 if (adapter->op_mode == QLCNIC_MGMT_FUNC) 1169 phy_port = adapter->npars[esw_cfg->pci_func].phy_port; 1170 else 1171 phy_port = adapter->physical_port; 1172 arg1 = phy_port; 1173 arg1 |= (esw_cfg->pci_func << 8); 1174 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1175 return -EIO; 1176 1177 esw_cfg->discard_tagged = !!(arg1 & BIT_4); 1178 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); 1179 esw_cfg->promisc_mode = !!(arg1 & BIT_6); 1180 esw_cfg->mac_override = !!(arg1 & BIT_7); 1181 esw_cfg->vlan_id = LSW(arg1 >> 16); 1182 esw_cfg->mac_anti_spoof = (arg2 & 0x1); 1183 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); 1184 1185 return 0; 1186 } 1187