1 /* 2 * Copyright (C) 2005 - 2011 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Contact Information: 11 * linux-drivers@emulex.com 12 * 13 * Emulex 14 * 3333 Susan Street 15 * Costa Mesa, CA 92626 16 */ 17 18 #include "be.h" 19 #include "be_cmds.h" 20 21 /* Must be a power of 2 or else MODULO will BUG_ON */ 22 static int be_get_temp_freq = 64; 23 24 static inline void *embedded_payload(struct be_mcc_wrb *wrb) 25 { 26 return wrb->payload.embedded_payload; 27 } 28 29 static void be_mcc_notify(struct be_adapter *adapter) 30 { 31 struct be_queue_info *mccq = &adapter->mcc_obj.q; 32 u32 val = 0; 33 34 if (be_error(adapter)) 35 return; 36 37 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 38 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 39 40 wmb(); 41 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 42 } 43 44 /* To check if valid bit is set, check the entire word as we don't know 45 * the endianness of the data (old entry is host endian while a new entry is 46 * little endian) */ 47 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 48 { 49 if (compl->flags != 0) { 50 compl->flags = le32_to_cpu(compl->flags); 51 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); 52 return true; 53 } else { 54 return false; 55 } 56 } 57 58 /* Need to reset the entire word that houses the valid bit */ 59 static inline void be_mcc_compl_use(struct be_mcc_compl *compl) 60 { 61 compl->flags = 0; 62 } 63 64 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) 65 { 66 unsigned long addr; 67 68 addr = tag1; 69 addr = ((addr << 16) << 16) | tag0; 70 return (void *)addr; 71 } 72 73 static int be_mcc_compl_process(struct be_adapter *adapter, 74 struct be_mcc_compl *compl) 75 { 76 u16 compl_status, extd_status; 77 struct be_cmd_resp_hdr *resp_hdr; 78 u8 opcode = 0, subsystem = 0; 79 80 /* Just swap the status to host endian; mcc tag is opaquely copied 81 * from mcc_wrb */ 82 be_dws_le_to_cpu(compl, 4); 83 84 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 85 CQE_STATUS_COMPL_MASK; 86 87 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 88 89 if (resp_hdr) { 90 opcode = resp_hdr->opcode; 91 subsystem = resp_hdr->subsystem; 92 } 93 94 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 95 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 96 (subsystem == CMD_SUBSYSTEM_COMMON)) { 97 adapter->flash_status = compl_status; 98 complete(&adapter->flash_compl); 99 } 100 101 if (compl_status == MCC_STATUS_SUCCESS) { 102 if (((opcode == OPCODE_ETH_GET_STATISTICS) || 103 (opcode == OPCODE_ETH_GET_PPORT_STATS)) && 104 (subsystem == CMD_SUBSYSTEM_ETH)) { 105 be_parse_stats(adapter); 106 adapter->stats_cmd_sent = false; 107 } 108 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 109 subsystem == CMD_SUBSYSTEM_COMMON) { 110 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 111 (void *)resp_hdr; 112 adapter->drv_stats.be_on_die_temperature = 113 resp->on_die_temperature; 114 } 115 } else { 116 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) 117 be_get_temp_freq = 0; 118 119 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 120 compl_status == MCC_STATUS_ILLEGAL_REQUEST) 121 goto done; 122 123 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 124 dev_warn(&adapter->pdev->dev, "This domain(VM) is not " 125 "permitted to execute this cmd (opcode %d)\n", 126 opcode); 127 } else { 128 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 129 CQE_STATUS_EXTD_MASK; 130 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" 131 "status %d, extd-status %d\n", 132 opcode, compl_status, extd_status); 133 } 134 } 135 done: 136 return compl_status; 137 } 138 139 /* Link state evt is a string of bytes; no need for endian swapping */ 140 static void be_async_link_state_process(struct be_adapter *adapter, 141 struct be_async_event_link_state *evt) 142 { 143 /* When link status changes, link speed must be re-queried from FW */ 144 adapter->phy.link_speed = -1; 145 146 /* For the initial link status do not rely on the ASYNC event as 147 * it may not be received in some cases. 148 */ 149 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 150 be_link_status_update(adapter, evt->port_link_status); 151 } 152 153 /* Grp5 CoS Priority evt */ 154 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 155 struct be_async_event_grp5_cos_priority *evt) 156 { 157 if (evt->valid) { 158 adapter->vlan_prio_bmap = evt->available_priority_bmap; 159 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 160 adapter->recommended_prio = 161 evt->reco_default_priority << VLAN_PRIO_SHIFT; 162 } 163 } 164 165 /* Grp5 QOS Speed evt */ 166 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 167 struct be_async_event_grp5_qos_link_speed *evt) 168 { 169 if (evt->physical_port == adapter->port_num) { 170 /* qos_link_speed is in units of 10 Mbps */ 171 adapter->phy.link_speed = evt->qos_link_speed * 10; 172 } 173 } 174 175 /*Grp5 PVID evt*/ 176 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 177 struct be_async_event_grp5_pvid_state *evt) 178 { 179 if (evt->enabled) 180 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 181 else 182 adapter->pvid = 0; 183 } 184 185 static void be_async_grp5_evt_process(struct be_adapter *adapter, 186 u32 trailer, struct be_mcc_compl *evt) 187 { 188 u8 event_type = 0; 189 190 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & 191 ASYNC_TRAILER_EVENT_TYPE_MASK; 192 193 switch (event_type) { 194 case ASYNC_EVENT_COS_PRIORITY: 195 be_async_grp5_cos_priority_process(adapter, 196 (struct be_async_event_grp5_cos_priority *)evt); 197 break; 198 case ASYNC_EVENT_QOS_SPEED: 199 be_async_grp5_qos_speed_process(adapter, 200 (struct be_async_event_grp5_qos_link_speed *)evt); 201 break; 202 case ASYNC_EVENT_PVID_STATE: 203 be_async_grp5_pvid_state_process(adapter, 204 (struct be_async_event_grp5_pvid_state *)evt); 205 break; 206 default: 207 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 208 break; 209 } 210 } 211 212 static inline bool is_link_state_evt(u32 trailer) 213 { 214 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 215 ASYNC_TRAILER_EVENT_CODE_MASK) == 216 ASYNC_EVENT_CODE_LINK_STATE; 217 } 218 219 static inline bool is_grp5_evt(u32 trailer) 220 { 221 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 222 ASYNC_TRAILER_EVENT_CODE_MASK) == 223 ASYNC_EVENT_CODE_GRP_5); 224 } 225 226 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 227 { 228 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 229 struct be_mcc_compl *compl = queue_tail_node(mcc_cq); 230 231 if (be_mcc_compl_is_new(compl)) { 232 queue_tail_inc(mcc_cq); 233 return compl; 234 } 235 return NULL; 236 } 237 238 void be_async_mcc_enable(struct be_adapter *adapter) 239 { 240 spin_lock_bh(&adapter->mcc_cq_lock); 241 242 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); 243 adapter->mcc_obj.rearm_cq = true; 244 245 spin_unlock_bh(&adapter->mcc_cq_lock); 246 } 247 248 void be_async_mcc_disable(struct be_adapter *adapter) 249 { 250 adapter->mcc_obj.rearm_cq = false; 251 } 252 253 int be_process_mcc(struct be_adapter *adapter) 254 { 255 struct be_mcc_compl *compl; 256 int num = 0, status = 0; 257 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 258 259 spin_lock_bh(&adapter->mcc_cq_lock); 260 while ((compl = be_mcc_compl_get(adapter))) { 261 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 262 /* Interpret flags as an async trailer */ 263 if (is_link_state_evt(compl->flags)) 264 be_async_link_state_process(adapter, 265 (struct be_async_event_link_state *) compl); 266 else if (is_grp5_evt(compl->flags)) 267 be_async_grp5_evt_process(adapter, 268 compl->flags, compl); 269 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 270 status = be_mcc_compl_process(adapter, compl); 271 atomic_dec(&mcc_obj->q.used); 272 } 273 be_mcc_compl_use(compl); 274 num++; 275 } 276 277 if (num) 278 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 279 280 spin_unlock_bh(&adapter->mcc_cq_lock); 281 return status; 282 } 283 284 /* Wait till no more pending mcc requests are present */ 285 static int be_mcc_wait_compl(struct be_adapter *adapter) 286 { 287 #define mcc_timeout 120000 /* 12s timeout */ 288 int i, status = 0; 289 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 290 291 for (i = 0; i < mcc_timeout; i++) { 292 if (be_error(adapter)) 293 return -EIO; 294 295 status = be_process_mcc(adapter); 296 297 if (atomic_read(&mcc_obj->q.used) == 0) 298 break; 299 udelay(100); 300 } 301 if (i == mcc_timeout) { 302 dev_err(&adapter->pdev->dev, "FW not responding\n"); 303 adapter->fw_timeout = true; 304 return -EIO; 305 } 306 return status; 307 } 308 309 /* Notify MCC requests and wait for completion */ 310 static int be_mcc_notify_wait(struct be_adapter *adapter) 311 { 312 int status; 313 struct be_mcc_wrb *wrb; 314 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 315 u16 index = mcc_obj->q.head; 316 struct be_cmd_resp_hdr *resp; 317 318 index_dec(&index, mcc_obj->q.len); 319 wrb = queue_index_node(&mcc_obj->q, index); 320 321 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); 322 323 be_mcc_notify(adapter); 324 325 status = be_mcc_wait_compl(adapter); 326 if (status == -EIO) 327 goto out; 328 329 status = resp->status; 330 out: 331 return status; 332 } 333 334 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 335 { 336 int msecs = 0; 337 u32 ready; 338 339 do { 340 if (be_error(adapter)) 341 return -EIO; 342 343 ready = ioread32(db); 344 if (ready == 0xffffffff) 345 return -1; 346 347 ready &= MPU_MAILBOX_DB_RDY_MASK; 348 if (ready) 349 break; 350 351 if (msecs > 4000) { 352 dev_err(&adapter->pdev->dev, "FW not responding\n"); 353 adapter->fw_timeout = true; 354 be_detect_dump_ue(adapter); 355 return -1; 356 } 357 358 msleep(1); 359 msecs++; 360 } while (true); 361 362 return 0; 363 } 364 365 /* 366 * Insert the mailbox address into the doorbell in two steps 367 * Polls on the mbox doorbell till a command completion (or a timeout) occurs 368 */ 369 static int be_mbox_notify_wait(struct be_adapter *adapter) 370 { 371 int status; 372 u32 val = 0; 373 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; 374 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 375 struct be_mcc_mailbox *mbox = mbox_mem->va; 376 struct be_mcc_compl *compl = &mbox->compl; 377 378 /* wait for ready to be set */ 379 status = be_mbox_db_ready_wait(adapter, db); 380 if (status != 0) 381 return status; 382 383 val |= MPU_MAILBOX_DB_HI_MASK; 384 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 385 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 386 iowrite32(val, db); 387 388 /* wait for ready to be set */ 389 status = be_mbox_db_ready_wait(adapter, db); 390 if (status != 0) 391 return status; 392 393 val = 0; 394 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ 395 val |= (u32)(mbox_mem->dma >> 4) << 2; 396 iowrite32(val, db); 397 398 status = be_mbox_db_ready_wait(adapter, db); 399 if (status != 0) 400 return status; 401 402 /* A cq entry has been made now */ 403 if (be_mcc_compl_is_new(compl)) { 404 status = be_mcc_compl_process(adapter, &mbox->compl); 405 be_mcc_compl_use(compl); 406 if (status) 407 return status; 408 } else { 409 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); 410 return -1; 411 } 412 return 0; 413 } 414 415 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 416 { 417 u32 sem; 418 419 if (lancer_chip(adapter)) 420 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); 421 else 422 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); 423 424 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 425 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) 426 return -1; 427 else 428 return 0; 429 } 430 431 int be_cmd_POST(struct be_adapter *adapter) 432 { 433 u16 stage; 434 int status, timeout = 0; 435 struct device *dev = &adapter->pdev->dev; 436 437 do { 438 status = be_POST_stage_get(adapter, &stage); 439 if (status) { 440 dev_err(dev, "POST error; stage=0x%x\n", stage); 441 return -1; 442 } else if (stage != POST_STAGE_ARMFW_RDY) { 443 if (msleep_interruptible(2000)) { 444 dev_err(dev, "Waiting for POST aborted\n"); 445 return -EINTR; 446 } 447 timeout += 2; 448 } else { 449 return 0; 450 } 451 } while (timeout < 60); 452 453 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 454 return -1; 455 } 456 457 458 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) 459 { 460 return &wrb->payload.sgl[0]; 461 } 462 463 464 /* Don't touch the hdr after it's prepared */ 465 /* mem will be NULL for embedded commands */ 466 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 467 u8 subsystem, u8 opcode, int cmd_len, 468 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 469 { 470 struct be_sge *sge; 471 unsigned long addr = (unsigned long)req_hdr; 472 u64 req_addr = addr; 473 474 req_hdr->opcode = opcode; 475 req_hdr->subsystem = subsystem; 476 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 477 req_hdr->version = 0; 478 479 wrb->tag0 = req_addr & 0xFFFFFFFF; 480 wrb->tag1 = upper_32_bits(req_addr); 481 482 wrb->payload_length = cmd_len; 483 if (mem) { 484 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 485 MCC_WRB_SGE_CNT_SHIFT; 486 sge = nonembedded_sgl(wrb); 487 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); 488 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); 489 sge->len = cpu_to_le32(mem->size); 490 } else 491 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 492 be_dws_cpu_to_le(wrb, 8); 493 } 494 495 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 496 struct be_dma_mem *mem) 497 { 498 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 499 u64 dma = (u64)mem->dma; 500 501 for (i = 0; i < buf_pages; i++) { 502 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); 503 pages[i].hi = cpu_to_le32(upper_32_bits(dma)); 504 dma += PAGE_SIZE_4K; 505 } 506 } 507 508 /* Converts interrupt delay in microseconds to multiplier value */ 509 static u32 eq_delay_to_mult(u32 usec_delay) 510 { 511 #define MAX_INTR_RATE 651042 512 const u32 round = 10; 513 u32 multiplier; 514 515 if (usec_delay == 0) 516 multiplier = 0; 517 else { 518 u32 interrupt_rate = 1000000 / usec_delay; 519 /* Max delay, corresponding to the lowest interrupt rate */ 520 if (interrupt_rate == 0) 521 multiplier = 1023; 522 else { 523 multiplier = (MAX_INTR_RATE - interrupt_rate) * round; 524 multiplier /= interrupt_rate; 525 /* Round the multiplier to the closest value.*/ 526 multiplier = (multiplier + round/2) / round; 527 multiplier = min(multiplier, (u32)1023); 528 } 529 } 530 return multiplier; 531 } 532 533 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 534 { 535 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 536 struct be_mcc_wrb *wrb 537 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 538 memset(wrb, 0, sizeof(*wrb)); 539 return wrb; 540 } 541 542 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) 543 { 544 struct be_queue_info *mccq = &adapter->mcc_obj.q; 545 struct be_mcc_wrb *wrb; 546 547 if (atomic_read(&mccq->used) >= mccq->len) { 548 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); 549 return NULL; 550 } 551 552 wrb = queue_head_node(mccq); 553 queue_head_inc(mccq); 554 atomic_inc(&mccq->used); 555 memset(wrb, 0, sizeof(*wrb)); 556 return wrb; 557 } 558 559 /* Tell fw we're about to start firing cmds by writing a 560 * special pattern across the wrb hdr; uses mbox 561 */ 562 int be_cmd_fw_init(struct be_adapter *adapter) 563 { 564 u8 *wrb; 565 int status; 566 567 if (mutex_lock_interruptible(&adapter->mbox_lock)) 568 return -1; 569 570 wrb = (u8 *)wrb_from_mbox(adapter); 571 *wrb++ = 0xFF; 572 *wrb++ = 0x12; 573 *wrb++ = 0x34; 574 *wrb++ = 0xFF; 575 *wrb++ = 0xFF; 576 *wrb++ = 0x56; 577 *wrb++ = 0x78; 578 *wrb = 0xFF; 579 580 status = be_mbox_notify_wait(adapter); 581 582 mutex_unlock(&adapter->mbox_lock); 583 return status; 584 } 585 586 /* Tell fw we're done with firing cmds by writing a 587 * special pattern across the wrb hdr; uses mbox 588 */ 589 int be_cmd_fw_clean(struct be_adapter *adapter) 590 { 591 u8 *wrb; 592 int status; 593 594 if (mutex_lock_interruptible(&adapter->mbox_lock)) 595 return -1; 596 597 wrb = (u8 *)wrb_from_mbox(adapter); 598 *wrb++ = 0xFF; 599 *wrb++ = 0xAA; 600 *wrb++ = 0xBB; 601 *wrb++ = 0xFF; 602 *wrb++ = 0xFF; 603 *wrb++ = 0xCC; 604 *wrb++ = 0xDD; 605 *wrb = 0xFF; 606 607 status = be_mbox_notify_wait(adapter); 608 609 mutex_unlock(&adapter->mbox_lock); 610 return status; 611 } 612 int be_cmd_eq_create(struct be_adapter *adapter, 613 struct be_queue_info *eq, int eq_delay) 614 { 615 struct be_mcc_wrb *wrb; 616 struct be_cmd_req_eq_create *req; 617 struct be_dma_mem *q_mem = &eq->dma_mem; 618 int status; 619 620 if (mutex_lock_interruptible(&adapter->mbox_lock)) 621 return -1; 622 623 wrb = wrb_from_mbox(adapter); 624 req = embedded_payload(wrb); 625 626 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 627 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 628 629 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 630 631 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 632 /* 4byte eqe*/ 633 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 634 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 635 __ilog2_u32(eq->len/256)); 636 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, 637 eq_delay_to_mult(eq_delay)); 638 be_dws_cpu_to_le(req->context, sizeof(req->context)); 639 640 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 641 642 status = be_mbox_notify_wait(adapter); 643 if (!status) { 644 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 645 eq->id = le16_to_cpu(resp->eq_id); 646 eq->created = true; 647 } 648 649 mutex_unlock(&adapter->mbox_lock); 650 return status; 651 } 652 653 /* Use MCC */ 654 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 655 u8 type, bool permanent, u32 if_handle, u32 pmac_id) 656 { 657 struct be_mcc_wrb *wrb; 658 struct be_cmd_req_mac_query *req; 659 int status; 660 661 spin_lock_bh(&adapter->mcc_lock); 662 663 wrb = wrb_from_mccq(adapter); 664 if (!wrb) { 665 status = -EBUSY; 666 goto err; 667 } 668 req = embedded_payload(wrb); 669 670 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 671 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 672 req->type = type; 673 if (permanent) { 674 req->permanent = 1; 675 } else { 676 req->if_id = cpu_to_le16((u16) if_handle); 677 req->pmac_id = cpu_to_le32(pmac_id); 678 req->permanent = 0; 679 } 680 681 status = be_mcc_notify_wait(adapter); 682 if (!status) { 683 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 684 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 685 } 686 687 err: 688 spin_unlock_bh(&adapter->mcc_lock); 689 return status; 690 } 691 692 /* Uses synchronous MCCQ */ 693 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 694 u32 if_id, u32 *pmac_id, u32 domain) 695 { 696 struct be_mcc_wrb *wrb; 697 struct be_cmd_req_pmac_add *req; 698 int status; 699 700 spin_lock_bh(&adapter->mcc_lock); 701 702 wrb = wrb_from_mccq(adapter); 703 if (!wrb) { 704 status = -EBUSY; 705 goto err; 706 } 707 req = embedded_payload(wrb); 708 709 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 710 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); 711 712 req->hdr.domain = domain; 713 req->if_id = cpu_to_le32(if_id); 714 memcpy(req->mac_address, mac_addr, ETH_ALEN); 715 716 status = be_mcc_notify_wait(adapter); 717 if (!status) { 718 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); 719 *pmac_id = le32_to_cpu(resp->pmac_id); 720 } 721 722 err: 723 spin_unlock_bh(&adapter->mcc_lock); 724 725 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 726 status = -EPERM; 727 728 return status; 729 } 730 731 /* Uses synchronous MCCQ */ 732 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) 733 { 734 struct be_mcc_wrb *wrb; 735 struct be_cmd_req_pmac_del *req; 736 int status; 737 738 if (pmac_id == -1) 739 return 0; 740 741 spin_lock_bh(&adapter->mcc_lock); 742 743 wrb = wrb_from_mccq(adapter); 744 if (!wrb) { 745 status = -EBUSY; 746 goto err; 747 } 748 req = embedded_payload(wrb); 749 750 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 751 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); 752 753 req->hdr.domain = dom; 754 req->if_id = cpu_to_le32(if_id); 755 req->pmac_id = cpu_to_le32(pmac_id); 756 757 status = be_mcc_notify_wait(adapter); 758 759 err: 760 spin_unlock_bh(&adapter->mcc_lock); 761 return status; 762 } 763 764 /* Uses Mbox */ 765 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 766 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 767 { 768 struct be_mcc_wrb *wrb; 769 struct be_cmd_req_cq_create *req; 770 struct be_dma_mem *q_mem = &cq->dma_mem; 771 void *ctxt; 772 int status; 773 774 if (mutex_lock_interruptible(&adapter->mbox_lock)) 775 return -1; 776 777 wrb = wrb_from_mbox(adapter); 778 req = embedded_payload(wrb); 779 ctxt = &req->context; 780 781 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 782 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); 783 784 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 785 if (lancer_chip(adapter)) { 786 req->hdr.version = 2; 787 req->page_size = 1; /* 1 for 4K */ 788 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, 789 no_delay); 790 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, 791 __ilog2_u32(cq->len/256)); 792 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); 793 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, 794 ctxt, 1); 795 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, 796 ctxt, eq->id); 797 } else { 798 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 799 coalesce_wm); 800 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 801 ctxt, no_delay); 802 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 803 __ilog2_u32(cq->len/256)); 804 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 805 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 806 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 807 } 808 809 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 810 811 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 812 813 status = be_mbox_notify_wait(adapter); 814 if (!status) { 815 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 816 cq->id = le16_to_cpu(resp->cq_id); 817 cq->created = true; 818 } 819 820 mutex_unlock(&adapter->mbox_lock); 821 822 return status; 823 } 824 825 static u32 be_encoded_q_len(int q_len) 826 { 827 u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 828 if (len_encoded == 16) 829 len_encoded = 0; 830 return len_encoded; 831 } 832 833 int be_cmd_mccq_ext_create(struct be_adapter *adapter, 834 struct be_queue_info *mccq, 835 struct be_queue_info *cq) 836 { 837 struct be_mcc_wrb *wrb; 838 struct be_cmd_req_mcc_ext_create *req; 839 struct be_dma_mem *q_mem = &mccq->dma_mem; 840 void *ctxt; 841 int status; 842 843 if (mutex_lock_interruptible(&adapter->mbox_lock)) 844 return -1; 845 846 wrb = wrb_from_mbox(adapter); 847 req = embedded_payload(wrb); 848 ctxt = &req->context; 849 850 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 851 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); 852 853 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 854 if (lancer_chip(adapter)) { 855 req->hdr.version = 1; 856 req->cq_id = cpu_to_le16(cq->id); 857 858 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, 859 be_encoded_q_len(mccq->len)); 860 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); 861 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, 862 ctxt, cq->id); 863 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, 864 ctxt, 1); 865 866 } else { 867 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 868 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 869 be_encoded_q_len(mccq->len)); 870 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 871 } 872 873 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 874 req->async_event_bitmap[0] = cpu_to_le32(0x00000022); 875 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 876 877 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 878 879 status = be_mbox_notify_wait(adapter); 880 if (!status) { 881 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 882 mccq->id = le16_to_cpu(resp->id); 883 mccq->created = true; 884 } 885 mutex_unlock(&adapter->mbox_lock); 886 887 return status; 888 } 889 890 int be_cmd_mccq_org_create(struct be_adapter *adapter, 891 struct be_queue_info *mccq, 892 struct be_queue_info *cq) 893 { 894 struct be_mcc_wrb *wrb; 895 struct be_cmd_req_mcc_create *req; 896 struct be_dma_mem *q_mem = &mccq->dma_mem; 897 void *ctxt; 898 int status; 899 900 if (mutex_lock_interruptible(&adapter->mbox_lock)) 901 return -1; 902 903 wrb = wrb_from_mbox(adapter); 904 req = embedded_payload(wrb); 905 ctxt = &req->context; 906 907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 908 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); 909 910 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 911 912 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 913 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 914 be_encoded_q_len(mccq->len)); 915 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 916 917 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 918 919 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 920 921 status = be_mbox_notify_wait(adapter); 922 if (!status) { 923 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 924 mccq->id = le16_to_cpu(resp->id); 925 mccq->created = true; 926 } 927 928 mutex_unlock(&adapter->mbox_lock); 929 return status; 930 } 931 932 int be_cmd_mccq_create(struct be_adapter *adapter, 933 struct be_queue_info *mccq, 934 struct be_queue_info *cq) 935 { 936 int status; 937 938 status = be_cmd_mccq_ext_create(adapter, mccq, cq); 939 if (status && !lancer_chip(adapter)) { 940 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " 941 "or newer to avoid conflicting priorities between NIC " 942 "and FCoE traffic"); 943 status = be_cmd_mccq_org_create(adapter, mccq, cq); 944 } 945 return status; 946 } 947 948 int be_cmd_txq_create(struct be_adapter *adapter, 949 struct be_queue_info *txq, 950 struct be_queue_info *cq) 951 { 952 struct be_mcc_wrb *wrb; 953 struct be_cmd_req_eth_tx_create *req; 954 struct be_dma_mem *q_mem = &txq->dma_mem; 955 void *ctxt; 956 int status; 957 958 spin_lock_bh(&adapter->mcc_lock); 959 960 wrb = wrb_from_mccq(adapter); 961 if (!wrb) { 962 status = -EBUSY; 963 goto err; 964 } 965 966 req = embedded_payload(wrb); 967 ctxt = &req->context; 968 969 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 970 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); 971 972 if (lancer_chip(adapter)) { 973 req->hdr.version = 1; 974 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, 975 adapter->if_handle); 976 } 977 978 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 979 req->ulp_num = BE_ULP1_NUM; 980 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 981 982 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, 983 be_encoded_q_len(txq->len)); 984 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); 985 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); 986 987 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 988 989 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 990 991 status = be_mcc_notify_wait(adapter); 992 if (!status) { 993 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); 994 txq->id = le16_to_cpu(resp->cid); 995 txq->created = true; 996 } 997 998 err: 999 spin_unlock_bh(&adapter->mcc_lock); 1000 1001 return status; 1002 } 1003 1004 /* Uses MCC */ 1005 int be_cmd_rxq_create(struct be_adapter *adapter, 1006 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1007 u32 if_id, u32 rss, u8 *rss_id) 1008 { 1009 struct be_mcc_wrb *wrb; 1010 struct be_cmd_req_eth_rx_create *req; 1011 struct be_dma_mem *q_mem = &rxq->dma_mem; 1012 int status; 1013 1014 spin_lock_bh(&adapter->mcc_lock); 1015 1016 wrb = wrb_from_mccq(adapter); 1017 if (!wrb) { 1018 status = -EBUSY; 1019 goto err; 1020 } 1021 req = embedded_payload(wrb); 1022 1023 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1024 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1025 1026 req->cq_id = cpu_to_le16(cq_id); 1027 req->frag_size = fls(frag_size) - 1; 1028 req->num_pages = 2; 1029 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1030 req->interface_id = cpu_to_le32(if_id); 1031 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); 1032 req->rss_queue = cpu_to_le32(rss); 1033 1034 status = be_mcc_notify_wait(adapter); 1035 if (!status) { 1036 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 1037 rxq->id = le16_to_cpu(resp->id); 1038 rxq->created = true; 1039 *rss_id = resp->rss_id; 1040 } 1041 1042 err: 1043 spin_unlock_bh(&adapter->mcc_lock); 1044 return status; 1045 } 1046 1047 /* Generic destroyer function for all types of queues 1048 * Uses Mbox 1049 */ 1050 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1051 int queue_type) 1052 { 1053 struct be_mcc_wrb *wrb; 1054 struct be_cmd_req_q_destroy *req; 1055 u8 subsys = 0, opcode = 0; 1056 int status; 1057 1058 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1059 return -1; 1060 1061 wrb = wrb_from_mbox(adapter); 1062 req = embedded_payload(wrb); 1063 1064 switch (queue_type) { 1065 case QTYPE_EQ: 1066 subsys = CMD_SUBSYSTEM_COMMON; 1067 opcode = OPCODE_COMMON_EQ_DESTROY; 1068 break; 1069 case QTYPE_CQ: 1070 subsys = CMD_SUBSYSTEM_COMMON; 1071 opcode = OPCODE_COMMON_CQ_DESTROY; 1072 break; 1073 case QTYPE_TXQ: 1074 subsys = CMD_SUBSYSTEM_ETH; 1075 opcode = OPCODE_ETH_TX_DESTROY; 1076 break; 1077 case QTYPE_RXQ: 1078 subsys = CMD_SUBSYSTEM_ETH; 1079 opcode = OPCODE_ETH_RX_DESTROY; 1080 break; 1081 case QTYPE_MCCQ: 1082 subsys = CMD_SUBSYSTEM_COMMON; 1083 opcode = OPCODE_COMMON_MCC_DESTROY; 1084 break; 1085 default: 1086 BUG(); 1087 } 1088 1089 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1090 NULL); 1091 req->id = cpu_to_le16(q->id); 1092 1093 status = be_mbox_notify_wait(adapter); 1094 if (!status) 1095 q->created = false; 1096 1097 mutex_unlock(&adapter->mbox_lock); 1098 return status; 1099 } 1100 1101 /* Uses MCC */ 1102 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) 1103 { 1104 struct be_mcc_wrb *wrb; 1105 struct be_cmd_req_q_destroy *req; 1106 int status; 1107 1108 spin_lock_bh(&adapter->mcc_lock); 1109 1110 wrb = wrb_from_mccq(adapter); 1111 if (!wrb) { 1112 status = -EBUSY; 1113 goto err; 1114 } 1115 req = embedded_payload(wrb); 1116 1117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1118 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1119 req->id = cpu_to_le16(q->id); 1120 1121 status = be_mcc_notify_wait(adapter); 1122 if (!status) 1123 q->created = false; 1124 1125 err: 1126 spin_unlock_bh(&adapter->mcc_lock); 1127 return status; 1128 } 1129 1130 /* Create an rx filtering policy configuration on an i/f 1131 * Uses MCCQ 1132 */ 1133 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1134 u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) 1135 { 1136 struct be_mcc_wrb *wrb; 1137 struct be_cmd_req_if_create *req; 1138 int status; 1139 1140 spin_lock_bh(&adapter->mcc_lock); 1141 1142 wrb = wrb_from_mccq(adapter); 1143 if (!wrb) { 1144 status = -EBUSY; 1145 goto err; 1146 } 1147 req = embedded_payload(wrb); 1148 1149 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1150 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); 1151 req->hdr.domain = domain; 1152 req->capability_flags = cpu_to_le32(cap_flags); 1153 req->enable_flags = cpu_to_le32(en_flags); 1154 if (mac) 1155 memcpy(req->mac_addr, mac, ETH_ALEN); 1156 else 1157 req->pmac_invalid = true; 1158 1159 status = be_mcc_notify_wait(adapter); 1160 if (!status) { 1161 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1162 *if_handle = le32_to_cpu(resp->interface_id); 1163 if (mac) 1164 *pmac_id = le32_to_cpu(resp->pmac_id); 1165 } 1166 1167 err: 1168 spin_unlock_bh(&adapter->mcc_lock); 1169 return status; 1170 } 1171 1172 /* Uses MCCQ */ 1173 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) 1174 { 1175 struct be_mcc_wrb *wrb; 1176 struct be_cmd_req_if_destroy *req; 1177 int status; 1178 1179 if (interface_id == -1) 1180 return 0; 1181 1182 spin_lock_bh(&adapter->mcc_lock); 1183 1184 wrb = wrb_from_mccq(adapter); 1185 if (!wrb) { 1186 status = -EBUSY; 1187 goto err; 1188 } 1189 req = embedded_payload(wrb); 1190 1191 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1192 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); 1193 req->hdr.domain = domain; 1194 req->interface_id = cpu_to_le32(interface_id); 1195 1196 status = be_mcc_notify_wait(adapter); 1197 err: 1198 spin_unlock_bh(&adapter->mcc_lock); 1199 return status; 1200 } 1201 1202 /* Get stats is a non embedded command: the request is not embedded inside 1203 * WRB but is a separate dma memory block 1204 * Uses asynchronous MCC 1205 */ 1206 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1207 { 1208 struct be_mcc_wrb *wrb; 1209 struct be_cmd_req_hdr *hdr; 1210 int status = 0; 1211 1212 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) 1213 be_cmd_get_die_temperature(adapter); 1214 1215 spin_lock_bh(&adapter->mcc_lock); 1216 1217 wrb = wrb_from_mccq(adapter); 1218 if (!wrb) { 1219 status = -EBUSY; 1220 goto err; 1221 } 1222 hdr = nonemb_cmd->va; 1223 1224 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1225 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1226 1227 if (adapter->generation == BE_GEN3) 1228 hdr->version = 1; 1229 1230 be_mcc_notify(adapter); 1231 adapter->stats_cmd_sent = true; 1232 1233 err: 1234 spin_unlock_bh(&adapter->mcc_lock); 1235 return status; 1236 } 1237 1238 /* Lancer Stats */ 1239 int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1240 struct be_dma_mem *nonemb_cmd) 1241 { 1242 1243 struct be_mcc_wrb *wrb; 1244 struct lancer_cmd_req_pport_stats *req; 1245 int status = 0; 1246 1247 spin_lock_bh(&adapter->mcc_lock); 1248 1249 wrb = wrb_from_mccq(adapter); 1250 if (!wrb) { 1251 status = -EBUSY; 1252 goto err; 1253 } 1254 req = nonemb_cmd->va; 1255 1256 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1257 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1258 nonemb_cmd); 1259 1260 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1261 req->cmd_params.params.reset_stats = 0; 1262 1263 be_mcc_notify(adapter); 1264 adapter->stats_cmd_sent = true; 1265 1266 err: 1267 spin_unlock_bh(&adapter->mcc_lock); 1268 return status; 1269 } 1270 1271 /* Uses synchronous mcc */ 1272 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, 1273 u16 *link_speed, u8 *link_status, u32 dom) 1274 { 1275 struct be_mcc_wrb *wrb; 1276 struct be_cmd_req_link_status *req; 1277 int status; 1278 1279 spin_lock_bh(&adapter->mcc_lock); 1280 1281 if (link_status) 1282 *link_status = LINK_DOWN; 1283 1284 wrb = wrb_from_mccq(adapter); 1285 if (!wrb) { 1286 status = -EBUSY; 1287 goto err; 1288 } 1289 req = embedded_payload(wrb); 1290 1291 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1292 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1293 1294 if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) 1295 req->hdr.version = 1; 1296 1297 req->hdr.domain = dom; 1298 1299 status = be_mcc_notify_wait(adapter); 1300 if (!status) { 1301 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1302 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { 1303 if (link_speed) 1304 *link_speed = le16_to_cpu(resp->link_speed); 1305 if (mac_speed) 1306 *mac_speed = resp->mac_speed; 1307 } 1308 if (link_status) 1309 *link_status = resp->logical_link_status; 1310 } 1311 1312 err: 1313 spin_unlock_bh(&adapter->mcc_lock); 1314 return status; 1315 } 1316 1317 /* Uses synchronous mcc */ 1318 int be_cmd_get_die_temperature(struct be_adapter *adapter) 1319 { 1320 struct be_mcc_wrb *wrb; 1321 struct be_cmd_req_get_cntl_addnl_attribs *req; 1322 int status; 1323 1324 spin_lock_bh(&adapter->mcc_lock); 1325 1326 wrb = wrb_from_mccq(adapter); 1327 if (!wrb) { 1328 status = -EBUSY; 1329 goto err; 1330 } 1331 req = embedded_payload(wrb); 1332 1333 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1334 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1335 wrb, NULL); 1336 1337 be_mcc_notify(adapter); 1338 1339 err: 1340 spin_unlock_bh(&adapter->mcc_lock); 1341 return status; 1342 } 1343 1344 /* Uses synchronous mcc */ 1345 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) 1346 { 1347 struct be_mcc_wrb *wrb; 1348 struct be_cmd_req_get_fat *req; 1349 int status; 1350 1351 spin_lock_bh(&adapter->mcc_lock); 1352 1353 wrb = wrb_from_mccq(adapter); 1354 if (!wrb) { 1355 status = -EBUSY; 1356 goto err; 1357 } 1358 req = embedded_payload(wrb); 1359 1360 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1361 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); 1362 req->fat_operation = cpu_to_le32(QUERY_FAT); 1363 status = be_mcc_notify_wait(adapter); 1364 if (!status) { 1365 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); 1366 if (log_size && resp->log_size) 1367 *log_size = le32_to_cpu(resp->log_size) - 1368 sizeof(u32); 1369 } 1370 err: 1371 spin_unlock_bh(&adapter->mcc_lock); 1372 return status; 1373 } 1374 1375 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) 1376 { 1377 struct be_dma_mem get_fat_cmd; 1378 struct be_mcc_wrb *wrb; 1379 struct be_cmd_req_get_fat *req; 1380 u32 offset = 0, total_size, buf_size, 1381 log_offset = sizeof(u32), payload_len; 1382 int status; 1383 1384 if (buf_len == 0) 1385 return; 1386 1387 total_size = buf_len; 1388 1389 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1390 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1391 get_fat_cmd.size, 1392 &get_fat_cmd.dma); 1393 if (!get_fat_cmd.va) { 1394 status = -ENOMEM; 1395 dev_err(&adapter->pdev->dev, 1396 "Memory allocation failure while retrieving FAT data\n"); 1397 return; 1398 } 1399 1400 spin_lock_bh(&adapter->mcc_lock); 1401 1402 while (total_size) { 1403 buf_size = min(total_size, (u32)60*1024); 1404 total_size -= buf_size; 1405 1406 wrb = wrb_from_mccq(adapter); 1407 if (!wrb) { 1408 status = -EBUSY; 1409 goto err; 1410 } 1411 req = get_fat_cmd.va; 1412 1413 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1414 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1415 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, 1416 &get_fat_cmd); 1417 1418 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1419 req->read_log_offset = cpu_to_le32(log_offset); 1420 req->read_log_length = cpu_to_le32(buf_size); 1421 req->data_buffer_size = cpu_to_le32(buf_size); 1422 1423 status = be_mcc_notify_wait(adapter); 1424 if (!status) { 1425 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1426 memcpy(buf + offset, 1427 resp->data_buffer, 1428 le32_to_cpu(resp->read_log_length)); 1429 } else { 1430 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1431 goto err; 1432 } 1433 offset += buf_size; 1434 log_offset += buf_size; 1435 } 1436 err: 1437 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1438 get_fat_cmd.va, 1439 get_fat_cmd.dma); 1440 spin_unlock_bh(&adapter->mcc_lock); 1441 } 1442 1443 /* Uses synchronous mcc */ 1444 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1445 char *fw_on_flash) 1446 { 1447 struct be_mcc_wrb *wrb; 1448 struct be_cmd_req_get_fw_version *req; 1449 int status; 1450 1451 spin_lock_bh(&adapter->mcc_lock); 1452 1453 wrb = wrb_from_mccq(adapter); 1454 if (!wrb) { 1455 status = -EBUSY; 1456 goto err; 1457 } 1458 1459 req = embedded_payload(wrb); 1460 1461 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1462 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); 1463 status = be_mcc_notify_wait(adapter); 1464 if (!status) { 1465 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1466 strcpy(fw_ver, resp->firmware_version_string); 1467 if (fw_on_flash) 1468 strcpy(fw_on_flash, resp->fw_on_flash_version_string); 1469 } 1470 err: 1471 spin_unlock_bh(&adapter->mcc_lock); 1472 return status; 1473 } 1474 1475 /* set the EQ delay interval of an EQ to specified value 1476 * Uses async mcc 1477 */ 1478 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) 1479 { 1480 struct be_mcc_wrb *wrb; 1481 struct be_cmd_req_modify_eq_delay *req; 1482 int status = 0; 1483 1484 spin_lock_bh(&adapter->mcc_lock); 1485 1486 wrb = wrb_from_mccq(adapter); 1487 if (!wrb) { 1488 status = -EBUSY; 1489 goto err; 1490 } 1491 req = embedded_payload(wrb); 1492 1493 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1494 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); 1495 1496 req->num_eq = cpu_to_le32(1); 1497 req->delay[0].eq_id = cpu_to_le32(eq_id); 1498 req->delay[0].phase = 0; 1499 req->delay[0].delay_multiplier = cpu_to_le32(eqd); 1500 1501 be_mcc_notify(adapter); 1502 1503 err: 1504 spin_unlock_bh(&adapter->mcc_lock); 1505 return status; 1506 } 1507 1508 /* Uses sycnhronous mcc */ 1509 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1510 u32 num, bool untagged, bool promiscuous) 1511 { 1512 struct be_mcc_wrb *wrb; 1513 struct be_cmd_req_vlan_config *req; 1514 int status; 1515 1516 spin_lock_bh(&adapter->mcc_lock); 1517 1518 wrb = wrb_from_mccq(adapter); 1519 if (!wrb) { 1520 status = -EBUSY; 1521 goto err; 1522 } 1523 req = embedded_payload(wrb); 1524 1525 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1526 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); 1527 1528 req->interface_id = if_id; 1529 req->promiscuous = promiscuous; 1530 req->untagged = untagged; 1531 req->num_vlan = num; 1532 if (!promiscuous) { 1533 memcpy(req->normal_vlan, vtag_array, 1534 req->num_vlan * sizeof(vtag_array[0])); 1535 } 1536 1537 status = be_mcc_notify_wait(adapter); 1538 1539 err: 1540 spin_unlock_bh(&adapter->mcc_lock); 1541 return status; 1542 } 1543 1544 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) 1545 { 1546 struct be_mcc_wrb *wrb; 1547 struct be_dma_mem *mem = &adapter->rx_filter; 1548 struct be_cmd_req_rx_filter *req = mem->va; 1549 int status; 1550 1551 spin_lock_bh(&adapter->mcc_lock); 1552 1553 wrb = wrb_from_mccq(adapter); 1554 if (!wrb) { 1555 status = -EBUSY; 1556 goto err; 1557 } 1558 memset(req, 0, sizeof(*req)); 1559 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1560 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1561 wrb, mem); 1562 1563 req->if_id = cpu_to_le32(adapter->if_handle); 1564 if (flags & IFF_PROMISC) { 1565 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1566 BE_IF_FLAGS_VLAN_PROMISCUOUS); 1567 if (value == ON) 1568 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1569 BE_IF_FLAGS_VLAN_PROMISCUOUS); 1570 } else if (flags & IFF_ALLMULTI) { 1571 req->if_flags_mask = req->if_flags = 1572 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1573 } else { 1574 struct netdev_hw_addr *ha; 1575 int i = 0; 1576 1577 req->if_flags_mask = req->if_flags = 1578 cpu_to_le32(BE_IF_FLAGS_MULTICAST); 1579 1580 /* Reset mcast promisc mode if already set by setting mask 1581 * and not setting flags field 1582 */ 1583 req->if_flags_mask |= 1584 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1585 1586 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1587 netdev_for_each_mc_addr(ha, adapter->netdev) 1588 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1589 } 1590 1591 status = be_mcc_notify_wait(adapter); 1592 err: 1593 spin_unlock_bh(&adapter->mcc_lock); 1594 return status; 1595 } 1596 1597 /* Uses synchrounous mcc */ 1598 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) 1599 { 1600 struct be_mcc_wrb *wrb; 1601 struct be_cmd_req_set_flow_control *req; 1602 int status; 1603 1604 spin_lock_bh(&adapter->mcc_lock); 1605 1606 wrb = wrb_from_mccq(adapter); 1607 if (!wrb) { 1608 status = -EBUSY; 1609 goto err; 1610 } 1611 req = embedded_payload(wrb); 1612 1613 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1614 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1615 1616 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1617 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 1618 1619 status = be_mcc_notify_wait(adapter); 1620 1621 err: 1622 spin_unlock_bh(&adapter->mcc_lock); 1623 return status; 1624 } 1625 1626 /* Uses sycn mcc */ 1627 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) 1628 { 1629 struct be_mcc_wrb *wrb; 1630 struct be_cmd_req_get_flow_control *req; 1631 int status; 1632 1633 spin_lock_bh(&adapter->mcc_lock); 1634 1635 wrb = wrb_from_mccq(adapter); 1636 if (!wrb) { 1637 status = -EBUSY; 1638 goto err; 1639 } 1640 req = embedded_payload(wrb); 1641 1642 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1643 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1644 1645 status = be_mcc_notify_wait(adapter); 1646 if (!status) { 1647 struct be_cmd_resp_get_flow_control *resp = 1648 embedded_payload(wrb); 1649 *tx_fc = le16_to_cpu(resp->tx_flow_control); 1650 *rx_fc = le16_to_cpu(resp->rx_flow_control); 1651 } 1652 1653 err: 1654 spin_unlock_bh(&adapter->mcc_lock); 1655 return status; 1656 } 1657 1658 /* Uses mbox */ 1659 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, 1660 u32 *mode, u32 *caps) 1661 { 1662 struct be_mcc_wrb *wrb; 1663 struct be_cmd_req_query_fw_cfg *req; 1664 int status; 1665 1666 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1667 return -1; 1668 1669 wrb = wrb_from_mbox(adapter); 1670 req = embedded_payload(wrb); 1671 1672 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1673 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); 1674 1675 status = be_mbox_notify_wait(adapter); 1676 if (!status) { 1677 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1678 *port_num = le32_to_cpu(resp->phys_port); 1679 *mode = le32_to_cpu(resp->function_mode); 1680 *caps = le32_to_cpu(resp->function_caps); 1681 } 1682 1683 mutex_unlock(&adapter->mbox_lock); 1684 return status; 1685 } 1686 1687 /* Uses mbox */ 1688 int be_cmd_reset_function(struct be_adapter *adapter) 1689 { 1690 struct be_mcc_wrb *wrb; 1691 struct be_cmd_req_hdr *req; 1692 int status; 1693 1694 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1695 return -1; 1696 1697 wrb = wrb_from_mbox(adapter); 1698 req = embedded_payload(wrb); 1699 1700 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 1701 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); 1702 1703 status = be_mbox_notify_wait(adapter); 1704 1705 mutex_unlock(&adapter->mbox_lock); 1706 return status; 1707 } 1708 1709 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) 1710 { 1711 struct be_mcc_wrb *wrb; 1712 struct be_cmd_req_rss_config *req; 1713 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, 1714 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, 1715 0x3ea83c02, 0x4a110304}; 1716 int status; 1717 1718 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1719 return -1; 1720 1721 wrb = wrb_from_mbox(adapter); 1722 req = embedded_payload(wrb); 1723 1724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1725 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 1726 1727 req->if_id = cpu_to_le32(adapter->if_handle); 1728 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 1729 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); 1730 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 1731 memcpy(req->cpu_table, rsstable, table_size); 1732 memcpy(req->hash, myhash, sizeof(myhash)); 1733 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 1734 1735 status = be_mbox_notify_wait(adapter); 1736 1737 mutex_unlock(&adapter->mbox_lock); 1738 return status; 1739 } 1740 1741 /* Uses sync mcc */ 1742 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 1743 u8 bcn, u8 sts, u8 state) 1744 { 1745 struct be_mcc_wrb *wrb; 1746 struct be_cmd_req_enable_disable_beacon *req; 1747 int status; 1748 1749 spin_lock_bh(&adapter->mcc_lock); 1750 1751 wrb = wrb_from_mccq(adapter); 1752 if (!wrb) { 1753 status = -EBUSY; 1754 goto err; 1755 } 1756 req = embedded_payload(wrb); 1757 1758 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1759 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); 1760 1761 req->port_num = port_num; 1762 req->beacon_state = state; 1763 req->beacon_duration = bcn; 1764 req->status_duration = sts; 1765 1766 status = be_mcc_notify_wait(adapter); 1767 1768 err: 1769 spin_unlock_bh(&adapter->mcc_lock); 1770 return status; 1771 } 1772 1773 /* Uses sync mcc */ 1774 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 1775 { 1776 struct be_mcc_wrb *wrb; 1777 struct be_cmd_req_get_beacon_state *req; 1778 int status; 1779 1780 spin_lock_bh(&adapter->mcc_lock); 1781 1782 wrb = wrb_from_mccq(adapter); 1783 if (!wrb) { 1784 status = -EBUSY; 1785 goto err; 1786 } 1787 req = embedded_payload(wrb); 1788 1789 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1790 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL); 1791 1792 req->port_num = port_num; 1793 1794 status = be_mcc_notify_wait(adapter); 1795 if (!status) { 1796 struct be_cmd_resp_get_beacon_state *resp = 1797 embedded_payload(wrb); 1798 *state = resp->beacon_state; 1799 } 1800 1801 err: 1802 spin_unlock_bh(&adapter->mcc_lock); 1803 return status; 1804 } 1805 1806 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 1807 u32 data_size, u32 data_offset, const char *obj_name, 1808 u32 *data_written, u8 *addn_status) 1809 { 1810 struct be_mcc_wrb *wrb; 1811 struct lancer_cmd_req_write_object *req; 1812 struct lancer_cmd_resp_write_object *resp; 1813 void *ctxt = NULL; 1814 int status; 1815 1816 spin_lock_bh(&adapter->mcc_lock); 1817 adapter->flash_status = 0; 1818 1819 wrb = wrb_from_mccq(adapter); 1820 if (!wrb) { 1821 status = -EBUSY; 1822 goto err_unlock; 1823 } 1824 1825 req = embedded_payload(wrb); 1826 1827 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1828 OPCODE_COMMON_WRITE_OBJECT, 1829 sizeof(struct lancer_cmd_req_write_object), wrb, 1830 NULL); 1831 1832 ctxt = &req->context; 1833 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 1834 write_length, ctxt, data_size); 1835 1836 if (data_size == 0) 1837 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 1838 eof, ctxt, 1); 1839 else 1840 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 1841 eof, ctxt, 0); 1842 1843 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1844 req->write_offset = cpu_to_le32(data_offset); 1845 strcpy(req->object_name, obj_name); 1846 req->descriptor_count = cpu_to_le32(1); 1847 req->buf_len = cpu_to_le32(data_size); 1848 req->addr_low = cpu_to_le32((cmd->dma + 1849 sizeof(struct lancer_cmd_req_write_object)) 1850 & 0xFFFFFFFF); 1851 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 1852 sizeof(struct lancer_cmd_req_write_object))); 1853 1854 be_mcc_notify(adapter); 1855 spin_unlock_bh(&adapter->mcc_lock); 1856 1857 if (!wait_for_completion_timeout(&adapter->flash_compl, 1858 msecs_to_jiffies(30000))) 1859 status = -1; 1860 else 1861 status = adapter->flash_status; 1862 1863 resp = embedded_payload(wrb); 1864 if (!status) 1865 *data_written = le32_to_cpu(resp->actual_write_len); 1866 else 1867 *addn_status = resp->additional_status; 1868 1869 return status; 1870 1871 err_unlock: 1872 spin_unlock_bh(&adapter->mcc_lock); 1873 return status; 1874 } 1875 1876 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 1877 u32 data_size, u32 data_offset, const char *obj_name, 1878 u32 *data_read, u32 *eof, u8 *addn_status) 1879 { 1880 struct be_mcc_wrb *wrb; 1881 struct lancer_cmd_req_read_object *req; 1882 struct lancer_cmd_resp_read_object *resp; 1883 int status; 1884 1885 spin_lock_bh(&adapter->mcc_lock); 1886 1887 wrb = wrb_from_mccq(adapter); 1888 if (!wrb) { 1889 status = -EBUSY; 1890 goto err_unlock; 1891 } 1892 1893 req = embedded_payload(wrb); 1894 1895 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1896 OPCODE_COMMON_READ_OBJECT, 1897 sizeof(struct lancer_cmd_req_read_object), wrb, 1898 NULL); 1899 1900 req->desired_read_len = cpu_to_le32(data_size); 1901 req->read_offset = cpu_to_le32(data_offset); 1902 strcpy(req->object_name, obj_name); 1903 req->descriptor_count = cpu_to_le32(1); 1904 req->buf_len = cpu_to_le32(data_size); 1905 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 1906 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 1907 1908 status = be_mcc_notify_wait(adapter); 1909 1910 resp = embedded_payload(wrb); 1911 if (!status) { 1912 *data_read = le32_to_cpu(resp->actual_read_len); 1913 *eof = le32_to_cpu(resp->eof); 1914 } else { 1915 *addn_status = resp->additional_status; 1916 } 1917 1918 err_unlock: 1919 spin_unlock_bh(&adapter->mcc_lock); 1920 return status; 1921 } 1922 1923 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 1924 u32 flash_type, u32 flash_opcode, u32 buf_size) 1925 { 1926 struct be_mcc_wrb *wrb; 1927 struct be_cmd_write_flashrom *req; 1928 int status; 1929 1930 spin_lock_bh(&adapter->mcc_lock); 1931 adapter->flash_status = 0; 1932 1933 wrb = wrb_from_mccq(adapter); 1934 if (!wrb) { 1935 status = -EBUSY; 1936 goto err_unlock; 1937 } 1938 req = cmd->va; 1939 1940 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1941 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); 1942 1943 req->params.op_type = cpu_to_le32(flash_type); 1944 req->params.op_code = cpu_to_le32(flash_opcode); 1945 req->params.data_buf_size = cpu_to_le32(buf_size); 1946 1947 be_mcc_notify(adapter); 1948 spin_unlock_bh(&adapter->mcc_lock); 1949 1950 if (!wait_for_completion_timeout(&adapter->flash_compl, 1951 msecs_to_jiffies(40000))) 1952 status = -1; 1953 else 1954 status = adapter->flash_status; 1955 1956 return status; 1957 1958 err_unlock: 1959 spin_unlock_bh(&adapter->mcc_lock); 1960 return status; 1961 } 1962 1963 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 1964 int offset) 1965 { 1966 struct be_mcc_wrb *wrb; 1967 struct be_cmd_write_flashrom *req; 1968 int status; 1969 1970 spin_lock_bh(&adapter->mcc_lock); 1971 1972 wrb = wrb_from_mccq(adapter); 1973 if (!wrb) { 1974 status = -EBUSY; 1975 goto err; 1976 } 1977 req = embedded_payload(wrb); 1978 1979 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1980 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); 1981 1982 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); 1983 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1984 req->params.offset = cpu_to_le32(offset); 1985 req->params.data_buf_size = cpu_to_le32(0x4); 1986 1987 status = be_mcc_notify_wait(adapter); 1988 if (!status) 1989 memcpy(flashed_crc, req->params.data_buf, 4); 1990 1991 err: 1992 spin_unlock_bh(&adapter->mcc_lock); 1993 return status; 1994 } 1995 1996 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1997 struct be_dma_mem *nonemb_cmd) 1998 { 1999 struct be_mcc_wrb *wrb; 2000 struct be_cmd_req_acpi_wol_magic_config *req; 2001 int status; 2002 2003 spin_lock_bh(&adapter->mcc_lock); 2004 2005 wrb = wrb_from_mccq(adapter); 2006 if (!wrb) { 2007 status = -EBUSY; 2008 goto err; 2009 } 2010 req = nonemb_cmd->va; 2011 2012 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2013 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, 2014 nonemb_cmd); 2015 memcpy(req->magic_mac, mac, ETH_ALEN); 2016 2017 status = be_mcc_notify_wait(adapter); 2018 2019 err: 2020 spin_unlock_bh(&adapter->mcc_lock); 2021 return status; 2022 } 2023 2024 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2025 u8 loopback_type, u8 enable) 2026 { 2027 struct be_mcc_wrb *wrb; 2028 struct be_cmd_req_set_lmode *req; 2029 int status; 2030 2031 spin_lock_bh(&adapter->mcc_lock); 2032 2033 wrb = wrb_from_mccq(adapter); 2034 if (!wrb) { 2035 status = -EBUSY; 2036 goto err; 2037 } 2038 2039 req = embedded_payload(wrb); 2040 2041 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2042 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, 2043 NULL); 2044 2045 req->src_port = port_num; 2046 req->dest_port = port_num; 2047 req->loopback_type = loopback_type; 2048 req->loopback_state = enable; 2049 2050 status = be_mcc_notify_wait(adapter); 2051 err: 2052 spin_unlock_bh(&adapter->mcc_lock); 2053 return status; 2054 } 2055 2056 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2057 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 2058 { 2059 struct be_mcc_wrb *wrb; 2060 struct be_cmd_req_loopback_test *req; 2061 int status; 2062 2063 spin_lock_bh(&adapter->mcc_lock); 2064 2065 wrb = wrb_from_mccq(adapter); 2066 if (!wrb) { 2067 status = -EBUSY; 2068 goto err; 2069 } 2070 2071 req = embedded_payload(wrb); 2072 2073 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2074 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2075 req->hdr.timeout = cpu_to_le32(4); 2076 2077 req->pattern = cpu_to_le64(pattern); 2078 req->src_port = cpu_to_le32(port_num); 2079 req->dest_port = cpu_to_le32(port_num); 2080 req->pkt_size = cpu_to_le32(pkt_size); 2081 req->num_pkts = cpu_to_le32(num_pkts); 2082 req->loopback_type = cpu_to_le32(loopback_type); 2083 2084 status = be_mcc_notify_wait(adapter); 2085 if (!status) { 2086 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2087 status = le32_to_cpu(resp->status); 2088 } 2089 2090 err: 2091 spin_unlock_bh(&adapter->mcc_lock); 2092 return status; 2093 } 2094 2095 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2096 u32 byte_cnt, struct be_dma_mem *cmd) 2097 { 2098 struct be_mcc_wrb *wrb; 2099 struct be_cmd_req_ddrdma_test *req; 2100 int status; 2101 int i, j = 0; 2102 2103 spin_lock_bh(&adapter->mcc_lock); 2104 2105 wrb = wrb_from_mccq(adapter); 2106 if (!wrb) { 2107 status = -EBUSY; 2108 goto err; 2109 } 2110 req = cmd->va; 2111 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2112 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); 2113 2114 req->pattern = cpu_to_le64(pattern); 2115 req->byte_count = cpu_to_le32(byte_cnt); 2116 for (i = 0; i < byte_cnt; i++) { 2117 req->snd_buff[i] = (u8)(pattern >> (j*8)); 2118 j++; 2119 if (j > 7) 2120 j = 0; 2121 } 2122 2123 status = be_mcc_notify_wait(adapter); 2124 2125 if (!status) { 2126 struct be_cmd_resp_ddrdma_test *resp; 2127 resp = cmd->va; 2128 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 2129 resp->snd_err) { 2130 status = -1; 2131 } 2132 } 2133 2134 err: 2135 spin_unlock_bh(&adapter->mcc_lock); 2136 return status; 2137 } 2138 2139 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2140 struct be_dma_mem *nonemb_cmd) 2141 { 2142 struct be_mcc_wrb *wrb; 2143 struct be_cmd_req_seeprom_read *req; 2144 struct be_sge *sge; 2145 int status; 2146 2147 spin_lock_bh(&adapter->mcc_lock); 2148 2149 wrb = wrb_from_mccq(adapter); 2150 if (!wrb) { 2151 status = -EBUSY; 2152 goto err; 2153 } 2154 req = nonemb_cmd->va; 2155 sge = nonembedded_sgl(wrb); 2156 2157 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2158 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2159 nonemb_cmd); 2160 2161 status = be_mcc_notify_wait(adapter); 2162 2163 err: 2164 spin_unlock_bh(&adapter->mcc_lock); 2165 return status; 2166 } 2167 2168 int be_cmd_get_phy_info(struct be_adapter *adapter) 2169 { 2170 struct be_mcc_wrb *wrb; 2171 struct be_cmd_req_get_phy_info *req; 2172 struct be_dma_mem cmd; 2173 int status; 2174 2175 spin_lock_bh(&adapter->mcc_lock); 2176 2177 wrb = wrb_from_mccq(adapter); 2178 if (!wrb) { 2179 status = -EBUSY; 2180 goto err; 2181 } 2182 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2183 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2184 &cmd.dma); 2185 if (!cmd.va) { 2186 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2187 status = -ENOMEM; 2188 goto err; 2189 } 2190 2191 req = cmd.va; 2192 2193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2194 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2195 wrb, &cmd); 2196 2197 status = be_mcc_notify_wait(adapter); 2198 if (!status) { 2199 struct be_phy_info *resp_phy_info = 2200 cmd.va + sizeof(struct be_cmd_req_hdr); 2201 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2202 adapter->phy.interface_type = 2203 le16_to_cpu(resp_phy_info->interface_type); 2204 adapter->phy.auto_speeds_supported = 2205 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2206 adapter->phy.fixed_speeds_supported = 2207 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2208 adapter->phy.misc_params = 2209 le32_to_cpu(resp_phy_info->misc_params); 2210 } 2211 pci_free_consistent(adapter->pdev, cmd.size, 2212 cmd.va, cmd.dma); 2213 err: 2214 spin_unlock_bh(&adapter->mcc_lock); 2215 return status; 2216 } 2217 2218 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2219 { 2220 struct be_mcc_wrb *wrb; 2221 struct be_cmd_req_set_qos *req; 2222 int status; 2223 2224 spin_lock_bh(&adapter->mcc_lock); 2225 2226 wrb = wrb_from_mccq(adapter); 2227 if (!wrb) { 2228 status = -EBUSY; 2229 goto err; 2230 } 2231 2232 req = embedded_payload(wrb); 2233 2234 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2235 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2236 2237 req->hdr.domain = domain; 2238 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2239 req->max_bps_nic = cpu_to_le32(bps); 2240 2241 status = be_mcc_notify_wait(adapter); 2242 2243 err: 2244 spin_unlock_bh(&adapter->mcc_lock); 2245 return status; 2246 } 2247 2248 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2249 { 2250 struct be_mcc_wrb *wrb; 2251 struct be_cmd_req_cntl_attribs *req; 2252 struct be_cmd_resp_cntl_attribs *resp; 2253 int status; 2254 int payload_len = max(sizeof(*req), sizeof(*resp)); 2255 struct mgmt_controller_attrib *attribs; 2256 struct be_dma_mem attribs_cmd; 2257 2258 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2259 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2260 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2261 &attribs_cmd.dma); 2262 if (!attribs_cmd.va) { 2263 dev_err(&adapter->pdev->dev, 2264 "Memory allocation failure\n"); 2265 return -ENOMEM; 2266 } 2267 2268 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2269 return -1; 2270 2271 wrb = wrb_from_mbox(adapter); 2272 if (!wrb) { 2273 status = -EBUSY; 2274 goto err; 2275 } 2276 req = attribs_cmd.va; 2277 2278 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2279 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, 2280 &attribs_cmd); 2281 2282 status = be_mbox_notify_wait(adapter); 2283 if (!status) { 2284 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2285 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2286 } 2287 2288 err: 2289 mutex_unlock(&adapter->mbox_lock); 2290 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, 2291 attribs_cmd.dma); 2292 return status; 2293 } 2294 2295 /* Uses mbox */ 2296 int be_cmd_req_native_mode(struct be_adapter *adapter) 2297 { 2298 struct be_mcc_wrb *wrb; 2299 struct be_cmd_req_set_func_cap *req; 2300 int status; 2301 2302 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2303 return -1; 2304 2305 wrb = wrb_from_mbox(adapter); 2306 if (!wrb) { 2307 status = -EBUSY; 2308 goto err; 2309 } 2310 2311 req = embedded_payload(wrb); 2312 2313 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2314 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); 2315 2316 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2317 CAPABILITY_BE3_NATIVE_ERX_API); 2318 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 2319 2320 status = be_mbox_notify_wait(adapter); 2321 if (!status) { 2322 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2323 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2324 CAPABILITY_BE3_NATIVE_ERX_API; 2325 } 2326 err: 2327 mutex_unlock(&adapter->mbox_lock); 2328 return status; 2329 } 2330 2331 /* Uses synchronous MCCQ */ 2332 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 2333 bool *pmac_id_active, u32 *pmac_id, u8 *mac) 2334 { 2335 struct be_mcc_wrb *wrb; 2336 struct be_cmd_req_get_mac_list *req; 2337 int status; 2338 int mac_count; 2339 struct be_dma_mem get_mac_list_cmd; 2340 int i; 2341 2342 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2343 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2344 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2345 get_mac_list_cmd.size, 2346 &get_mac_list_cmd.dma); 2347 2348 if (!get_mac_list_cmd.va) { 2349 dev_err(&adapter->pdev->dev, 2350 "Memory allocation failure during GET_MAC_LIST\n"); 2351 return -ENOMEM; 2352 } 2353 2354 spin_lock_bh(&adapter->mcc_lock); 2355 2356 wrb = wrb_from_mccq(adapter); 2357 if (!wrb) { 2358 status = -EBUSY; 2359 goto out; 2360 } 2361 2362 req = get_mac_list_cmd.va; 2363 2364 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2365 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), 2366 wrb, &get_mac_list_cmd); 2367 2368 req->hdr.domain = domain; 2369 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 2370 req->perm_override = 1; 2371 2372 status = be_mcc_notify_wait(adapter); 2373 if (!status) { 2374 struct be_cmd_resp_get_mac_list *resp = 2375 get_mac_list_cmd.va; 2376 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2377 /* Mac list returned could contain one or more active mac_ids 2378 * or one or more pseudo permanant mac addresses. If an active 2379 * mac_id is present, return first active mac_id found 2380 */ 2381 for (i = 0; i < mac_count; i++) { 2382 struct get_list_macaddr *mac_entry; 2383 u16 mac_addr_size; 2384 u32 mac_id; 2385 2386 mac_entry = &resp->macaddr_list[i]; 2387 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 2388 /* mac_id is a 32 bit value and mac_addr size 2389 * is 6 bytes 2390 */ 2391 if (mac_addr_size == sizeof(u32)) { 2392 *pmac_id_active = true; 2393 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 2394 *pmac_id = le32_to_cpu(mac_id); 2395 goto out; 2396 } 2397 } 2398 /* If no active mac_id found, return first pseudo mac addr */ 2399 *pmac_id_active = false; 2400 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2401 ETH_ALEN); 2402 } 2403 2404 out: 2405 spin_unlock_bh(&adapter->mcc_lock); 2406 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 2407 get_mac_list_cmd.va, get_mac_list_cmd.dma); 2408 return status; 2409 } 2410 2411 /* Uses synchronous MCCQ */ 2412 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 2413 u8 mac_count, u32 domain) 2414 { 2415 struct be_mcc_wrb *wrb; 2416 struct be_cmd_req_set_mac_list *req; 2417 int status; 2418 struct be_dma_mem cmd; 2419 2420 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2421 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2422 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2423 &cmd.dma, GFP_KERNEL); 2424 if (!cmd.va) { 2425 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2426 return -ENOMEM; 2427 } 2428 2429 spin_lock_bh(&adapter->mcc_lock); 2430 2431 wrb = wrb_from_mccq(adapter); 2432 if (!wrb) { 2433 status = -EBUSY; 2434 goto err; 2435 } 2436 2437 req = cmd.va; 2438 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2439 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2440 wrb, &cmd); 2441 2442 req->hdr.domain = domain; 2443 req->mac_count = mac_count; 2444 if (mac_count) 2445 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 2446 2447 status = be_mcc_notify_wait(adapter); 2448 2449 err: 2450 dma_free_coherent(&adapter->pdev->dev, cmd.size, 2451 cmd.va, cmd.dma); 2452 spin_unlock_bh(&adapter->mcc_lock); 2453 return status; 2454 } 2455 2456 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 2457 u32 domain, u16 intf_id) 2458 { 2459 struct be_mcc_wrb *wrb; 2460 struct be_cmd_req_set_hsw_config *req; 2461 void *ctxt; 2462 int status; 2463 2464 spin_lock_bh(&adapter->mcc_lock); 2465 2466 wrb = wrb_from_mccq(adapter); 2467 if (!wrb) { 2468 status = -EBUSY; 2469 goto err; 2470 } 2471 2472 req = embedded_payload(wrb); 2473 ctxt = &req->context; 2474 2475 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2476 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2477 2478 req->hdr.domain = domain; 2479 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 2480 if (pvid) { 2481 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 2482 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 2483 } 2484 2485 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2486 status = be_mcc_notify_wait(adapter); 2487 2488 err: 2489 spin_unlock_bh(&adapter->mcc_lock); 2490 return status; 2491 } 2492 2493 /* Get Hyper switch config */ 2494 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 2495 u32 domain, u16 intf_id) 2496 { 2497 struct be_mcc_wrb *wrb; 2498 struct be_cmd_req_get_hsw_config *req; 2499 void *ctxt; 2500 int status; 2501 u16 vid; 2502 2503 spin_lock_bh(&adapter->mcc_lock); 2504 2505 wrb = wrb_from_mccq(adapter); 2506 if (!wrb) { 2507 status = -EBUSY; 2508 goto err; 2509 } 2510 2511 req = embedded_payload(wrb); 2512 ctxt = &req->context; 2513 2514 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2515 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2516 2517 req->hdr.domain = domain; 2518 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, 2519 intf_id); 2520 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 2521 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2522 2523 status = be_mcc_notify_wait(adapter); 2524 if (!status) { 2525 struct be_cmd_resp_get_hsw_config *resp = 2526 embedded_payload(wrb); 2527 be_dws_le_to_cpu(&resp->context, 2528 sizeof(resp->context)); 2529 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 2530 pvid, &resp->context); 2531 *pvid = le16_to_cpu(vid); 2532 } 2533 2534 err: 2535 spin_unlock_bh(&adapter->mcc_lock); 2536 return status; 2537 } 2538 2539 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 2540 { 2541 struct be_mcc_wrb *wrb; 2542 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 2543 int status; 2544 int payload_len = sizeof(*req); 2545 struct be_dma_mem cmd; 2546 2547 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2548 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 2549 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2550 &cmd.dma); 2551 if (!cmd.va) { 2552 dev_err(&adapter->pdev->dev, 2553 "Memory allocation failure\n"); 2554 return -ENOMEM; 2555 } 2556 2557 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2558 return -1; 2559 2560 wrb = wrb_from_mbox(adapter); 2561 if (!wrb) { 2562 status = -EBUSY; 2563 goto err; 2564 } 2565 2566 req = cmd.va; 2567 2568 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2569 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 2570 payload_len, wrb, &cmd); 2571 2572 req->hdr.version = 1; 2573 req->query_options = BE_GET_WOL_CAP; 2574 2575 status = be_mbox_notify_wait(adapter); 2576 if (!status) { 2577 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 2578 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; 2579 2580 /* the command could succeed misleadingly on old f/w 2581 * which is not aware of the V1 version. fake an error. */ 2582 if (resp->hdr.response_length < payload_len) { 2583 status = -1; 2584 goto err; 2585 } 2586 adapter->wol_cap = resp->wol_settings; 2587 } 2588 err: 2589 mutex_unlock(&adapter->mbox_lock); 2590 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2591 return status; 2592 } 2593