1 /* 2 * Copyright (C) 2005 - 2011 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Contact Information: 11 * linux-drivers@emulex.com 12 * 13 * Emulex 14 * 3333 Susan Street 15 * Costa Mesa, CA 92626 16 */ 17 18 #include <linux/module.h> 19 #include "be.h" 20 #include "be_cmds.h" 21 22 /* Must be a power of 2 or else MODULO will BUG_ON */ 23 static int be_get_temp_freq = 64; 24 25 static inline void *embedded_payload(struct be_mcc_wrb *wrb) 26 { 27 return wrb->payload.embedded_payload; 28 } 29 30 static void be_mcc_notify(struct be_adapter *adapter) 31 { 32 struct be_queue_info *mccq = &adapter->mcc_obj.q; 33 u32 val = 0; 34 35 if (be_error(adapter)) 36 return; 37 38 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 39 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 40 41 wmb(); 42 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 43 } 44 45 /* To check if valid bit is set, check the entire word as we don't know 46 * the endianness of the data (old entry is host endian while a new entry is 47 * little endian) */ 48 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 49 { 50 if (compl->flags != 0) { 51 compl->flags = le32_to_cpu(compl->flags); 52 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); 53 return true; 54 } else { 55 return false; 56 } 57 } 58 59 /* Need to reset the entire word that houses the valid bit */ 60 static inline void be_mcc_compl_use(struct be_mcc_compl *compl) 61 { 62 compl->flags = 0; 63 } 64 65 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) 66 { 67 unsigned long addr; 68 69 addr = tag1; 70 addr = ((addr << 16) << 16) | tag0; 71 return (void *)addr; 72 } 73 74 static int be_mcc_compl_process(struct be_adapter *adapter, 75 struct be_mcc_compl 
*compl) 76 { 77 u16 compl_status, extd_status; 78 struct be_cmd_resp_hdr *resp_hdr; 79 u8 opcode = 0, subsystem = 0; 80 81 /* Just swap the status to host endian; mcc tag is opaquely copied 82 * from mcc_wrb */ 83 be_dws_le_to_cpu(compl, 4); 84 85 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 86 CQE_STATUS_COMPL_MASK; 87 88 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 89 90 if (resp_hdr) { 91 opcode = resp_hdr->opcode; 92 subsystem = resp_hdr->subsystem; 93 } 94 95 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 96 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 97 (subsystem == CMD_SUBSYSTEM_COMMON)) { 98 adapter->flash_status = compl_status; 99 complete(&adapter->flash_compl); 100 } 101 102 if (compl_status == MCC_STATUS_SUCCESS) { 103 if (((opcode == OPCODE_ETH_GET_STATISTICS) || 104 (opcode == OPCODE_ETH_GET_PPORT_STATS)) && 105 (subsystem == CMD_SUBSYSTEM_ETH)) { 106 be_parse_stats(adapter); 107 adapter->stats_cmd_sent = false; 108 } 109 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 110 subsystem == CMD_SUBSYSTEM_COMMON) { 111 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 112 (void *)resp_hdr; 113 adapter->drv_stats.be_on_die_temperature = 114 resp->on_die_temperature; 115 } 116 } else { 117 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) 118 be_get_temp_freq = 0; 119 120 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 121 compl_status == MCC_STATUS_ILLEGAL_REQUEST) 122 goto done; 123 124 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 125 dev_warn(&adapter->pdev->dev, "This domain(VM) is not " 126 "permitted to execute this cmd (opcode %d)\n", 127 opcode); 128 } else { 129 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 130 CQE_STATUS_EXTD_MASK; 131 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" 132 "status %d, extd-status %d\n", 133 opcode, compl_status, extd_status); 134 } 135 } 136 done: 137 return compl_status; 138 } 139 140 /* Link state evt is a string of bytes; no need for 
endian swapping */ 141 static void be_async_link_state_process(struct be_adapter *adapter, 142 struct be_async_event_link_state *evt) 143 { 144 /* When link status changes, link speed must be re-queried from FW */ 145 adapter->phy.link_speed = -1; 146 147 /* For the initial link status do not rely on the ASYNC event as 148 * it may not be received in some cases. 149 */ 150 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 151 be_link_status_update(adapter, evt->port_link_status); 152 } 153 154 /* Grp5 CoS Priority evt */ 155 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 156 struct be_async_event_grp5_cos_priority *evt) 157 { 158 if (evt->valid) { 159 adapter->vlan_prio_bmap = evt->available_priority_bmap; 160 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 161 adapter->recommended_prio = 162 evt->reco_default_priority << VLAN_PRIO_SHIFT; 163 } 164 } 165 166 /* Grp5 QOS Speed evt */ 167 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 168 struct be_async_event_grp5_qos_link_speed *evt) 169 { 170 if (evt->physical_port == adapter->port_num) { 171 /* qos_link_speed is in units of 10 Mbps */ 172 adapter->phy.link_speed = evt->qos_link_speed * 10; 173 } 174 } 175 176 /*Grp5 PVID evt*/ 177 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 178 struct be_async_event_grp5_pvid_state *evt) 179 { 180 if (evt->enabled) 181 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 182 else 183 adapter->pvid = 0; 184 } 185 186 static void be_async_grp5_evt_process(struct be_adapter *adapter, 187 u32 trailer, struct be_mcc_compl *evt) 188 { 189 u8 event_type = 0; 190 191 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & 192 ASYNC_TRAILER_EVENT_TYPE_MASK; 193 194 switch (event_type) { 195 case ASYNC_EVENT_COS_PRIORITY: 196 be_async_grp5_cos_priority_process(adapter, 197 (struct be_async_event_grp5_cos_priority *)evt); 198 break; 199 case ASYNC_EVENT_QOS_SPEED: 200 be_async_grp5_qos_speed_process(adapter, 
201 (struct be_async_event_grp5_qos_link_speed *)evt); 202 break; 203 case ASYNC_EVENT_PVID_STATE: 204 be_async_grp5_pvid_state_process(adapter, 205 (struct be_async_event_grp5_pvid_state *)evt); 206 break; 207 default: 208 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 209 break; 210 } 211 } 212 213 static inline bool is_link_state_evt(u32 trailer) 214 { 215 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 216 ASYNC_TRAILER_EVENT_CODE_MASK) == 217 ASYNC_EVENT_CODE_LINK_STATE; 218 } 219 220 static inline bool is_grp5_evt(u32 trailer) 221 { 222 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 223 ASYNC_TRAILER_EVENT_CODE_MASK) == 224 ASYNC_EVENT_CODE_GRP_5); 225 } 226 227 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 228 { 229 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 230 struct be_mcc_compl *compl = queue_tail_node(mcc_cq); 231 232 if (be_mcc_compl_is_new(compl)) { 233 queue_tail_inc(mcc_cq); 234 return compl; 235 } 236 return NULL; 237 } 238 239 void be_async_mcc_enable(struct be_adapter *adapter) 240 { 241 spin_lock_bh(&adapter->mcc_cq_lock); 242 243 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); 244 adapter->mcc_obj.rearm_cq = true; 245 246 spin_unlock_bh(&adapter->mcc_cq_lock); 247 } 248 249 void be_async_mcc_disable(struct be_adapter *adapter) 250 { 251 adapter->mcc_obj.rearm_cq = false; 252 } 253 254 int be_process_mcc(struct be_adapter *adapter) 255 { 256 struct be_mcc_compl *compl; 257 int num = 0, status = 0; 258 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 259 260 spin_lock_bh(&adapter->mcc_cq_lock); 261 while ((compl = be_mcc_compl_get(adapter))) { 262 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 263 /* Interpret flags as an async trailer */ 264 if (is_link_state_evt(compl->flags)) 265 be_async_link_state_process(adapter, 266 (struct be_async_event_link_state *) compl); 267 else if (is_grp5_evt(compl->flags)) 268 be_async_grp5_evt_process(adapter, 269 compl->flags, compl); 270 } else if 
(compl->flags & CQE_FLAGS_COMPLETED_MASK) { 271 status = be_mcc_compl_process(adapter, compl); 272 atomic_dec(&mcc_obj->q.used); 273 } 274 be_mcc_compl_use(compl); 275 num++; 276 } 277 278 if (num) 279 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 280 281 spin_unlock_bh(&adapter->mcc_cq_lock); 282 return status; 283 } 284 285 /* Wait till no more pending mcc requests are present */ 286 static int be_mcc_wait_compl(struct be_adapter *adapter) 287 { 288 #define mcc_timeout 120000 /* 12s timeout */ 289 int i, status = 0; 290 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 291 292 for (i = 0; i < mcc_timeout; i++) { 293 if (be_error(adapter)) 294 return -EIO; 295 296 status = be_process_mcc(adapter); 297 298 if (atomic_read(&mcc_obj->q.used) == 0) 299 break; 300 udelay(100); 301 } 302 if (i == mcc_timeout) { 303 dev_err(&adapter->pdev->dev, "FW not responding\n"); 304 adapter->fw_timeout = true; 305 return -EIO; 306 } 307 return status; 308 } 309 310 /* Notify MCC requests and wait for completion */ 311 static int be_mcc_notify_wait(struct be_adapter *adapter) 312 { 313 int status; 314 struct be_mcc_wrb *wrb; 315 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 316 u16 index = mcc_obj->q.head; 317 struct be_cmd_resp_hdr *resp; 318 319 index_dec(&index, mcc_obj->q.len); 320 wrb = queue_index_node(&mcc_obj->q, index); 321 322 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); 323 324 be_mcc_notify(adapter); 325 326 status = be_mcc_wait_compl(adapter); 327 if (status == -EIO) 328 goto out; 329 330 status = resp->status; 331 out: 332 return status; 333 } 334 335 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 336 { 337 int msecs = 0; 338 u32 ready; 339 340 do { 341 if (be_error(adapter)) 342 return -EIO; 343 344 ready = ioread32(db); 345 if (ready == 0xffffffff) 346 return -1; 347 348 ready &= MPU_MAILBOX_DB_RDY_MASK; 349 if (ready) 350 break; 351 352 if (msecs > 4000) { 353 dev_err(&adapter->pdev->dev, "FW not responding\n"); 
354 adapter->fw_timeout = true; 355 be_detect_dump_ue(adapter); 356 return -1; 357 } 358 359 msleep(1); 360 msecs++; 361 } while (true); 362 363 return 0; 364 } 365 366 /* 367 * Insert the mailbox address into the doorbell in two steps 368 * Polls on the mbox doorbell till a command completion (or a timeout) occurs 369 */ 370 static int be_mbox_notify_wait(struct be_adapter *adapter) 371 { 372 int status; 373 u32 val = 0; 374 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; 375 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 376 struct be_mcc_mailbox *mbox = mbox_mem->va; 377 struct be_mcc_compl *compl = &mbox->compl; 378 379 /* wait for ready to be set */ 380 status = be_mbox_db_ready_wait(adapter, db); 381 if (status != 0) 382 return status; 383 384 val |= MPU_MAILBOX_DB_HI_MASK; 385 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 386 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 387 iowrite32(val, db); 388 389 /* wait for ready to be set */ 390 status = be_mbox_db_ready_wait(adapter, db); 391 if (status != 0) 392 return status; 393 394 val = 0; 395 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ 396 val |= (u32)(mbox_mem->dma >> 4) << 2; 397 iowrite32(val, db); 398 399 status = be_mbox_db_ready_wait(adapter, db); 400 if (status != 0) 401 return status; 402 403 /* A cq entry has been made now */ 404 if (be_mcc_compl_is_new(compl)) { 405 status = be_mcc_compl_process(adapter, &mbox->compl); 406 be_mcc_compl_use(compl); 407 if (status) 408 return status; 409 } else { 410 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); 411 return -1; 412 } 413 return 0; 414 } 415 416 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 417 { 418 u32 sem; 419 420 if (lancer_chip(adapter)) 421 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); 422 else 423 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); 424 425 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 426 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & 
EP_SEMAPHORE_POST_ERR_MASK) 427 return -1; 428 else 429 return 0; 430 } 431 432 int be_cmd_POST(struct be_adapter *adapter) 433 { 434 u16 stage; 435 int status, timeout = 0; 436 struct device *dev = &adapter->pdev->dev; 437 438 do { 439 status = be_POST_stage_get(adapter, &stage); 440 if (status) { 441 dev_err(dev, "POST error; stage=0x%x\n", stage); 442 return -1; 443 } else if (stage != POST_STAGE_ARMFW_RDY) { 444 if (msleep_interruptible(2000)) { 445 dev_err(dev, "Waiting for POST aborted\n"); 446 return -EINTR; 447 } 448 timeout += 2; 449 } else { 450 return 0; 451 } 452 } while (timeout < 60); 453 454 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 455 return -1; 456 } 457 458 459 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) 460 { 461 return &wrb->payload.sgl[0]; 462 } 463 464 465 /* Don't touch the hdr after it's prepared */ 466 /* mem will be NULL for embedded commands */ 467 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 468 u8 subsystem, u8 opcode, int cmd_len, 469 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 470 { 471 struct be_sge *sge; 472 unsigned long addr = (unsigned long)req_hdr; 473 u64 req_addr = addr; 474 475 req_hdr->opcode = opcode; 476 req_hdr->subsystem = subsystem; 477 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 478 req_hdr->version = 0; 479 480 wrb->tag0 = req_addr & 0xFFFFFFFF; 481 wrb->tag1 = upper_32_bits(req_addr); 482 483 wrb->payload_length = cmd_len; 484 if (mem) { 485 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 486 MCC_WRB_SGE_CNT_SHIFT; 487 sge = nonembedded_sgl(wrb); 488 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); 489 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); 490 sge->len = cpu_to_le32(mem->size); 491 } else 492 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 493 be_dws_cpu_to_le(wrb, 8); 494 } 495 496 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 497 struct be_dma_mem *mem) 498 { 499 int i, buf_pages = 
min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 500 u64 dma = (u64)mem->dma; 501 502 for (i = 0; i < buf_pages; i++) { 503 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); 504 pages[i].hi = cpu_to_le32(upper_32_bits(dma)); 505 dma += PAGE_SIZE_4K; 506 } 507 } 508 509 /* Converts interrupt delay in microseconds to multiplier value */ 510 static u32 eq_delay_to_mult(u32 usec_delay) 511 { 512 #define MAX_INTR_RATE 651042 513 const u32 round = 10; 514 u32 multiplier; 515 516 if (usec_delay == 0) 517 multiplier = 0; 518 else { 519 u32 interrupt_rate = 1000000 / usec_delay; 520 /* Max delay, corresponding to the lowest interrupt rate */ 521 if (interrupt_rate == 0) 522 multiplier = 1023; 523 else { 524 multiplier = (MAX_INTR_RATE - interrupt_rate) * round; 525 multiplier /= interrupt_rate; 526 /* Round the multiplier to the closest value.*/ 527 multiplier = (multiplier + round/2) / round; 528 multiplier = min(multiplier, (u32)1023); 529 } 530 } 531 return multiplier; 532 } 533 534 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 535 { 536 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 537 struct be_mcc_wrb *wrb 538 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 539 memset(wrb, 0, sizeof(*wrb)); 540 return wrb; 541 } 542 543 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) 544 { 545 struct be_queue_info *mccq = &adapter->mcc_obj.q; 546 struct be_mcc_wrb *wrb; 547 548 if (atomic_read(&mccq->used) >= mccq->len) { 549 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); 550 return NULL; 551 } 552 553 wrb = queue_head_node(mccq); 554 queue_head_inc(mccq); 555 atomic_inc(&mccq->used); 556 memset(wrb, 0, sizeof(*wrb)); 557 return wrb; 558 } 559 560 /* Tell fw we're about to start firing cmds by writing a 561 * special pattern across the wrb hdr; uses mbox 562 */ 563 int be_cmd_fw_init(struct be_adapter *adapter) 564 { 565 u8 *wrb; 566 int status; 567 568 if (mutex_lock_interruptible(&adapter->mbox_lock)) 569 return -1; 570 
571 wrb = (u8 *)wrb_from_mbox(adapter); 572 *wrb++ = 0xFF; 573 *wrb++ = 0x12; 574 *wrb++ = 0x34; 575 *wrb++ = 0xFF; 576 *wrb++ = 0xFF; 577 *wrb++ = 0x56; 578 *wrb++ = 0x78; 579 *wrb = 0xFF; 580 581 status = be_mbox_notify_wait(adapter); 582 583 mutex_unlock(&adapter->mbox_lock); 584 return status; 585 } 586 587 /* Tell fw we're done with firing cmds by writing a 588 * special pattern across the wrb hdr; uses mbox 589 */ 590 int be_cmd_fw_clean(struct be_adapter *adapter) 591 { 592 u8 *wrb; 593 int status; 594 595 if (mutex_lock_interruptible(&adapter->mbox_lock)) 596 return -1; 597 598 wrb = (u8 *)wrb_from_mbox(adapter); 599 *wrb++ = 0xFF; 600 *wrb++ = 0xAA; 601 *wrb++ = 0xBB; 602 *wrb++ = 0xFF; 603 *wrb++ = 0xFF; 604 *wrb++ = 0xCC; 605 *wrb++ = 0xDD; 606 *wrb = 0xFF; 607 608 status = be_mbox_notify_wait(adapter); 609 610 mutex_unlock(&adapter->mbox_lock); 611 return status; 612 } 613 int be_cmd_eq_create(struct be_adapter *adapter, 614 struct be_queue_info *eq, int eq_delay) 615 { 616 struct be_mcc_wrb *wrb; 617 struct be_cmd_req_eq_create *req; 618 struct be_dma_mem *q_mem = &eq->dma_mem; 619 int status; 620 621 if (mutex_lock_interruptible(&adapter->mbox_lock)) 622 return -1; 623 624 wrb = wrb_from_mbox(adapter); 625 req = embedded_payload(wrb); 626 627 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 628 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 629 630 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 631 632 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 633 /* 4byte eqe*/ 634 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 635 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 636 __ilog2_u32(eq->len/256)); 637 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, 638 eq_delay_to_mult(eq_delay)); 639 be_dws_cpu_to_le(req->context, sizeof(req->context)); 640 641 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 642 643 status = 
be_mbox_notify_wait(adapter); 644 if (!status) { 645 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 646 eq->id = le16_to_cpu(resp->eq_id); 647 eq->created = true; 648 } 649 650 mutex_unlock(&adapter->mbox_lock); 651 return status; 652 } 653 654 /* Use MCC */ 655 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 656 u8 type, bool permanent, u32 if_handle, u32 pmac_id) 657 { 658 struct be_mcc_wrb *wrb; 659 struct be_cmd_req_mac_query *req; 660 int status; 661 662 spin_lock_bh(&adapter->mcc_lock); 663 664 wrb = wrb_from_mccq(adapter); 665 if (!wrb) { 666 status = -EBUSY; 667 goto err; 668 } 669 req = embedded_payload(wrb); 670 671 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 672 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 673 req->type = type; 674 if (permanent) { 675 req->permanent = 1; 676 } else { 677 req->if_id = cpu_to_le16((u16) if_handle); 678 req->pmac_id = cpu_to_le32(pmac_id); 679 req->permanent = 0; 680 } 681 682 status = be_mcc_notify_wait(adapter); 683 if (!status) { 684 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 685 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 686 } 687 688 err: 689 spin_unlock_bh(&adapter->mcc_lock); 690 return status; 691 } 692 693 /* Uses synchronous MCCQ */ 694 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 695 u32 if_id, u32 *pmac_id, u32 domain) 696 { 697 struct be_mcc_wrb *wrb; 698 struct be_cmd_req_pmac_add *req; 699 int status; 700 701 spin_lock_bh(&adapter->mcc_lock); 702 703 wrb = wrb_from_mccq(adapter); 704 if (!wrb) { 705 status = -EBUSY; 706 goto err; 707 } 708 req = embedded_payload(wrb); 709 710 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 711 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); 712 713 req->hdr.domain = domain; 714 req->if_id = cpu_to_le32(if_id); 715 memcpy(req->mac_address, mac_addr, ETH_ALEN); 716 717 status = be_mcc_notify_wait(adapter); 718 if (!status) { 719 struct be_cmd_resp_pmac_add *resp = 
embedded_payload(wrb); 720 *pmac_id = le32_to_cpu(resp->pmac_id); 721 } 722 723 err: 724 spin_unlock_bh(&adapter->mcc_lock); 725 726 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 727 status = -EPERM; 728 729 return status; 730 } 731 732 /* Uses synchronous MCCQ */ 733 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) 734 { 735 struct be_mcc_wrb *wrb; 736 struct be_cmd_req_pmac_del *req; 737 int status; 738 739 if (pmac_id == -1) 740 return 0; 741 742 spin_lock_bh(&adapter->mcc_lock); 743 744 wrb = wrb_from_mccq(adapter); 745 if (!wrb) { 746 status = -EBUSY; 747 goto err; 748 } 749 req = embedded_payload(wrb); 750 751 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 752 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); 753 754 req->hdr.domain = dom; 755 req->if_id = cpu_to_le32(if_id); 756 req->pmac_id = cpu_to_le32(pmac_id); 757 758 status = be_mcc_notify_wait(adapter); 759 760 err: 761 spin_unlock_bh(&adapter->mcc_lock); 762 return status; 763 } 764 765 /* Uses Mbox */ 766 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 767 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 768 { 769 struct be_mcc_wrb *wrb; 770 struct be_cmd_req_cq_create *req; 771 struct be_dma_mem *q_mem = &cq->dma_mem; 772 void *ctxt; 773 int status; 774 775 if (mutex_lock_interruptible(&adapter->mbox_lock)) 776 return -1; 777 778 wrb = wrb_from_mbox(adapter); 779 req = embedded_payload(wrb); 780 ctxt = &req->context; 781 782 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 783 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); 784 785 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 786 if (lancer_chip(adapter)) { 787 req->hdr.version = 2; 788 req->page_size = 1; /* 1 for 4K */ 789 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, 790 no_delay); 791 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, 792 __ilog2_u32(cq->len/256)); 793 AMAP_SET_BITS(struct 
amap_cq_context_lancer, valid, ctxt, 1); 794 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, 795 ctxt, 1); 796 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, 797 ctxt, eq->id); 798 } else { 799 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 800 coalesce_wm); 801 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 802 ctxt, no_delay); 803 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 804 __ilog2_u32(cq->len/256)); 805 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 806 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 807 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 808 } 809 810 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 811 812 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 813 814 status = be_mbox_notify_wait(adapter); 815 if (!status) { 816 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 817 cq->id = le16_to_cpu(resp->cq_id); 818 cq->created = true; 819 } 820 821 mutex_unlock(&adapter->mbox_lock); 822 823 return status; 824 } 825 826 static u32 be_encoded_q_len(int q_len) 827 { 828 u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 829 if (len_encoded == 16) 830 len_encoded = 0; 831 return len_encoded; 832 } 833 834 int be_cmd_mccq_ext_create(struct be_adapter *adapter, 835 struct be_queue_info *mccq, 836 struct be_queue_info *cq) 837 { 838 struct be_mcc_wrb *wrb; 839 struct be_cmd_req_mcc_ext_create *req; 840 struct be_dma_mem *q_mem = &mccq->dma_mem; 841 void *ctxt; 842 int status; 843 844 if (mutex_lock_interruptible(&adapter->mbox_lock)) 845 return -1; 846 847 wrb = wrb_from_mbox(adapter); 848 req = embedded_payload(wrb); 849 ctxt = &req->context; 850 851 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 852 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); 853 854 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 855 if (lancer_chip(adapter)) { 856 req->hdr.version = 1; 857 req->cq_id = cpu_to_le16(cq->id); 858 859 
AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, 860 be_encoded_q_len(mccq->len)); 861 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); 862 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, 863 ctxt, cq->id); 864 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, 865 ctxt, 1); 866 867 } else { 868 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 869 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 870 be_encoded_q_len(mccq->len)); 871 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 872 } 873 874 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 875 req->async_event_bitmap[0] = cpu_to_le32(0x00000022); 876 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 877 878 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 879 880 status = be_mbox_notify_wait(adapter); 881 if (!status) { 882 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 883 mccq->id = le16_to_cpu(resp->id); 884 mccq->created = true; 885 } 886 mutex_unlock(&adapter->mbox_lock); 887 888 return status; 889 } 890 891 int be_cmd_mccq_org_create(struct be_adapter *adapter, 892 struct be_queue_info *mccq, 893 struct be_queue_info *cq) 894 { 895 struct be_mcc_wrb *wrb; 896 struct be_cmd_req_mcc_create *req; 897 struct be_dma_mem *q_mem = &mccq->dma_mem; 898 void *ctxt; 899 int status; 900 901 if (mutex_lock_interruptible(&adapter->mbox_lock)) 902 return -1; 903 904 wrb = wrb_from_mbox(adapter); 905 req = embedded_payload(wrb); 906 ctxt = &req->context; 907 908 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 909 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); 910 911 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 912 913 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 914 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 915 be_encoded_q_len(mccq->len)); 916 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 917 918 
be_dws_cpu_to_le(ctxt, sizeof(req->context)); 919 920 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 921 922 status = be_mbox_notify_wait(adapter); 923 if (!status) { 924 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 925 mccq->id = le16_to_cpu(resp->id); 926 mccq->created = true; 927 } 928 929 mutex_unlock(&adapter->mbox_lock); 930 return status; 931 } 932 933 int be_cmd_mccq_create(struct be_adapter *adapter, 934 struct be_queue_info *mccq, 935 struct be_queue_info *cq) 936 { 937 int status; 938 939 status = be_cmd_mccq_ext_create(adapter, mccq, cq); 940 if (status && !lancer_chip(adapter)) { 941 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " 942 "or newer to avoid conflicting priorities between NIC " 943 "and FCoE traffic"); 944 status = be_cmd_mccq_org_create(adapter, mccq, cq); 945 } 946 return status; 947 } 948 949 int be_cmd_txq_create(struct be_adapter *adapter, 950 struct be_queue_info *txq, 951 struct be_queue_info *cq) 952 { 953 struct be_mcc_wrb *wrb; 954 struct be_cmd_req_eth_tx_create *req; 955 struct be_dma_mem *q_mem = &txq->dma_mem; 956 void *ctxt; 957 int status; 958 959 spin_lock_bh(&adapter->mcc_lock); 960 961 wrb = wrb_from_mccq(adapter); 962 if (!wrb) { 963 status = -EBUSY; 964 goto err; 965 } 966 967 req = embedded_payload(wrb); 968 ctxt = &req->context; 969 970 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 971 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); 972 973 if (lancer_chip(adapter)) { 974 req->hdr.version = 1; 975 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, 976 adapter->if_handle); 977 } 978 979 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 980 req->ulp_num = BE_ULP1_NUM; 981 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 982 983 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, 984 be_encoded_q_len(txq->len)); 985 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); 986 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); 987 988 
be_dws_cpu_to_le(ctxt, sizeof(req->context)); 989 990 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 991 992 status = be_mcc_notify_wait(adapter); 993 if (!status) { 994 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); 995 txq->id = le16_to_cpu(resp->cid); 996 txq->created = true; 997 } 998 999 err: 1000 spin_unlock_bh(&adapter->mcc_lock); 1001 1002 return status; 1003 } 1004 1005 /* Uses MCC */ 1006 int be_cmd_rxq_create(struct be_adapter *adapter, 1007 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1008 u32 if_id, u32 rss, u8 *rss_id) 1009 { 1010 struct be_mcc_wrb *wrb; 1011 struct be_cmd_req_eth_rx_create *req; 1012 struct be_dma_mem *q_mem = &rxq->dma_mem; 1013 int status; 1014 1015 spin_lock_bh(&adapter->mcc_lock); 1016 1017 wrb = wrb_from_mccq(adapter); 1018 if (!wrb) { 1019 status = -EBUSY; 1020 goto err; 1021 } 1022 req = embedded_payload(wrb); 1023 1024 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1025 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1026 1027 req->cq_id = cpu_to_le16(cq_id); 1028 req->frag_size = fls(frag_size) - 1; 1029 req->num_pages = 2; 1030 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1031 req->interface_id = cpu_to_le32(if_id); 1032 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); 1033 req->rss_queue = cpu_to_le32(rss); 1034 1035 status = be_mcc_notify_wait(adapter); 1036 if (!status) { 1037 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 1038 rxq->id = le16_to_cpu(resp->id); 1039 rxq->created = true; 1040 *rss_id = resp->rss_id; 1041 } 1042 1043 err: 1044 spin_unlock_bh(&adapter->mcc_lock); 1045 return status; 1046 } 1047 1048 /* Generic destroyer function for all types of queues 1049 * Uses Mbox 1050 */ 1051 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1052 int queue_type) 1053 { 1054 struct be_mcc_wrb *wrb; 1055 struct be_cmd_req_q_destroy *req; 1056 u8 subsys = 0, opcode = 0; 1057 int status; 1058 
1059 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1060 return -1; 1061 1062 wrb = wrb_from_mbox(adapter); 1063 req = embedded_payload(wrb); 1064 1065 switch (queue_type) { 1066 case QTYPE_EQ: 1067 subsys = CMD_SUBSYSTEM_COMMON; 1068 opcode = OPCODE_COMMON_EQ_DESTROY; 1069 break; 1070 case QTYPE_CQ: 1071 subsys = CMD_SUBSYSTEM_COMMON; 1072 opcode = OPCODE_COMMON_CQ_DESTROY; 1073 break; 1074 case QTYPE_TXQ: 1075 subsys = CMD_SUBSYSTEM_ETH; 1076 opcode = OPCODE_ETH_TX_DESTROY; 1077 break; 1078 case QTYPE_RXQ: 1079 subsys = CMD_SUBSYSTEM_ETH; 1080 opcode = OPCODE_ETH_RX_DESTROY; 1081 break; 1082 case QTYPE_MCCQ: 1083 subsys = CMD_SUBSYSTEM_COMMON; 1084 opcode = OPCODE_COMMON_MCC_DESTROY; 1085 break; 1086 default: 1087 BUG(); 1088 } 1089 1090 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1091 NULL); 1092 req->id = cpu_to_le16(q->id); 1093 1094 status = be_mbox_notify_wait(adapter); 1095 if (!status) 1096 q->created = false; 1097 1098 mutex_unlock(&adapter->mbox_lock); 1099 return status; 1100 } 1101 1102 /* Uses MCC */ 1103 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) 1104 { 1105 struct be_mcc_wrb *wrb; 1106 struct be_cmd_req_q_destroy *req; 1107 int status; 1108 1109 spin_lock_bh(&adapter->mcc_lock); 1110 1111 wrb = wrb_from_mccq(adapter); 1112 if (!wrb) { 1113 status = -EBUSY; 1114 goto err; 1115 } 1116 req = embedded_payload(wrb); 1117 1118 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1119 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1120 req->id = cpu_to_le16(q->id); 1121 1122 status = be_mcc_notify_wait(adapter); 1123 if (!status) 1124 q->created = false; 1125 1126 err: 1127 spin_unlock_bh(&adapter->mcc_lock); 1128 return status; 1129 } 1130 1131 /* Create an rx filtering policy configuration on an i/f 1132 * Uses MCCQ 1133 */ 1134 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1135 u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) 1136 { 1137 struct be_mcc_wrb 
*wrb; 1138 struct be_cmd_req_if_create *req; 1139 int status; 1140 1141 spin_lock_bh(&adapter->mcc_lock); 1142 1143 wrb = wrb_from_mccq(adapter); 1144 if (!wrb) { 1145 status = -EBUSY; 1146 goto err; 1147 } 1148 req = embedded_payload(wrb); 1149 1150 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1151 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); 1152 req->hdr.domain = domain; 1153 req->capability_flags = cpu_to_le32(cap_flags); 1154 req->enable_flags = cpu_to_le32(en_flags); 1155 if (mac) 1156 memcpy(req->mac_addr, mac, ETH_ALEN); 1157 else 1158 req->pmac_invalid = true; 1159 1160 status = be_mcc_notify_wait(adapter); 1161 if (!status) { 1162 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1163 *if_handle = le32_to_cpu(resp->interface_id); 1164 if (mac) 1165 *pmac_id = le32_to_cpu(resp->pmac_id); 1166 } 1167 1168 err: 1169 spin_unlock_bh(&adapter->mcc_lock); 1170 return status; 1171 } 1172 1173 /* Uses MCCQ */ 1174 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) 1175 { 1176 struct be_mcc_wrb *wrb; 1177 struct be_cmd_req_if_destroy *req; 1178 int status; 1179 1180 if (interface_id == -1) 1181 return 0; 1182 1183 spin_lock_bh(&adapter->mcc_lock); 1184 1185 wrb = wrb_from_mccq(adapter); 1186 if (!wrb) { 1187 status = -EBUSY; 1188 goto err; 1189 } 1190 req = embedded_payload(wrb); 1191 1192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1193 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); 1194 req->hdr.domain = domain; 1195 req->interface_id = cpu_to_le32(interface_id); 1196 1197 status = be_mcc_notify_wait(adapter); 1198 err: 1199 spin_unlock_bh(&adapter->mcc_lock); 1200 return status; 1201 } 1202 1203 /* Get stats is a non embedded command: the request is not embedded inside 1204 * WRB but is a separate dma memory block 1205 * Uses asynchronous MCC 1206 */ 1207 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1208 { 1209 struct be_mcc_wrb 
/* Lancer Stats */
/* Post a non-embedded GET_PPORT_STATS command for Lancer chips.
 * The request/response shares @nonemb_cmd's DMA buffer; the MCC completion
 * handler parses the stats and clears adapter->stats_cmd_sent.
 * Asynchronous: returns 0 as soon as the command is posted, or -EBUSY if
 * no MCC WRB is currently free.
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{

	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* request lives in the DMA buffer, not in the WRB itself */
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;	/* read, don't clear */

	/* fire-and-forget; completion handler consumes the result */
	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
embedded_payload(wrb); 1291 1292 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1293 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1294 1295 if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) 1296 req->hdr.version = 1; 1297 1298 req->hdr.domain = dom; 1299 1300 status = be_mcc_notify_wait(adapter); 1301 if (!status) { 1302 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1303 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { 1304 if (link_speed) 1305 *link_speed = le16_to_cpu(resp->link_speed); 1306 if (mac_speed) 1307 *mac_speed = resp->mac_speed; 1308 } 1309 if (link_status) 1310 *link_status = resp->logical_link_status; 1311 } 1312 1313 err: 1314 spin_unlock_bh(&adapter->mcc_lock); 1315 return status; 1316 } 1317 1318 /* Uses synchronous mcc */ 1319 int be_cmd_get_die_temperature(struct be_adapter *adapter) 1320 { 1321 struct be_mcc_wrb *wrb; 1322 struct be_cmd_req_get_cntl_addnl_attribs *req; 1323 int status; 1324 1325 spin_lock_bh(&adapter->mcc_lock); 1326 1327 wrb = wrb_from_mccq(adapter); 1328 if (!wrb) { 1329 status = -EBUSY; 1330 goto err; 1331 } 1332 req = embedded_payload(wrb); 1333 1334 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1335 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1336 wrb, NULL); 1337 1338 be_mcc_notify(adapter); 1339 1340 err: 1341 spin_unlock_bh(&adapter->mcc_lock); 1342 return status; 1343 } 1344 1345 /* Uses synchronous mcc */ 1346 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) 1347 { 1348 struct be_mcc_wrb *wrb; 1349 struct be_cmd_req_get_fat *req; 1350 int status; 1351 1352 spin_lock_bh(&adapter->mcc_lock); 1353 1354 wrb = wrb_from_mccq(adapter); 1355 if (!wrb) { 1356 status = -EBUSY; 1357 goto err; 1358 } 1359 req = embedded_payload(wrb); 1360 1361 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1362 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); 1363 req->fat_operation = cpu_to_le32(QUERY_FAT); 1364 status = 
/* Retrieve the firmware FAT (failure analysis trace) log into @buf.
 * The log is fetched in 60KB chunks through one reusable DMA buffer;
 * each chunk is a synchronous MANAGE_FAT/RETRIEVE_FAT MCC command.
 * @buf_len is the total number of log bytes the caller wants (0 = no-op).
 * Returns nothing; on chunk failure the loop aborts and @buf is left
 * partially filled. NOTE(review): the local 'status' is assigned but the
 * function is void, so callers cannot observe errors — presumably
 * intentional for this ethtool dump path; confirm with callers.
 */
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
		log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	/* one DMA buffer big enough for a header plus a full 60KB chunk */
	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_fat_cmd.size,
			&get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
				&get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			/* copy only what the firmware actually returned */
			memcpy(buf + offset,
				resp->data_buffer,
				le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			get_fat_cmd.va,
			get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses sycnhronous mcc */
/* Program the VLAN filter table of interface @if_id.
 * @vtag_array/@num: VLAN tags to allow (copied only when not promiscuous).
 * @untagged: also accept untagged frames; @promiscuous: accept all VLANs.
 * NOTE(review): interface_id/num_vlan are written without cpu_to_le
 * conversion — presumably single-byte fields in the request struct;
 * confirm against be_cmd_req_vlan_config's definition.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* vtag_array entries are already little-endian u16 tags */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
= 0; 1577 1578 req->if_flags_mask = req->if_flags = 1579 cpu_to_le32(BE_IF_FLAGS_MULTICAST); 1580 1581 /* Reset mcast promisc mode if already set by setting mask 1582 * and not setting flags field 1583 */ 1584 req->if_flags_mask |= 1585 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1586 1587 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1588 netdev_for_each_mc_addr(ha, adapter->netdev) 1589 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1590 } 1591 1592 status = be_mcc_notify_wait(adapter); 1593 err: 1594 spin_unlock_bh(&adapter->mcc_lock); 1595 return status; 1596 } 1597 1598 /* Uses synchrounous mcc */ 1599 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) 1600 { 1601 struct be_mcc_wrb *wrb; 1602 struct be_cmd_req_set_flow_control *req; 1603 int status; 1604 1605 spin_lock_bh(&adapter->mcc_lock); 1606 1607 wrb = wrb_from_mccq(adapter); 1608 if (!wrb) { 1609 status = -EBUSY; 1610 goto err; 1611 } 1612 req = embedded_payload(wrb); 1613 1614 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1615 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1616 1617 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1618 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 1619 1620 status = be_mcc_notify_wait(adapter); 1621 1622 err: 1623 spin_unlock_bh(&adapter->mcc_lock); 1624 return status; 1625 } 1626 1627 /* Uses sycn mcc */ 1628 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) 1629 { 1630 struct be_mcc_wrb *wrb; 1631 struct be_cmd_req_get_flow_control *req; 1632 int status; 1633 1634 spin_lock_bh(&adapter->mcc_lock); 1635 1636 wrb = wrb_from_mccq(adapter); 1637 if (!wrb) { 1638 status = -EBUSY; 1639 goto err; 1640 } 1641 req = embedded_payload(wrb); 1642 1643 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1644 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1645 1646 status = be_mcc_notify_wait(adapter); 1647 if (!status) { 1648 struct be_cmd_resp_get_flow_control 
*resp = 1649 embedded_payload(wrb); 1650 *tx_fc = le16_to_cpu(resp->tx_flow_control); 1651 *rx_fc = le16_to_cpu(resp->rx_flow_control); 1652 } 1653 1654 err: 1655 spin_unlock_bh(&adapter->mcc_lock); 1656 return status; 1657 } 1658 1659 /* Uses mbox */ 1660 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, 1661 u32 *mode, u32 *caps) 1662 { 1663 struct be_mcc_wrb *wrb; 1664 struct be_cmd_req_query_fw_cfg *req; 1665 int status; 1666 1667 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1668 return -1; 1669 1670 wrb = wrb_from_mbox(adapter); 1671 req = embedded_payload(wrb); 1672 1673 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1674 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); 1675 1676 status = be_mbox_notify_wait(adapter); 1677 if (!status) { 1678 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1679 *port_num = le32_to_cpu(resp->phys_port); 1680 *mode = le32_to_cpu(resp->function_mode); 1681 *caps = le32_to_cpu(resp->function_caps); 1682 } 1683 1684 mutex_unlock(&adapter->mbox_lock); 1685 return status; 1686 } 1687 1688 /* Uses mbox */ 1689 int be_cmd_reset_function(struct be_adapter *adapter) 1690 { 1691 struct be_mcc_wrb *wrb; 1692 struct be_cmd_req_hdr *req; 1693 int status; 1694 1695 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1696 return -1; 1697 1698 wrb = wrb_from_mbox(adapter); 1699 req = embedded_payload(wrb); 1700 1701 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 1702 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); 1703 1704 status = be_mbox_notify_wait(adapter); 1705 1706 mutex_unlock(&adapter->mbox_lock); 1707 return status; 1708 } 1709 1710 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) 1711 { 1712 struct be_mcc_wrb *wrb; 1713 struct be_cmd_req_rss_config *req; 1714 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, 1715 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, 1716 0x3ea83c02, 0x4a110304}; 1717 int status; 1718 1719 
if (mutex_lock_interruptible(&adapter->mbox_lock)) 1720 return -1; 1721 1722 wrb = wrb_from_mbox(adapter); 1723 req = embedded_payload(wrb); 1724 1725 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1726 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 1727 1728 req->if_id = cpu_to_le32(adapter->if_handle); 1729 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 1730 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); 1731 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 1732 memcpy(req->cpu_table, rsstable, table_size); 1733 memcpy(req->hash, myhash, sizeof(myhash)); 1734 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 1735 1736 status = be_mbox_notify_wait(adapter); 1737 1738 mutex_unlock(&adapter->mbox_lock); 1739 return status; 1740 } 1741 1742 /* Uses sync mcc */ 1743 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 1744 u8 bcn, u8 sts, u8 state) 1745 { 1746 struct be_mcc_wrb *wrb; 1747 struct be_cmd_req_enable_disable_beacon *req; 1748 int status; 1749 1750 spin_lock_bh(&adapter->mcc_lock); 1751 1752 wrb = wrb_from_mccq(adapter); 1753 if (!wrb) { 1754 status = -EBUSY; 1755 goto err; 1756 } 1757 req = embedded_payload(wrb); 1758 1759 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1760 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); 1761 1762 req->port_num = port_num; 1763 req->beacon_state = state; 1764 req->beacon_duration = bcn; 1765 req->status_duration = sts; 1766 1767 status = be_mcc_notify_wait(adapter); 1768 1769 err: 1770 spin_unlock_bh(&adapter->mcc_lock); 1771 return status; 1772 } 1773 1774 /* Uses sync mcc */ 1775 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 1776 { 1777 struct be_mcc_wrb *wrb; 1778 struct be_cmd_req_get_beacon_state *req; 1779 int status; 1780 1781 spin_lock_bh(&adapter->mcc_lock); 1782 1783 wrb = wrb_from_mccq(adapter); 1784 if (!wrb) { 1785 status = -EBUSY; 1786 goto err; 1787 } 1788 req = embedded_payload(wrb); 1789 1790 
/* Write one chunk of a flash object on Lancer chips.
 * The data to write sits in @cmd's DMA buffer immediately after the request
 * header (see addr_low/addr_high below). The command is posted async and
 * this thread then sleeps — outside mcc_lock — on flash_compl, which the
 * MCC completion handler signals after stashing the status in
 * adapter->flash_status.
 * NOTE(review): on timeout (status == -1) the code still dereferences the
 * embedded response in @wrb for *addn_status; if the MCCQ has recycled that
 * WRB by then the value may be stale — confirm against WRB reuse policy.
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_written, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	/* completion handler fills this in before signalling flash_compl */
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_WRITE_OBJECT,
			sizeof(struct lancer_cmd_req_write_object), wrb,
			NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	/* a zero-length write marks end-of-file for the object */
	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* payload follows the request header in the same DMA buffer */
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	/* wait up to 30s for the completion handler to post flash_status */
	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status)
		*data_written = le32_to_cpu(resp->actual_write_len);
	else
		*addn_status = resp->additional_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Read the 4-byte CRC stored for the redboot flash section at @offset.
 * Reuses the write_flashrom request layout with a READ/REPORT opcode; the
 * firmware places the CRC bytes in the request's own data buffer, hence
 * the sizeof(*req)+4 payload length. Synchronous MCC command.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* +4 bytes so the embedded payload covers the returned CRC */
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1999 { 2000 struct be_mcc_wrb *wrb; 2001 struct be_cmd_req_acpi_wol_magic_config *req; 2002 int status; 2003 2004 spin_lock_bh(&adapter->mcc_lock); 2005 2006 wrb = wrb_from_mccq(adapter); 2007 if (!wrb) { 2008 status = -EBUSY; 2009 goto err; 2010 } 2011 req = nonemb_cmd->va; 2012 2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2014 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, 2015 nonemb_cmd); 2016 memcpy(req->magic_mac, mac, ETH_ALEN); 2017 2018 status = be_mcc_notify_wait(adapter); 2019 2020 err: 2021 spin_unlock_bh(&adapter->mcc_lock); 2022 return status; 2023 } 2024 2025 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 2026 u8 loopback_type, u8 enable) 2027 { 2028 struct be_mcc_wrb *wrb; 2029 struct be_cmd_req_set_lmode *req; 2030 int status; 2031 2032 spin_lock_bh(&adapter->mcc_lock); 2033 2034 wrb = wrb_from_mccq(adapter); 2035 if (!wrb) { 2036 status = -EBUSY; 2037 goto err; 2038 } 2039 2040 req = embedded_payload(wrb); 2041 2042 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2043 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, 2044 NULL); 2045 2046 req->src_port = port_num; 2047 req->dest_port = port_num; 2048 req->loopback_type = loopback_type; 2049 req->loopback_state = enable; 2050 2051 status = be_mcc_notify_wait(adapter); 2052 err: 2053 spin_unlock_bh(&adapter->mcc_lock); 2054 return status; 2055 } 2056 2057 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2058 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 2059 { 2060 struct be_mcc_wrb *wrb; 2061 struct be_cmd_req_loopback_test *req; 2062 int status; 2063 2064 spin_lock_bh(&adapter->mcc_lock); 2065 2066 wrb = wrb_from_mccq(adapter); 2067 if (!wrb) { 2068 status = -EBUSY; 2069 goto err; 2070 } 2071 2072 req = embedded_payload(wrb); 2073 2074 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2075 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2076 req->hdr.timeout = cpu_to_le32(4); 2077 2078 req->pattern 
/* DDR DMA self-test: fill @cmd's send buffer with @pattern, ask the chip
 * to DMA it out and back, then compare the receive buffer byte-for-byte.
 * Returns 0 on success, -1 on data mismatch or firmware-reported send
 * error, -EBUSY if no WRB is available.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* replicate the 64-bit pattern byte-by-byte across the send buffer */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		/* data must round-trip unchanged and without send errors */
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2156 sge = nonembedded_sgl(wrb); 2157 2158 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2159 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2160 nonemb_cmd); 2161 2162 status = be_mcc_notify_wait(adapter); 2163 2164 err: 2165 spin_unlock_bh(&adapter->mcc_lock); 2166 return status; 2167 } 2168 2169 int be_cmd_get_phy_info(struct be_adapter *adapter) 2170 { 2171 struct be_mcc_wrb *wrb; 2172 struct be_cmd_req_get_phy_info *req; 2173 struct be_dma_mem cmd; 2174 int status; 2175 2176 spin_lock_bh(&adapter->mcc_lock); 2177 2178 wrb = wrb_from_mccq(adapter); 2179 if (!wrb) { 2180 status = -EBUSY; 2181 goto err; 2182 } 2183 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2184 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2185 &cmd.dma); 2186 if (!cmd.va) { 2187 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2188 status = -ENOMEM; 2189 goto err; 2190 } 2191 2192 req = cmd.va; 2193 2194 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2195 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2196 wrb, &cmd); 2197 2198 status = be_mcc_notify_wait(adapter); 2199 if (!status) { 2200 struct be_phy_info *resp_phy_info = 2201 cmd.va + sizeof(struct be_cmd_req_hdr); 2202 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 2203 adapter->phy.interface_type = 2204 le16_to_cpu(resp_phy_info->interface_type); 2205 adapter->phy.auto_speeds_supported = 2206 le16_to_cpu(resp_phy_info->auto_speeds_supported); 2207 adapter->phy.fixed_speeds_supported = 2208 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2209 adapter->phy.misc_params = 2210 le32_to_cpu(resp_phy_info->misc_params); 2211 } 2212 pci_free_consistent(adapter->pdev, cmd.size, 2213 cmd.va, cmd.dma); 2214 err: 2215 spin_unlock_bh(&adapter->mcc_lock); 2216 return status; 2217 } 2218 2219 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2220 { 2221 struct be_mcc_wrb *wrb; 2222 struct be_cmd_req_set_qos *req; 2223 int status; 2224 2225 spin_lock_bh(&adapter->mcc_lock); 
2226 2227 wrb = wrb_from_mccq(adapter); 2228 if (!wrb) { 2229 status = -EBUSY; 2230 goto err; 2231 } 2232 2233 req = embedded_payload(wrb); 2234 2235 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2236 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2237 2238 req->hdr.domain = domain; 2239 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 2240 req->max_bps_nic = cpu_to_le32(bps); 2241 2242 status = be_mcc_notify_wait(adapter); 2243 2244 err: 2245 spin_unlock_bh(&adapter->mcc_lock); 2246 return status; 2247 } 2248 2249 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 2250 { 2251 struct be_mcc_wrb *wrb; 2252 struct be_cmd_req_cntl_attribs *req; 2253 struct be_cmd_resp_cntl_attribs *resp; 2254 int status; 2255 int payload_len = max(sizeof(*req), sizeof(*resp)); 2256 struct mgmt_controller_attrib *attribs; 2257 struct be_dma_mem attribs_cmd; 2258 2259 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2260 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2261 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2262 &attribs_cmd.dma); 2263 if (!attribs_cmd.va) { 2264 dev_err(&adapter->pdev->dev, 2265 "Memory allocation failure\n"); 2266 return -ENOMEM; 2267 } 2268 2269 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2270 return -1; 2271 2272 wrb = wrb_from_mbox(adapter); 2273 if (!wrb) { 2274 status = -EBUSY; 2275 goto err; 2276 } 2277 req = attribs_cmd.va; 2278 2279 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2280 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, 2281 &attribs_cmd); 2282 2283 status = be_mbox_notify_wait(adapter); 2284 if (!status) { 2285 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2286 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2287 } 2288 2289 err: 2290 mutex_unlock(&adapter->mbox_lock); 2291 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, 2292 attribs_cmd.dma); 2293 return status; 2294 } 2295 2296 /* Uses mbox */ 2297 int 
/* Uses synchronous MCCQ */
/* Fetch the MAC list for @domain and return either the first active
 * mac_id (sets *pmac_id_active = true and *pmac_id) or, failing that, the
 * first pseudo-permanent MAC address (copied into @mac).
 * Entries are distinguished by mac_addr_size: a 4-byte entry is a mac_id,
 * a 6-byte entry is a MAC address.
 * Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
			bool *pmac_id_active, u32 *pmac_id, u8 *mac)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	/* response is too big for the embedded payload; use DMA memory */
	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_mac_list_cmd.size,
			&get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more pseudo permanant mac addresses. If an active
		 * mac_id is present, return first active mac_id found
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first pseudo mac addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
								ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
failure\n"); 2427 return -ENOMEM; 2428 } 2429 2430 spin_lock_bh(&adapter->mcc_lock); 2431 2432 wrb = wrb_from_mccq(adapter); 2433 if (!wrb) { 2434 status = -EBUSY; 2435 goto err; 2436 } 2437 2438 req = cmd.va; 2439 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2440 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2441 wrb, &cmd); 2442 2443 req->hdr.domain = domain; 2444 req->mac_count = mac_count; 2445 if (mac_count) 2446 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); 2447 2448 status = be_mcc_notify_wait(adapter); 2449 2450 err: 2451 dma_free_coherent(&adapter->pdev->dev, cmd.size, 2452 cmd.va, cmd.dma); 2453 spin_unlock_bh(&adapter->mcc_lock); 2454 return status; 2455 } 2456 2457 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 2458 u32 domain, u16 intf_id) 2459 { 2460 struct be_mcc_wrb *wrb; 2461 struct be_cmd_req_set_hsw_config *req; 2462 void *ctxt; 2463 int status; 2464 2465 spin_lock_bh(&adapter->mcc_lock); 2466 2467 wrb = wrb_from_mccq(adapter); 2468 if (!wrb) { 2469 status = -EBUSY; 2470 goto err; 2471 } 2472 2473 req = embedded_payload(wrb); 2474 ctxt = &req->context; 2475 2476 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2477 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2478 2479 req->hdr.domain = domain; 2480 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 2481 if (pvid) { 2482 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 2483 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 2484 } 2485 2486 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2487 status = be_mcc_notify_wait(adapter); 2488 2489 err: 2490 spin_unlock_bh(&adapter->mcc_lock); 2491 return status; 2492 } 2493 2494 /* Get Hyper switch config */ 2495 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 2496 u32 domain, u16 intf_id) 2497 { 2498 struct be_mcc_wrb *wrb; 2499 struct be_cmd_req_get_hsw_config *req; 2500 void *ctxt; 2501 int status; 2502 u16 vid; 2503 2504 
spin_lock_bh(&adapter->mcc_lock); 2505 2506 wrb = wrb_from_mccq(adapter); 2507 if (!wrb) { 2508 status = -EBUSY; 2509 goto err; 2510 } 2511 2512 req = embedded_payload(wrb); 2513 ctxt = &req->context; 2514 2515 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2516 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2517 2518 req->hdr.domain = domain; 2519 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, 2520 intf_id); 2521 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 2522 be_dws_cpu_to_le(req->context, sizeof(req->context)); 2523 2524 status = be_mcc_notify_wait(adapter); 2525 if (!status) { 2526 struct be_cmd_resp_get_hsw_config *resp = 2527 embedded_payload(wrb); 2528 be_dws_le_to_cpu(&resp->context, 2529 sizeof(resp->context)); 2530 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 2531 pvid, &resp->context); 2532 *pvid = le16_to_cpu(vid); 2533 } 2534 2535 err: 2536 spin_unlock_bh(&adapter->mcc_lock); 2537 return status; 2538 } 2539 2540 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 2541 { 2542 struct be_mcc_wrb *wrb; 2543 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 2544 int status; 2545 int payload_len = sizeof(*req); 2546 struct be_dma_mem cmd; 2547 2548 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2549 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 2550 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2551 &cmd.dma); 2552 if (!cmd.va) { 2553 dev_err(&adapter->pdev->dev, 2554 "Memory allocation failure\n"); 2555 return -ENOMEM; 2556 } 2557 2558 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2559 return -1; 2560 2561 wrb = wrb_from_mbox(adapter); 2562 if (!wrb) { 2563 status = -EBUSY; 2564 goto err; 2565 } 2566 2567 req = cmd.va; 2568 2569 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2570 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 2571 payload_len, wrb, &cmd); 2572 2573 req->hdr.version = 1; 2574 req->query_options = BE_GET_WOL_CAP; 2575 2576 status = 
be_mbox_notify_wait(adapter); 2577 if (!status) { 2578 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 2579 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; 2580 2581 /* the command could succeed misleadingly on old f/w 2582 * which is not aware of the V1 version. fake an error. */ 2583 if (resp->hdr.response_length < payload_len) { 2584 status = -1; 2585 goto err; 2586 } 2587 adapter->wol_cap = resp->wol_settings; 2588 } 2589 err: 2590 mutex_unlock(&adapter->mbox_lock); 2591 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2592 return status; 2593 2594 } 2595 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 2596 struct be_dma_mem *cmd) 2597 { 2598 struct be_mcc_wrb *wrb; 2599 struct be_cmd_req_get_ext_fat_caps *req; 2600 int status; 2601 2602 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2603 return -1; 2604 2605 wrb = wrb_from_mbox(adapter); 2606 if (!wrb) { 2607 status = -EBUSY; 2608 goto err; 2609 } 2610 2611 req = cmd->va; 2612 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2613 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, 2614 cmd->size, wrb, cmd); 2615 req->parameter_type = cpu_to_le32(1); 2616 2617 status = be_mbox_notify_wait(adapter); 2618 err: 2619 mutex_unlock(&adapter->mbox_lock); 2620 return status; 2621 } 2622 2623 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 2624 struct be_dma_mem *cmd, 2625 struct be_fat_conf_params *configs) 2626 { 2627 struct be_mcc_wrb *wrb; 2628 struct be_cmd_req_set_ext_fat_caps *req; 2629 int status; 2630 2631 spin_lock_bh(&adapter->mcc_lock); 2632 2633 wrb = wrb_from_mccq(adapter); 2634 if (!wrb) { 2635 status = -EBUSY; 2636 goto err; 2637 } 2638 2639 req = cmd->va; 2640 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 2641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2642 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, 2643 cmd->size, wrb, cmd); 2644 2645 status = be_mcc_notify_wait(adapter); 2646 err: 2647 
spin_unlock_bh(&adapter->mcc_lock); 2648 return status; 2649 } 2650 2651 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 2652 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 2653 { 2654 struct be_adapter *adapter = netdev_priv(netdev_handle); 2655 struct be_mcc_wrb *wrb; 2656 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; 2657 struct be_cmd_req_hdr *req; 2658 struct be_cmd_resp_hdr *resp; 2659 int status; 2660 2661 spin_lock_bh(&adapter->mcc_lock); 2662 2663 wrb = wrb_from_mccq(adapter); 2664 if (!wrb) { 2665 status = -EBUSY; 2666 goto err; 2667 } 2668 req = embedded_payload(wrb); 2669 resp = embedded_payload(wrb); 2670 2671 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 2672 hdr->opcode, wrb_payload_size, wrb, NULL); 2673 memcpy(req, wrb_payload, wrb_payload_size); 2674 be_dws_cpu_to_le(req, wrb_payload_size); 2675 2676 status = be_mcc_notify_wait(adapter); 2677 if (cmd_status) 2678 *cmd_status = (status & 0xffff); 2679 if (ext_status) 2680 *ext_status = 0; 2681 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 2682 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 2683 err: 2684 spin_unlock_bh(&adapter->mcc_lock); 2685 return status; 2686 } 2687 EXPORT_SYMBOL(be_roce_mcc_cmd); 2688