/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ethtool.h>
#include <linux/pci.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"

/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's).  Return an error if it doesn't
 * become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order
 * (since the firmware data structures are specified in a big-endian layout).
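 * Each 64-bit word is read with t4_read_reg64() and converted with
 * cpu_to_be64() below, so the caller sees the reply exactly as the firmware
 * wrote it.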
 */
static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
			 u32 mbox_data)
{
	for ( ; size; size -= 8, mbox_data += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}

/**
 * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
 * @adapter: the adapter
 * @cmd: the Firmware Mailbox Command or Reply
 * @size: command length in bytes
 * @access: the time (ms) needed to access the Firmware Mailbox
 * @execute: the time (ms) the command spent being executed
 */
static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
			     int size, int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	for (i = 0; i < size / 8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN / 8)
		entry->cmd[i++] = 0;
	entry->timestamp = jiffies;
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

/**
 * t4vf_wr_mbox_core - send a command to FW through the mailbox
 * @adapter: the adapter
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the mailbox and waits for the
 * FW to execute the command.  If @rpl is not %NULL it is used to store
 * the FW's reply to the command.  The command and its optional reply
 * are of the same length.  FW can take up to 500 ms to respond.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u16 access = 0, execute = 0;
	u32 v, mbox_data;
	int i, ms, delay_idx, ret;
	const __be64 *p;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
	__be64 cmd_rpl[MBOX_LEN / 8];
	struct mbox_list entry;

	/* In T6, mailbox size is changed to 128 bytes to avoid
	 * invalidating the entire prefetch buffer.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		mbox_data = T4VF_MBDATA_BASE_ADDR;
	else
		mbox_data = T6VF_MBDATA_BASE_ADDR;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
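	 * (With @sleep_ok the wait below backs off through the delay[] table
	 * above, ramping from 1 ms to 100 ms per check, and gives up once
	 * FW_CMD_MAX_TIMEOUT is exceeded.)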
	 */
	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		if (i > FW_CMD_MAX_TIMEOUT) {
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			ret = -EBUSY;
			t4vf_record_mbox(adapter, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adapter->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV) {
		spin_lock(&adapter->mbox_lock);
		list_del(&entry.list);
		spin_unlock(&adapter->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4vf_record_mbox(adapter, cmd, size, access, ret);
		return ret;
	}

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	if (cmd_op != FW_VI_STATS_CMD)
		t4vf_record_mbox(adapter, cmd, size, access, 0);
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);	/* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);		/* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.
			 * Extract the command return value, copy the reply
			 * back to our caller's buffer (if specified) and
			 * revoke ownership of the mailbox.  We return the
			 * (negated) firmware command return code (this
			 * depends on FW_SUCCESS == 0).
			 */
			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);

			/* return value in low-order little-endian word */
			v = be64_to_cpu(cmd_rpl[0]);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				memcpy(rpl, cmd_rpl, size);
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			execute = i + ms;
			if (cmd_op != FW_VI_STATS_CMD)
				t4vf_record_mbox(adapter, cmd_rpl, size, access,
						 execute);
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/* We timed out.  Return the error ... */
	ret = -ETIMEDOUT;
	t4vf_record_mbox(adapter, cmd, size, access, ret);
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);
	return ret;
}

/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
 * mask out bits in the Advertised Port Capabilities which are managed via
 * separate controls, like Pause Frames and Forward Error Correction.  In the
 * Virtual Function Common Code, since we never perform L1 Configuration on
 * the Link, the only things we really need to filter out are things which
 * we decode and report separately like Speed.
 */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_802_3_PAUSE | \
		     FW_PORT_CAP32_802_3_ASM_DIR | \
		     FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
		     FW_PORT_CAP32_ANEG)

/**
 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 * @caps16: a 16-bit Port Capabilities value
 *
 * Returns the equivalent 32-bit Port Capabilities value.
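 *
 * (Illustrative example: a 16-bit value with FW_PORT_CAP_SPEED_10G and
 * FW_PORT_CAP_FC_RX set translates to
 * FW_PORT_CAP32_SPEED_10G | FW_PORT_CAP32_FC_RX.)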
 */
static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

#define CAP16_TO_CAP32(__cap) \
	do { \
		if (caps16 & FW_PORT_CAP_##__cap) \
			caps32 |= FW_PORT_CAP32_##__cap; \
	} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

#undef CAP16_TO_CAP32

	return caps32;
}

/* Translate Firmware Pause specification to Common Code */
static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
{
	enum cc_pause cc_pause = 0;

	if (fw_pause & FW_PORT_CAP32_FC_RX)
		cc_pause |= PAUSE_RX;
	if (fw_pause & FW_PORT_CAP32_FC_TX)
		cc_pause |= PAUSE_TX;

	return cc_pause;
}

/* Translate Firmware Forward Error Correction specification to Common Code */
static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

/* Return the highest speed set in the port capabilities, in Mb/s. */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
	do { \
		if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
			return __speed; \
	} while (0)

	TEST_SPEED_RETURN(400G, 400000);
	TEST_SPEED_RETURN(200G, 200000);
	TEST_SPEED_RETURN(100G, 100000);
	TEST_SPEED_RETURN(50G, 50000);
	TEST_SPEED_RETURN(40G, 40000);
	TEST_SPEED_RETURN(25G, 25000);
	TEST_SPEED_RETURN(10G, 10000);
	TEST_SPEED_RETURN(1G, 1000);
	TEST_SPEED_RETURN(100M, 100);

#undef TEST_SPEED_RETURN

	return 0;
}

/**
 * fwcap_to_fwspeed - return highest speed in Port Capabilities
 * @acaps: advertised Port Capabilities
 *
 * Get the highest speed for the port from the advertised Port
 * Capabilities.  It will be either the highest speed from the list of
 * speeds or whatever the user has set using ethtool.
 */
static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
#define TEST_SPEED_RETURN(__caps_speed) \
	do { \
		if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
			return FW_PORT_CAP32_SPEED_##__caps_speed; \
	} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

#undef TEST_SPEED_RETURN
	return 0;
}

/*
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
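 *
 * (In short: if the port reports FW_PORT_CAP32_ANEG, autonegotiation is
 * enabled and @acaps is filtered through ADVERT_MASK; otherwise the highest
 * advertised speed is selected via fwcap_to_fwspeed().)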
 */
static void init_link_config(struct link_config *lc,
			     fw_port_cap32_t pcaps,
			     fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;

	/* For Forward Error Correction, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->auto_fec = fwcap_to_cc_fec(acaps);
	lc->requested_fec = FEC_AUTO;
	lc->fec = lc->auto_fec;

	/* If the Port is capable of Auto-Negotiation, initialize it as
	 * "enabled" and copy over all of the Physical Port Capabilities
	 * to the Advertised Port Capabilities.  Otherwise mark it as
	 * Auto-Negotiate disabled and select the highest supported speed
	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
	 * and t4_handle_get_port_info().
	 */
	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = acaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
		lc->speed_caps = fwcap_to_fwspeed(acaps);
	}
}

/**
 * t4vf_port_init - initialize port hardware/software state
 * @adapter: the adapter
 * @pidx: the adapter port index
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4vf_set_params(adapter, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 ?
				     FW_PORT_ACTION_GET_PORT_INFO :
				     FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(port_cmd));
	ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	/* Extract the various fields from the Port Information message. */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.acap));
	} else {
		u32 lstatus32 =
			be32_to_cpu(port_rpl.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
		acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
	}

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_query_params - query FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Reads the values of firmware or device parameters.  Up to 7 parameters
 * can be queried at once.
 */
static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
			     const u32 *params, u32 *vals)
{
	int i, ret;
	struct fw_params_cmd cmd, rpl;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams].mnem), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
		p->mnem = htonl(*params++);

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0)
		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
			*vals++ = be32_to_cpu(p->val);
	return ret;
}

/**
 * t4vf_set_params - sets FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Sets the values of firmware or device parameters.  Up to 7 parameters
 * can be specified at once.
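 *
 * (For example, t4vf_port_init() above uses a single-parameter call to set
 * FW_PARAMS_PARAM_PFVF_PORT_CAPS32 to 1, which doubles as a probe for
 * whether the firmware understands 32-bit Port Capabilities.)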
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_fl_pkt_align - return the fl packet alignment
 * @adapter: the adapter
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.  And T6 changes the
 * Ingress Padding Boundary Shift, so it's all a mess and it's best
 * if we put this in low-level Common Code ...
 *
 */
int t4vf_fl_pkt_align(struct adapter *adapter)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = adapter->params.sge.sge_control;

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values in T6 start from 8B, whereas
	 * they are 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
	else
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adapter->params.chip)) {
		/* T5 has a different interpretation of one of the PCIe
		 * Packing Boundary values.
		 */
		sge_control2 = adapter->params.sge.sge_control2;
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}

/**
 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
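 *
 * (Illustrative example, not from the original source: with a 4KB SGE Page
 * Size (sge_vf_hps == 2) and 4 Egress Queues per Page (qpp_shift == 2),
 * Egress Queue ID 10 lands in BAR2 page 2, i.e. *@pbar2_qoffset is 0x2000
 * plus 2 * SGE_UDB_SIZE; since that offset fits within the page, the Queue
 * ID is inferred by the hardware and *@pbar2_qid comes back as 0.)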
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4vf_bar2_sge_qregs(struct adapter *adapter,
			unsigned int qid,
			enum t4_bar2_qtype qtype,
			u64 *pbar2_qoffset,
			unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
{
	u32 whoami;

	whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
	return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
}

/**
 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.
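 * (The values are obtained with FW_PARAMS_MNEM_REG firmware queries rather
 * than direct register reads.)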
 * The caller is responsible for decoding these as needed.  The SGE
 * parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow this register to be
	 * read.
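	 * (The per-VF Host Page Size and Queues/Page shifts derived below are
	 * what t4vf_bar2_sge_qregs() uses to map an Absolute Queue ID onto a
	 * BAR2 doorbell address.)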
	 */
	if (!is_t4(adapter->params.chip)) {
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		pf = t4vf_get_pf_from_vf(adapter);
		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters.  The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves various device parameters.  The parameters are stored in
 * @adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	vfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_G(word);
	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

/**
 * t4vf_read_rss_vi_config - read a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Reads the Virtual Interface's RSS configuration information and
 * translates it into CPU-native format.
 */
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
			    union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;
	int v;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_READ_F |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);

		config->basicvirtual.ip6fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
		config->basicvirtual.ip6twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
		config->basicvirtual.ip4fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
		config->basicvirtual.ip4twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
		config->basicvirtual.udpen =
			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
		config->basicvirtual.defaultq =
			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_write_rss_vi_config - write a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Write the Virtual Interface's RSS configuration information
 * (translating it into firmware-native format before writing).
 */
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
			     union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = 0;

		if (config->basicvirtual.ip6fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
		if (config->basicvirtual.ip6twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
		if (config->basicvirtual.ip4fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
		if (config->basicvirtual.ip4twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
		if (config->basicvirtual.udpen)
			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
				config->basicvirtual.defaultq);
		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}

/**
 * t4vf_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @viid: Virtual Interface of RSS Table Slice
 * @start: starting entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
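		 * (For example, with @rspq = {2, 3} and @n = 4, the entries
		 * written are 2, 3, 2, 3: the walk through @rspq wraps back
		 * to the start whenever it reaches @nrspq entries.)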
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4vf_alloc_vi - allocate a virtual interface on a port
 * @adapter: the adapter
 * @port_id: physical port associated with the VI
 *
 * Allocate a new Virtual Interface and bind it to the indicated
 * physical port.  Return the new Virtual Interface Identifier on
 * success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC_F);
	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}

/**
 * t4vf_free_vi -- free a virtual interface
 * @adapter: the adapter
 * @viid: the virtual interface identifier
 *
 * Free a previously allocated Virtual Interface.  Return an error on
 * failure.
 */
int t4vf_free_vi(struct adapter *adapter, int viid)
{
	struct fw_vi_cmd cmd;

	/*
	 * Execute a VI command to free the Virtual Interface.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_FREE_F);
	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_enable_vi - enable/disable a virtual interface
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
		   bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
				       FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_enable_pi - enable/disable a Port's virtual interface
 * @adapter: the adapter
 * @pi: the Port Information structure
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a Port's virtual interface.  If the Virtual
 * Interface enable/disable operation is successful, we notify the
 * OS-specific code of a potential Link Status change via the OS Contract
 * API t4vf_os_link_changed().
 */
int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi,
		   bool rx_en, bool tx_en)
{
	int ret = t4vf_enable_vi(adapter, pi->viid, rx_en, tx_en);

	if (ret)
		return ret;
	t4vf_os_link_changed(adapter, pi->pidx,
			     rx_en && tx_en && pi->link_cfg.link_ok);
	return 0;
}

/**
 * t4vf_identify_port - identify a VI's port by blinking its LED
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
		       unsigned int nblinks)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
				       FW_LEN16(cmd));
	cmd.blinkdur = cpu_to_be16(nblinks);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_set_rxmode - set Rx properties of a virtual interface
 * @adapter: the adapter
 * @viid: the VI id
 * @mtu: the new MTU or -1 for no change
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *	-1 no change
 * @sleep_ok: call is allowed to sleep
 *
 * Sets Rx properties of a virtual interface.
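 *
 * (Arguments passed as -1 are converted below to the corresponding field's
 * all-ones mask value before being sent to the firmware, preserving the
 * "no change" semantics described above.)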
 */
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
		    bool sleep_ok)
{
	struct fw_vi_rxmode_cmd cmd;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_VI_RXMODE_CMD_MTU_M;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_RXMODE_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.mtu_to_vlanexen =
		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adapter: the adapter
 * @viid: the Virtual Interface Identifier
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	unsigned nfilters = 0;
	unsigned int rem = naddr;
	struct fw_vi_mac_cmd cmd, rpl;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
					 ? rem
					 : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     (free ?
					      FW_CMD_EXEC_F : 0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
					sleep_ok);
		if (ret && ret != -ENOMEM)
			break;

		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] =
					(index >= max_naddr
					 ? 0xffff
					 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/*
	 * If there were no errors or we merely ran out of room in our MAC
	 * address arena, return the number of filters actually written.
	 */
	if (ret == 0 || ret == -ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
 * @adapter: the adapter
 * @viid: the VI id
 * @naddr: the number of MAC addresses to free filters for (up to 7)
 * @addr: the MAC address(es)
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the exact-match filter for each of the supplied addresses.
 *
 * Returns a negative error number or the number of filters freed.
 */
int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
		       unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd cmd;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < (int)naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
					 rem : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     FW_CMD_EXEC_V(0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
					sleep_ok);
		if (ret)
			break;

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}

/**
 * t4vf_change_mac - modifies the exact-match filter for a MAC address
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: if idx < 0, the new MAC allocation should be persistent
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new
 * addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ?
/**
 *	t4vf_change_mac - modifies the exact-match filter for a MAC address
 *	@adapter: the adapter
 *	@viid: the Virtual Interface ID
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: if idx < 0, the new MAC allocation should be persistent
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the
 *	one being used by the old address value and allocate a new filter for
 *	the new address value.  @idx can be -1 if the address is a new
 *	addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0) {
		p = &rpl.u.exact[0];
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
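/*
 * Typical use (sketch only): programming a new station address for a
 * Virtual Interface and remembering the returned filter index for the
 * next change.  "pi->xact_addr_filt" is a hypothetical cache of the
 * current index kept by the caller:
 *
 *	ret = t4vf_change_mac(adapter, pi->viid, pi->xact_addr_filt,
 *			      dev->dev_addr, true);
 *	if (ret >= 0) {
 *		pi->xact_addr_filt = ret;	// remember for the next change
 *		ret = 0;
 *	}
 *
 * Passing the previous index lets the firmware reuse the same exact-match
 * slot instead of consuming a new one.
 */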
/**
 *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
 *	@adapter: the adapter
 *	@viid: the Virtual Interface Identifier
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
		       bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd cmd;
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[0]), 16);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
					    FW_CMD_LEN16_V(len16));
	cmd.u.hash.hashvec = cpu_to_be64(vec);
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 *	t4vf_get_port_stats - collect "port" statistics
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@s: the stats structure to fill
 *
 *	Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}

/**
 *	t4vf_iq_free - free an ingress queue and its free lists
 *	@adapter: the adapter
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue ID
 *	@fl0id: FL0 queue ID or 0xffff if no attached FL0
 *	@fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 *	t4vf_eth_eq_free - free an Ethernet egress queue
 *	@adapter: the adapter
 *	@eqid: egress queue ID
 *
 *	Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
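/*
 * Teardown sketch (illustrative only; the queue structure field names
 * below are placeholders): a queue-set release path would typically free
 * the egress queue first and then the ingress queue together with its
 * free list:
 *
 *	t4vf_eth_eq_free(adapter, txq_id);
 *	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
 *		     rspq_id, fl_id ? fl_id : 0xffff, 0xffff);
 *
 * Passing 0xffff for fl0id/fl1id tells the firmware that no free list of
 * that kind is attached, per the kernel-doc above.
 */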
/**
 *	t4vf_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/**
 *	t4vf_handle_get_port_info - process a FW reply message
 *	@pi: the port info
 *	@cmd: start of the FW message
 *
 *	Processes a GET_PORT_INFO FW reply message.
 */
static void t4vf_handle_get_port_info(struct port_info *pi,
				      const struct fw_port_cmd *cmd)
{
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	unsigned int speed, fc, fec, adv_fc;
	enum fw_port_module_type mod_type;
	int action, link_ok, linkdnrc;
	enum fw_port_type port_type;

	/* Extract the various fields from the Port Information message. */
	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
	switch (action) {
	case FW_PORT_ACTION_GET_PORT_INFO: {
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));

		/* Unfortunately the format of the Link Status in the old
		 * 16-bit Port Information message isn't the same as the
		 * 16-bit Port Capabilities bitfield used everywhere else ...
		 */
		linkattr = 0;
		if (lstatus & FW_PORT_CMD_RXPAUSE_F)
			linkattr |= FW_PORT_CAP32_FC_RX;
		if (lstatus & FW_PORT_CMD_TXPAUSE_F)
			linkattr |= FW_PORT_CAP32_FC_TX;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			linkattr |= FW_PORT_CAP32_SPEED_100M;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			linkattr |= FW_PORT_CAP32_SPEED_1G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			linkattr |= FW_PORT_CAP32_SPEED_10G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
			linkattr |= FW_PORT_CAP32_SPEED_25G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			linkattr |= FW_PORT_CAP32_SPEED_40G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
			linkattr |= FW_PORT_CAP32_SPEED_100G;

		break;
	}

	case FW_PORT_ACTION_GET_PORT_INFO32: {
		u32 lstatus32;

		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
			be32_to_cpu(cmd->action_to_len16));
		return;
	}

	fec = fwcap_to_cc_fec(acaps);
	adv_fc = fwcap_to_cc_pause(acaps);
	fc = fwcap_to_cc_pause(linkattr);
	speed = fwcap_to_speed(linkattr);

	if (mod_type != pi->mod_type) {
		/* When a new Transceiver Module is inserted, the Firmware
		 * will examine any Forward Error Correction parameters
		 * present in the Transceiver Module i2c EEPROM and determine
		 * the supported and recommended FEC settings from those
		 * based on IEEE 802.3 standards.  We always record the
		 * IEEE 802.3 recommended "automatic" settings.
		 */
		lc->auto_fec = fec;

		/* Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlying Port Type reported to the Host Drivers.  As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier.  Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		pi->mod_type = mod_type;
		t4vf_os_portmod_changed(adapter, pi->pidx);
	}

	if (link_ok != lc->link_ok || speed != lc->speed ||
	    fc != lc->fc || adv_fc != lc->advertised_fc ||
	    fec != lc->fec) {
		/* something changed */
		if (!link_ok && lc->link_ok) {
			lc->link_down_rc = linkdnrc;
			dev_warn_ratelimited(adapter->pdev_dev,
					     "Port %d link down, reason: %s\n",
					     pi->port_id,
					     t4vf_link_down_rc_str(linkdnrc));
		}
		lc->link_ok = link_ok;
		lc->speed = speed;
		lc->advertised_fc = adv_fc;
		lc->fc = fc;
		lc->fec = fec;

		lc->pcaps = pcaps;
		lc->lpacaps = lpacaps;
		lc->acaps = acaps & ADVERT_MASK;

		/* If we're not physically capable of Auto-Negotiation, note
		 * this as Auto-Negotiation disabled.  Otherwise, we track
		 * what Auto-Negotiation settings we have.  Note parallel
		 * structure in init_link_config().
		 */
		if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
			lc->autoneg = AUTONEG_DISABLE;
		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
			lc->autoneg = AUTONEG_ENABLE;
		} else {
			/* When Autoneg is disabled, user needs to set
			 * single speed.
			 * Similar to cxgb4_ethtool.c: set_link_ksettings
			 */
			lc->acaps = 0;
			lc->speed_caps = fwcap_to_speed(acaps);
			lc->autoneg = AUTONEG_DISABLE;
		}

		t4vf_os_link_changed(adapter, pi->pidx, link_ok);
	}
}

/**
 *	t4vf_update_port_info - retrieve and update port information if changed
 *	@pi: the port_info
 *
 *	We issue a Get Port Information Command to the Firmware and, if
 *	successful, we check to see if anything is different from what we
 *	last recorded and update things accordingly.
 */
int t4vf_update_port_info(struct port_info *pi)
{
	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
	struct fw_port_cmd port_cmd;
	int ret;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(port_cmd));
	ret = t4vf_wr_mbox(pi->adapter, &port_cmd, sizeof(port_cmd),
			   &port_cmd);
	if (ret)
		return ret;
	t4vf_handle_get_port_info(pi, &port_cmd);
	return 0;
}
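/*
 * Illustrative caller (not part of this file): anything that needs fresh
 * link parameters, e.g. an ethtool handler, can refresh the port_info and
 * then read the cached link_config:
 *
 *	struct port_info *pi = netdev_priv(dev);
 *	int err = t4vf_update_port_info(pi);
 *
 *	if (err)
 *		return err;
 *	// pi->link_cfg.speed, .fc, .fec etc. are now up to date
 *
 * netdev_priv() returning a port_info is an assumption about how the
 * net_device was allocated; the call itself only needs a valid pi.
 */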
/**
 *	t4vf_handle_fw_rpl - process a firmware reply message
 *	@adapter: the adapter
 *	@rpl: start of the firmware message
 *
 *	Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		int action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		int port_id, pidx;

		if (action != FW_PORT_ACTION_GET_PORT_INFO &&
		    action != FW_PORT_ACTION_GET_PORT_INFO32) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);

			if (pi->port_id != port_id)
				continue;
			t4vf_handle_get_port_info(pi, port_cmd);
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}
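/*
 * Dispatch sketch (illustrative only): asynchronous firmware messages
 * arrive on an ingress queue as arrays of __be64 words; a handler that
 * only cares about link/module events can simply forward each message
 * here and let the opcode switch above sort it out:
 *
 *	static int fw_msg_handler(struct adapter *adapter, const __be64 *msg)
 *	{
 *		return t4vf_handle_fw_rpl(adapter, msg);
 *	}
 *
 * How the message words are extracted from the response queue is SGE
 * code and is assumed rather than shown here.
 */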
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}

/**
 *	t4vf_get_vf_mac_acl - Get the MAC address to be set to the VI of this VF
 *	@adapter: The adapter
 *	@port: The port associated with vf
 *	@naddr: the number of ACL MAC addresses returned in addr
 *	@addr: Placeholder for MAC addresses
 *
 *	Find the MAC address to be set to the VF's VI.  The requested MAC
 *	address is from the host OS via callback in the PF driver.
 */
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
			unsigned int *naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	if (cmd.nmac < *naddr)
		*naddr = cmd.nmac;

	switch (port) {
	case 3:
		memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
		break;
	}

	return ret;
}

/**
 *	t4vf_get_vf_vlan_acl - Get the VLAN ID to be set to the VI of this VF
 *	@adapter: The adapter
 *
 *	Find the VLAN ID to be set to the VF's VI.  The requested VLAN ID
 *	is from the host OS via callback in the PF driver.
 */
int t4vf_get_vf_vlan_acl(struct adapter *adapter)
{
	struct fw_acl_vlan_cmd cmd;
	int vlan = 0;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F);

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
	if (!ret)
		vlan = be16_to_cpu(cmd.vlanid[0]);

	return vlan;
}
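/*
 * Bring-up sketch (illustrative only): a VF that wants to honour MAC/VLAN
 * settings pushed down by the PF could query both ACLs once its Virtual
 * Interface exists.  The local variables are placeholders:
 *
 *	u8 mac[6];
 *	unsigned int naddr = 1;
 *	int vlan;
 *
 *	if (t4vf_get_vf_mac_acl(adapter, pi->port_id, &naddr, mac) == 0 &&
 *	    naddr > 0)
 *		t4vf_change_mac(adapter, pi->viid, -1, mac, true);
 *
 *	vlan = t4vf_get_vf_vlan_acl(adapter);
 *	if (vlan > 0)
 *		;	// program the VLAN on the VI via the usual VI commands
 *
 * t4vf_get_vf_vlan_acl() returns 0 when no VLAN has been administratively
 * set (or on any mailbox error), so only a positive value is acted upon.
 */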