// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "i40e_adminq_cmd.h"
#include "i40e_devids.h"
#include "i40e_prototype.h"
#include "i40e_register.h"

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
int i40e_set_mac_type(struct i40e_hw *hw)
{
	int status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = -ENODEV;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}
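
/*
 * Usage note (illustrative, not part of the original file): callers in the
 * i40e driver typically pair an admin queue return code with i40e_aq_str()
 * when logging a failed command, along the lines of
 *
 *	dev_info(&pf->pdev->dev, "AQ command failed, err %pe aq_err %s\n",
 *		 ERR_PTR(err), i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * where 'pf' and 'err' are assumed to come from the calling code.
 */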

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
			   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
				   u16 vsi_id, bool pf_lut,
				   u8 *lut, u16 lut_size,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
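
/*
 * Example (illustrative sketch, not part of the original file): programming
 * a per-VSI lookup table. 'vsi->id' is the VSI's firmware index; the LUT
 * fill helper is hypothetical and assumed to live in the calling driver code.
 *
 *	u8 lut[128];
 *
 *	fill_rss_lut(lut, sizeof(lut), vsi->rss_size);	// hypothetical helper
 *	if (i40e_aq_set_rss_lut(hw, vsi->id, false, lut, sizeof(lut)))
 *		dev_info(&pf->pdev->dev, "set RSS LUT failed: %s\n",
 *			 i40e_aq_str(hw, hw->aq.asq_last_status));
 */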

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get the RSS key per VSI
 **/
static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				   u16 vsi_id,
				   struct i40e_aqc_get_set_rss_key_data *key,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 **/
int i40e_aq_get_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
int i40e_aq_set_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
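
/*
 * Example (illustrative sketch, not part of the original file): the Rx hot
 * path decodes the hardware ptype roughly as the work flow comment above
 * describes.
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		// look at decoded.outer_ip_ver, decoded.tunnel_type,
 *		// decoded.inner_prot, etc.
 */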

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
int i40e_init_shared_code(struct i40e_hw *hw)
{
	u32 port, ari, func_rid;
	int status = 0;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return -ENODEV;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static int
i40e_aq_mac_address_read(struct i40e_hw *hw,
			 u16 *flags,
			 struct i40e_aqc_mac_address_read_data *addrs,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_mac_address_write(struct i40e_hw *hw,
			      u16 flags, u8 *mac_addr,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = -EINVAL;

	return status;
}
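
/*
 * Example (illustrative sketch): probe-time MAC retrieval, loosely
 * following what i40e_probe() does after i40e_init_shared_code().
 *
 *	if (i40e_get_mac_addr(hw, hw->mac.addr) ||
 *	    !is_valid_ether_addr(hw->mac.addr))
 *		dev_info(&pdev->dev, "invalid MAC address found\n");
 *	else
 *		i40e_get_port_mac_addr(hw, hw->mac.port_addr);
 */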

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
			 u32 pba_num_size)
{
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	int status = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return -EINVAL;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}
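
/*
 * Example (illustrative sketch): fetching the board's part number string.
 * The 32-byte buffer is an arbitrary choice for this sketch; the caller
 * must size it to hold the PBA block plus a NUL terminator.
 *
 *	u8 pba[32];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		dev_info(&pdev->dev, "PBA: %s\n", pba);
 */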

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static int i40e_poll_globr(struct i40e_hw *hw,
			   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return -EIO;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
int i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return -EIO;
	}

	/* Now wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return -EIO;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return -EIO;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return -EIO;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}
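
/*
 * Example (illustrative sketch): the probe-time reset sequence clears any
 * leftover state before resetting the PF, roughly as i40e_probe() does.
 * 'err' and 'pdev' are assumed to come from the calling code.
 *
 *	i40e_clear_hw(hw);
 *	err = i40e_pf_reset(hw);
 *	if (err)
 *		dev_info(&pdev->dev, "pf_reset failed: %d\n", err);
 */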

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED BIT(4)
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0 22

#define I40E_PIN_FUNC_SDP 0x0
#define I40E_PIN_FUNC_LED 0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn on the blink, it should also be used
 * to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}
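
/*
 * Example (illustrative sketch): the ethtool "identify adapter" pattern
 * saves the current mode, drives the LED, then restores the saved mode.
 *
 *	u32 saved = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, false);		// LED on
 *	...
 *	i40e_led_set(hw, saved, false);		// restore original mode
 */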

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
int
i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			     bool qualified_modules, bool report_init,
			     struct i40e_aq_get_phy_abilities_resp *abilities,
			     struct i40e_asq_cmd_details *cmd_details)
{
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
	struct i40e_aq_desc desc;
	int status;

	if (!abilities)
		return -EINVAL;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = -EIO;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = -EIO;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
int i40e_aq_set_phy_config(struct i40e_hw *hw,
			   struct i40e_aq_set_phy_config *config,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	int status;

	if (!config)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
		bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				bool enable_link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
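
/*
 * Example (illustrative sketch): requesting full flow control and letting
 * the firmware restart the link atomically; the failure mask tells the
 * caller which step (get/set/update) went wrong.
 *
 *	u8 aq_failures;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	if (i40e_set_fc(hw, &aq_failures, true))
 *		dev_dbg(&pdev->dev, "set_fc failed, failure mask 0x%x\n",
 *			aq_failures);
 */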

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
 **/
int i40e_aq_get_link_info(struct i40e_hw *hw,
			  bool enable_lse, struct i40e_link_status *link,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool tx_pause, rx_pause;
	u16 command_flags;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	hw->phy.link_info_old = *hw_link_info;

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw->phy.media_type = i40e_get_media_type(hw);
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
						 I40E_AQ_CONFIG_FEC_RS_ENA);
	hw_link_info->ext_info = resp->ext_info;
	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;

	/* update fc info */
	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause & rx_pause)
		hw->fc.current_mode = I40E_FC_FULL;
	else if (tx_pause)
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
	else if (rx_pause)
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
	else
		hw->fc.current_mode = I40E_FC_NONE;

	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
		hw_link_info->crc_enable = true;
	else
		hw_link_info->crc_enable = false;

	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	if ((hw->mac.type == I40E_MAC_XL710) &&
	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
	    hw->mac.type != I40E_MAC_X722) {
		__le32 tmp;

		memcpy(&tmp, resp->link_type, sizeof(tmp));
		hw->phy.phy_types = le32_to_cpu(tmp);
		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
	}

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so helper functions don't call AQ again */
	hw->phy.get_link_info = false;

aq_get_link_info_exit:
	return status;
}
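
/*
 * Example (illustrative sketch): refreshing and checking link state.
 * 'hw' is assumed to be a fully initialized struct i40e_hw.
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		// link is up; link.link_speed holds the negotiated speed
 */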

/**
 * i40e_aq_set_phy_int_mask
 * @hw: pointer to the hw struct
 * @mask: interrupt mask to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set link interrupt mask.
 **/
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
			     u16 mask,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_lb_mode *cmd =
		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
	if (ena_lpbk) {
		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
		else
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Reset the external PHY.
 **/
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_is_aq_api_ver_ge
 * @aq: pointer to AdminQ info containing HW API version to compare
 * @maj: API major value
 * @min: API minor value
 *
 * Assert whether the current HW API version is greater than or equal to the
 * provided one.
 **/
static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
				  u16 min)
{
	return (aq->api_maj_ver > maj ||
		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
}
1764 **/ 1765 int i40e_aq_add_vsi(struct i40e_hw *hw, 1766 struct i40e_vsi_context *vsi_ctx, 1767 struct i40e_asq_cmd_details *cmd_details) 1768 { 1769 struct i40e_aq_desc desc; 1770 struct i40e_aqc_add_get_update_vsi *cmd = 1771 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1772 struct i40e_aqc_add_get_update_vsi_completion *resp = 1773 (struct i40e_aqc_add_get_update_vsi_completion *) 1774 &desc.params.raw; 1775 int status; 1776 1777 i40e_fill_default_direct_cmd_desc(&desc, 1778 i40e_aqc_opc_add_vsi); 1779 1780 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1781 cmd->connection_type = vsi_ctx->connection_type; 1782 cmd->vf_id = vsi_ctx->vf_num; 1783 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1784 1785 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1786 1787 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1788 sizeof(vsi_ctx->info), 1789 cmd_details, true); 1790 1791 if (status) 1792 goto aq_add_vsi_exit; 1793 1794 vsi_ctx->seid = le16_to_cpu(resp->seid); 1795 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1796 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1797 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1798 1799 aq_add_vsi_exit: 1800 return status; 1801 } 1802 1803 /** 1804 * i40e_aq_set_default_vsi 1805 * @hw: pointer to the hw struct 1806 * @seid: vsi number 1807 * @cmd_details: pointer to command details structure or NULL 1808 **/ 1809 int i40e_aq_set_default_vsi(struct i40e_hw *hw, 1810 u16 seid, 1811 struct i40e_asq_cmd_details *cmd_details) 1812 { 1813 struct i40e_aq_desc desc; 1814 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1815 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1816 &desc.params.raw; 1817 int status; 1818 1819 i40e_fill_default_direct_cmd_desc(&desc, 1820 i40e_aqc_opc_set_vsi_promiscuous_modes); 1821 1822 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1823 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1824 cmd->seid = cpu_to_le16(seid); 1825 1826 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1827 1828 return status; 1829 } 1830 1831 /** 1832 * i40e_aq_clear_default_vsi 1833 * @hw: pointer to the hw struct 1834 * @seid: vsi number 1835 * @cmd_details: pointer to command details structure or NULL 1836 **/ 1837 int i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1838 u16 seid, 1839 struct i40e_asq_cmd_details *cmd_details) 1840 { 1841 struct i40e_aq_desc desc; 1842 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1843 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1844 &desc.params.raw; 1845 int status; 1846 1847 i40e_fill_default_direct_cmd_desc(&desc, 1848 i40e_aqc_opc_set_vsi_promiscuous_modes); 1849 1850 cmd->promiscuous_flags = cpu_to_le16(0); 1851 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1852 cmd->seid = cpu_to_le16(seid); 1853 1854 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1855 1856 return status; 1857 } 1858 1859 /** 1860 * i40e_aq_set_vsi_unicast_promiscuous 1861 * @hw: pointer to the hw struct 1862 * @seid: vsi number 1863 * @set: set unicast promiscuous enable/disable 1864 * @cmd_details: pointer to command details structure or NULL 1865 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 1866 **/ 1867 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 1868 u16 seid, bool set, 1869 struct i40e_asq_cmd_details *cmd_details, 1870 bool rx_only_promisc) 1871 { 1872 struct i40e_aq_desc desc; 1873 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1874 
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1875 u16 flags = 0; 1876 int status; 1877 1878 i40e_fill_default_direct_cmd_desc(&desc, 1879 i40e_aqc_opc_set_vsi_promiscuous_modes); 1880 1881 if (set) { 1882 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 1883 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 1884 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 1885 } 1886 1887 cmd->promiscuous_flags = cpu_to_le16(flags); 1888 1889 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 1890 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 1891 cmd->valid_flags |= 1892 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 1893 1894 cmd->seid = cpu_to_le16(seid); 1895 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1896 1897 return status; 1898 } 1899 1900 /** 1901 * i40e_aq_set_vsi_multicast_promiscuous 1902 * @hw: pointer to the hw struct 1903 * @seid: vsi number 1904 * @set: set multicast promiscuous enable/disable 1905 * @cmd_details: pointer to command details structure or NULL 1906 **/ 1907 int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 1908 u16 seid, bool set, 1909 struct i40e_asq_cmd_details *cmd_details) 1910 { 1911 struct i40e_aq_desc desc; 1912 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1913 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1914 u16 flags = 0; 1915 int status; 1916 1917 i40e_fill_default_direct_cmd_desc(&desc, 1918 i40e_aqc_opc_set_vsi_promiscuous_modes); 1919 1920 if (set) 1921 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 1922 1923 cmd->promiscuous_flags = cpu_to_le16(flags); 1924 1925 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 1926 1927 cmd->seid = cpu_to_le16(seid); 1928 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1929 1930 return status; 1931 } 1932 1933 /** 1934 * i40e_aq_set_vsi_mc_promisc_on_vlan 1935 * @hw: pointer to the hw struct 1936 * @seid: vsi number 1937 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN 1938 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 1939 * @cmd_details: pointer to command details structure or NULL 1940 **/ 1941 int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 1942 u16 seid, bool enable, 1943 u16 vid, 1944 struct i40e_asq_cmd_details *cmd_details) 1945 { 1946 struct i40e_aq_desc desc; 1947 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1948 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1949 u16 flags = 0; 1950 int status; 1951 1952 i40e_fill_default_direct_cmd_desc(&desc, 1953 i40e_aqc_opc_set_vsi_promiscuous_modes); 1954 1955 if (enable) 1956 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 1957 1958 cmd->promiscuous_flags = cpu_to_le16(flags); 1959 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 1960 cmd->seid = cpu_to_le16(seid); 1961 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 1962 1963 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 1964 cmd_details, true); 1965 1966 return status; 1967 } 1968 1969 /** 1970 * i40e_aq_set_vsi_uc_promisc_on_vlan 1971 * @hw: pointer to the hw struct 1972 * @seid: vsi number 1973 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 1974 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 1975 * @cmd_details: pointer to command details structure or NULL 1976 **/ 1977 int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 1978 u16 seid, bool enable, 1979 u16 vid, 1980 struct i40e_asq_cmd_details
*cmd_details) 1981 { 1982 struct i40e_aq_desc desc; 1983 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1984 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1985 u16 flags = 0; 1986 int status; 1987 1988 i40e_fill_default_direct_cmd_desc(&desc, 1989 i40e_aqc_opc_set_vsi_promiscuous_modes); 1990 1991 if (enable) { 1992 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 1993 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 1994 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 1995 } 1996 1997 cmd->promiscuous_flags = cpu_to_le16(flags); 1998 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 1999 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2000 cmd->valid_flags |= 2001 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2002 cmd->seid = cpu_to_le16(seid); 2003 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2004 2005 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2006 cmd_details, true); 2007 2008 return status; 2009 } 2010 2011 /** 2012 * i40e_aq_set_vsi_bc_promisc_on_vlan 2013 * @hw: pointer to the hw struct 2014 * @seid: vsi number 2015 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2016 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2017 * @cmd_details: pointer to command details structure or NULL 2018 **/ 2019 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2020 u16 seid, bool enable, u16 vid, 2021 struct i40e_asq_cmd_details *cmd_details) 2022 { 2023 struct i40e_aq_desc desc; 2024 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2025 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2026 u16 flags = 0; 2027 int status; 2028 2029 i40e_fill_default_direct_cmd_desc(&desc, 2030 i40e_aqc_opc_set_vsi_promiscuous_modes); 2031 2032 if (enable) 2033 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2034 2035 cmd->promiscuous_flags = cpu_to_le16(flags); 2036 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2037 cmd->seid = cpu_to_le16(seid); 2038 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2039 2040 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2041 2042 return status; 2043 } 2044 2045 /** 2046 * i40e_aq_set_vsi_broadcast 2047 * @hw: pointer to the hw struct 2048 * @seid: vsi number 2049 * @set_filter: true to set filter, false to clear filter 2050 * @cmd_details: pointer to command details structure or NULL 2051 * 2052 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
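 *
 * Usage sketch (illustrative only; vsi_seid stands for the SEID of the VSI
 * being configured):
 *
 *	int err = i40e_aq_set_vsi_broadcast(hw, vsi_seid, true, NULL);
 *
 *	if (err)
 *		hw_dbg(hw, "broadcast filter on seid %u failed: %s\n",
 *		       vsi_seid, i40e_aq_str(hw, hw->aq.asq_last_status));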
2053 **/ 2054 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2055 u16 seid, bool set_filter, 2056 struct i40e_asq_cmd_details *cmd_details) 2057 { 2058 struct i40e_aq_desc desc; 2059 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2060 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2061 int status; 2062 2063 i40e_fill_default_direct_cmd_desc(&desc, 2064 i40e_aqc_opc_set_vsi_promiscuous_modes); 2065 2066 if (set_filter) 2067 cmd->promiscuous_flags 2068 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2069 else 2070 cmd->promiscuous_flags 2071 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2072 2073 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2074 cmd->seid = cpu_to_le16(seid); 2075 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2076 2077 return status; 2078 } 2079 2080 /** 2081 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2082 * @hw: pointer to the hw struct 2083 * @seid: vsi number 2084 * @enable: set VLAN promiscuous enable/disable 2085 * @cmd_details: pointer to command details structure or NULL 2086 **/ 2087 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2088 u16 seid, bool enable, 2089 struct i40e_asq_cmd_details *cmd_details) 2090 { 2091 struct i40e_aq_desc desc; 2092 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2093 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2094 u16 flags = 0; 2095 int status; 2096 2097 i40e_fill_default_direct_cmd_desc(&desc, 2098 i40e_aqc_opc_set_vsi_promiscuous_modes); 2099 if (enable) 2100 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2101 2102 cmd->promiscuous_flags = cpu_to_le16(flags); 2103 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2104 cmd->seid = cpu_to_le16(seid); 2105 2106 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2107 2108 return status; 2109 } 2110 2111 /** 2112 * i40e_aq_get_vsi_params - get VSI configuration info 2113 * @hw: pointer to the hw struct 2114 * @vsi_ctx: pointer to a vsi context struct 2115 * @cmd_details: pointer to command details structure or NULL 2116 **/ 2117 int i40e_aq_get_vsi_params(struct i40e_hw *hw, 2118 struct i40e_vsi_context *vsi_ctx, 2119 struct i40e_asq_cmd_details *cmd_details) 2120 { 2121 struct i40e_aq_desc desc; 2122 struct i40e_aqc_add_get_update_vsi *cmd = 2123 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2124 struct i40e_aqc_add_get_update_vsi_completion *resp = 2125 (struct i40e_aqc_add_get_update_vsi_completion *) 2126 &desc.params.raw; 2127 int status; 2128 2129 i40e_fill_default_direct_cmd_desc(&desc, 2130 i40e_aqc_opc_get_vsi_parameters); 2131 2132 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2133 2134 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2135 2136 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2137 sizeof(vsi_ctx->info), NULL); 2138 2139 if (status) 2140 goto aq_get_vsi_params_exit; 2141 2142 vsi_ctx->seid = le16_to_cpu(resp->seid); 2143 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2144 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2145 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2146 2147 aq_get_vsi_params_exit: 2148 return status; 2149 } 2150 2151 /** 2152 * i40e_aq_update_vsi_params 2153 * @hw: pointer to the hw struct 2154 * @vsi_ctx: pointer to a vsi context struct 2155 * @cmd_details: pointer to command details structure or NULL 2156 * 2157 * Update a VSI context.
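 *
 * Callers usually follow a read-modify-write pattern; the sketch below is
 * illustrative only, and I40E_AQ_VSI_PROP_SECURITY_VALID is assumed to be
 * one of the valid_sections bits defined in i40e_adminq_cmd.h:
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.seid = vsi_seid;
 *	if (!i40e_aq_get_vsi_params(hw, &ctx, NULL)) {
 *		ctx.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
 *		i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}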
2158 **/ 2159 int i40e_aq_update_vsi_params(struct i40e_hw *hw, 2160 struct i40e_vsi_context *vsi_ctx, 2161 struct i40e_asq_cmd_details *cmd_details) 2162 { 2163 struct i40e_aq_desc desc; 2164 struct i40e_aqc_add_get_update_vsi *cmd = 2165 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2166 struct i40e_aqc_add_get_update_vsi_completion *resp = 2167 (struct i40e_aqc_add_get_update_vsi_completion *) 2168 &desc.params.raw; 2169 int status; 2170 2171 i40e_fill_default_direct_cmd_desc(&desc, 2172 i40e_aqc_opc_update_vsi_parameters); 2173 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2174 2175 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2176 2177 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 2178 sizeof(vsi_ctx->info), 2179 cmd_details, true); 2180 2181 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2182 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2183 2184 return status; 2185 } 2186 2187 /** 2188 * i40e_aq_get_switch_config 2189 * @hw: pointer to the hardware structure 2190 * @buf: pointer to the result buffer 2191 * @buf_size: length of the result buffer, in bytes 2192 * @start_seid: seid to start for the report, 0 == beginning 2193 * @cmd_details: pointer to command details structure or NULL 2194 * 2195 * Fill the buf with the switch configuration returned from the AdminQ command 2196 **/ 2197 int i40e_aq_get_switch_config(struct i40e_hw *hw, 2198 struct i40e_aqc_get_switch_config_resp *buf, 2199 u16 buf_size, u16 *start_seid, 2200 struct i40e_asq_cmd_details *cmd_details) 2201 { 2202 struct i40e_aq_desc desc; 2203 struct i40e_aqc_switch_seid *scfg = 2204 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2205 int status; 2206 2207 i40e_fill_default_direct_cmd_desc(&desc, 2208 i40e_aqc_opc_get_switch_config); 2209 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2210 if (buf_size > I40E_AQ_LARGE_BUF) 2211 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2212 scfg->seid = cpu_to_le16(*start_seid); 2213 2214 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2215 *start_seid = le16_to_cpu(scfg->seid); 2216 2217 return status; 2218 } 2219 2220 /** 2221 * i40e_aq_set_switch_config 2222 * @hw: pointer to the hardware structure 2223 * @flags: bit flag values to set 2224 * @valid_flags: which bit flags to set 2225 * @mode: cloud filter mode 2226 * @cmd_details: pointer to command details structure or NULL 2227 * 2228 * Set switch configuration bits. When the HW reports 802.1AD capability, 2229 * the switch tag values stored in the hw struct are programmed as well. 2230 **/ 2231 int i40e_aq_set_switch_config(struct i40e_hw *hw, 2232 u16 flags, 2233 u16 valid_flags, u8 mode, 2234 struct i40e_asq_cmd_details *cmd_details) 2235 { 2236 struct i40e_aq_desc desc; 2237 struct i40e_aqc_set_switch_config *scfg = 2238 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2239 int status; 2240 2241 i40e_fill_default_direct_cmd_desc(&desc, 2242 i40e_aqc_opc_set_switch_config); 2243 scfg->flags = cpu_to_le16(flags); 2244 scfg->valid_flags = cpu_to_le16(valid_flags); 2245 scfg->mode = mode; 2246 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { 2247 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2248 scfg->first_tag = cpu_to_le16(hw->first_tag); 2249 scfg->second_tag = cpu_to_le16(hw->second_tag); 2250 } 2251 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2252 2253 return status; 2254 } 2255 2256 /** 2257 * i40e_aq_get_firmware_version 2258 * @hw: pointer to the hw struct 2259 * @fw_major_version: firmware major version 2260 * @fw_minor_version: firmware minor version 2261 * @fw_build: firmware build number
2262 * @api_major_version: major queue version 2263 * @api_minor_version: minor queue version 2264 * @cmd_details: pointer to command details structure or NULL 2265 * 2266 * Get the firmware version from the admin queue commands 2267 **/ 2268 int i40e_aq_get_firmware_version(struct i40e_hw *hw, 2269 u16 *fw_major_version, u16 *fw_minor_version, 2270 u32 *fw_build, 2271 u16 *api_major_version, u16 *api_minor_version, 2272 struct i40e_asq_cmd_details *cmd_details) 2273 { 2274 struct i40e_aq_desc desc; 2275 struct i40e_aqc_get_version *resp = 2276 (struct i40e_aqc_get_version *)&desc.params.raw; 2277 int status; 2278 2279 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2280 2281 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2282 2283 if (!status) { 2284 if (fw_major_version) 2285 *fw_major_version = le16_to_cpu(resp->fw_major); 2286 if (fw_minor_version) 2287 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2288 if (fw_build) 2289 *fw_build = le32_to_cpu(resp->fw_build); 2290 if (api_major_version) 2291 *api_major_version = le16_to_cpu(resp->api_major); 2292 if (api_minor_version) 2293 *api_minor_version = le16_to_cpu(resp->api_minor); 2294 } 2295 2296 return status; 2297 } 2298 2299 /** 2300 * i40e_aq_send_driver_version 2301 * @hw: pointer to the hw struct 2302 * @dv: driver's major, minor version 2303 * @cmd_details: pointer to command details structure or NULL 2304 * 2305 * Send the driver version to the firmware 2306 **/ 2307 int i40e_aq_send_driver_version(struct i40e_hw *hw, 2308 struct i40e_driver_version *dv, 2309 struct i40e_asq_cmd_details *cmd_details) 2310 { 2311 struct i40e_aq_desc desc; 2312 struct i40e_aqc_driver_version *cmd = 2313 (struct i40e_aqc_driver_version *)&desc.params.raw; 2314 int status; 2315 u16 len; 2316 2317 if (dv == NULL) 2318 return -EINVAL; 2319 2320 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2321 2322 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2323 cmd->driver_major_ver = dv->major_version; 2324 cmd->driver_minor_ver = dv->minor_version; 2325 cmd->driver_build_ver = dv->build_version; 2326 cmd->driver_subbuild_ver = dv->subbuild_version; 2327 2328 len = 0; 2329 while (len < sizeof(dv->driver_string) && 2330 (dv->driver_string[len] < 0x80) && 2331 dv->driver_string[len]) 2332 len++; 2333 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2334 len, cmd_details); 2335 2336 return status; 2337 } 2338 2339 /** 2340 * i40e_get_link_status - get status of the HW network link 2341 * @hw: pointer to the hw struct 2342 * @link_up: pointer to bool (true/false = linkup/linkdown) 2343 * 2344 * Variable link_up true if link is up, false if link is down. 
2345 * The variable link_up is invalid if returned value of status != 0 2346 * 2347 * Side effect: LinkStatusEvent reporting becomes enabled 2348 **/ 2349 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2350 { 2351 int status = 0; 2352 2353 if (hw->phy.get_link_info) { 2354 status = i40e_update_link_info(hw); 2355 2356 if (status) 2357 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2358 status); 2359 } 2360 2361 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2362 2363 return status; 2364 } 2365 2366 /** 2367 * i40e_update_link_info - update status of the HW network link 2368 * @hw: pointer to the hw struct 2369 **/ 2370 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw) 2371 { 2372 struct i40e_aq_get_phy_abilities_resp abilities; 2373 int status = 0; 2374 2375 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2376 if (status) 2377 return status; 2378 2379 /* extra checking needed to ensure link info to user is timely */ 2380 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2381 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2382 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2383 status = i40e_aq_get_phy_capabilities(hw, false, false, 2384 &abilities, NULL); 2385 if (status) 2386 return status; 2387 2388 if (abilities.fec_cfg_curr_mod_ext_info & 2389 I40E_AQ_ENABLE_FEC_AUTO) 2390 hw->phy.link_info.req_fec_info = 2391 (I40E_AQ_REQUEST_FEC_KR | 2392 I40E_AQ_REQUEST_FEC_RS); 2393 else 2394 hw->phy.link_info.req_fec_info = 2395 abilities.fec_cfg_curr_mod_ext_info & 2396 (I40E_AQ_REQUEST_FEC_KR | 2397 I40E_AQ_REQUEST_FEC_RS); 2398 2399 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2400 sizeof(hw->phy.link_info.module_type)); 2401 } 2402 2403 return status; 2404 } 2405 2406 /** 2407 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2408 * @hw: pointer to the hw struct 2409 * @uplink_seid: the MAC or other gizmo SEID 2410 * @downlink_seid: the VSI SEID 2411 * @enabled_tc: bitmap of TCs to be enabled 2412 * @default_port: true for default port VSI, false for control port 2413 * @veb_seid: pointer to where to put the resulting VEB SEID 2414 * @enable_stats: true to turn on VEB stats 2415 * @cmd_details: pointer to command details structure or NULL 2416 * 2417 * This asks the FW to add a VEB between the uplink and downlink 2418 * elements. If the uplink SEID is 0, this will be a floating VEB. 
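 *
 * Illustrative call sketch (the SEIDs and the TC bitmap are placeholders,
 * not values taken from the driver):
 *
 *	u16 veb_seid = 0;
 *
 *	if (!i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			     false, &veb_seid, true, NULL))
 *		hw_dbg(hw, "created VEB, seid %u\n", veb_seid);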
2419 **/ 2420 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2421 u16 downlink_seid, u8 enabled_tc, 2422 bool default_port, u16 *veb_seid, 2423 bool enable_stats, 2424 struct i40e_asq_cmd_details *cmd_details) 2425 { 2426 struct i40e_aq_desc desc; 2427 struct i40e_aqc_add_veb *cmd = 2428 (struct i40e_aqc_add_veb *)&desc.params.raw; 2429 struct i40e_aqc_add_veb_completion *resp = 2430 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2431 u16 veb_flags = 0; 2432 int status; 2433 2434 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2435 if (!!uplink_seid != !!downlink_seid) 2436 return -EINVAL; 2437 2438 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2439 2440 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2441 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2442 cmd->enable_tcs = enabled_tc; 2443 if (!uplink_seid) 2444 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2445 if (default_port) 2446 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2447 else 2448 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2449 2450 /* reverse logic here: set the bitflag to disable the stats */ 2451 if (!enable_stats) 2452 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2453 2454 cmd->veb_flags = cpu_to_le16(veb_flags); 2455 2456 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2457 2458 if (!status && veb_seid) 2459 *veb_seid = le16_to_cpu(resp->veb_seid); 2460 2461 return status; 2462 } 2463 2464 /** 2465 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2466 * @hw: pointer to the hw struct 2467 * @veb_seid: the SEID of the VEB to query 2468 * @switch_id: the uplink switch id 2469 * @floating: set to true if the VEB is floating 2470 * @statistic_index: index of the stats counter block for this VEB 2471 * @vebs_used: number of VEB's used by function 2472 * @vebs_free: total VEB's not reserved by any function 2473 * @cmd_details: pointer to command details structure or NULL 2474 * 2475 * This retrieves the parameters for a particular VEB, specified by 2476 * uplink_seid, and returns them to the caller. 
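 *
 * Query sketch (illustrative only; veb_seid would be the SEID returned by a
 * previous i40e_aq_add_veb() call):
 *
 *	u16 switch_id, stats_idx, used, unused;
 *	bool floating;
 *
 *	if (!i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *					&stats_idx, &used, &unused, NULL))
 *		hw_dbg(hw, "VEB %u: floating %d, stats index %u\n",
 *		       veb_seid, floating, stats_idx);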
2477 **/ 2478 int i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2479 u16 veb_seid, u16 *switch_id, 2480 bool *floating, u16 *statistic_index, 2481 u16 *vebs_used, u16 *vebs_free, 2482 struct i40e_asq_cmd_details *cmd_details) 2483 { 2484 struct i40e_aq_desc desc; 2485 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2486 (struct i40e_aqc_get_veb_parameters_completion *) 2487 &desc.params.raw; 2488 int status; 2489 2490 if (veb_seid == 0) 2491 return -EINVAL; 2492 2493 i40e_fill_default_direct_cmd_desc(&desc, 2494 i40e_aqc_opc_get_veb_parameters); 2495 cmd_resp->seid = cpu_to_le16(veb_seid); 2496 2497 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2498 if (status) 2499 goto get_veb_exit; 2500 2501 if (switch_id) 2502 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2503 if (statistic_index) 2504 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2505 if (vebs_used) 2506 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2507 if (vebs_free) 2508 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2509 if (floating) { 2510 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2511 2512 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2513 *floating = true; 2514 else 2515 *floating = false; 2516 } 2517 2518 get_veb_exit: 2519 return status; 2520 } 2521 2522 /** 2523 * i40e_prepare_add_macvlan 2524 * @mv_list: list of macvlans to be added 2525 * @desc: pointer to AQ descriptor structure 2526 * @count: length of the list 2527 * @seid: VSI for the mac address 2528 * 2529 * Internal helper function that prepares the add macvlan request 2530 * and returns the buffer size. 2531 **/ 2532 static u16 2533 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2534 struct i40e_aq_desc *desc, u16 count, u16 seid) 2535 { 2536 struct i40e_aqc_macvlan *cmd = 2537 (struct i40e_aqc_macvlan *)&desc->params.raw; 2538 u16 buf_size; 2539 int i; 2540 2541 buf_size = count * sizeof(*mv_list); 2542 2543 /* prep the rest of the request */ 2544 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2545 cmd->num_addresses = cpu_to_le16(count); 2546 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2547 cmd->seid[1] = 0; 2548 cmd->seid[2] = 0; 2549 2550 for (i = 0; i < count; i++) 2551 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2552 mv_list[i].flags |= 2553 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2554 2555 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2556 if (buf_size > I40E_AQ_LARGE_BUF) 2557 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2558 2559 return buf_size; 2560 } 2561 2562 /** 2563 * i40e_aq_add_macvlan 2564 * @hw: pointer to the hw struct 2565 * @seid: VSI for the mac address 2566 * @mv_list: list of macvlans to be added 2567 * @count: length of the list 2568 * @cmd_details: pointer to command details structure or NULL 2569 * 2570 * Add MAC/VLAN addresses to the HW filtering 2571 **/ 2572 int 2573 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2574 struct i40e_aqc_add_macvlan_element_data *mv_list, 2575 u16 count, struct i40e_asq_cmd_details *cmd_details) 2576 { 2577 struct i40e_aq_desc desc; 2578 u16 buf_size; 2579 2580 if (count == 0 || !mv_list || !hw) 2581 return -EINVAL; 2582 2583 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2584 2585 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2586 cmd_details, true); 2587 } 2588 2589 /** 2590 * i40e_aq_add_macvlan_v2 2591 * @hw: pointer to the hw struct 2592 * @seid: VSI for the mac address 2593 * @mv_list: list of 
macvlans to be added 2594 * @count: length of the list 2595 * @cmd_details: pointer to command details structure or NULL 2596 * @aq_status: pointer to Admin Queue status return value 2597 * 2598 * Add MAC/VLAN addresses to the HW filtering. 2599 * The _v2 version returns the last Admin Queue status in aq_status 2600 * to avoid race conditions in access to hw->aq.asq_last_status. 2601 * It also calls _v2 versions of asq_send_command functions to 2602 * get the aq_status on the stack. 2603 **/ 2604 int 2605 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2606 struct i40e_aqc_add_macvlan_element_data *mv_list, 2607 u16 count, struct i40e_asq_cmd_details *cmd_details, 2608 enum i40e_admin_queue_err *aq_status) 2609 { 2610 struct i40e_aq_desc desc; 2611 u16 buf_size; 2612 2613 if (count == 0 || !mv_list || !hw) 2614 return -EINVAL; 2615 2616 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2617 2618 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2619 cmd_details, true, aq_status); 2620 } 2621 2622 /** 2623 * i40e_aq_remove_macvlan 2624 * @hw: pointer to the hw struct 2625 * @seid: VSI for the mac address 2626 * @mv_list: list of macvlans to be removed 2627 * @count: length of the list 2628 * @cmd_details: pointer to command details structure or NULL 2629 * 2630 * Remove MAC/VLAN addresses from the HW filtering 2631 **/ 2632 int 2633 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2634 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2635 u16 count, struct i40e_asq_cmd_details *cmd_details) 2636 { 2637 struct i40e_aq_desc desc; 2638 struct i40e_aqc_macvlan *cmd = 2639 (struct i40e_aqc_macvlan *)&desc.params.raw; 2640 u16 buf_size; 2641 int status; 2642 2643 if (count == 0 || !mv_list || !hw) 2644 return -EINVAL; 2645 2646 buf_size = count * sizeof(*mv_list); 2647 2648 /* prep the rest of the request */ 2649 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2650 cmd->num_addresses = cpu_to_le16(count); 2651 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2652 cmd->seid[1] = 0; 2653 cmd->seid[2] = 0; 2654 2655 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2656 if (buf_size > I40E_AQ_LARGE_BUF) 2657 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2658 2659 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2660 cmd_details, true); 2661 2662 return status; 2663 } 2664 2665 /** 2666 * i40e_aq_remove_macvlan_v2 2667 * @hw: pointer to the hw struct 2668 * @seid: VSI for the mac address 2669 * @mv_list: list of macvlans to be removed 2670 * @count: length of the list 2671 * @cmd_details: pointer to command details structure or NULL 2672 * @aq_status: pointer to Admin Queue status return value 2673 * 2674 * Remove MAC/VLAN addresses from the HW filtering. 2675 * The _v2 version returns the last Admin Queue status in aq_status 2676 * to avoid race conditions in access to hw->aq.asq_last_status. 2677 * It also calls _v2 versions of asq_send_command functions to 2678 * get the aq_status on the stack. 
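 *
 * Single-entry removal sketch (illustrative only; the MAC address source and
 * the perfect-match flag name are assumptions based on the element layout in
 * i40e_adminq_cmd.h):
 *
 *	struct i40e_aqc_remove_macvlan_element_data e = {};
 *	enum i40e_admin_queue_err aq_err;
 *
 *	ether_addr_copy(e.mac_addr, mac);
 *	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *	if (i40e_aq_remove_macvlan_v2(hw, vsi_seid, &e, 1, NULL, &aq_err))
 *		hw_dbg(hw, "remove failed: %s\n", i40e_aq_str(hw, aq_err));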
2679 **/ 2680 int 2681 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2682 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2683 u16 count, struct i40e_asq_cmd_details *cmd_details, 2684 enum i40e_admin_queue_err *aq_status) 2685 { 2686 struct i40e_aqc_macvlan *cmd; 2687 struct i40e_aq_desc desc; 2688 u16 buf_size; 2689 2690 if (count == 0 || !mv_list || !hw) 2691 return -EINVAL; 2692 2693 buf_size = count * sizeof(*mv_list); 2694 2695 /* prep the rest of the request */ 2696 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2697 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2698 cmd->num_addresses = cpu_to_le16(count); 2699 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2700 cmd->seid[1] = 0; 2701 cmd->seid[2] = 0; 2702 2703 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2704 if (buf_size > I40E_AQ_LARGE_BUF) 2705 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2706 2707 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2708 cmd_details, true, aq_status); 2709 } 2710 2711 /** 2712 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2713 * @hw: pointer to the hw struct 2714 * @opcode: AQ opcode for add or delete mirror rule 2715 * @sw_seid: Switch SEID (to which rule refers) 2716 * @rule_type: Rule Type (ingress/egress/VLAN) 2717 * @id: Destination VSI SEID or Rule ID 2718 * @count: length of the list 2719 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2720 * @cmd_details: pointer to command details structure or NULL 2721 * @rule_id: Rule ID returned from FW 2722 * @rules_used: Number of rules used in internal switch 2723 * @rules_free: Number of rules free in internal switch 2724 * 2725 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2726 * VEBs/VEPA elements only 2727 **/ 2728 static int i40e_mirrorrule_op(struct i40e_hw *hw, 2729 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2730 u16 count, __le16 *mr_list, 2731 struct i40e_asq_cmd_details *cmd_details, 2732 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2733 { 2734 struct i40e_aq_desc desc; 2735 struct i40e_aqc_add_delete_mirror_rule *cmd = 2736 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2737 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2738 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2739 u16 buf_size; 2740 int status; 2741 2742 buf_size = count * sizeof(*mr_list); 2743 2744 /* prep the rest of the request */ 2745 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2746 cmd->seid = cpu_to_le16(sw_seid); 2747 cmd->rule_type = cpu_to_le16(rule_type & 2748 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2749 cmd->num_entries = cpu_to_le16(count); 2750 /* Dest VSI for add, rule_id for delete */ 2751 cmd->destination = cpu_to_le16(id); 2752 if (mr_list) { 2753 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2754 I40E_AQ_FLAG_RD)); 2755 if (buf_size > I40E_AQ_LARGE_BUF) 2756 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2757 } 2758 2759 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2760 cmd_details); 2761 if (!status || 2762 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2763 if (rule_id) 2764 *rule_id = le16_to_cpu(resp->rule_id); 2765 if (rules_used) 2766 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2767 if (rules_free) 2768 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2769 } 2770 return status; 2771 } 2772 2773 /** 2774 * i40e_aq_add_mirrorrule - add a mirror rule 2775 * @hw: pointer to the hw struct 2776 * @sw_seid: Switch SEID (to which rule refers) 2777 * @rule_type: Rule Type (ingress/egress/VLAN) 2778 * @dest_vsi: SEID of VSI to which packets will be mirrored 2779 * @count: length of the list 2780 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2781 * @cmd_details: pointer to command details structure or NULL 2782 * @rule_id: Rule ID returned from FW 2783 * @rules_used: Number of rules used in internal switch 2784 * @rules_free: Number of rules free in internal switch 2785 * 2786 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2787 **/ 2788 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2789 u16 rule_type, u16 dest_vsi, u16 count, 2790 __le16 *mr_list, 2791 struct i40e_asq_cmd_details *cmd_details, 2792 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2793 { 2794 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2795 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2796 if (count == 0 || !mr_list) 2797 return -EINVAL; 2798 } 2799 2800 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2801 rule_type, dest_vsi, count, mr_list, 2802 cmd_details, rule_id, rules_used, rules_free); 2803 } 2804 2805 /** 2806 * i40e_aq_delete_mirrorrule - delete a mirror rule 2807 * @hw: pointer to the hw struct 2808 * @sw_seid: Switch SEID (to which rule refers) 2809 * @rule_type: Rule Type (ingress/egress/VLAN) 2810 * @count: length of the list 2811 * @rule_id: Rule ID that is returned in the receive desc as part of 2812 * add_mirrorrule. 
2813 * @mr_list: list of mirrored VLAN IDs to be removed 2814 * @cmd_details: pointer to command details structure or NULL 2815 * @rules_used: Number of rules used in internal switch 2816 * @rules_free: Number of rules free in internal switch 2817 * 2818 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2819 **/ 2820 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2821 u16 rule_type, u16 rule_id, u16 count, 2822 __le16 *mr_list, 2823 struct i40e_asq_cmd_details *cmd_details, 2824 u16 *rules_used, u16 *rules_free) 2825 { 2826 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2827 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2828 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2829 * mirroring. For other rule_type, count and rule_type should 2830 * not matter. 2831 */ 2832 if (count == 0 || !mr_list) 2833 return -EINVAL; 2834 } 2835 2836 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2837 rule_type, rule_id, count, mr_list, 2838 cmd_details, NULL, rules_used, rules_free); 2839 } 2840 2841 /** 2842 * i40e_aq_send_msg_to_vf 2843 * @hw: pointer to the hardware structure 2844 * @vfid: VF id to send msg 2845 * @v_opcode: opcodes for VF-PF communication 2846 * @v_retval: return error code 2847 * @msg: pointer to the msg buffer 2848 * @msglen: msg length 2849 * @cmd_details: pointer to command details 2850 * 2851 * send msg to vf 2852 **/ 2853 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2854 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2855 struct i40e_asq_cmd_details *cmd_details) 2856 { 2857 struct i40e_aq_desc desc; 2858 struct i40e_aqc_pf_vf_message *cmd = 2859 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2860 int status; 2861 2862 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2863 cmd->id = cpu_to_le32(vfid); 2864 desc.cookie_high = cpu_to_le32(v_opcode); 2865 desc.cookie_low = cpu_to_le32(v_retval); 2866 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2867 if (msglen) { 2868 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2869 I40E_AQ_FLAG_RD)); 2870 if (msglen > I40E_AQ_LARGE_BUF) 2871 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2872 desc.datalen = cpu_to_le16(msglen); 2873 } 2874 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2875 2876 return status; 2877 } 2878 2879 /** 2880 * i40e_aq_debug_read_register 2881 * @hw: pointer to the hw struct 2882 * @reg_addr: register address 2883 * @reg_val: register value 2884 * @cmd_details: pointer to command details structure or NULL 2885 * 2886 * Read the register using the admin queue commands 2887 **/ 2888 int i40e_aq_debug_read_register(struct i40e_hw *hw, 2889 u32 reg_addr, u64 *reg_val, 2890 struct i40e_asq_cmd_details *cmd_details) 2891 { 2892 struct i40e_aq_desc desc; 2893 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2894 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2895 int status; 2896 2897 if (reg_val == NULL) 2898 return -EINVAL; 2899 2900 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2901 2902 cmd_resp->address = cpu_to_le32(reg_addr); 2903 2904 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2905 2906 if (!status) { 2907 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2908 (u64)le32_to_cpu(cmd_resp->value_low); 2909 } 2910 2911 return status; 2912 } 2913 2914 /** 2915 * i40e_aq_debug_write_register 2916 * @hw: pointer to the hw struct 2917 * @reg_addr: register address 2918 * 
@reg_val: register value 2919 * @cmd_details: pointer to command details structure or NULL 2920 * 2921 * Write to a register using the admin queue commands 2922 **/ 2923 int i40e_aq_debug_write_register(struct i40e_hw *hw, 2924 u32 reg_addr, u64 reg_val, 2925 struct i40e_asq_cmd_details *cmd_details) 2926 { 2927 struct i40e_aq_desc desc; 2928 struct i40e_aqc_debug_reg_read_write *cmd = 2929 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2930 int status; 2931 2932 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 2933 2934 cmd->address = cpu_to_le32(reg_addr); 2935 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 2936 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 2937 2938 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2939 2940 return status; 2941 } 2942 2943 /** 2944 * i40e_aq_request_resource 2945 * @hw: pointer to the hw struct 2946 * @resource: resource id 2947 * @access: access type 2948 * @sdp_number: resource number 2949 * @timeout: the maximum time in ms that the driver may hold the resource 2950 * @cmd_details: pointer to command details structure or NULL 2951 * 2952 * requests common resource using the admin queue commands 2953 **/ 2954 int i40e_aq_request_resource(struct i40e_hw *hw, 2955 enum i40e_aq_resources_ids resource, 2956 enum i40e_aq_resource_access_type access, 2957 u8 sdp_number, u64 *timeout, 2958 struct i40e_asq_cmd_details *cmd_details) 2959 { 2960 struct i40e_aq_desc desc; 2961 struct i40e_aqc_request_resource *cmd_resp = 2962 (struct i40e_aqc_request_resource *)&desc.params.raw; 2963 int status; 2964 2965 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 2966 2967 cmd_resp->resource_id = cpu_to_le16(resource); 2968 cmd_resp->access_type = cpu_to_le16(access); 2969 cmd_resp->resource_number = cpu_to_le32(sdp_number); 2970 2971 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2972 /* The completion specifies the maximum time in ms that the driver 2973 * may hold the resource in the Timeout field. 2974 * If the resource is held by someone else, the command completes with 2975 * busy return value and the timeout field indicates the maximum time 2976 * the current owner of the resource has to free it. 
2977 */ 2978 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 2979 *timeout = le32_to_cpu(cmd_resp->timeout); 2980 2981 return status; 2982 } 2983 2984 /** 2985 * i40e_aq_release_resource 2986 * @hw: pointer to the hw struct 2987 * @resource: resource id 2988 * @sdp_number: resource number 2989 * @cmd_details: pointer to command details structure or NULL 2990 * 2991 * release common resource using the admin queue commands 2992 **/ 2993 int i40e_aq_release_resource(struct i40e_hw *hw, 2994 enum i40e_aq_resources_ids resource, 2995 u8 sdp_number, 2996 struct i40e_asq_cmd_details *cmd_details) 2997 { 2998 struct i40e_aq_desc desc; 2999 struct i40e_aqc_request_resource *cmd = 3000 (struct i40e_aqc_request_resource *)&desc.params.raw; 3001 int status; 3002 3003 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3004 3005 cmd->resource_id = cpu_to_le16(resource); 3006 cmd->resource_number = cpu_to_le32(sdp_number); 3007 3008 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3009 3010 return status; 3011 } 3012 3013 /** 3014 * i40e_aq_read_nvm 3015 * @hw: pointer to the hw struct 3016 * @module_pointer: module pointer location in words from the NVM beginning 3017 * @offset: byte offset from the module beginning 3018 * @length: length of the section to be read (in bytes from the offset) 3019 * @data: command buffer (size [bytes] = length) 3020 * @last_command: tells if this is the last command in a series 3021 * @cmd_details: pointer to command details structure or NULL 3022 * 3023 * Read the NVM using the admin queue commands 3024 **/ 3025 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3026 u32 offset, u16 length, void *data, 3027 bool last_command, 3028 struct i40e_asq_cmd_details *cmd_details) 3029 { 3030 struct i40e_aq_desc desc; 3031 struct i40e_aqc_nvm_update *cmd = 3032 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3033 int status; 3034 3035 /* In offset the highest byte must be zeroed. */ 3036 if (offset & 0xFF000000) { 3037 status = -EINVAL; 3038 goto i40e_aq_read_nvm_exit; 3039 } 3040 3041 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3042 3043 /* If this is the last command in a series, set the proper flag. 
*/ 3044 if (last_command) 3045 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3046 cmd->module_pointer = module_pointer; 3047 cmd->offset = cpu_to_le32(offset); 3048 cmd->length = cpu_to_le16(length); 3049 3050 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3051 if (length > I40E_AQ_LARGE_BUF) 3052 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3053 3054 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3055 3056 i40e_aq_read_nvm_exit: 3057 return status; 3058 } 3059 3060 /** 3061 * i40e_aq_erase_nvm 3062 * @hw: pointer to the hw struct 3063 * @module_pointer: module pointer location in words from the NVM beginning 3064 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3065 * @length: length of the section to be erased (expressed in 4 KB) 3066 * @last_command: tells if this is the last command in a series 3067 * @cmd_details: pointer to command details structure or NULL 3068 * 3069 * Erase the NVM sector using the admin queue commands 3070 **/ 3071 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3072 u32 offset, u16 length, bool last_command, 3073 struct i40e_asq_cmd_details *cmd_details) 3074 { 3075 struct i40e_aq_desc desc; 3076 struct i40e_aqc_nvm_update *cmd = 3077 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3078 int status; 3079 3080 /* In offset the highest byte must be zeroed. */ 3081 if (offset & 0xFF000000) { 3082 status = -EINVAL; 3083 goto i40e_aq_erase_nvm_exit; 3084 } 3085 3086 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3087 3088 /* If this is the last command in a series, set the proper flag. */ 3089 if (last_command) 3090 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3091 cmd->module_pointer = module_pointer; 3092 cmd->offset = cpu_to_le32(offset); 3093 cmd->length = cpu_to_le16(length); 3094 3095 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3096 3097 i40e_aq_erase_nvm_exit: 3098 return status; 3099 } 3100 3101 /** 3102 * i40e_parse_discover_capabilities 3103 * @hw: pointer to the hw struct 3104 * @buff: pointer to a buffer containing device/function capability records 3105 * @cap_count: number of capability records in the list 3106 * @list_type_opc: type of capabilities list to parse 3107 * 3108 * Parse the device/function capabilities list. 
3109 **/ 3110 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3111 u32 cap_count, 3112 enum i40e_admin_queue_opc list_type_opc) 3113 { 3114 struct i40e_aqc_list_capabilities_element_resp *cap; 3115 u32 valid_functions, num_functions; 3116 u32 number, logical_id, phys_id; 3117 struct i40e_hw_capabilities *p; 3118 u16 id, ocp_cfg_word0; 3119 u8 major_rev; 3120 int status; 3121 u32 i = 0; 3122 3123 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3124 3125 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3126 p = &hw->dev_caps; 3127 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3128 p = &hw->func_caps; 3129 else 3130 return; 3131 3132 for (i = 0; i < cap_count; i++, cap++) { 3133 id = le16_to_cpu(cap->id); 3134 number = le32_to_cpu(cap->number); 3135 logical_id = le32_to_cpu(cap->logical_id); 3136 phys_id = le32_to_cpu(cap->phys_id); 3137 major_rev = cap->major_rev; 3138 3139 switch (id) { 3140 case I40E_AQ_CAP_ID_SWITCH_MODE: 3141 p->switch_mode = number; 3142 break; 3143 case I40E_AQ_CAP_ID_MNG_MODE: 3144 p->management_mode = number; 3145 if (major_rev > 1) { 3146 p->mng_protocols_over_mctp = logical_id; 3147 i40e_debug(hw, I40E_DEBUG_INIT, 3148 "HW Capability: Protocols over MCTP = %d\n", 3149 p->mng_protocols_over_mctp); 3150 } else { 3151 p->mng_protocols_over_mctp = 0; 3152 } 3153 break; 3154 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3155 p->npar_enable = number; 3156 break; 3157 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3158 p->os2bmc = number; 3159 break; 3160 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3161 p->valid_functions = number; 3162 break; 3163 case I40E_AQ_CAP_ID_SRIOV: 3164 if (number == 1) 3165 p->sr_iov_1_1 = true; 3166 break; 3167 case I40E_AQ_CAP_ID_VF: 3168 p->num_vfs = number; 3169 p->vf_base_id = logical_id; 3170 break; 3171 case I40E_AQ_CAP_ID_VMDQ: 3172 if (number == 1) 3173 p->vmdq = true; 3174 break; 3175 case I40E_AQ_CAP_ID_8021QBG: 3176 if (number == 1) 3177 p->evb_802_1_qbg = true; 3178 break; 3179 case I40E_AQ_CAP_ID_8021QBR: 3180 if (number == 1) 3181 p->evb_802_1_qbh = true; 3182 break; 3183 case I40E_AQ_CAP_ID_VSI: 3184 p->num_vsis = number; 3185 break; 3186 case I40E_AQ_CAP_ID_DCB: 3187 if (number == 1) { 3188 p->dcb = true; 3189 p->enabled_tcmap = logical_id; 3190 p->maxtc = phys_id; 3191 } 3192 break; 3193 case I40E_AQ_CAP_ID_FCOE: 3194 if (number == 1) 3195 p->fcoe = true; 3196 break; 3197 case I40E_AQ_CAP_ID_ISCSI: 3198 if (number == 1) 3199 p->iscsi = true; 3200 break; 3201 case I40E_AQ_CAP_ID_RSS: 3202 p->rss = true; 3203 p->rss_table_size = number; 3204 p->rss_table_entry_width = logical_id; 3205 break; 3206 case I40E_AQ_CAP_ID_RXQ: 3207 p->num_rx_qp = number; 3208 p->base_queue = phys_id; 3209 break; 3210 case I40E_AQ_CAP_ID_TXQ: 3211 p->num_tx_qp = number; 3212 p->base_queue = phys_id; 3213 break; 3214 case I40E_AQ_CAP_ID_MSIX: 3215 p->num_msix_vectors = number; 3216 i40e_debug(hw, I40E_DEBUG_INIT, 3217 "HW Capability: MSIX vector count = %d\n", 3218 p->num_msix_vectors); 3219 break; 3220 case I40E_AQ_CAP_ID_VF_MSIX: 3221 p->num_msix_vectors_vf = number; 3222 break; 3223 case I40E_AQ_CAP_ID_FLEX10: 3224 if (major_rev == 1) { 3225 if (number == 1) { 3226 p->flex10_enable = true; 3227 p->flex10_capable = true; 3228 } 3229 } else { 3230 /* Capability revision >= 2 */ 3231 if (number & 1) 3232 p->flex10_enable = true; 3233 if (number & 2) 3234 p->flex10_capable = true; 3235 } 3236 p->flex10_mode = logical_id; 3237 p->flex10_status = phys_id; 3238 break; 3239 case I40E_AQ_CAP_ID_CEM: 3240 if (number == 1) 3241 
p->mgmt_cem = true; 3242 break; 3243 case I40E_AQ_CAP_ID_IWARP: 3244 if (number == 1) 3245 p->iwarp = true; 3246 break; 3247 case I40E_AQ_CAP_ID_LED: 3248 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3249 p->led[phys_id] = true; 3250 break; 3251 case I40E_AQ_CAP_ID_SDP: 3252 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3253 p->sdp[phys_id] = true; 3254 break; 3255 case I40E_AQ_CAP_ID_MDIO: 3256 if (number == 1) { 3257 p->mdio_port_num = phys_id; 3258 p->mdio_port_mode = logical_id; 3259 } 3260 break; 3261 case I40E_AQ_CAP_ID_1588: 3262 if (number == 1) 3263 p->ieee_1588 = true; 3264 break; 3265 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3266 p->fd = true; 3267 p->fd_filters_guaranteed = number; 3268 p->fd_filters_best_effort = logical_id; 3269 break; 3270 case I40E_AQ_CAP_ID_WSR_PROT: 3271 p->wr_csr_prot = (u64)number; 3272 p->wr_csr_prot |= (u64)logical_id << 32; 3273 break; 3274 case I40E_AQ_CAP_ID_NVM_MGMT: 3275 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3276 p->sec_rev_disabled = true; 3277 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3278 p->update_disabled = true; 3279 break; 3280 default: 3281 break; 3282 } 3283 } 3284 3285 if (p->fcoe) 3286 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3287 3288 /* Software override ensuring FCoE is disabled if npar or mfp 3289 * mode because it is not supported in these modes. 3290 */ 3291 if (p->npar_enable || p->flex10_enable) 3292 p->fcoe = false; 3293 3294 /* count the enabled ports (aka the "not disabled" ports) */ 3295 hw->num_ports = 0; 3296 for (i = 0; i < 4; i++) { 3297 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3298 u64 port_cfg = 0; 3299 3300 /* use AQ read to get the physical register offset instead 3301 * of the port relative offset 3302 */ 3303 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3304 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3305 hw->num_ports++; 3306 } 3307 3308 /* OCP cards case: if a mezz is removed the Ethernet port is at 3309 * disabled state in PRTGEN_CNF register. Additional NVM read is 3310 * needed in order to check if we are dealing with OCP card. 3311 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3312 * physical ports results in wrong partition id calculation and thus 3313 * not supporting WoL. 
3314 */ 3315 if (hw->mac.type == I40E_MAC_X722) { 3316 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3317 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3318 2 * I40E_SR_OCP_CFG_WORD0, 3319 sizeof(ocp_cfg_word0), 3320 &ocp_cfg_word0, true, NULL); 3321 if (!status && 3322 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3323 hw->num_ports = 4; 3324 i40e_release_nvm(hw); 3325 } 3326 } 3327 3328 valid_functions = p->valid_functions; 3329 num_functions = 0; 3330 while (valid_functions) { 3331 if (valid_functions & 1) 3332 num_functions++; 3333 valid_functions >>= 1; 3334 } 3335 3336 /* partition id is 1-based, and functions are evenly spread 3337 * across the ports as partitions 3338 */ 3339 if (hw->num_ports != 0) { 3340 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3341 hw->num_partitions = num_functions / hw->num_ports; 3342 } 3343 3344 /* additional HW specific goodies that might 3345 * someday be HW version specific 3346 */ 3347 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3348 } 3349 3350 /** 3351 * i40e_aq_discover_capabilities 3352 * @hw: pointer to the hw struct 3353 * @buff: a virtual buffer to hold the capabilities 3354 * @buff_size: Size of the virtual buffer 3355 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3356 * @list_type_opc: capabilities type to discover - pass in the command opcode 3357 * @cmd_details: pointer to command details structure or NULL 3358 * 3359 * Get the device capabilities descriptions from the firmware 3360 **/ 3361 int i40e_aq_discover_capabilities(struct i40e_hw *hw, 3362 void *buff, u16 buff_size, u16 *data_size, 3363 enum i40e_admin_queue_opc list_type_opc, 3364 struct i40e_asq_cmd_details *cmd_details) 3365 { 3366 struct i40e_aqc_list_capabilites *cmd; 3367 struct i40e_aq_desc desc; 3368 int status = 0; 3369 3370 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3371 3372 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3373 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3374 status = -EINVAL; 3375 goto exit; 3376 } 3377 3378 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3379 3380 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3381 if (buff_size > I40E_AQ_LARGE_BUF) 3382 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3383 3384 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3385 *data_size = le16_to_cpu(desc.datalen); 3386 3387 if (status) 3388 goto exit; 3389 3390 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3391 list_type_opc); 3392 3393 exit: 3394 return status; 3395 } 3396 3397 /** 3398 * i40e_aq_update_nvm 3399 * @hw: pointer to the hw struct 3400 * @module_pointer: module pointer location in words from the NVM beginning 3401 * @offset: byte offset from the module beginning 3402 * @length: length of the section to be written (in bytes from the offset) 3403 * @data: command buffer (size [bytes] = length) 3404 * @last_command: tells if this is the last command in a series 3405 * @preservation_flags: Preservation mode flags 3406 * @cmd_details: pointer to command details structure or NULL 3407 * 3408 * Update the NVM using the admin queue commands 3409 **/ 3410 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3411 u32 offset, u16 length, void *data, 3412 bool last_command, u8 preservation_flags, 3413 struct i40e_asq_cmd_details *cmd_details) 3414 { 3415 struct i40e_aq_desc desc; 3416 struct i40e_aqc_nvm_update *cmd = 3417 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3418 int status; 3419 3420 /* In 
offset the highest byte must be zeroed. */ 3421 if (offset & 0xFF000000) { 3422 status = -EINVAL; 3423 goto i40e_aq_update_nvm_exit; 3424 } 3425 3426 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3427 3428 /* If this is the last command in a series, set the proper flag. */ 3429 if (last_command) 3430 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3431 if (hw->mac.type == I40E_MAC_X722) { 3432 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3433 cmd->command_flags |= 3434 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3435 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3436 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3437 cmd->command_flags |= 3438 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3439 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3440 } 3441 cmd->module_pointer = module_pointer; 3442 cmd->offset = cpu_to_le32(offset); 3443 cmd->length = cpu_to_le16(length); 3444 3445 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3446 if (length > I40E_AQ_LARGE_BUF) 3447 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3448 3449 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3450 3451 i40e_aq_update_nvm_exit: 3452 return status; 3453 } 3454 3455 /** 3456 * i40e_aq_rearrange_nvm 3457 * @hw: pointer to the hw struct 3458 * @rearrange_nvm: defines direction of rearrangement 3459 * @cmd_details: pointer to command details structure or NULL 3460 * 3461 * Rearrange NVM structure, available only for transition FW 3462 **/ 3463 int i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3464 u8 rearrange_nvm, 3465 struct i40e_asq_cmd_details *cmd_details) 3466 { 3467 struct i40e_aqc_nvm_update *cmd; 3468 struct i40e_aq_desc desc; 3469 int status; 3470 3471 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3472 3473 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3474 3475 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3476 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3477 3478 if (!rearrange_nvm) { 3479 status = -EINVAL; 3480 goto i40e_aq_rearrange_nvm_exit; 3481 } 3482 3483 cmd->command_flags |= rearrange_nvm; 3484 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3485 3486 i40e_aq_rearrange_nvm_exit: 3487 return status; 3488 } 3489 3490 /** 3491 * i40e_aq_get_lldp_mib 3492 * @hw: pointer to the hw struct 3493 * @bridge_type: type of bridge requested 3494 * @mib_type: Local, Remote or both Local and Remote MIBs 3495 * @buff: pointer to a user supplied buffer to store the MIB block 3496 * @buff_size: size of the buffer (in bytes) 3497 * @local_len : length of the returned Local LLDP MIB 3498 * @remote_len: length of the returned Remote LLDP MIB 3499 * @cmd_details: pointer to command details structure or NULL 3500 * 3501 * Requests the complete LLDP MIB (entire packet). 
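 *
 * Retrieval sketch (illustrative only; the buffer size and the
 * I40E_AQ_LLDP_MIB_LOCAL selector are assumptions made for the example):
 *
 *	u8 mib[1024];
 *	u16 local_len = 0, remote_len = 0;
 *
 *	if (!i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL, mib,
 *				  sizeof(mib), &local_len, &remote_len, NULL))
 *		hw_dbg(hw, "local MIB %u bytes, remote %u bytes\n",
 *		       local_len, remote_len);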
3502 **/ 3503 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3504 u8 mib_type, void *buff, u16 buff_size, 3505 u16 *local_len, u16 *remote_len, 3506 struct i40e_asq_cmd_details *cmd_details) 3507 { 3508 struct i40e_aq_desc desc; 3509 struct i40e_aqc_lldp_get_mib *cmd = 3510 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3511 struct i40e_aqc_lldp_get_mib *resp = 3512 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3513 int status; 3514 3515 if (buff_size == 0 || !buff) 3516 return -EINVAL; 3517 3518 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3519 /* Indirect Command */ 3520 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3521 3522 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3523 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3524 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3525 3526 desc.datalen = cpu_to_le16(buff_size); 3527 3528 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3529 if (buff_size > I40E_AQ_LARGE_BUF) 3530 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3531 3532 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3533 if (!status) { 3534 if (local_len != NULL) 3535 *local_len = le16_to_cpu(resp->local_len); 3536 if (remote_len != NULL) 3537 *remote_len = le16_to_cpu(resp->remote_len); 3538 } 3539 3540 return status; 3541 } 3542 3543 /** 3544 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3545 * @hw: pointer to the hw struct 3546 * @mib_type: Local, Remote or both Local and Remote MIBs 3547 * @buff: pointer to a user supplied buffer to store the MIB block 3548 * @buff_size: size of the buffer (in bytes) 3549 * @cmd_details: pointer to command details structure or NULL 3550 * 3551 * Set the LLDP MIB. 3552 **/ 3553 int 3554 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3555 u8 mib_type, void *buff, u16 buff_size, 3556 struct i40e_asq_cmd_details *cmd_details) 3557 { 3558 struct i40e_aqc_lldp_set_local_mib *cmd; 3559 struct i40e_aq_desc desc; 3560 int status; 3561 3562 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3563 if (buff_size == 0 || !buff) 3564 return -EINVAL; 3565 3566 i40e_fill_default_direct_cmd_desc(&desc, 3567 i40e_aqc_opc_lldp_set_local_mib); 3568 /* Indirect Command */ 3569 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3570 if (buff_size > I40E_AQ_LARGE_BUF) 3571 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3572 desc.datalen = cpu_to_le16(buff_size); 3573 3574 cmd->type = mib_type; 3575 cmd->length = cpu_to_le16(buff_size); 3576 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3577 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3578 3579 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3580 return status; 3581 } 3582 3583 /** 3584 * i40e_aq_cfg_lldp_mib_change_event 3585 * @hw: pointer to the hw struct 3586 * @enable_update: Enable or Disable event posting 3587 * @cmd_details: pointer to command details structure or NULL 3588 * 3589 * Enable or Disable posting of an event on ARQ when LLDP MIB 3590 * associated with the interface changes 3591 **/ 3592 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3593 bool enable_update, 3594 struct i40e_asq_cmd_details *cmd_details) 3595 { 3596 struct i40e_aq_desc desc; 3597 struct i40e_aqc_lldp_update_mib *cmd = 3598 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3599 int status; 3600 3601 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3602 3603 if (!enable_update) 3604 cmd->command |= 
I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3605 3606 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3607 3608 return status; 3609 } 3610 3611 /** 3612 * i40e_aq_restore_lldp 3613 * @hw: pointer to the hw struct 3614 * @setting: pointer to factory setting variable or NULL 3615 * @restore: True if factory settings should be restored 3616 * @cmd_details: pointer to command details structure or NULL 3617 * 3618 * Restore LLDP Agent factory settings if @restore set to True. In other case 3619 * only returns factory setting in AQ response. 3620 **/ 3621 int 3622 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3623 struct i40e_asq_cmd_details *cmd_details) 3624 { 3625 struct i40e_aq_desc desc; 3626 struct i40e_aqc_lldp_restore *cmd = 3627 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3628 int status; 3629 3630 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3631 i40e_debug(hw, I40E_DEBUG_ALL, 3632 "Restore LLDP not supported by current FW version.\n"); 3633 return -ENODEV; 3634 } 3635 3636 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3637 3638 if (restore) 3639 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3640 3641 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3642 3643 if (setting) 3644 *setting = cmd->command & 1; 3645 3646 return status; 3647 } 3648 3649 /** 3650 * i40e_aq_stop_lldp 3651 * @hw: pointer to the hw struct 3652 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3653 * @persist: True if stop of LLDP should be persistent across power cycles 3654 * @cmd_details: pointer to command details structure or NULL 3655 * 3656 * Stop or Shutdown the embedded LLDP Agent 3657 **/ 3658 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3659 bool persist, 3660 struct i40e_asq_cmd_details *cmd_details) 3661 { 3662 struct i40e_aq_desc desc; 3663 struct i40e_aqc_lldp_stop *cmd = 3664 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3665 int status; 3666 3667 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3668 3669 if (shutdown_agent) 3670 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3671 3672 if (persist) { 3673 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3674 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3675 else 3676 i40e_debug(hw, I40E_DEBUG_ALL, 3677 "Persistent Stop LLDP not supported by current FW version.\n"); 3678 } 3679 3680 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3681 3682 return status; 3683 } 3684 3685 /** 3686 * i40e_aq_start_lldp 3687 * @hw: pointer to the hw struct 3688 * @persist: True if start of LLDP should be persistent across power cycles 3689 * @cmd_details: pointer to command details structure or NULL 3690 * 3691 * Start the embedded LLDP Agent on all ports. 
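 *
 * Note: a persistent start is only honoured when the firmware advertises
 * I40E_HW_FLAG_FW_LLDP_PERSISTENT; otherwise the code below logs a debug
 * message and issues a plain, non-persistent start. A minimal call is
 * i40e_aq_start_lldp(hw, true, NULL).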
3692 **/ 3693 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3694 struct i40e_asq_cmd_details *cmd_details) 3695 { 3696 struct i40e_aq_desc desc; 3697 struct i40e_aqc_lldp_start *cmd = 3698 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3699 int status; 3700 3701 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3702 3703 cmd->command = I40E_AQ_LLDP_AGENT_START; 3704 3705 if (persist) { 3706 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3707 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3708 else 3709 i40e_debug(hw, I40E_DEBUG_ALL, 3710 "Persistent Start LLDP not supported by current FW version.\n"); 3711 } 3712 3713 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3714 3715 return status; 3716 } 3717 3718 /** 3719 * i40e_aq_set_dcb_parameters 3720 * @hw: pointer to the hw struct 3721 * @cmd_details: pointer to command details structure or NULL 3722 * @dcb_enable: True if DCB configuration needs to be applied 3723 * 3724 **/ 3725 int 3726 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3727 struct i40e_asq_cmd_details *cmd_details) 3728 { 3729 struct i40e_aq_desc desc; 3730 struct i40e_aqc_set_dcb_parameters *cmd = 3731 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3732 int status; 3733 3734 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3735 return -ENODEV; 3736 3737 i40e_fill_default_direct_cmd_desc(&desc, 3738 i40e_aqc_opc_set_dcb_parameters); 3739 3740 if (dcb_enable) { 3741 cmd->valid_flags = I40E_DCB_VALID; 3742 cmd->command = I40E_AQ_DCB_SET_AGENT; 3743 } 3744 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3745 3746 return status; 3747 } 3748 3749 /** 3750 * i40e_aq_get_cee_dcb_config 3751 * @hw: pointer to the hw struct 3752 * @buff: response buffer that stores CEE operational configuration 3753 * @buff_size: size of the buffer passed 3754 * @cmd_details: pointer to command details structure or NULL 3755 * 3756 * Get CEE DCBX mode operational configuration from firmware 3757 **/ 3758 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3759 void *buff, u16 buff_size, 3760 struct i40e_asq_cmd_details *cmd_details) 3761 { 3762 struct i40e_aq_desc desc; 3763 int status; 3764 3765 if (buff_size == 0 || !buff) 3766 return -EINVAL; 3767 3768 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3769 3770 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3771 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3772 cmd_details); 3773 3774 return status; 3775 } 3776 3777 /** 3778 * i40e_aq_add_udp_tunnel 3779 * @hw: pointer to the hw struct 3780 * @udp_port: the UDP port to add in Host byte order 3781 * @protocol_index: protocol index type 3782 * @filter_index: pointer to filter index 3783 * @cmd_details: pointer to command details structure or NULL 3784 * 3785 * Note: Firmware expects the udp_port value to be in Little Endian format, 3786 * and this function will call cpu_to_le16 to convert from Host byte order to 3787 * Little Endian order. 
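 *
 * Illustrative call (a sketch; 4789 is the IANA-assigned VXLAN port, while
 * the I40E_AQC_TUNNEL_TYPE_VXLAN protocol index macro is an assumption made
 * for the example):
 *	u8 filter_index;
 *	int err = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					 &filter_index, NULL);
 * The port is passed in host byte order; the helper performs the
 * cpu_to_le16() conversion itself before handing the command to firmware.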
3788 **/ 3789 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3790 u16 udp_port, u8 protocol_index, 3791 u8 *filter_index, 3792 struct i40e_asq_cmd_details *cmd_details) 3793 { 3794 struct i40e_aq_desc desc; 3795 struct i40e_aqc_add_udp_tunnel *cmd = 3796 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3797 struct i40e_aqc_del_udp_tunnel_completion *resp = 3798 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3799 int status; 3800 3801 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3802 3803 cmd->udp_port = cpu_to_le16(udp_port); 3804 cmd->protocol_type = protocol_index; 3805 3806 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3807 3808 if (!status && filter_index) 3809 *filter_index = resp->index; 3810 3811 return status; 3812 } 3813 3814 /** 3815 * i40e_aq_del_udp_tunnel 3816 * @hw: pointer to the hw struct 3817 * @index: filter index 3818 * @cmd_details: pointer to command details structure or NULL 3819 **/ 3820 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3821 struct i40e_asq_cmd_details *cmd_details) 3822 { 3823 struct i40e_aq_desc desc; 3824 struct i40e_aqc_remove_udp_tunnel *cmd = 3825 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3826 int status; 3827 3828 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3829 3830 cmd->index = index; 3831 3832 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3833 3834 return status; 3835 } 3836 3837 /** 3838 * i40e_aq_delete_element - Delete switch element 3839 * @hw: pointer to the hw struct 3840 * @seid: the SEID to delete from the switch 3841 * @cmd_details: pointer to command details structure or NULL 3842 * 3843 * This deletes a switch element from the switch. 3844 **/ 3845 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3846 struct i40e_asq_cmd_details *cmd_details) 3847 { 3848 struct i40e_aq_desc desc; 3849 struct i40e_aqc_switch_seid *cmd = 3850 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3851 int status; 3852 3853 if (seid == 0) 3854 return -EINVAL; 3855 3856 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3857 3858 cmd->seid = cpu_to_le16(seid); 3859 3860 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3861 cmd_details, true); 3862 3863 return status; 3864 } 3865 3866 /** 3867 * i40e_aq_dcb_updated - DCB Updated Command 3868 * @hw: pointer to the hw struct 3869 * @cmd_details: pointer to command details structure or NULL 3870 * 3871 * EMP will return when the shared RPB settings have been 3872 * recomputed and modified. The retval field in the descriptor 3873 * will be set to 0 when RPB is modified. 
3874 **/ 3875 int i40e_aq_dcb_updated(struct i40e_hw *hw, 3876 struct i40e_asq_cmd_details *cmd_details) 3877 { 3878 struct i40e_aq_desc desc; 3879 int status; 3880 3881 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3882 3883 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3884 3885 return status; 3886 } 3887 3888 /** 3889 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3890 * @hw: pointer to the hw struct 3891 * @seid: seid for the physical port/switching component/vsi 3892 * @buff: Indirect buffer to hold data parameters and response 3893 * @buff_size: Indirect buffer size 3894 * @opcode: Tx scheduler AQ command opcode 3895 * @cmd_details: pointer to command details structure or NULL 3896 * 3897 * Generic command handler for Tx scheduler AQ commands 3898 **/ 3899 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3900 void *buff, u16 buff_size, 3901 enum i40e_admin_queue_opc opcode, 3902 struct i40e_asq_cmd_details *cmd_details) 3903 { 3904 struct i40e_aq_desc desc; 3905 struct i40e_aqc_tx_sched_ind *cmd = 3906 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3907 int status; 3908 bool cmd_param_flag = false; 3909 3910 switch (opcode) { 3911 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3912 case i40e_aqc_opc_configure_vsi_tc_bw: 3913 case i40e_aqc_opc_enable_switching_comp_ets: 3914 case i40e_aqc_opc_modify_switching_comp_ets: 3915 case i40e_aqc_opc_disable_switching_comp_ets: 3916 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3917 case i40e_aqc_opc_configure_switching_comp_bw_config: 3918 cmd_param_flag = true; 3919 break; 3920 case i40e_aqc_opc_query_vsi_bw_config: 3921 case i40e_aqc_opc_query_vsi_ets_sla_config: 3922 case i40e_aqc_opc_query_switching_comp_ets_config: 3923 case i40e_aqc_opc_query_port_ets_config: 3924 case i40e_aqc_opc_query_switching_comp_bw_config: 3925 cmd_param_flag = false; 3926 break; 3927 default: 3928 return -EINVAL; 3929 } 3930 3931 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3932 3933 /* Indirect command */ 3934 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3935 if (cmd_param_flag) 3936 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3937 if (buff_size > I40E_AQ_LARGE_BUF) 3938 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3939 3940 desc.datalen = cpu_to_le16(buff_size); 3941 3942 cmd->vsi_seid = cpu_to_le16(seid); 3943 3944 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3945 3946 return status; 3947 } 3948 3949 /** 3950 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3951 * @hw: pointer to the hw struct 3952 * @seid: VSI seid 3953 * @credit: BW limit credits (0 = disabled) 3954 * @max_credit: Max BW limit credits 3955 * @cmd_details: pointer to command details structure or NULL 3956 **/ 3957 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3958 u16 seid, u16 credit, u8 max_credit, 3959 struct i40e_asq_cmd_details *cmd_details) 3960 { 3961 struct i40e_aq_desc desc; 3962 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3963 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3964 int status; 3965 3966 i40e_fill_default_direct_cmd_desc(&desc, 3967 i40e_aqc_opc_configure_vsi_bw_limit); 3968 3969 cmd->vsi_seid = cpu_to_le16(seid); 3970 cmd->credit = cpu_to_le16(credit); 3971 cmd->max_credit = max_credit; 3972 3973 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3974 3975 return status; 3976 } 3977 3978 /** 3979 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 3980 * @hw: pointer to the hw struct 
3981 * @seid: VSI seid 3982 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 3983 * @cmd_details: pointer to command details structure or NULL 3984 **/ 3985 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 3986 u16 seid, 3987 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 3988 struct i40e_asq_cmd_details *cmd_details) 3989 { 3990 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3991 i40e_aqc_opc_configure_vsi_tc_bw, 3992 cmd_details); 3993 } 3994 3995 /** 3996 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 3997 * @hw: pointer to the hw struct 3998 * @seid: seid of the switching component connected to Physical Port 3999 * @ets_data: Buffer holding ETS parameters 4000 * @opcode: Tx scheduler AQ command opcode 4001 * @cmd_details: pointer to command details structure or NULL 4002 **/ 4003 int 4004 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4005 u16 seid, 4006 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4007 enum i40e_admin_queue_opc opcode, 4008 struct i40e_asq_cmd_details *cmd_details) 4009 { 4010 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4011 sizeof(*ets_data), opcode, cmd_details); 4012 } 4013 4014 /** 4015 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4016 * @hw: pointer to the hw struct 4017 * @seid: seid of the switching component 4018 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4019 * @cmd_details: pointer to command details structure or NULL 4020 **/ 4021 int 4022 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4023 u16 seid, 4024 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4025 struct i40e_asq_cmd_details *cmd_details) 4026 { 4027 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4028 i40e_aqc_opc_configure_switching_comp_bw_config, 4029 cmd_details); 4030 } 4031 4032 /** 4033 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4034 * @hw: pointer to the hw struct 4035 * @seid: seid of the VSI 4036 * @bw_data: Buffer to hold VSI BW configuration 4037 * @cmd_details: pointer to command details structure or NULL 4038 **/ 4039 int 4040 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4041 u16 seid, 4042 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4043 struct i40e_asq_cmd_details *cmd_details) 4044 { 4045 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4046 i40e_aqc_opc_query_vsi_bw_config, 4047 cmd_details); 4048 } 4049 4050 /** 4051 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4052 * @hw: pointer to the hw struct 4053 * @seid: seid of the VSI 4054 * @bw_data: Buffer to hold VSI BW configuration per TC 4055 * @cmd_details: pointer to command details structure or NULL 4056 **/ 4057 int 4058 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4059 u16 seid, 4060 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4061 struct i40e_asq_cmd_details *cmd_details) 4062 { 4063 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4064 i40e_aqc_opc_query_vsi_ets_sla_config, 4065 cmd_details); 4066 } 4067 4068 /** 4069 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4070 * @hw: pointer to the hw struct 4071 * @seid: seid of the switching component 4072 * @bw_data: Buffer to hold switching component's per TC BW config 4073 * @cmd_details: pointer to command details structure or NULL 4074 **/ 4075 int 4076 i40e_aq_query_switch_comp_ets_config(struct i40e_hw 
*hw, 4077 u16 seid, 4078 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4079 struct i40e_asq_cmd_details *cmd_details) 4080 { 4081 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4082 i40e_aqc_opc_query_switching_comp_ets_config, 4083 cmd_details); 4084 } 4085 4086 /** 4087 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4088 * @hw: pointer to the hw struct 4089 * @seid: seid of the VSI or switching component connected to Physical Port 4090 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4091 * @cmd_details: pointer to command details structure or NULL 4092 **/ 4093 int 4094 i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4095 u16 seid, 4096 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4097 struct i40e_asq_cmd_details *cmd_details) 4098 { 4099 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4100 i40e_aqc_opc_query_port_ets_config, 4101 cmd_details); 4102 } 4103 4104 /** 4105 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4106 * @hw: pointer to the hw struct 4107 * @seid: seid of the switching component 4108 * @bw_data: Buffer to hold switching component's BW configuration 4109 * @cmd_details: pointer to command details structure or NULL 4110 **/ 4111 int 4112 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4113 u16 seid, 4114 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4115 struct i40e_asq_cmd_details *cmd_details) 4116 { 4117 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4118 i40e_aqc_opc_query_switching_comp_bw_config, 4119 cmd_details); 4120 } 4121 4122 /** 4123 * i40e_validate_filter_settings 4124 * @hw: pointer to the hardware structure 4125 * @settings: Filter control settings 4126 * 4127 * Check and validate the filter control settings passed. 4128 * The function checks for the valid filter/context sizes being 4129 * passed for FCoE and PE. 4130 * 4131 * Returns 0 if the values passed are valid and within 4132 * range else returns an error. 
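 *
 * Note: fcoe_filt_num and fcoe_cntx_num are shift counts rather than raw
 * sizes; the effective size is the matching *_BASE_SIZE constant shifted
 * left by the enum value, and the sum of the FCoE filter and DDP context
 * sizes is then checked against the PMFCOEFMAX field of I40E_GLHMC_FCOEFMAX.
 * The PE fields are only range-checked here.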
4133 **/ 4134 static int 4135 i40e_validate_filter_settings(struct i40e_hw *hw, 4136 struct i40e_filter_control_settings *settings) 4137 { 4138 u32 fcoe_cntx_size, fcoe_filt_size; 4139 u32 fcoe_fmax; 4140 u32 val; 4141 4142 /* Validate FCoE settings passed */ 4143 switch (settings->fcoe_filt_num) { 4144 case I40E_HASH_FILTER_SIZE_1K: 4145 case I40E_HASH_FILTER_SIZE_2K: 4146 case I40E_HASH_FILTER_SIZE_4K: 4147 case I40E_HASH_FILTER_SIZE_8K: 4148 case I40E_HASH_FILTER_SIZE_16K: 4149 case I40E_HASH_FILTER_SIZE_32K: 4150 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4151 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4152 break; 4153 default: 4154 return -EINVAL; 4155 } 4156 4157 switch (settings->fcoe_cntx_num) { 4158 case I40E_DMA_CNTX_SIZE_512: 4159 case I40E_DMA_CNTX_SIZE_1K: 4160 case I40E_DMA_CNTX_SIZE_2K: 4161 case I40E_DMA_CNTX_SIZE_4K: 4162 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4163 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4164 break; 4165 default: 4166 return -EINVAL; 4167 } 4168 4169 /* Validate PE settings passed */ 4170 switch (settings->pe_filt_num) { 4171 case I40E_HASH_FILTER_SIZE_1K: 4172 case I40E_HASH_FILTER_SIZE_2K: 4173 case I40E_HASH_FILTER_SIZE_4K: 4174 case I40E_HASH_FILTER_SIZE_8K: 4175 case I40E_HASH_FILTER_SIZE_16K: 4176 case I40E_HASH_FILTER_SIZE_32K: 4177 case I40E_HASH_FILTER_SIZE_64K: 4178 case I40E_HASH_FILTER_SIZE_128K: 4179 case I40E_HASH_FILTER_SIZE_256K: 4180 case I40E_HASH_FILTER_SIZE_512K: 4181 case I40E_HASH_FILTER_SIZE_1M: 4182 break; 4183 default: 4184 return -EINVAL; 4185 } 4186 4187 switch (settings->pe_cntx_num) { 4188 case I40E_DMA_CNTX_SIZE_512: 4189 case I40E_DMA_CNTX_SIZE_1K: 4190 case I40E_DMA_CNTX_SIZE_2K: 4191 case I40E_DMA_CNTX_SIZE_4K: 4192 case I40E_DMA_CNTX_SIZE_8K: 4193 case I40E_DMA_CNTX_SIZE_16K: 4194 case I40E_DMA_CNTX_SIZE_32K: 4195 case I40E_DMA_CNTX_SIZE_64K: 4196 case I40E_DMA_CNTX_SIZE_128K: 4197 case I40E_DMA_CNTX_SIZE_256K: 4198 break; 4199 default: 4200 return -EINVAL; 4201 } 4202 4203 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4204 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4205 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4206 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4207 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4208 return -EINVAL; 4209 4210 return 0; 4211 } 4212 4213 /** 4214 * i40e_set_filter_control 4215 * @hw: pointer to the hardware structure 4216 * @settings: Filter control settings 4217 * 4218 * Set the Queue Filters for PE/FCoE and enable filters required 4219 * for a single PF. It is expected that these settings are programmed 4220 * at the driver initialization time. 
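 *
 * Illustrative usage (a sketch; the sizes below are arbitrary examples, any
 * combination accepted by i40e_validate_filter_settings() will do):
 *	struct i40e_filter_control_settings settings = {
 *		.pe_filt_num = I40E_HASH_FILTER_SIZE_1K,
 *		.pe_cntx_num = I40E_DMA_CNTX_SIZE_512,
 *		.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K,
 *		.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512,
 *		.hash_lut_size = I40E_HASH_LUT_SIZE_512,
 *		.enable_fdir = true,
 *		.enable_ethtype = true,
 *		.enable_macvlan = true,
 *	};
 *	int err = i40e_set_filter_control(hw, &settings);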
4221 **/ 4222 int i40e_set_filter_control(struct i40e_hw *hw, 4223 struct i40e_filter_control_settings *settings) 4224 { 4225 u32 hash_lut_size = 0; 4226 int ret = 0; 4227 u32 val; 4228 4229 if (!settings) 4230 return -EINVAL; 4231 4232 /* Validate the input settings */ 4233 ret = i40e_validate_filter_settings(hw, settings); 4234 if (ret) 4235 return ret; 4236 4237 /* Read the PF Queue Filter control register */ 4238 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4239 4240 /* Program required PE hash buckets for the PF */ 4241 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4242 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4243 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4244 /* Program required PE contexts for the PF */ 4245 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4246 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4247 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4248 4249 /* Program required FCoE hash buckets for the PF */ 4250 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4251 val |= ((u32)settings->fcoe_filt_num << 4252 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4253 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4254 /* Program required FCoE DDP contexts for the PF */ 4255 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4256 val |= ((u32)settings->fcoe_cntx_num << 4257 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4258 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4259 4260 /* Program Hash LUT size for the PF */ 4261 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4262 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4263 hash_lut_size = 1; 4264 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4265 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4266 4267 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4268 if (settings->enable_fdir) 4269 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4270 if (settings->enable_ethtype) 4271 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4272 if (settings->enable_macvlan) 4273 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4274 4275 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4276 4277 return 0; 4278 } 4279 4280 /** 4281 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4282 * @hw: pointer to the hw struct 4283 * @mac_addr: MAC address to use in the filter 4284 * @ethtype: Ethertype to use in the filter 4285 * @flags: Flags that needs to be applied to the filter 4286 * @vsi_seid: seid of the control VSI 4287 * @queue: VSI queue number to send the packet to 4288 * @is_add: Add control packet filter if True else remove 4289 * @stats: Structure to hold information on control filter counts 4290 * @cmd_details: pointer to command details structure or NULL 4291 * 4292 * This command will Add or Remove control packet filter for a control VSI. 4293 * In return it will update the total number of perfect filter count in 4294 * the stats member. 
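 *
 * For an in-tree example of building the flags and calling this helper, see
 * i40e_add_filter_to_drop_tx_flow_control_frames() below, which installs a
 * Tx-side drop filter for the 0x8808 flow control ethertype.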
4295 **/ 4296 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4297 u8 *mac_addr, u16 ethtype, u16 flags, 4298 u16 vsi_seid, u16 queue, bool is_add, 4299 struct i40e_control_filter_stats *stats, 4300 struct i40e_asq_cmd_details *cmd_details) 4301 { 4302 struct i40e_aq_desc desc; 4303 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4304 (struct i40e_aqc_add_remove_control_packet_filter *) 4305 &desc.params.raw; 4306 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4307 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4308 &desc.params.raw; 4309 int status; 4310 4311 if (vsi_seid == 0) 4312 return -EINVAL; 4313 4314 if (is_add) { 4315 i40e_fill_default_direct_cmd_desc(&desc, 4316 i40e_aqc_opc_add_control_packet_filter); 4317 cmd->queue = cpu_to_le16(queue); 4318 } else { 4319 i40e_fill_default_direct_cmd_desc(&desc, 4320 i40e_aqc_opc_remove_control_packet_filter); 4321 } 4322 4323 if (mac_addr) 4324 ether_addr_copy(cmd->mac, mac_addr); 4325 4326 cmd->etype = cpu_to_le16(ethtype); 4327 cmd->flags = cpu_to_le16(flags); 4328 cmd->seid = cpu_to_le16(vsi_seid); 4329 4330 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4331 4332 if (!status && stats) { 4333 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4334 stats->etype_used = le16_to_cpu(resp->etype_used); 4335 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4336 stats->etype_free = le16_to_cpu(resp->etype_free); 4337 } 4338 4339 return status; 4340 } 4341 4342 /** 4343 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4344 * @hw: pointer to the hw struct 4345 * @seid: VSI seid to add ethertype filter from 4346 **/ 4347 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4348 u16 seid) 4349 { 4350 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4351 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4352 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4353 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4354 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4355 int status; 4356 4357 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4358 seid, 0, true, NULL, 4359 NULL); 4360 if (status) 4361 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4362 } 4363 4364 /** 4365 * i40e_aq_alternate_read 4366 * @hw: pointer to the hardware structure 4367 * @reg_addr0: address of first dword to be read 4368 * @reg_val0: pointer for data read from 'reg_addr0' 4369 * @reg_addr1: address of second dword to be read 4370 * @reg_val1: pointer for data read from 'reg_addr1' 4371 * 4372 * Read one or two dwords from alternate structure. Fields are indicated 4373 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4374 * is not passed then only register at 'reg_addr0' is read. 
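 *
 * For a caller, see i40e_read_bw_from_alt_ram() later in this file, which
 * uses this helper to fetch the per-PF min and max bandwidth words in a
 * single command.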
4375 * 4376 **/ 4377 static int i40e_aq_alternate_read(struct i40e_hw *hw, 4378 u32 reg_addr0, u32 *reg_val0, 4379 u32 reg_addr1, u32 *reg_val1) 4380 { 4381 struct i40e_aq_desc desc; 4382 struct i40e_aqc_alternate_write *cmd_resp = 4383 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4384 int status; 4385 4386 if (!reg_val0) 4387 return -EINVAL; 4388 4389 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4390 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4391 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4392 4393 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4394 4395 if (!status) { 4396 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4397 4398 if (reg_val1) 4399 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4400 } 4401 4402 return status; 4403 } 4404 4405 /** 4406 * i40e_aq_suspend_port_tx 4407 * @hw: pointer to the hardware structure 4408 * @seid: port seid 4409 * @cmd_details: pointer to command details structure or NULL 4410 * 4411 * Suspend port's Tx traffic 4412 **/ 4413 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4414 struct i40e_asq_cmd_details *cmd_details) 4415 { 4416 struct i40e_aqc_tx_sched_ind *cmd; 4417 struct i40e_aq_desc desc; 4418 int status; 4419 4420 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4421 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4422 cmd->vsi_seid = cpu_to_le16(seid); 4423 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4424 4425 return status; 4426 } 4427 4428 /** 4429 * i40e_aq_resume_port_tx 4430 * @hw: pointer to the hardware structure 4431 * @cmd_details: pointer to command details structure or NULL 4432 * 4433 * Resume port's Tx traffic 4434 **/ 4435 int i40e_aq_resume_port_tx(struct i40e_hw *hw, 4436 struct i40e_asq_cmd_details *cmd_details) 4437 { 4438 struct i40e_aq_desc desc; 4439 int status; 4440 4441 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4442 4443 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4444 4445 return status; 4446 } 4447 4448 /** 4449 * i40e_set_pci_config_data - store PCI bus info 4450 * @hw: pointer to hardware structure 4451 * @link_status: the link status word from PCI config space 4452 * 4453 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4454 **/ 4455 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4456 { 4457 hw->bus.type = i40e_bus_type_pci_express; 4458 4459 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4460 case PCI_EXP_LNKSTA_NLW_X1: 4461 hw->bus.width = i40e_bus_width_pcie_x1; 4462 break; 4463 case PCI_EXP_LNKSTA_NLW_X2: 4464 hw->bus.width = i40e_bus_width_pcie_x2; 4465 break; 4466 case PCI_EXP_LNKSTA_NLW_X4: 4467 hw->bus.width = i40e_bus_width_pcie_x4; 4468 break; 4469 case PCI_EXP_LNKSTA_NLW_X8: 4470 hw->bus.width = i40e_bus_width_pcie_x8; 4471 break; 4472 default: 4473 hw->bus.width = i40e_bus_width_unknown; 4474 break; 4475 } 4476 4477 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4478 case PCI_EXP_LNKSTA_CLS_2_5GB: 4479 hw->bus.speed = i40e_bus_speed_2500; 4480 break; 4481 case PCI_EXP_LNKSTA_CLS_5_0GB: 4482 hw->bus.speed = i40e_bus_speed_5000; 4483 break; 4484 case PCI_EXP_LNKSTA_CLS_8_0GB: 4485 hw->bus.speed = i40e_bus_speed_8000; 4486 break; 4487 default: 4488 hw->bus.speed = i40e_bus_speed_unknown; 4489 break; 4490 } 4491 } 4492 4493 /** 4494 * i40e_aq_debug_dump 4495 * @hw: pointer to the hardware structure 4496 * @cluster_id: specific cluster to dump 4497 * @table_id: table id within cluster 4498 * @start_index: index of line 
in the block to read 4499 * @buff_size: dump buffer size 4500 * @buff: dump buffer 4501 * @ret_buff_size: actual buffer size returned 4502 * @ret_next_table: next block to read 4503 * @ret_next_index: next index to read 4504 * @cmd_details: pointer to command details structure or NULL 4505 * 4506 * Dump internal FW/HW data for debug purposes. 4507 * 4508 **/ 4509 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4510 u8 table_id, u32 start_index, u16 buff_size, 4511 void *buff, u16 *ret_buff_size, 4512 u8 *ret_next_table, u32 *ret_next_index, 4513 struct i40e_asq_cmd_details *cmd_details) 4514 { 4515 struct i40e_aq_desc desc; 4516 struct i40e_aqc_debug_dump_internals *cmd = 4517 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4518 struct i40e_aqc_debug_dump_internals *resp = 4519 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4520 int status; 4521 4522 if (buff_size == 0 || !buff) 4523 return -EINVAL; 4524 4525 i40e_fill_default_direct_cmd_desc(&desc, 4526 i40e_aqc_opc_debug_dump_internals); 4527 /* Indirect Command */ 4528 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4529 if (buff_size > I40E_AQ_LARGE_BUF) 4530 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4531 4532 cmd->cluster_id = cluster_id; 4533 cmd->table_id = table_id; 4534 cmd->idx = cpu_to_le32(start_index); 4535 4536 desc.datalen = cpu_to_le16(buff_size); 4537 4538 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4539 if (!status) { 4540 if (ret_buff_size) 4541 *ret_buff_size = le16_to_cpu(desc.datalen); 4542 if (ret_next_table) 4543 *ret_next_table = resp->table_id; 4544 if (ret_next_index) 4545 *ret_next_index = le32_to_cpu(resp->idx); 4546 } 4547 4548 return status; 4549 } 4550 4551 /** 4552 * i40e_read_bw_from_alt_ram 4553 * @hw: pointer to the hardware structure 4554 * @max_bw: pointer for max_bw read 4555 * @min_bw: pointer for min_bw read 4556 * @min_valid: pointer for bool that is true if min_bw is a valid value 4557 * @max_valid: pointer for bool that is true if max_bw is a valid value 4558 * 4559 * Read bw from the alternate ram for the given pf 4560 **/ 4561 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4562 u32 *max_bw, u32 *min_bw, 4563 bool *min_valid, bool *max_valid) 4564 { 4565 u32 max_bw_addr, min_bw_addr; 4566 int status; 4567 4568 /* Calculate the address of the min/max bw registers */ 4569 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4570 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4571 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4572 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4573 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4574 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4575 4576 /* Read the bandwidths from alt ram */ 4577 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4578 min_bw_addr, min_bw); 4579 4580 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4581 *min_valid = true; 4582 else 4583 *min_valid = false; 4584 4585 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4586 *max_valid = true; 4587 else 4588 *max_valid = false; 4589 4590 return status; 4591 } 4592 4593 /** 4594 * i40e_aq_configure_partition_bw 4595 * @hw: pointer to the hardware structure 4596 * @bw_data: Buffer holding valid pfs and bw limits 4597 * @cmd_details: pointer to command details 4598 * 4599 * Configure partitions guaranteed/max bw 4600 **/ 4601 int 4602 i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4603 struct i40e_aqc_configure_partition_bw_data *bw_data, 4604 struct i40e_asq_cmd_details *cmd_details) 4605 { 4606 u16 bwd_size = sizeof(*bw_data); 4607 struct i40e_aq_desc desc; 4608 
int status; 4609 4610 i40e_fill_default_direct_cmd_desc(&desc, 4611 i40e_aqc_opc_configure_partition_bw); 4612 4613 /* Indirect command */ 4614 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4615 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4616 4617 if (bwd_size > I40E_AQ_LARGE_BUF) 4618 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4619 4620 desc.datalen = cpu_to_le16(bwd_size); 4621 4622 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4623 cmd_details); 4624 4625 return status; 4626 } 4627 4628 /** 4629 * i40e_read_phy_register_clause22 4630 * @hw: pointer to the HW structure 4631 * @reg: register address in the page 4632 * @phy_addr: PHY address on MDIO interface 4633 * @value: PHY register value 4634 * 4635 * Reads specified PHY register value 4636 **/ 4637 int i40e_read_phy_register_clause22(struct i40e_hw *hw, 4638 u16 reg, u8 phy_addr, u16 *value) 4639 { 4640 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4641 int status = -EIO; 4642 u32 command = 0; 4643 u16 retry = 1000; 4644 4645 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4646 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4647 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4648 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4649 (I40E_GLGEN_MSCA_MDICMD_MASK); 4650 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4651 do { 4652 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4653 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4654 status = 0; 4655 break; 4656 } 4657 udelay(10); 4658 retry--; 4659 } while (retry); 4660 4661 if (status) { 4662 i40e_debug(hw, I40E_DEBUG_PHY, 4663 "PHY: Can't write command to external PHY.\n"); 4664 } else { 4665 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4666 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4667 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4668 } 4669 4670 return status; 4671 } 4672 4673 /** 4674 * i40e_write_phy_register_clause22 4675 * @hw: pointer to the HW structure 4676 * @reg: register address in the page 4677 * @phy_addr: PHY address on MDIO interface 4678 * @value: PHY register value 4679 * 4680 * Writes specified PHY register value 4681 **/ 4682 int i40e_write_phy_register_clause22(struct i40e_hw *hw, 4683 u16 reg, u8 phy_addr, u16 value) 4684 { 4685 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4686 int status = -EIO; 4687 u32 command = 0; 4688 u16 retry = 1000; 4689 4690 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4691 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4692 4693 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4694 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4695 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4696 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4697 (I40E_GLGEN_MSCA_MDICMD_MASK); 4698 4699 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4700 do { 4701 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4702 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4703 status = 0; 4704 break; 4705 } 4706 udelay(10); 4707 retry--; 4708 } while (retry); 4709 4710 return status; 4711 } 4712 4713 /** 4714 * i40e_read_phy_register_clause45 4715 * @hw: pointer to the HW structure 4716 * @page: registers page number 4717 * @reg: register address in the page 4718 * @phy_addr: PHY address on MDIO interface 4719 * @value: PHY register value 4720 * 4721 * Reads specified PHY register value 4722 **/ 4723 int i40e_read_phy_register_clause45(struct i40e_hw *hw, 4724 u8 page, u16 reg, u8 phy_addr, u16 *value) 4725 { 4726 u8 port_num = hw->func_caps.mdio_port_num; 4727 int status = -EIO; 4728 u32 command = 0; 4729 u16 retry = 1000; 4730 4731 command = (reg << 
I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4732 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4733 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4734 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4735 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4736 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4737 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4738 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4739 do { 4740 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4741 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4742 status = 0; 4743 break; 4744 } 4745 usleep_range(10, 20); 4746 retry--; 4747 } while (retry); 4748 4749 if (status) { 4750 i40e_debug(hw, I40E_DEBUG_PHY, 4751 "PHY: Can't write command to external PHY.\n"); 4752 goto phy_read_end; 4753 } 4754 4755 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4756 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4757 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4758 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4759 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4760 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4761 status = -EIO; 4762 retry = 1000; 4763 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4764 do { 4765 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4766 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4767 status = 0; 4768 break; 4769 } 4770 usleep_range(10, 20); 4771 retry--; 4772 } while (retry); 4773 4774 if (!status) { 4775 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4776 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4777 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4778 } else { 4779 i40e_debug(hw, I40E_DEBUG_PHY, 4780 "PHY: Can't read register value from external PHY.\n"); 4781 } 4782 4783 phy_read_end: 4784 return status; 4785 } 4786 4787 /** 4788 * i40e_write_phy_register_clause45 4789 * @hw: pointer to the HW structure 4790 * @page: registers page number 4791 * @reg: register address in the page 4792 * @phy_addr: PHY address on MDIO interface 4793 * @value: PHY register value 4794 * 4795 * Writes value to specified PHY register 4796 **/ 4797 int i40e_write_phy_register_clause45(struct i40e_hw *hw, 4798 u8 page, u16 reg, u8 phy_addr, u16 value) 4799 { 4800 u8 port_num = hw->func_caps.mdio_port_num; 4801 int status = -EIO; 4802 u16 retry = 1000; 4803 u32 command = 0; 4804 4805 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4806 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4807 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4808 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4809 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4810 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4811 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4812 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4813 do { 4814 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4815 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4816 status = 0; 4817 break; 4818 } 4819 usleep_range(10, 20); 4820 retry--; 4821 } while (retry); 4822 if (status) { 4823 i40e_debug(hw, I40E_DEBUG_PHY, 4824 "PHY: Can't write command to external PHY.\n"); 4825 goto phy_write_end; 4826 } 4827 4828 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4829 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4830 4831 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4832 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4833 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4834 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4835 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4836 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4837 status = -EIO; 4838 retry = 1000; 4839 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4840 do { 4841 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4842 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4843 status = 0; 4844 break; 4845 } 4846 
usleep_range(10, 20); 4847 retry--; 4848 } while (retry); 4849 4850 phy_write_end: 4851 return status; 4852 } 4853 4854 /** 4855 * i40e_write_phy_register 4856 * @hw: pointer to the HW structure 4857 * @page: registers page number 4858 * @reg: register address in the page 4859 * @phy_addr: PHY address on MDIO interface 4860 * @value: PHY register value 4861 * 4862 * Writes value to specified PHY register 4863 **/ 4864 int i40e_write_phy_register(struct i40e_hw *hw, 4865 u8 page, u16 reg, u8 phy_addr, u16 value) 4866 { 4867 int status; 4868 4869 switch (hw->device_id) { 4870 case I40E_DEV_ID_1G_BASE_T_X722: 4871 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4872 value); 4873 break; 4874 case I40E_DEV_ID_1G_BASE_T_BC: 4875 case I40E_DEV_ID_5G_BASE_T_BC: 4876 case I40E_DEV_ID_10G_BASE_T: 4877 case I40E_DEV_ID_10G_BASE_T4: 4878 case I40E_DEV_ID_10G_BASE_T_BC: 4879 case I40E_DEV_ID_10G_BASE_T_X722: 4880 case I40E_DEV_ID_25G_B: 4881 case I40E_DEV_ID_25G_SFP28: 4882 status = i40e_write_phy_register_clause45(hw, page, reg, 4883 phy_addr, value); 4884 break; 4885 default: 4886 status = -EIO; 4887 break; 4888 } 4889 4890 return status; 4891 } 4892 4893 /** 4894 * i40e_read_phy_register 4895 * @hw: pointer to the HW structure 4896 * @page: registers page number 4897 * @reg: register address in the page 4898 * @phy_addr: PHY address on MDIO interface 4899 * @value: PHY register value 4900 * 4901 * Reads specified PHY register value 4902 **/ 4903 int i40e_read_phy_register(struct i40e_hw *hw, 4904 u8 page, u16 reg, u8 phy_addr, u16 *value) 4905 { 4906 int status; 4907 4908 switch (hw->device_id) { 4909 case I40E_DEV_ID_1G_BASE_T_X722: 4910 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4911 value); 4912 break; 4913 case I40E_DEV_ID_1G_BASE_T_BC: 4914 case I40E_DEV_ID_5G_BASE_T_BC: 4915 case I40E_DEV_ID_10G_BASE_T: 4916 case I40E_DEV_ID_10G_BASE_T4: 4917 case I40E_DEV_ID_10G_BASE_T_BC: 4918 case I40E_DEV_ID_10G_BASE_T_X722: 4919 case I40E_DEV_ID_25G_B: 4920 case I40E_DEV_ID_25G_SFP28: 4921 status = i40e_read_phy_register_clause45(hw, page, reg, 4922 phy_addr, value); 4923 break; 4924 default: 4925 status = -EIO; 4926 break; 4927 } 4928 4929 return status; 4930 } 4931 4932 /** 4933 * i40e_get_phy_address 4934 * @hw: pointer to the HW structure 4935 * @dev_num: PHY port num that address we want 4936 * 4937 * Gets PHY address for current port 4938 **/ 4939 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4940 { 4941 u8 port_num = hw->func_caps.mdio_port_num; 4942 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4943 4944 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4945 } 4946 4947 /** 4948 * i40e_blink_phy_link_led 4949 * @hw: pointer to the HW structure 4950 * @time: time how long led will blinks in secs 4951 * @interval: gap between LED on and off in msecs 4952 * 4953 * Blinks PHY link LED 4954 **/ 4955 int i40e_blink_phy_link_led(struct i40e_hw *hw, 4956 u32 time, u32 interval) 4957 { 4958 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4959 u16 gpio_led_port; 4960 u8 phy_addr = 0; 4961 int status = 0; 4962 u16 led_ctl; 4963 u8 port_num; 4964 u16 led_reg; 4965 u32 i; 4966 4967 i = rd32(hw, I40E_PFGEN_PORTNUM); 4968 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4969 phy_addr = i40e_get_phy_address(hw, port_num); 4970 4971 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4972 led_addr++) { 4973 status = i40e_read_phy_register_clause45(hw, 4974 I40E_PHY_COM_REG_PAGE, 4975 led_addr, phy_addr, 4976 &led_reg); 4977 if (status) 4978 goto 
phy_blinking_end; 4979 led_ctl = led_reg; 4980 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4981 led_reg = 0; 4982 status = i40e_write_phy_register_clause45(hw, 4983 I40E_PHY_COM_REG_PAGE, 4984 led_addr, phy_addr, 4985 led_reg); 4986 if (status) 4987 goto phy_blinking_end; 4988 break; 4989 } 4990 } 4991 4992 if (time > 0 && interval > 0) { 4993 for (i = 0; i < time * 1000; i += interval) { 4994 status = i40e_read_phy_register_clause45(hw, 4995 I40E_PHY_COM_REG_PAGE, 4996 led_addr, phy_addr, &led_reg); 4997 if (status) 4998 goto restore_config; 4999 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5000 led_reg = 0; 5001 else 5002 led_reg = I40E_PHY_LED_MANUAL_ON; 5003 status = i40e_write_phy_register_clause45(hw, 5004 I40E_PHY_COM_REG_PAGE, 5005 led_addr, phy_addr, led_reg); 5006 if (status) 5007 goto restore_config; 5008 msleep(interval); 5009 } 5010 } 5011 5012 restore_config: 5013 status = i40e_write_phy_register_clause45(hw, 5014 I40E_PHY_COM_REG_PAGE, 5015 led_addr, phy_addr, led_ctl); 5016 5017 phy_blinking_end: 5018 return status; 5019 } 5020 5021 /** 5022 * i40e_led_get_reg - read LED register 5023 * @hw: pointer to the HW structure 5024 * @led_addr: LED register address 5025 * @reg_val: read register value 5026 **/ 5027 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5028 u32 *reg_val) 5029 { 5030 u8 phy_addr = 0; 5031 u8 port_num; 5032 int status; 5033 u32 i; 5034 5035 *reg_val = 0; 5036 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5037 status = 5038 i40e_aq_get_phy_register(hw, 5039 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5040 I40E_PHY_COM_REG_PAGE, true, 5041 I40E_PHY_LED_PROV_REG_1, 5042 reg_val, NULL); 5043 } else { 5044 i = rd32(hw, I40E_PFGEN_PORTNUM); 5045 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5046 phy_addr = i40e_get_phy_address(hw, port_num); 5047 status = i40e_read_phy_register_clause45(hw, 5048 I40E_PHY_COM_REG_PAGE, 5049 led_addr, phy_addr, 5050 (u16 *)reg_val); 5051 } 5052 return status; 5053 } 5054 5055 /** 5056 * i40e_led_set_reg - write LED register 5057 * @hw: pointer to the HW structure 5058 * @led_addr: LED register address 5059 * @reg_val: register value to write 5060 **/ 5061 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5062 u32 reg_val) 5063 { 5064 u8 phy_addr = 0; 5065 u8 port_num; 5066 int status; 5067 u32 i; 5068 5069 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5070 status = 5071 i40e_aq_set_phy_register(hw, 5072 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5073 I40E_PHY_COM_REG_PAGE, true, 5074 I40E_PHY_LED_PROV_REG_1, 5075 reg_val, NULL); 5076 } else { 5077 i = rd32(hw, I40E_PFGEN_PORTNUM); 5078 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5079 phy_addr = i40e_get_phy_address(hw, port_num); 5080 status = i40e_write_phy_register_clause45(hw, 5081 I40E_PHY_COM_REG_PAGE, 5082 led_addr, phy_addr, 5083 (u16)reg_val); 5084 } 5085 5086 return status; 5087 } 5088 5089 /** 5090 * i40e_led_get_phy - return current on/off mode 5091 * @hw: pointer to the hw struct 5092 * @led_addr: address of led register to use 5093 * @val: original value of register to use 5094 * 5095 **/ 5096 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5097 u16 *val) 5098 { 5099 u16 gpio_led_port; 5100 u8 phy_addr = 0; 5101 u32 reg_val_aq; 5102 int status = 0; 5103 u16 temp_addr; 5104 u16 reg_val; 5105 u8 port_num; 5106 u32 i; 5107 5108 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5109 status = 5110 i40e_aq_get_phy_register(hw, 5111 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5112 I40E_PHY_COM_REG_PAGE, true, 5113 I40E_PHY_LED_PROV_REG_1, 
5114 &reg_val_aq, NULL); 5115 if (status == 0) 5116 *val = (u16)reg_val_aq; 5117 return status; 5118 } 5119 temp_addr = I40E_PHY_LED_PROV_REG_1; 5120 i = rd32(hw, I40E_PFGEN_PORTNUM); 5121 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5122 phy_addr = i40e_get_phy_address(hw, port_num); 5123 5124 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5125 temp_addr++) { 5126 status = i40e_read_phy_register_clause45(hw, 5127 I40E_PHY_COM_REG_PAGE, 5128 temp_addr, phy_addr, 5129 &reg_val); 5130 if (status) 5131 return status; 5132 *val = reg_val; 5133 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5134 *led_addr = temp_addr; 5135 break; 5136 } 5137 } 5138 return status; 5139 } 5140 5141 /** 5142 * i40e_led_set_phy 5143 * @hw: pointer to the HW structure 5144 * @on: true or false 5145 * @led_addr: address of led register to use 5146 * @mode: original val plus bit for set or ignore 5147 * 5148 * Set led's on or off when controlled by the PHY 5149 * 5150 **/ 5151 int i40e_led_set_phy(struct i40e_hw *hw, bool on, 5152 u16 led_addr, u32 mode) 5153 { 5154 u32 led_ctl = 0; 5155 u32 led_reg = 0; 5156 int status = 0; 5157 5158 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5159 if (status) 5160 return status; 5161 led_ctl = led_reg; 5162 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5163 led_reg = 0; 5164 status = i40e_led_set_reg(hw, led_addr, led_reg); 5165 if (status) 5166 return status; 5167 } 5168 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5169 if (status) 5170 goto restore_config; 5171 if (on) 5172 led_reg = I40E_PHY_LED_MANUAL_ON; 5173 else 5174 led_reg = 0; 5175 5176 status = i40e_led_set_reg(hw, led_addr, led_reg); 5177 if (status) 5178 goto restore_config; 5179 if (mode & I40E_PHY_LED_MODE_ORIG) { 5180 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5181 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5182 } 5183 return status; 5184 5185 restore_config: 5186 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5187 return status; 5188 } 5189 5190 /** 5191 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5192 * @hw: pointer to the hw struct 5193 * @reg_addr: register address 5194 * @reg_val: ptr to register value 5195 * @cmd_details: pointer to command details structure or NULL 5196 * 5197 * Use the firmware to read the Rx control register, 5198 * especially useful if the Rx unit is under heavy pressure 5199 **/ 5200 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5201 u32 reg_addr, u32 *reg_val, 5202 struct i40e_asq_cmd_details *cmd_details) 5203 { 5204 struct i40e_aq_desc desc; 5205 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5206 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5207 int status; 5208 5209 if (!reg_val) 5210 return -EINVAL; 5211 5212 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5213 5214 cmd_resp->address = cpu_to_le32(reg_addr); 5215 5216 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5217 5218 if (status == 0) 5219 *reg_val = le32_to_cpu(cmd_resp->value); 5220 5221 return status; 5222 } 5223 5224 /** 5225 * i40e_read_rx_ctl - read from an Rx control register 5226 * @hw: pointer to the hw struct 5227 * @reg_addr: register address 5228 **/ 5229 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5230 { 5231 bool use_register; 5232 int status = 0; 5233 int retry = 5; 5234 u32 val = 0; 5235 5236 use_register = (((hw->aq.api_maj_ver == 1) && 5237 (hw->aq.api_min_ver < 5)) || 5238 (hw->mac.type == I40E_MAC_X722)); 5239 if (!use_register) { 5240 do_retry: 5241 status =
i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5242 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5243 usleep_range(1000, 2000); 5244 retry--; 5245 goto do_retry; 5246 } 5247 } 5248 5249 /* if the AQ access failed, try the old-fashioned way */ 5250 if (status || use_register) 5251 val = rd32(hw, reg_addr); 5252 5253 return val; 5254 } 5255 5256 /** 5257 * i40e_aq_rx_ctl_write_register 5258 * @hw: pointer to the hw struct 5259 * @reg_addr: register address 5260 * @reg_val: register value 5261 * @cmd_details: pointer to command details structure or NULL 5262 * 5263 * Use the firmware to write to an Rx control register, 5264 * especially useful if the Rx unit is under heavy pressure 5265 **/ 5266 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5267 u32 reg_addr, u32 reg_val, 5268 struct i40e_asq_cmd_details *cmd_details) 5269 { 5270 struct i40e_aq_desc desc; 5271 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5272 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5273 int status; 5274 5275 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5276 5277 cmd->address = cpu_to_le32(reg_addr); 5278 cmd->value = cpu_to_le32(reg_val); 5279 5280 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5281 5282 return status; 5283 } 5284 5285 /** 5286 * i40e_write_rx_ctl - write to an Rx control register 5287 * @hw: pointer to the hw struct 5288 * @reg_addr: register address 5289 * @reg_val: register value 5290 **/ 5291 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5292 { 5293 bool use_register; 5294 int status = 0; 5295 int retry = 5; 5296 5297 use_register = (((hw->aq.api_maj_ver == 1) && 5298 (hw->aq.api_min_ver < 5)) || 5299 (hw->mac.type == I40E_MAC_X722)); 5300 if (!use_register) { 5301 do_retry: 5302 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5303 reg_val, NULL); 5304 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5305 usleep_range(1000, 2000); 5306 retry--; 5307 goto do_retry; 5308 } 5309 } 5310 5311 /* if the AQ access failed, try the old-fashioned way */ 5312 if (status || use_register) 5313 wr32(hw, reg_addr, reg_val); 5314 } 5315 5316 /** 5317 * i40e_mdio_if_number_selection - MDIO I/F number selection 5318 * @hw: pointer to the hw struct 5319 * @set_mdio: use MDIO I/F number specified by mdio_num 5320 * @mdio_num: MDIO I/F number 5321 * @cmd: pointer to PHY Register command structure 5322 **/ 5323 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5324 u8 mdio_num, 5325 struct i40e_aqc_phy_register_access *cmd) 5326 { 5327 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { 5328 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) 5329 cmd->cmd_flags |= 5330 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5331 ((mdio_num << 5332 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & 5333 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); 5334 else 5335 i40e_debug(hw, I40E_DEBUG_PHY, 5336 "MDIO I/F number selection not supported by current FW version.\n"); 5337 } 5338 } 5339 5340 /** 5341 * i40e_aq_set_phy_register_ext 5342 * @hw: pointer to the hw struct 5343 * @phy_select: select which phy should be accessed 5344 * @dev_addr: PHY device address 5345 * @page_change: flag to indicate if phy page should be updated 5346 * @set_mdio: use MDIO I/F number specified by mdio_num 5347 * @mdio_num: MDIO I/F number 5348 * @reg_addr: PHY register address 5349 * @reg_val: new register value 5350 * @cmd_details: pointer to command details structure or NULL 5351 * 
/**
 * i40e_aq_set_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
 * you may use the simple wrapper i40e_aq_set_phy_register.
 **/
int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
				 u8 phy_select, u8 dev_addr, bool page_change,
				 bool set_mdio, u8 mdio_num,
				 u32 reg_addr, u32 reg_val,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
 * you may use the simple wrapper i40e_aq_get_phy_register.
 **/
int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
				 u8 phy_select, u8 dev_addr, bool page_change,
				 bool set_mdio, u8 mdio_num,
				 u32 reg_addr, u32 *reg_val,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}
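/*
 * Usage sketch (illustrative only): read one register from an external PHY
 * over the AdminQ, keeping the default MDIO interface (set_mdio = false) and
 * leaving the QSFP page untouched (page_change = false).  dev_addr and
 * reg_addr are placeholders; in the common case the i40e_aq_get_phy_register()
 * wrapper mentioned in the NOTE above hides the MDIO arguments.
 *
 *	u32 val;
 *	int err;
 *
 *	err = i40e_aq_get_phy_register_ext(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					   dev_addr, false, false, 0,
 *					   reg_addr, &val, NULL);
 *	if (!err)
 *		... use val ...
 */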
/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
		      u16 buff_size, u32 track_id,
		      u32 *error_offset, u32 *error_info,
		      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
			 u16 buff_size, u8 flags,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
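/*
 * Usage sketch (illustrative only): locate the i40e profile segment inside a
 * DDP package image before using the section helpers above.  'pkg' stands for
 * the caller's raw package buffer (for example the data of a firmware blob);
 * bounds checking of the image is the caller's responsibility.
 *
 *	struct i40e_package_header *pkg_hdr = (struct i40e_package_header *)pkg;
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return -EINVAL;
 */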
/**
 * i40e_find_section_in_profile
 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches the i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
				    struct i40e_profile_aq_section *aq)
{
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}
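/*
 * Layout note (illustrative): a SECTION_TYPE_AQ section embeds a ready-made
 * admin queue command immediately after its header.  Given a section header
 * 'sec' of that type, this is how i40e_write_profile() below hands it to
 * i40e_ddp_exec_aq_section():
 *
 *	struct i40e_profile_aq_section *ddp_aq =
 *		(struct i40e_profile_aq_section *)&sec[1];
 *
 *	status = i40e_ddp_exec_aq_section(hw, ddp_aq);
 */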
/**
 * i40e_validate_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag indicating if the profile is for rollback
 *
 * Validates supported devices and profile's sections.
 */
static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	int status = 0;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return -EOPNOTSUPP;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return -ENODEV;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return -EOPNOTSUPP;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return -EOPNOTSUPP;
			}
		}
	}

	return status;
}
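/*
 * Worked example (illustrative): the profile's device table packs the PCI
 * vendor ID into the upper 16 bits of vendor_dev_id and the device ID into
 * the lower 16 bits, which is what the check in i40e_validate_profile()
 * above relies on.  An entry of 0x80861572, for instance, matches
 * PCI_VENDOR_ID_INTEL with hw->device_id == 0x1572:
 *
 *	vendor_dev_id = 0x80861572;
 *	(vendor_dev_id >> 16)    ->  0x8086 (PCI_VENDOR_ID_INTEL)
 *	(vendor_dev_id & 0xFFFF) ->  0x1572 (compared against hw->device_id)
 */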
/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	int status = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back a previously loaded package.
 */
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	int status = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}
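/*
 * Usage sketch (illustrative only): the typical download sequence ties these
 * helpers together -- validate and write the profile, then register it in the
 * list of applied profiles via i40e_add_pinfo_to_list() below.  Allocation of
 * profile_info_sec (one section header plus one struct i40e_profile_info) and
 * error handling are the caller's responsibility.
 *
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, profile_info_sec,
 *						track_id);
 */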
/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;
	int status = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
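/*
 * Usage sketch (illustrative only): this helper only wraps the caller's filter
 * array in an AdminQ command; the element contents are entirely caller-owned.
 * vsi_seid is a placeholder for the target VSI.
 *
 *	struct i40e_aqc_cloud_filters_element_data filter = {};
 *
 *	... fill in the match fields, flags and destination queue ...
 *
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);
 */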
/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
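/*
 * Usage sketch (illustrative only): removal mirrors addition -- the caller
 * typically passes back the same element data it used when the filter was
 * added ('filter' from the sketch after i40e_aq_add_cloud_filters() above)
 * so the firmware can match the filter to delete.
 *
 *	status = i40e_aq_rem_cloud_filters(hw, vsi_seid, &filter, 1);
 */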
/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
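/*
 * Worked example (illustrative): the Geneve adjustment performed in
 * i40e_aq_add_cloud_filters_bb() and i40e_aq_rem_cloud_filters_bb() above
 * shifts the 24-bit VNI up by one byte, so a caller-supplied tenant_id of
 * 0x123456 is sent to the firmware as 0x12345600.  Both paths apply the same
 * shift so that add and remove operate on identical keys.
 *
 *	ti = le32_to_cpu(filters[i].element.tenant_id);       ->  0x00123456
 *	filters[i].element.tenant_id = cpu_to_le32(ti << 8);  ->  0x12345600
 */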