// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "i40e_adminq_cmd.h"
#include "i40e_devids.h"
#include "i40e_prototype.h"
#include "i40e_register.h"

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
int i40e_set_mac_type(struct i40e_hw *hw)
{
	int status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = -ENODEV;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}
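/* Editorial note (illustrative only, not part of the driver's call flow):
 * callers typically pair the numeric status of an AdminQ call with
 * i40e_aq_str() when logging a failure, e.g.
 *
 *	if (status)
 *		hw_dbg(hw, "AQ command failed, status %d, AQ error %s\n",
 *		       status, i40e_aq_str(hw, hw->aq.asq_last_status));
 */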
/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}
/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
			   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
				   u16 vsi_id, bool pf_lut,
				   u8 *lut, u16 lut_size,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				   u16 vsi_id,
				   struct i40e_aqc_get_set_rss_key_data *key,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
int i40e_aq_get_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
int i40e_aq_set_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */
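/* Illustrative sketch only (not part of the driver's code): a minimal decode
 * following the work flow above, assuming 'ptype' holds the 8-bit hardware
 * packet type taken from an Rx descriptor and 'decoded' is a local variable:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known) {
 *		// unknown packet type, nothing more to decode
 *	} else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// IP packet: look at the tunnel/inner protocol fields
 *	} else {
 *		// L2 packet: decode via enum i40e_rx_l2_ptype
 *	}
 */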
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
int i40e_init_shared_code(struct i40e_hw *hw)
{
	u32 port, ari, func_rid;
	int status = 0;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return -ENODEV;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
	       >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
	      I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static int
i40e_aq_mac_address_read(struct i40e_hw *hw,
			 u16 *flags,
			 struct i40e_aqc_mac_address_read_data *addrs,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}
/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_mac_address_write(struct i40e_hw *hw,
			      u16 flags, u8 *mac_addr,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = -EINVAL;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}
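/* Editorial note: the GLLAN_TXPRE_QDIS registers are banked per group of 128
 * queues, which is why the absolute queue index above is split into a register
 * block (index / 128) and an offset within the block (index % 128);
 * i40e_clear_hw() below repeats the same pattern for every PF queue.
 */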
/**
 * i40e_get_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 *
 * Reads the part number string from the EEPROM and stores it
 * into newly allocated buffer and saves resulting pointer
 * to i40e_hw->pba_id field.
 **/
void i40e_get_pba_string(struct i40e_hw *hw)
{
#define I40E_NVM_PBA_FLAGS_BLK_PRESENT	0xFAFA
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	int status;
	char *ptr;
	u16 i;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status) {
		hw_dbg(hw, "Failed to read PBA flags.\n");
		return;
	}
	if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) {
		hw_dbg(hw, "PBA block is not present.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size) and advance pointer to first PBA word.
	 */
	pba_size--;
	pba_ptr++;
	if (!pba_size) {
		hw_dbg(hw, "PBA ID is empty.\n");
		return;
	}

	ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL);
	if (!ptr)
		return;
	hw->pba_id = ptr;

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			devm_kfree(i40e_hw_to_dev(hw), hw->pba_id);
			hw->pba_id = NULL;
			return;
		}

		*ptr++ = (pba_word >> 8) & 0xFF;
		*ptr++ = pba_word & 0xFF;
	}
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}
/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static int i40e_poll_globr(struct i40e_hw *hw,
			   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return -EIO;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
int i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		   I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return -EIO;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return -EIO;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return -EIO;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return -EIO;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}
/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;	/* 0x7FF in FIRSTQ_INDX marks end of list */

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}
/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
	       I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED		BIT(4)
#define I40E_LED_MODE_VALID	(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
				 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0		22

#define I40E_PIN_FUNC_SDP	0x0
#define I40E_PIN_FUNC_LED	0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
		       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}
/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				      I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @abilities: structure for PHY capabilities to be filled
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
int
i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			     bool qualified_modules, bool report_init,
			     struct i40e_aq_get_phy_abilities_resp *abilities,
			     struct i40e_asq_cmd_details *cmd_details)
{
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
	struct i40e_aq_desc desc;
	int status;

	if (!abilities)
		return -EINVAL;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
				cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
				cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = -EIO;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = -EIO;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}
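/* Editorial note (sketch of intended usage, not driver code): callers that
 * want to change PHY settings typically follow a get-modify-set pattern --
 * read the current abilities with i40e_aq_get_phy_capabilities(), copy the
 * relevant fields into a struct i40e_aq_set_phy_config, adjust only what
 * needs to change, then apply it with i40e_aq_set_phy_config(); see
 * i40e_set_fc_status() below for a concrete in-tree example.
 */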
/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
int i40e_aq_set_phy_config(struct i40e_hw *hw,
			   struct i40e_aq_set_phy_config *config,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	int status;

	if (!config)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities are unchanged, there is nothing to set */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}
/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
		bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				bool enable_link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
 **/
int i40e_aq_get_link_info(struct i40e_hw *hw,
			  bool enable_lse, struct i40e_link_status *link,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool tx_pause, rx_pause;
	u16 command_flags;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	hw->phy.link_info_old = *hw_link_info;

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw->phy.media_type = i40e_get_media_type(hw);
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
						 I40E_AQ_CONFIG_FEC_RS_ENA);
	hw_link_info->ext_info = resp->ext_info;
	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;

	/* update fc info */
	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause & rx_pause)
		hw->fc.current_mode = I40E_FC_FULL;
	else if (tx_pause)
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
	else if (rx_pause)
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
	else
		hw->fc.current_mode = I40E_FC_NONE;

	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
		hw_link_info->crc_enable = true;
	else
		hw_link_info->crc_enable = false;

	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	if ((hw->mac.type == I40E_MAC_XL710) &&
	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
	    hw->mac.type != I40E_MAC_X722) {
		__le32 tmp;

		memcpy(&tmp, resp->link_type, sizeof(tmp));
		hw->phy.phy_types = le32_to_cpu(tmp);
		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
	}

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so helper functions don't call AQ again */
	hw->phy.get_link_info = false;

aq_get_link_info_exit:
	return status;
}

/**
 * i40e_aq_set_phy_int_mask
 * @hw: pointer to the hw struct
 * @mask: interrupt mask to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set link interrupt mask.
 **/
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
			     u16 mask,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_lb_mode *cmd =
		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
	if (ena_lpbk) {
		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
		else
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Send PHY debug command flags to the firmware, e.g. to reset the
 * external PHY.
 **/
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_is_aq_api_ver_ge
 * @aq: pointer to AdminQ info containing HW API version to compare
 * @maj: API major value
 * @min: API minor value
 *
 * Return true if the current HW API version is greater than or equal to
 * the provided major/minor version.
 **/
static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
				  u16 min)
{
	return (aq->api_maj_ver > maj ||
		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
}
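/* Usage sketch (illustrative only): this helper gates features that require a
 * minimum firmware AdminQ API level; for example, the RX_ONLY promiscuous
 * flag is only applied when the firmware reports AQ API 1.5 or later:
 *
 *	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
 *		flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
 */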
1774 **/ 1775 int i40e_aq_add_vsi(struct i40e_hw *hw, 1776 struct i40e_vsi_context *vsi_ctx, 1777 struct i40e_asq_cmd_details *cmd_details) 1778 { 1779 struct i40e_aq_desc desc; 1780 struct i40e_aqc_add_get_update_vsi *cmd = 1781 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1782 struct i40e_aqc_add_get_update_vsi_completion *resp = 1783 (struct i40e_aqc_add_get_update_vsi_completion *) 1784 &desc.params.raw; 1785 int status; 1786 1787 i40e_fill_default_direct_cmd_desc(&desc, 1788 i40e_aqc_opc_add_vsi); 1789 1790 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1791 cmd->connection_type = vsi_ctx->connection_type; 1792 cmd->vf_id = vsi_ctx->vf_num; 1793 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1794 1795 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1796 1797 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1798 sizeof(vsi_ctx->info), 1799 cmd_details, true); 1800 1801 if (status) 1802 goto aq_add_vsi_exit; 1803 1804 vsi_ctx->seid = le16_to_cpu(resp->seid); 1805 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1806 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1807 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1808 1809 aq_add_vsi_exit: 1810 return status; 1811 } 1812 1813 /** 1814 * i40e_aq_set_default_vsi 1815 * @hw: pointer to the hw struct 1816 * @seid: vsi number 1817 * @cmd_details: pointer to command details structure or NULL 1818 **/ 1819 int i40e_aq_set_default_vsi(struct i40e_hw *hw, 1820 u16 seid, 1821 struct i40e_asq_cmd_details *cmd_details) 1822 { 1823 struct i40e_aq_desc desc; 1824 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1825 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1826 &desc.params.raw; 1827 int status; 1828 1829 i40e_fill_default_direct_cmd_desc(&desc, 1830 i40e_aqc_opc_set_vsi_promiscuous_modes); 1831 1832 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1833 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1834 cmd->seid = cpu_to_le16(seid); 1835 1836 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1837 1838 return status; 1839 } 1840 1841 /** 1842 * i40e_aq_clear_default_vsi 1843 * @hw: pointer to the hw struct 1844 * @seid: vsi number 1845 * @cmd_details: pointer to command details structure or NULL 1846 **/ 1847 int i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1848 u16 seid, 1849 struct i40e_asq_cmd_details *cmd_details) 1850 { 1851 struct i40e_aq_desc desc; 1852 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1853 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1854 &desc.params.raw; 1855 int status; 1856 1857 i40e_fill_default_direct_cmd_desc(&desc, 1858 i40e_aqc_opc_set_vsi_promiscuous_modes); 1859 1860 cmd->promiscuous_flags = cpu_to_le16(0); 1861 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1862 cmd->seid = cpu_to_le16(seid); 1863 1864 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1865 1866 return status; 1867 } 1868 1869 /** 1870 * i40e_aq_set_vsi_unicast_promiscuous 1871 * @hw: pointer to the hw struct 1872 * @seid: vsi number 1873 * @set: set unicast promiscuous enable/disable 1874 * @cmd_details: pointer to command details structure or NULL 1875 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 1876 **/ 1877 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 1878 u16 seid, bool set, 1879 struct i40e_asq_cmd_details *cmd_details, 1880 bool rx_only_promisc) 1881 { 1882 struct i40e_aq_desc desc; 1883 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1884 
                (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
        u16 flags = 0;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);

        if (set) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
                if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
                        flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
        }

        cmd->promiscuous_flags = cpu_to_le16(flags);

        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
        if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
                cmd->valid_flags |=
                        cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);

        cmd->seid = cpu_to_le16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

        return status;
}

/**
 * i40e_aq_set_vsi_multicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set multicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                                          u16 seid, bool set,
                                          struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
                (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
        u16 flags = 0;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);

        if (set)
                flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

        cmd->promiscuous_flags = cpu_to_le16(flags);

        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);

        cmd->seid = cpu_to_le16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

        return status;
}

/**
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
                                       u16 seid, bool enable,
                                       u16 vid,
                                       struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
                (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
        u16 flags = 0;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);

        if (enable)
                flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

        cmd->promiscuous_flags = cpu_to_le16(flags);
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
        cmd->seid = cpu_to_le16(seid);
        cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

        status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
                                              cmd_details, true);

        return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
                                       u16 seid, bool enable,
                                       u16 vid,
                                       struct i40e_asq_cmd_details
*cmd_details) 1991 { 1992 struct i40e_aq_desc desc; 1993 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1994 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1995 u16 flags = 0; 1996 int status; 1997 1998 i40e_fill_default_direct_cmd_desc(&desc, 1999 i40e_aqc_opc_set_vsi_promiscuous_modes); 2000 2001 if (enable) { 2002 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2003 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2004 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2005 } 2006 2007 cmd->promiscuous_flags = cpu_to_le16(flags); 2008 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2009 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2010 cmd->valid_flags |= 2011 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2012 cmd->seid = cpu_to_le16(seid); 2013 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2014 2015 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2016 cmd_details, true); 2017 2018 return status; 2019 } 2020 2021 /** 2022 * i40e_aq_set_vsi_bc_promisc_on_vlan 2023 * @hw: pointer to the hw struct 2024 * @seid: vsi number 2025 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2026 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2027 * @cmd_details: pointer to command details structure or NULL 2028 **/ 2029 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2030 u16 seid, bool enable, u16 vid, 2031 struct i40e_asq_cmd_details *cmd_details) 2032 { 2033 struct i40e_aq_desc desc; 2034 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2035 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2036 u16 flags = 0; 2037 int status; 2038 2039 i40e_fill_default_direct_cmd_desc(&desc, 2040 i40e_aqc_opc_set_vsi_promiscuous_modes); 2041 2042 if (enable) 2043 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2044 2045 cmd->promiscuous_flags = cpu_to_le16(flags); 2046 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2047 cmd->seid = cpu_to_le16(seid); 2048 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2049 2050 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2051 2052 return status; 2053 } 2054 2055 /** 2056 * i40e_aq_set_vsi_broadcast 2057 * @hw: pointer to the hw struct 2058 * @seid: vsi number 2059 * @set_filter: true to set filter, false to clear filter 2060 * @cmd_details: pointer to command details structure or NULL 2061 * 2062 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
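 *
 * Illustrative sketch (editor's addition, not upstream code), assuming hw and
 * the target VSI SEID are supplied by the caller:
 *
 *      int err;
 *
 *      err = i40e_aq_set_vsi_broadcast(hw, vsi_seid, true, NULL);
 *      if (err)
 *              hw_dbg(hw, "enabling broadcast on seid %d failed: %d\n",
 *                     vsi_seid, err);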
2063 **/ 2064 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2065 u16 seid, bool set_filter, 2066 struct i40e_asq_cmd_details *cmd_details) 2067 { 2068 struct i40e_aq_desc desc; 2069 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2070 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2071 int status; 2072 2073 i40e_fill_default_direct_cmd_desc(&desc, 2074 i40e_aqc_opc_set_vsi_promiscuous_modes); 2075 2076 if (set_filter) 2077 cmd->promiscuous_flags 2078 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2079 else 2080 cmd->promiscuous_flags 2081 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2082 2083 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2084 cmd->seid = cpu_to_le16(seid); 2085 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2086 2087 return status; 2088 } 2089 2090 /** 2091 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2092 * @hw: pointer to the hw struct 2093 * @seid: vsi number 2094 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2095 * @cmd_details: pointer to command details structure or NULL 2096 **/ 2097 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2098 u16 seid, bool enable, 2099 struct i40e_asq_cmd_details *cmd_details) 2100 { 2101 struct i40e_aq_desc desc; 2102 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2103 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2104 u16 flags = 0; 2105 int status; 2106 2107 i40e_fill_default_direct_cmd_desc(&desc, 2108 i40e_aqc_opc_set_vsi_promiscuous_modes); 2109 if (enable) 2110 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2111 2112 cmd->promiscuous_flags = cpu_to_le16(flags); 2113 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2114 cmd->seid = cpu_to_le16(seid); 2115 2116 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2117 2118 return status; 2119 } 2120 2121 /** 2122 * i40e_aq_get_vsi_params - get VSI configuration info 2123 * @hw: pointer to the hw struct 2124 * @vsi_ctx: pointer to a vsi context struct 2125 * @cmd_details: pointer to command details structure or NULL 2126 **/ 2127 int i40e_aq_get_vsi_params(struct i40e_hw *hw, 2128 struct i40e_vsi_context *vsi_ctx, 2129 struct i40e_asq_cmd_details *cmd_details) 2130 { 2131 struct i40e_aq_desc desc; 2132 struct i40e_aqc_add_get_update_vsi *cmd = 2133 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2134 struct i40e_aqc_add_get_update_vsi_completion *resp = 2135 (struct i40e_aqc_add_get_update_vsi_completion *) 2136 &desc.params.raw; 2137 int status; 2138 2139 i40e_fill_default_direct_cmd_desc(&desc, 2140 i40e_aqc_opc_get_vsi_parameters); 2141 2142 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2143 2144 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2145 2146 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2147 sizeof(vsi_ctx->info), NULL); 2148 2149 if (status) 2150 goto aq_get_vsi_params_exit; 2151 2152 vsi_ctx->seid = le16_to_cpu(resp->seid); 2153 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2154 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2155 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2156 2157 aq_get_vsi_params_exit: 2158 return status; 2159 } 2160 2161 /** 2162 * i40e_aq_update_vsi_params 2163 * @hw: pointer to the hw struct 2164 * @vsi_ctx: pointer to a vsi context struct 2165 * @cmd_details: pointer to command details structure or NULL 2166 * 2167 * Update a VSI context. 
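 *
 * Illustrative read-modify-write sketch (editor's addition, not upstream
 * code): the caller is assumed to have filled ctx via
 * i40e_aq_get_vsi_params() before flipping the port VLAN mode bits shown:
 *
 *      ctx.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 *      ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
 *                                 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
 *      err = i40e_aq_update_vsi_params(hw, &ctx, NULL);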
 **/
int i40e_aq_update_vsi_params(struct i40e_hw *hw,
                              struct i40e_vsi_context *vsi_ctx,
                              struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_get_update_vsi *cmd =
                (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
        struct i40e_aqc_add_get_update_vsi_completion *resp =
                (struct i40e_aqc_add_get_update_vsi_completion *)
                &desc.params.raw;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_update_vsi_parameters);
        cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

        status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
                                              sizeof(vsi_ctx->info),
                                              cmd_details, true);

        vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
        vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

        return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
int i40e_aq_get_switch_config(struct i40e_hw *hw,
                              struct i40e_aqc_get_switch_config_resp *buf,
                              u16 buf_size, u16 *start_seid,
                              struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_switch_seid *scfg =
                (struct i40e_aqc_switch_seid *)&desc.params.raw;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_get_switch_config);
        desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
        if (buf_size > I40E_AQ_LARGE_BUF)
                desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
        scfg->seid = cpu_to_le16(*start_seid);

        status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
        *start_seid = le16_to_cpu(scfg->seid);

        return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
int i40e_aq_set_switch_config(struct i40e_hw *hw,
                              u16 flags,
                              u16 valid_flags, u8 mode,
                              struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_switch_config *scfg =
                (struct i40e_aqc_set_switch_config *)&desc.params.raw;
        int status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_switch_config);
        scfg->flags = cpu_to_le16(flags);
        scfg->valid_flags = cpu_to_le16(valid_flags);
        scfg->mode = mode;
        if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
                scfg->switch_tag = cpu_to_le16(hw->switch_tag);
                scfg->first_tag = cpu_to_le16(hw->first_tag);
                scfg->second_tag = cpu_to_le16(hw->second_tag);
        }
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

        return status;
}

/**
 * i40e_aq_get_firmware_version
 * @hw: pointer to the hw struct
 * @fw_major_version: firmware major version
 * @fw_minor_version: firmware minor version
 * @fw_build: firmware build number
2272 * @api_major_version: major queue version 2273 * @api_minor_version: minor queue version 2274 * @cmd_details: pointer to command details structure or NULL 2275 * 2276 * Get the firmware version from the admin queue commands 2277 **/ 2278 int i40e_aq_get_firmware_version(struct i40e_hw *hw, 2279 u16 *fw_major_version, u16 *fw_minor_version, 2280 u32 *fw_build, 2281 u16 *api_major_version, u16 *api_minor_version, 2282 struct i40e_asq_cmd_details *cmd_details) 2283 { 2284 struct i40e_aq_desc desc; 2285 struct i40e_aqc_get_version *resp = 2286 (struct i40e_aqc_get_version *)&desc.params.raw; 2287 int status; 2288 2289 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2290 2291 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2292 2293 if (!status) { 2294 if (fw_major_version) 2295 *fw_major_version = le16_to_cpu(resp->fw_major); 2296 if (fw_minor_version) 2297 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2298 if (fw_build) 2299 *fw_build = le32_to_cpu(resp->fw_build); 2300 if (api_major_version) 2301 *api_major_version = le16_to_cpu(resp->api_major); 2302 if (api_minor_version) 2303 *api_minor_version = le16_to_cpu(resp->api_minor); 2304 } 2305 2306 return status; 2307 } 2308 2309 /** 2310 * i40e_aq_send_driver_version 2311 * @hw: pointer to the hw struct 2312 * @dv: driver's major, minor version 2313 * @cmd_details: pointer to command details structure or NULL 2314 * 2315 * Send the driver version to the firmware 2316 **/ 2317 int i40e_aq_send_driver_version(struct i40e_hw *hw, 2318 struct i40e_driver_version *dv, 2319 struct i40e_asq_cmd_details *cmd_details) 2320 { 2321 struct i40e_aq_desc desc; 2322 struct i40e_aqc_driver_version *cmd = 2323 (struct i40e_aqc_driver_version *)&desc.params.raw; 2324 int status; 2325 u16 len; 2326 2327 if (dv == NULL) 2328 return -EINVAL; 2329 2330 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2331 2332 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2333 cmd->driver_major_ver = dv->major_version; 2334 cmd->driver_minor_ver = dv->minor_version; 2335 cmd->driver_build_ver = dv->build_version; 2336 cmd->driver_subbuild_ver = dv->subbuild_version; 2337 2338 len = 0; 2339 while (len < sizeof(dv->driver_string) && 2340 (dv->driver_string[len] < 0x80) && 2341 dv->driver_string[len]) 2342 len++; 2343 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2344 len, cmd_details); 2345 2346 return status; 2347 } 2348 2349 /** 2350 * i40e_get_link_status - get status of the HW network link 2351 * @hw: pointer to the hw struct 2352 * @link_up: pointer to bool (true/false = linkup/linkdown) 2353 * 2354 * Variable link_up true if link is up, false if link is down. 
2355 * The variable link_up is invalid if returned value of status != 0 2356 * 2357 * Side effect: LinkStatusEvent reporting becomes enabled 2358 **/ 2359 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2360 { 2361 int status = 0; 2362 2363 if (hw->phy.get_link_info) { 2364 status = i40e_update_link_info(hw); 2365 2366 if (status) 2367 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2368 status); 2369 } 2370 2371 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2372 2373 return status; 2374 } 2375 2376 /** 2377 * i40e_update_link_info - update status of the HW network link 2378 * @hw: pointer to the hw struct 2379 **/ 2380 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw) 2381 { 2382 struct i40e_aq_get_phy_abilities_resp abilities; 2383 int status = 0; 2384 2385 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2386 if (status) 2387 return status; 2388 2389 /* extra checking needed to ensure link info to user is timely */ 2390 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2391 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2392 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2393 status = i40e_aq_get_phy_capabilities(hw, false, false, 2394 &abilities, NULL); 2395 if (status) 2396 return status; 2397 2398 if (abilities.fec_cfg_curr_mod_ext_info & 2399 I40E_AQ_ENABLE_FEC_AUTO) 2400 hw->phy.link_info.req_fec_info = 2401 (I40E_AQ_REQUEST_FEC_KR | 2402 I40E_AQ_REQUEST_FEC_RS); 2403 else 2404 hw->phy.link_info.req_fec_info = 2405 abilities.fec_cfg_curr_mod_ext_info & 2406 (I40E_AQ_REQUEST_FEC_KR | 2407 I40E_AQ_REQUEST_FEC_RS); 2408 2409 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2410 sizeof(hw->phy.link_info.module_type)); 2411 } 2412 2413 return status; 2414 } 2415 2416 /** 2417 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2418 * @hw: pointer to the hw struct 2419 * @uplink_seid: the MAC or other gizmo SEID 2420 * @downlink_seid: the VSI SEID 2421 * @enabled_tc: bitmap of TCs to be enabled 2422 * @default_port: true for default port VSI, false for control port 2423 * @veb_seid: pointer to where to put the resulting VEB SEID 2424 * @enable_stats: true to turn on VEB stats 2425 * @cmd_details: pointer to command details structure or NULL 2426 * 2427 * This asks the FW to add a VEB between the uplink and downlink 2428 * elements. If the uplink SEID is 0, this will be a floating VEB. 
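 *
 * Illustrative sketch (editor's addition, not upstream code): creating a
 * statistics-enabled data VEB between a MAC uplink and a VSI, with both SEIDs
 * and the TC bitmap assumed to come from the caller:
 *
 *      u16 veb_seid = 0;
 *      int err;
 *
 *      err = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *                            false, &veb_seid, true, NULL);
 *      if (!err)
 *              hw_dbg(hw, "new VEB seid %d\n", veb_seid);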
2429 **/ 2430 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2431 u16 downlink_seid, u8 enabled_tc, 2432 bool default_port, u16 *veb_seid, 2433 bool enable_stats, 2434 struct i40e_asq_cmd_details *cmd_details) 2435 { 2436 struct i40e_aq_desc desc; 2437 struct i40e_aqc_add_veb *cmd = 2438 (struct i40e_aqc_add_veb *)&desc.params.raw; 2439 struct i40e_aqc_add_veb_completion *resp = 2440 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2441 u16 veb_flags = 0; 2442 int status; 2443 2444 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2445 if (!!uplink_seid != !!downlink_seid) 2446 return -EINVAL; 2447 2448 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2449 2450 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2451 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2452 cmd->enable_tcs = enabled_tc; 2453 if (!uplink_seid) 2454 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2455 if (default_port) 2456 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2457 else 2458 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2459 2460 /* reverse logic here: set the bitflag to disable the stats */ 2461 if (!enable_stats) 2462 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2463 2464 cmd->veb_flags = cpu_to_le16(veb_flags); 2465 2466 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2467 2468 if (!status && veb_seid) 2469 *veb_seid = le16_to_cpu(resp->veb_seid); 2470 2471 return status; 2472 } 2473 2474 /** 2475 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2476 * @hw: pointer to the hw struct 2477 * @veb_seid: the SEID of the VEB to query 2478 * @switch_id: the uplink switch id 2479 * @floating: set to true if the VEB is floating 2480 * @statistic_index: index of the stats counter block for this VEB 2481 * @vebs_used: number of VEB's used by function 2482 * @vebs_free: total VEB's not reserved by any function 2483 * @cmd_details: pointer to command details structure or NULL 2484 * 2485 * This retrieves the parameters for a particular VEB, specified by 2486 * uplink_seid, and returns them to the caller. 
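 *
 * Illustrative sketch (editor's addition, not upstream code); any output
 * pointer that is not needed may be passed as NULL, and veb_seid is assumed
 * valid:
 *
 *      u16 switch_id, vebs_used, vebs_free;
 *      bool floating;
 *      int err;
 *
 *      err = i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *                                       NULL, &vebs_used, &vebs_free, NULL);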
2487 **/ 2488 int i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2489 u16 veb_seid, u16 *switch_id, 2490 bool *floating, u16 *statistic_index, 2491 u16 *vebs_used, u16 *vebs_free, 2492 struct i40e_asq_cmd_details *cmd_details) 2493 { 2494 struct i40e_aq_desc desc; 2495 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2496 (struct i40e_aqc_get_veb_parameters_completion *) 2497 &desc.params.raw; 2498 int status; 2499 2500 if (veb_seid == 0) 2501 return -EINVAL; 2502 2503 i40e_fill_default_direct_cmd_desc(&desc, 2504 i40e_aqc_opc_get_veb_parameters); 2505 cmd_resp->seid = cpu_to_le16(veb_seid); 2506 2507 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2508 if (status) 2509 goto get_veb_exit; 2510 2511 if (switch_id) 2512 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2513 if (statistic_index) 2514 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2515 if (vebs_used) 2516 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2517 if (vebs_free) 2518 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2519 if (floating) { 2520 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2521 2522 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2523 *floating = true; 2524 else 2525 *floating = false; 2526 } 2527 2528 get_veb_exit: 2529 return status; 2530 } 2531 2532 /** 2533 * i40e_prepare_add_macvlan 2534 * @mv_list: list of macvlans to be added 2535 * @desc: pointer to AQ descriptor structure 2536 * @count: length of the list 2537 * @seid: VSI for the mac address 2538 * 2539 * Internal helper function that prepares the add macvlan request 2540 * and returns the buffer size. 2541 **/ 2542 static u16 2543 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2544 struct i40e_aq_desc *desc, u16 count, u16 seid) 2545 { 2546 struct i40e_aqc_macvlan *cmd = 2547 (struct i40e_aqc_macvlan *)&desc->params.raw; 2548 u16 buf_size; 2549 int i; 2550 2551 buf_size = count * sizeof(*mv_list); 2552 2553 /* prep the rest of the request */ 2554 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2555 cmd->num_addresses = cpu_to_le16(count); 2556 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2557 cmd->seid[1] = 0; 2558 cmd->seid[2] = 0; 2559 2560 for (i = 0; i < count; i++) 2561 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2562 mv_list[i].flags |= 2563 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2564 2565 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2566 if (buf_size > I40E_AQ_LARGE_BUF) 2567 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2568 2569 return buf_size; 2570 } 2571 2572 /** 2573 * i40e_aq_add_macvlan 2574 * @hw: pointer to the hw struct 2575 * @seid: VSI for the mac address 2576 * @mv_list: list of macvlans to be added 2577 * @count: length of the list 2578 * @cmd_details: pointer to command details structure or NULL 2579 * 2580 * Add MAC/VLAN addresses to the HW filtering 2581 **/ 2582 int 2583 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2584 struct i40e_aqc_add_macvlan_element_data *mv_list, 2585 u16 count, struct i40e_asq_cmd_details *cmd_details) 2586 { 2587 struct i40e_aq_desc desc; 2588 u16 buf_size; 2589 2590 if (count == 0 || !mv_list || !hw) 2591 return -EINVAL; 2592 2593 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2594 2595 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2596 cmd_details, true); 2597 } 2598 2599 /** 2600 * i40e_aq_add_macvlan_v2 2601 * @hw: pointer to the hw struct 2602 * @seid: VSI for the mac address 2603 * @mv_list: list of 
macvlans to be added 2604 * @count: length of the list 2605 * @cmd_details: pointer to command details structure or NULL 2606 * @aq_status: pointer to Admin Queue status return value 2607 * 2608 * Add MAC/VLAN addresses to the HW filtering. 2609 * The _v2 version returns the last Admin Queue status in aq_status 2610 * to avoid race conditions in access to hw->aq.asq_last_status. 2611 * It also calls _v2 versions of asq_send_command functions to 2612 * get the aq_status on the stack. 2613 **/ 2614 int 2615 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2616 struct i40e_aqc_add_macvlan_element_data *mv_list, 2617 u16 count, struct i40e_asq_cmd_details *cmd_details, 2618 enum i40e_admin_queue_err *aq_status) 2619 { 2620 struct i40e_aq_desc desc; 2621 u16 buf_size; 2622 2623 if (count == 0 || !mv_list || !hw) 2624 return -EINVAL; 2625 2626 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2627 2628 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2629 cmd_details, true, aq_status); 2630 } 2631 2632 /** 2633 * i40e_aq_remove_macvlan 2634 * @hw: pointer to the hw struct 2635 * @seid: VSI for the mac address 2636 * @mv_list: list of macvlans to be removed 2637 * @count: length of the list 2638 * @cmd_details: pointer to command details structure or NULL 2639 * 2640 * Remove MAC/VLAN addresses from the HW filtering 2641 **/ 2642 int 2643 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2644 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2645 u16 count, struct i40e_asq_cmd_details *cmd_details) 2646 { 2647 struct i40e_aq_desc desc; 2648 struct i40e_aqc_macvlan *cmd = 2649 (struct i40e_aqc_macvlan *)&desc.params.raw; 2650 u16 buf_size; 2651 int status; 2652 2653 if (count == 0 || !mv_list || !hw) 2654 return -EINVAL; 2655 2656 buf_size = count * sizeof(*mv_list); 2657 2658 /* prep the rest of the request */ 2659 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2660 cmd->num_addresses = cpu_to_le16(count); 2661 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2662 cmd->seid[1] = 0; 2663 cmd->seid[2] = 0; 2664 2665 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2666 if (buf_size > I40E_AQ_LARGE_BUF) 2667 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2668 2669 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2670 cmd_details, true); 2671 2672 return status; 2673 } 2674 2675 /** 2676 * i40e_aq_remove_macvlan_v2 2677 * @hw: pointer to the hw struct 2678 * @seid: VSI for the mac address 2679 * @mv_list: list of macvlans to be removed 2680 * @count: length of the list 2681 * @cmd_details: pointer to command details structure or NULL 2682 * @aq_status: pointer to Admin Queue status return value 2683 * 2684 * Remove MAC/VLAN addresses from the HW filtering. 2685 * The _v2 version returns the last Admin Queue status in aq_status 2686 * to avoid race conditions in access to hw->aq.asq_last_status. 2687 * It also calls _v2 versions of asq_send_command functions to 2688 * get the aq_status on the stack. 
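 *
 * Illustrative sketch (editor's addition, not upstream code): removing a
 * single perfect-match MAC filter while capturing the per-call AQ status;
 * the MAC address, VSI SEID and the choice to ignore ENOENT are assumptions
 * about the caller:
 *
 *      struct i40e_aqc_remove_macvlan_element_data el = {};
 *      enum i40e_admin_queue_err aq_status;
 *      int err;
 *
 *      ether_addr_copy(el.mac_addr, mac);
 *      el.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *      err = i40e_aq_remove_macvlan_v2(hw, vsi_seid, &el, 1, NULL,
 *                                      &aq_status);
 *      if (err && aq_status != I40E_AQ_RC_ENOENT)
 *              hw_dbg(hw, "MAC filter removal failed: %d\n", err);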
2689 **/ 2690 int 2691 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2692 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2693 u16 count, struct i40e_asq_cmd_details *cmd_details, 2694 enum i40e_admin_queue_err *aq_status) 2695 { 2696 struct i40e_aqc_macvlan *cmd; 2697 struct i40e_aq_desc desc; 2698 u16 buf_size; 2699 2700 if (count == 0 || !mv_list || !hw) 2701 return -EINVAL; 2702 2703 buf_size = count * sizeof(*mv_list); 2704 2705 /* prep the rest of the request */ 2706 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2707 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2708 cmd->num_addresses = cpu_to_le16(count); 2709 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2710 cmd->seid[1] = 0; 2711 cmd->seid[2] = 0; 2712 2713 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2714 if (buf_size > I40E_AQ_LARGE_BUF) 2715 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2716 2717 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2718 cmd_details, true, aq_status); 2719 } 2720 2721 /** 2722 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2723 * @hw: pointer to the hw struct 2724 * @opcode: AQ opcode for add or delete mirror rule 2725 * @sw_seid: Switch SEID (to which rule refers) 2726 * @rule_type: Rule Type (ingress/egress/VLAN) 2727 * @id: Destination VSI SEID or Rule ID 2728 * @count: length of the list 2729 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2730 * @cmd_details: pointer to command details structure or NULL 2731 * @rule_id: Rule ID returned from FW 2732 * @rules_used: Number of rules used in internal switch 2733 * @rules_free: Number of rules free in internal switch 2734 * 2735 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2736 * VEBs/VEPA elements only 2737 **/ 2738 static int i40e_mirrorrule_op(struct i40e_hw *hw, 2739 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2740 u16 count, __le16 *mr_list, 2741 struct i40e_asq_cmd_details *cmd_details, 2742 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2743 { 2744 struct i40e_aq_desc desc; 2745 struct i40e_aqc_add_delete_mirror_rule *cmd = 2746 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2747 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2748 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2749 u16 buf_size; 2750 int status; 2751 2752 buf_size = count * sizeof(*mr_list); 2753 2754 /* prep the rest of the request */ 2755 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2756 cmd->seid = cpu_to_le16(sw_seid); 2757 cmd->rule_type = cpu_to_le16(rule_type & 2758 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2759 cmd->num_entries = cpu_to_le16(count); 2760 /* Dest VSI for add, rule_id for delete */ 2761 cmd->destination = cpu_to_le16(id); 2762 if (mr_list) { 2763 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2764 I40E_AQ_FLAG_RD)); 2765 if (buf_size > I40E_AQ_LARGE_BUF) 2766 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2767 } 2768 2769 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2770 cmd_details); 2771 if (!status || 2772 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2773 if (rule_id) 2774 *rule_id = le16_to_cpu(resp->rule_id); 2775 if (rules_used) 2776 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2777 if (rules_free) 2778 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2779 } 2780 return status; 2781 } 2782 2783 /** 2784 * i40e_aq_add_mirrorrule - add a mirror rule 2785 * @hw: pointer to the hw struct 2786 * @sw_seid: Switch SEID (to which rule refers) 2787 * @rule_type: Rule Type (ingress/egress/VLAN) 2788 * @dest_vsi: SEID of VSI to which packets will be mirrored 2789 * @count: length of the list 2790 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2791 * @cmd_details: pointer to command details structure or NULL 2792 * @rule_id: Rule ID returned from FW 2793 * @rules_used: Number of rules used in internal switch 2794 * @rules_free: Number of rules free in internal switch 2795 * 2796 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2797 **/ 2798 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2799 u16 rule_type, u16 dest_vsi, u16 count, 2800 __le16 *mr_list, 2801 struct i40e_asq_cmd_details *cmd_details, 2802 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2803 { 2804 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2805 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2806 if (count == 0 || !mr_list) 2807 return -EINVAL; 2808 } 2809 2810 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2811 rule_type, dest_vsi, count, mr_list, 2812 cmd_details, rule_id, rules_used, rules_free); 2813 } 2814 2815 /** 2816 * i40e_aq_delete_mirrorrule - delete a mirror rule 2817 * @hw: pointer to the hw struct 2818 * @sw_seid: Switch SEID (to which rule refers) 2819 * @rule_type: Rule Type (ingress/egress/VLAN) 2820 * @count: length of the list 2821 * @rule_id: Rule ID that is returned in the receive desc as part of 2822 * add_mirrorrule. 
2823 * @mr_list: list of mirrored VLAN IDs to be removed 2824 * @cmd_details: pointer to command details structure or NULL 2825 * @rules_used: Number of rules used in internal switch 2826 * @rules_free: Number of rules free in internal switch 2827 * 2828 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2829 **/ 2830 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2831 u16 rule_type, u16 rule_id, u16 count, 2832 __le16 *mr_list, 2833 struct i40e_asq_cmd_details *cmd_details, 2834 u16 *rules_used, u16 *rules_free) 2835 { 2836 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2837 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2838 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2839 * mirroring. For other rule_type, count and rule_type should 2840 * not matter. 2841 */ 2842 if (count == 0 || !mr_list) 2843 return -EINVAL; 2844 } 2845 2846 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2847 rule_type, rule_id, count, mr_list, 2848 cmd_details, NULL, rules_used, rules_free); 2849 } 2850 2851 /** 2852 * i40e_aq_send_msg_to_vf 2853 * @hw: pointer to the hardware structure 2854 * @vfid: VF id to send msg 2855 * @v_opcode: opcodes for VF-PF communication 2856 * @v_retval: return error code 2857 * @msg: pointer to the msg buffer 2858 * @msglen: msg length 2859 * @cmd_details: pointer to command details 2860 * 2861 * send msg to vf 2862 **/ 2863 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2864 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2865 struct i40e_asq_cmd_details *cmd_details) 2866 { 2867 struct i40e_aq_desc desc; 2868 struct i40e_aqc_pf_vf_message *cmd = 2869 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2870 int status; 2871 2872 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2873 cmd->id = cpu_to_le32(vfid); 2874 desc.cookie_high = cpu_to_le32(v_opcode); 2875 desc.cookie_low = cpu_to_le32(v_retval); 2876 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2877 if (msglen) { 2878 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2879 I40E_AQ_FLAG_RD)); 2880 if (msglen > I40E_AQ_LARGE_BUF) 2881 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2882 desc.datalen = cpu_to_le16(msglen); 2883 } 2884 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2885 2886 return status; 2887 } 2888 2889 /** 2890 * i40e_aq_debug_read_register 2891 * @hw: pointer to the hw struct 2892 * @reg_addr: register address 2893 * @reg_val: register value 2894 * @cmd_details: pointer to command details structure or NULL 2895 * 2896 * Read the register using the admin queue commands 2897 **/ 2898 int i40e_aq_debug_read_register(struct i40e_hw *hw, 2899 u32 reg_addr, u64 *reg_val, 2900 struct i40e_asq_cmd_details *cmd_details) 2901 { 2902 struct i40e_aq_desc desc; 2903 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2904 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2905 int status; 2906 2907 if (reg_val == NULL) 2908 return -EINVAL; 2909 2910 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2911 2912 cmd_resp->address = cpu_to_le32(reg_addr); 2913 2914 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2915 2916 if (!status) { 2917 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2918 (u64)le32_to_cpu(cmd_resp->value_low); 2919 } 2920 2921 return status; 2922 } 2923 2924 /** 2925 * i40e_aq_debug_write_register 2926 * @hw: pointer to the hw struct 2927 * @reg_addr: register address 2928 * 
@reg_val: register value 2929 * @cmd_details: pointer to command details structure or NULL 2930 * 2931 * Write to a register using the admin queue commands 2932 **/ 2933 int i40e_aq_debug_write_register(struct i40e_hw *hw, 2934 u32 reg_addr, u64 reg_val, 2935 struct i40e_asq_cmd_details *cmd_details) 2936 { 2937 struct i40e_aq_desc desc; 2938 struct i40e_aqc_debug_reg_read_write *cmd = 2939 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2940 int status; 2941 2942 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 2943 2944 cmd->address = cpu_to_le32(reg_addr); 2945 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 2946 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 2947 2948 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2949 2950 return status; 2951 } 2952 2953 /** 2954 * i40e_aq_request_resource 2955 * @hw: pointer to the hw struct 2956 * @resource: resource id 2957 * @access: access type 2958 * @sdp_number: resource number 2959 * @timeout: the maximum time in ms that the driver may hold the resource 2960 * @cmd_details: pointer to command details structure or NULL 2961 * 2962 * requests common resource using the admin queue commands 2963 **/ 2964 int i40e_aq_request_resource(struct i40e_hw *hw, 2965 enum i40e_aq_resources_ids resource, 2966 enum i40e_aq_resource_access_type access, 2967 u8 sdp_number, u64 *timeout, 2968 struct i40e_asq_cmd_details *cmd_details) 2969 { 2970 struct i40e_aq_desc desc; 2971 struct i40e_aqc_request_resource *cmd_resp = 2972 (struct i40e_aqc_request_resource *)&desc.params.raw; 2973 int status; 2974 2975 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 2976 2977 cmd_resp->resource_id = cpu_to_le16(resource); 2978 cmd_resp->access_type = cpu_to_le16(access); 2979 cmd_resp->resource_number = cpu_to_le32(sdp_number); 2980 2981 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2982 /* The completion specifies the maximum time in ms that the driver 2983 * may hold the resource in the Timeout field. 2984 * If the resource is held by someone else, the command completes with 2985 * busy return value and the timeout field indicates the maximum time 2986 * the current owner of the resource has to free it. 
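 * A typical caller follows an acquire/poll/release pattern; as an editor's
 * sketch (not upstream code, and the retry policy is an assumption):
 *
 *      u64 timeout = 0;
 *      int err;
 *
 *      err = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *                                     I40E_RESOURCE_READ, 0, &timeout, NULL);
 *      if (err && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
 *              (wait up to "timeout" ms, then retry the request)
 *      (... access the resource ...)
 *      i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);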
2987 */ 2988 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 2989 *timeout = le32_to_cpu(cmd_resp->timeout); 2990 2991 return status; 2992 } 2993 2994 /** 2995 * i40e_aq_release_resource 2996 * @hw: pointer to the hw struct 2997 * @resource: resource id 2998 * @sdp_number: resource number 2999 * @cmd_details: pointer to command details structure or NULL 3000 * 3001 * release common resource using the admin queue commands 3002 **/ 3003 int i40e_aq_release_resource(struct i40e_hw *hw, 3004 enum i40e_aq_resources_ids resource, 3005 u8 sdp_number, 3006 struct i40e_asq_cmd_details *cmd_details) 3007 { 3008 struct i40e_aq_desc desc; 3009 struct i40e_aqc_request_resource *cmd = 3010 (struct i40e_aqc_request_resource *)&desc.params.raw; 3011 int status; 3012 3013 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3014 3015 cmd->resource_id = cpu_to_le16(resource); 3016 cmd->resource_number = cpu_to_le32(sdp_number); 3017 3018 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3019 3020 return status; 3021 } 3022 3023 /** 3024 * i40e_aq_read_nvm 3025 * @hw: pointer to the hw struct 3026 * @module_pointer: module pointer location in words from the NVM beginning 3027 * @offset: byte offset from the module beginning 3028 * @length: length of the section to be read (in bytes from the offset) 3029 * @data: command buffer (size [bytes] = length) 3030 * @last_command: tells if this is the last command in a series 3031 * @cmd_details: pointer to command details structure or NULL 3032 * 3033 * Read the NVM using the admin queue commands 3034 **/ 3035 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3036 u32 offset, u16 length, void *data, 3037 bool last_command, 3038 struct i40e_asq_cmd_details *cmd_details) 3039 { 3040 struct i40e_aq_desc desc; 3041 struct i40e_aqc_nvm_update *cmd = 3042 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3043 int status; 3044 3045 /* In offset the highest byte must be zeroed. */ 3046 if (offset & 0xFF000000) { 3047 status = -EINVAL; 3048 goto i40e_aq_read_nvm_exit; 3049 } 3050 3051 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3052 3053 /* If this is the last command in a series, set the proper flag. 
*/ 3054 if (last_command) 3055 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3056 cmd->module_pointer = module_pointer; 3057 cmd->offset = cpu_to_le32(offset); 3058 cmd->length = cpu_to_le16(length); 3059 3060 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3061 if (length > I40E_AQ_LARGE_BUF) 3062 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3063 3064 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3065 3066 i40e_aq_read_nvm_exit: 3067 return status; 3068 } 3069 3070 /** 3071 * i40e_aq_erase_nvm 3072 * @hw: pointer to the hw struct 3073 * @module_pointer: module pointer location in words from the NVM beginning 3074 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3075 * @length: length of the section to be erased (expressed in 4 KB) 3076 * @last_command: tells if this is the last command in a series 3077 * @cmd_details: pointer to command details structure or NULL 3078 * 3079 * Erase the NVM sector using the admin queue commands 3080 **/ 3081 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3082 u32 offset, u16 length, bool last_command, 3083 struct i40e_asq_cmd_details *cmd_details) 3084 { 3085 struct i40e_aq_desc desc; 3086 struct i40e_aqc_nvm_update *cmd = 3087 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3088 int status; 3089 3090 /* In offset the highest byte must be zeroed. */ 3091 if (offset & 0xFF000000) { 3092 status = -EINVAL; 3093 goto i40e_aq_erase_nvm_exit; 3094 } 3095 3096 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3097 3098 /* If this is the last command in a series, set the proper flag. */ 3099 if (last_command) 3100 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3101 cmd->module_pointer = module_pointer; 3102 cmd->offset = cpu_to_le32(offset); 3103 cmd->length = cpu_to_le16(length); 3104 3105 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3106 3107 i40e_aq_erase_nvm_exit: 3108 return status; 3109 } 3110 3111 /** 3112 * i40e_parse_discover_capabilities 3113 * @hw: pointer to the hw struct 3114 * @buff: pointer to a buffer containing device/function capability records 3115 * @cap_count: number of capability records in the list 3116 * @list_type_opc: type of capabilities list to parse 3117 * 3118 * Parse the device/function capabilities list. 
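 *
 * As a worked example (editor's addition): a capability record with
 * id == I40E_AQ_CAP_ID_RSS, number == 512 and logical_id == 9 is folded into
 * the selected caps struct as rss = true, rss_table_size = 512 and
 * rss_table_entry_width = 9; records with unknown ids are silently skipped.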
3119 **/ 3120 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3121 u32 cap_count, 3122 enum i40e_admin_queue_opc list_type_opc) 3123 { 3124 struct i40e_aqc_list_capabilities_element_resp *cap; 3125 u32 valid_functions, num_functions; 3126 u32 number, logical_id, phys_id; 3127 struct i40e_hw_capabilities *p; 3128 u16 id, ocp_cfg_word0; 3129 u8 major_rev; 3130 int status; 3131 u32 i = 0; 3132 3133 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3134 3135 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3136 p = &hw->dev_caps; 3137 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3138 p = &hw->func_caps; 3139 else 3140 return; 3141 3142 for (i = 0; i < cap_count; i++, cap++) { 3143 id = le16_to_cpu(cap->id); 3144 number = le32_to_cpu(cap->number); 3145 logical_id = le32_to_cpu(cap->logical_id); 3146 phys_id = le32_to_cpu(cap->phys_id); 3147 major_rev = cap->major_rev; 3148 3149 switch (id) { 3150 case I40E_AQ_CAP_ID_SWITCH_MODE: 3151 p->switch_mode = number; 3152 break; 3153 case I40E_AQ_CAP_ID_MNG_MODE: 3154 p->management_mode = number; 3155 if (major_rev > 1) { 3156 p->mng_protocols_over_mctp = logical_id; 3157 i40e_debug(hw, I40E_DEBUG_INIT, 3158 "HW Capability: Protocols over MCTP = %d\n", 3159 p->mng_protocols_over_mctp); 3160 } else { 3161 p->mng_protocols_over_mctp = 0; 3162 } 3163 break; 3164 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3165 p->npar_enable = number; 3166 break; 3167 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3168 p->os2bmc = number; 3169 break; 3170 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3171 p->valid_functions = number; 3172 break; 3173 case I40E_AQ_CAP_ID_SRIOV: 3174 if (number == 1) 3175 p->sr_iov_1_1 = true; 3176 break; 3177 case I40E_AQ_CAP_ID_VF: 3178 p->num_vfs = number; 3179 p->vf_base_id = logical_id; 3180 break; 3181 case I40E_AQ_CAP_ID_VMDQ: 3182 if (number == 1) 3183 p->vmdq = true; 3184 break; 3185 case I40E_AQ_CAP_ID_8021QBG: 3186 if (number == 1) 3187 p->evb_802_1_qbg = true; 3188 break; 3189 case I40E_AQ_CAP_ID_8021QBR: 3190 if (number == 1) 3191 p->evb_802_1_qbh = true; 3192 break; 3193 case I40E_AQ_CAP_ID_VSI: 3194 p->num_vsis = number; 3195 break; 3196 case I40E_AQ_CAP_ID_DCB: 3197 if (number == 1) { 3198 p->dcb = true; 3199 p->enabled_tcmap = logical_id; 3200 p->maxtc = phys_id; 3201 } 3202 break; 3203 case I40E_AQ_CAP_ID_FCOE: 3204 if (number == 1) 3205 p->fcoe = true; 3206 break; 3207 case I40E_AQ_CAP_ID_ISCSI: 3208 if (number == 1) 3209 p->iscsi = true; 3210 break; 3211 case I40E_AQ_CAP_ID_RSS: 3212 p->rss = true; 3213 p->rss_table_size = number; 3214 p->rss_table_entry_width = logical_id; 3215 break; 3216 case I40E_AQ_CAP_ID_RXQ: 3217 p->num_rx_qp = number; 3218 p->base_queue = phys_id; 3219 break; 3220 case I40E_AQ_CAP_ID_TXQ: 3221 p->num_tx_qp = number; 3222 p->base_queue = phys_id; 3223 break; 3224 case I40E_AQ_CAP_ID_MSIX: 3225 p->num_msix_vectors = number; 3226 i40e_debug(hw, I40E_DEBUG_INIT, 3227 "HW Capability: MSIX vector count = %d\n", 3228 p->num_msix_vectors); 3229 break; 3230 case I40E_AQ_CAP_ID_VF_MSIX: 3231 p->num_msix_vectors_vf = number; 3232 break; 3233 case I40E_AQ_CAP_ID_FLEX10: 3234 if (major_rev == 1) { 3235 if (number == 1) { 3236 p->flex10_enable = true; 3237 p->flex10_capable = true; 3238 } 3239 } else { 3240 /* Capability revision >= 2 */ 3241 if (number & 1) 3242 p->flex10_enable = true; 3243 if (number & 2) 3244 p->flex10_capable = true; 3245 } 3246 p->flex10_mode = logical_id; 3247 p->flex10_status = phys_id; 3248 break; 3249 case I40E_AQ_CAP_ID_CEM: 3250 if (number == 1) 3251 
p->mgmt_cem = true; 3252 break; 3253 case I40E_AQ_CAP_ID_IWARP: 3254 if (number == 1) 3255 p->iwarp = true; 3256 break; 3257 case I40E_AQ_CAP_ID_LED: 3258 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3259 p->led[phys_id] = true; 3260 break; 3261 case I40E_AQ_CAP_ID_SDP: 3262 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3263 p->sdp[phys_id] = true; 3264 break; 3265 case I40E_AQ_CAP_ID_MDIO: 3266 if (number == 1) { 3267 p->mdio_port_num = phys_id; 3268 p->mdio_port_mode = logical_id; 3269 } 3270 break; 3271 case I40E_AQ_CAP_ID_1588: 3272 if (number == 1) 3273 p->ieee_1588 = true; 3274 break; 3275 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3276 p->fd = true; 3277 p->fd_filters_guaranteed = number; 3278 p->fd_filters_best_effort = logical_id; 3279 break; 3280 case I40E_AQ_CAP_ID_WSR_PROT: 3281 p->wr_csr_prot = (u64)number; 3282 p->wr_csr_prot |= (u64)logical_id << 32; 3283 break; 3284 case I40E_AQ_CAP_ID_NVM_MGMT: 3285 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3286 p->sec_rev_disabled = true; 3287 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3288 p->update_disabled = true; 3289 break; 3290 default: 3291 break; 3292 } 3293 } 3294 3295 if (p->fcoe) 3296 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3297 3298 /* Software override ensuring FCoE is disabled if npar or mfp 3299 * mode because it is not supported in these modes. 3300 */ 3301 if (p->npar_enable || p->flex10_enable) 3302 p->fcoe = false; 3303 3304 /* count the enabled ports (aka the "not disabled" ports) */ 3305 hw->num_ports = 0; 3306 for (i = 0; i < 4; i++) { 3307 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3308 u64 port_cfg = 0; 3309 3310 /* use AQ read to get the physical register offset instead 3311 * of the port relative offset 3312 */ 3313 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3314 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3315 hw->num_ports++; 3316 } 3317 3318 /* OCP cards case: if a mezz is removed the Ethernet port is at 3319 * disabled state in PRTGEN_CNF register. Additional NVM read is 3320 * needed in order to check if we are dealing with OCP card. 3321 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3322 * physical ports results in wrong partition id calculation and thus 3323 * not supporting WoL. 
3324 */ 3325 if (hw->mac.type == I40E_MAC_X722) { 3326 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3327 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3328 2 * I40E_SR_OCP_CFG_WORD0, 3329 sizeof(ocp_cfg_word0), 3330 &ocp_cfg_word0, true, NULL); 3331 if (!status && 3332 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3333 hw->num_ports = 4; 3334 i40e_release_nvm(hw); 3335 } 3336 } 3337 3338 valid_functions = p->valid_functions; 3339 num_functions = 0; 3340 while (valid_functions) { 3341 if (valid_functions & 1) 3342 num_functions++; 3343 valid_functions >>= 1; 3344 } 3345 3346 /* partition id is 1-based, and functions are evenly spread 3347 * across the ports as partitions 3348 */ 3349 if (hw->num_ports != 0) { 3350 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3351 hw->num_partitions = num_functions / hw->num_ports; 3352 } 3353 3354 /* additional HW specific goodies that might 3355 * someday be HW version specific 3356 */ 3357 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3358 } 3359 3360 /** 3361 * i40e_aq_discover_capabilities 3362 * @hw: pointer to the hw struct 3363 * @buff: a virtual buffer to hold the capabilities 3364 * @buff_size: Size of the virtual buffer 3365 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3366 * @list_type_opc: capabilities type to discover - pass in the command opcode 3367 * @cmd_details: pointer to command details structure or NULL 3368 * 3369 * Get the device capabilities descriptions from the firmware 3370 **/ 3371 int i40e_aq_discover_capabilities(struct i40e_hw *hw, 3372 void *buff, u16 buff_size, u16 *data_size, 3373 enum i40e_admin_queue_opc list_type_opc, 3374 struct i40e_asq_cmd_details *cmd_details) 3375 { 3376 struct i40e_aqc_list_capabilites *cmd; 3377 struct i40e_aq_desc desc; 3378 int status = 0; 3379 3380 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3381 3382 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3383 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3384 status = -EINVAL; 3385 goto exit; 3386 } 3387 3388 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3389 3390 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3391 if (buff_size > I40E_AQ_LARGE_BUF) 3392 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3393 3394 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3395 *data_size = le16_to_cpu(desc.datalen); 3396 3397 if (status) 3398 goto exit; 3399 3400 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3401 list_type_opc); 3402 3403 exit: 3404 return status; 3405 } 3406 3407 /** 3408 * i40e_aq_update_nvm 3409 * @hw: pointer to the hw struct 3410 * @module_pointer: module pointer location in words from the NVM beginning 3411 * @offset: byte offset from the module beginning 3412 * @length: length of the section to be written (in bytes from the offset) 3413 * @data: command buffer (size [bytes] = length) 3414 * @last_command: tells if this is the last command in a series 3415 * @preservation_flags: Preservation mode flags 3416 * @cmd_details: pointer to command details structure or NULL 3417 * 3418 * Update the NVM using the admin queue commands 3419 **/ 3420 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3421 u32 offset, u16 length, void *data, 3422 bool last_command, u8 preservation_flags, 3423 struct i40e_asq_cmd_details *cmd_details) 3424 { 3425 struct i40e_aq_desc desc; 3426 struct i40e_aqc_nvm_update *cmd = 3427 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3428 int status; 3429 3430 /* In 
offset the highest byte must be zeroed. */ 3431 if (offset & 0xFF000000) { 3432 status = -EINVAL; 3433 goto i40e_aq_update_nvm_exit; 3434 } 3435 3436 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3437 3438 /* If this is the last command in a series, set the proper flag. */ 3439 if (last_command) 3440 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3441 if (hw->mac.type == I40E_MAC_X722) { 3442 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3443 cmd->command_flags |= 3444 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3445 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3446 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3447 cmd->command_flags |= 3448 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3449 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3450 } 3451 cmd->module_pointer = module_pointer; 3452 cmd->offset = cpu_to_le32(offset); 3453 cmd->length = cpu_to_le16(length); 3454 3455 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3456 if (length > I40E_AQ_LARGE_BUF) 3457 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3458 3459 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3460 3461 i40e_aq_update_nvm_exit: 3462 return status; 3463 } 3464 3465 /** 3466 * i40e_aq_rearrange_nvm 3467 * @hw: pointer to the hw struct 3468 * @rearrange_nvm: defines direction of rearrangement 3469 * @cmd_details: pointer to command details structure or NULL 3470 * 3471 * Rearrange NVM structure, available only for transition FW 3472 **/ 3473 int i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3474 u8 rearrange_nvm, 3475 struct i40e_asq_cmd_details *cmd_details) 3476 { 3477 struct i40e_aqc_nvm_update *cmd; 3478 struct i40e_aq_desc desc; 3479 int status; 3480 3481 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3482 3483 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3484 3485 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3486 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3487 3488 if (!rearrange_nvm) { 3489 status = -EINVAL; 3490 goto i40e_aq_rearrange_nvm_exit; 3491 } 3492 3493 cmd->command_flags |= rearrange_nvm; 3494 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3495 3496 i40e_aq_rearrange_nvm_exit: 3497 return status; 3498 } 3499 3500 /** 3501 * i40e_aq_get_lldp_mib 3502 * @hw: pointer to the hw struct 3503 * @bridge_type: type of bridge requested 3504 * @mib_type: Local, Remote or both Local and Remote MIBs 3505 * @buff: pointer to a user supplied buffer to store the MIB block 3506 * @buff_size: size of the buffer (in bytes) 3507 * @local_len : length of the returned Local LLDP MIB 3508 * @remote_len: length of the returned Remote LLDP MIB 3509 * @cmd_details: pointer to command details structure or NULL 3510 * 3511 * Requests the complete LLDP MIB (entire packet). 
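 *
 * Illustrative sketch (editor's addition, not upstream code): fetching the
 * locally administered MIB from the nearest-bridge agent into a caller
 * supplied buffer buff of buff_size bytes:
 *
 *      u16 local_len = 0, remote_len = 0;
 *      int err;
 *
 *      err = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *                                 I40E_AQ_LLDP_MIB_LOCAL, buff, buff_size,
 *                                 &local_len, &remote_len, NULL);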
3512 **/ 3513 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3514 u8 mib_type, void *buff, u16 buff_size, 3515 u16 *local_len, u16 *remote_len, 3516 struct i40e_asq_cmd_details *cmd_details) 3517 { 3518 struct i40e_aq_desc desc; 3519 struct i40e_aqc_lldp_get_mib *cmd = 3520 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3521 struct i40e_aqc_lldp_get_mib *resp = 3522 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3523 int status; 3524 3525 if (buff_size == 0 || !buff) 3526 return -EINVAL; 3527 3528 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3529 /* Indirect Command */ 3530 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3531 3532 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3533 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3534 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3535 3536 desc.datalen = cpu_to_le16(buff_size); 3537 3538 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3539 if (buff_size > I40E_AQ_LARGE_BUF) 3540 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3541 3542 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3543 if (!status) { 3544 if (local_len != NULL) 3545 *local_len = le16_to_cpu(resp->local_len); 3546 if (remote_len != NULL) 3547 *remote_len = le16_to_cpu(resp->remote_len); 3548 } 3549 3550 return status; 3551 } 3552 3553 /** 3554 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3555 * @hw: pointer to the hw struct 3556 * @mib_type: Local, Remote or both Local and Remote MIBs 3557 * @buff: pointer to a user supplied buffer to store the MIB block 3558 * @buff_size: size of the buffer (in bytes) 3559 * @cmd_details: pointer to command details structure or NULL 3560 * 3561 * Set the LLDP MIB. 3562 **/ 3563 int 3564 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3565 u8 mib_type, void *buff, u16 buff_size, 3566 struct i40e_asq_cmd_details *cmd_details) 3567 { 3568 struct i40e_aqc_lldp_set_local_mib *cmd; 3569 struct i40e_aq_desc desc; 3570 int status; 3571 3572 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3573 if (buff_size == 0 || !buff) 3574 return -EINVAL; 3575 3576 i40e_fill_default_direct_cmd_desc(&desc, 3577 i40e_aqc_opc_lldp_set_local_mib); 3578 /* Indirect Command */ 3579 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3580 if (buff_size > I40E_AQ_LARGE_BUF) 3581 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3582 desc.datalen = cpu_to_le16(buff_size); 3583 3584 cmd->type = mib_type; 3585 cmd->length = cpu_to_le16(buff_size); 3586 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3587 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3588 3589 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3590 return status; 3591 } 3592 3593 /** 3594 * i40e_aq_cfg_lldp_mib_change_event 3595 * @hw: pointer to the hw struct 3596 * @enable_update: Enable or Disable event posting 3597 * @cmd_details: pointer to command details structure or NULL 3598 * 3599 * Enable or Disable posting of an event on ARQ when LLDP MIB 3600 * associated with the interface changes 3601 **/ 3602 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3603 bool enable_update, 3604 struct i40e_asq_cmd_details *cmd_details) 3605 { 3606 struct i40e_aq_desc desc; 3607 struct i40e_aqc_lldp_update_mib *cmd = 3608 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3609 int status; 3610 3611 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3612 3613 if (!enable_update) 3614 cmd->command |= 
I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3615 3616 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3617 3618 return status; 3619 } 3620 3621 /** 3622 * i40e_aq_restore_lldp 3623 * @hw: pointer to the hw struct 3624 * @setting: pointer to factory setting variable or NULL 3625 * @restore: True if factory settings should be restored 3626 * @cmd_details: pointer to command details structure or NULL 3627 * 3628 * Restore LLDP Agent factory settings if @restore set to True. In other case 3629 * only returns factory setting in AQ response. 3630 **/ 3631 int 3632 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3633 struct i40e_asq_cmd_details *cmd_details) 3634 { 3635 struct i40e_aq_desc desc; 3636 struct i40e_aqc_lldp_restore *cmd = 3637 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3638 int status; 3639 3640 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3641 i40e_debug(hw, I40E_DEBUG_ALL, 3642 "Restore LLDP not supported by current FW version.\n"); 3643 return -ENODEV; 3644 } 3645 3646 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3647 3648 if (restore) 3649 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3650 3651 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3652 3653 if (setting) 3654 *setting = cmd->command & 1; 3655 3656 return status; 3657 } 3658 3659 /** 3660 * i40e_aq_stop_lldp 3661 * @hw: pointer to the hw struct 3662 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3663 * @persist: True if stop of LLDP should be persistent across power cycles 3664 * @cmd_details: pointer to command details structure or NULL 3665 * 3666 * Stop or Shutdown the embedded LLDP Agent 3667 **/ 3668 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3669 bool persist, 3670 struct i40e_asq_cmd_details *cmd_details) 3671 { 3672 struct i40e_aq_desc desc; 3673 struct i40e_aqc_lldp_stop *cmd = 3674 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3675 int status; 3676 3677 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3678 3679 if (shutdown_agent) 3680 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3681 3682 if (persist) { 3683 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3684 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3685 else 3686 i40e_debug(hw, I40E_DEBUG_ALL, 3687 "Persistent Stop LLDP not supported by current FW version.\n"); 3688 } 3689 3690 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3691 3692 return status; 3693 } 3694 3695 /** 3696 * i40e_aq_start_lldp 3697 * @hw: pointer to the hw struct 3698 * @persist: True if start of LLDP should be persistent across power cycles 3699 * @cmd_details: pointer to command details structure or NULL 3700 * 3701 * Start the embedded LLDP Agent on all ports. 
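 *
 * Minimal caller sketch (illustrative only, not taken from the driver):
 *
 *	int status = i40e_aq_start_lldp(hw, true, NULL);
 *
 *	if (status)
 *		i40e_debug(hw, I40E_DEBUG_ALL, "start LLDP failed: %s\n",
 *			   i40e_aq_str(hw, hw->aq.asq_last_status));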
3702 **/ 3703 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3704 struct i40e_asq_cmd_details *cmd_details) 3705 { 3706 struct i40e_aq_desc desc; 3707 struct i40e_aqc_lldp_start *cmd = 3708 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3709 int status; 3710 3711 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3712 3713 cmd->command = I40E_AQ_LLDP_AGENT_START; 3714 3715 if (persist) { 3716 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3717 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3718 else 3719 i40e_debug(hw, I40E_DEBUG_ALL, 3720 "Persistent Start LLDP not supported by current FW version.\n"); 3721 } 3722 3723 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3724 3725 return status; 3726 } 3727 3728 /** 3729 * i40e_aq_set_dcb_parameters 3730 * @hw: pointer to the hw struct 3731 * @cmd_details: pointer to command details structure or NULL 3732 * @dcb_enable: True if DCB configuration needs to be applied 3733 * 3734 **/ 3735 int 3736 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3737 struct i40e_asq_cmd_details *cmd_details) 3738 { 3739 struct i40e_aq_desc desc; 3740 struct i40e_aqc_set_dcb_parameters *cmd = 3741 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3742 int status; 3743 3744 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3745 return -ENODEV; 3746 3747 i40e_fill_default_direct_cmd_desc(&desc, 3748 i40e_aqc_opc_set_dcb_parameters); 3749 3750 if (dcb_enable) { 3751 cmd->valid_flags = I40E_DCB_VALID; 3752 cmd->command = I40E_AQ_DCB_SET_AGENT; 3753 } 3754 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3755 3756 return status; 3757 } 3758 3759 /** 3760 * i40e_aq_get_cee_dcb_config 3761 * @hw: pointer to the hw struct 3762 * @buff: response buffer that stores CEE operational configuration 3763 * @buff_size: size of the buffer passed 3764 * @cmd_details: pointer to command details structure or NULL 3765 * 3766 * Get CEE DCBX mode operational configuration from firmware 3767 **/ 3768 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3769 void *buff, u16 buff_size, 3770 struct i40e_asq_cmd_details *cmd_details) 3771 { 3772 struct i40e_aq_desc desc; 3773 int status; 3774 3775 if (buff_size == 0 || !buff) 3776 return -EINVAL; 3777 3778 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3779 3780 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3781 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3782 cmd_details); 3783 3784 return status; 3785 } 3786 3787 /** 3788 * i40e_aq_add_udp_tunnel 3789 * @hw: pointer to the hw struct 3790 * @udp_port: the UDP port to add in Host byte order 3791 * @protocol_index: protocol index type 3792 * @filter_index: pointer to filter index 3793 * @cmd_details: pointer to command details structure or NULL 3794 * 3795 * Note: Firmware expects the udp_port value to be in Little Endian format, 3796 * and this function will call cpu_to_le16 to convert from Host byte order to 3797 * Little Endian order. 
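 *
 * Illustrative sketch, not taken from the driver: offloading the IANA VXLAN
 * port 4789. The port is passed in host byte order, as described above; the
 * tunnel type define is assumed from the adminq command definitions.
 *
 *	u8 filter_index;
 *	int status;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					&filter_index, NULL);
 *
 * On success, filter_index is what a later i40e_aq_del_udp_tunnel() expects.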
3798 **/ 3799 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3800 u16 udp_port, u8 protocol_index, 3801 u8 *filter_index, 3802 struct i40e_asq_cmd_details *cmd_details) 3803 { 3804 struct i40e_aq_desc desc; 3805 struct i40e_aqc_add_udp_tunnel *cmd = 3806 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3807 struct i40e_aqc_del_udp_tunnel_completion *resp = 3808 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3809 int status; 3810 3811 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3812 3813 cmd->udp_port = cpu_to_le16(udp_port); 3814 cmd->protocol_type = protocol_index; 3815 3816 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3817 3818 if (!status && filter_index) 3819 *filter_index = resp->index; 3820 3821 return status; 3822 } 3823 3824 /** 3825 * i40e_aq_del_udp_tunnel 3826 * @hw: pointer to the hw struct 3827 * @index: filter index 3828 * @cmd_details: pointer to command details structure or NULL 3829 **/ 3830 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3831 struct i40e_asq_cmd_details *cmd_details) 3832 { 3833 struct i40e_aq_desc desc; 3834 struct i40e_aqc_remove_udp_tunnel *cmd = 3835 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3836 int status; 3837 3838 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3839 3840 cmd->index = index; 3841 3842 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3843 3844 return status; 3845 } 3846 3847 /** 3848 * i40e_aq_delete_element - Delete switch element 3849 * @hw: pointer to the hw struct 3850 * @seid: the SEID to delete from the switch 3851 * @cmd_details: pointer to command details structure or NULL 3852 * 3853 * This deletes a switch element from the switch. 3854 **/ 3855 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3856 struct i40e_asq_cmd_details *cmd_details) 3857 { 3858 struct i40e_aq_desc desc; 3859 struct i40e_aqc_switch_seid *cmd = 3860 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3861 int status; 3862 3863 if (seid == 0) 3864 return -EINVAL; 3865 3866 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3867 3868 cmd->seid = cpu_to_le16(seid); 3869 3870 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3871 cmd_details, true); 3872 3873 return status; 3874 } 3875 3876 /** 3877 * i40e_aq_dcb_updated - DCB Updated Command 3878 * @hw: pointer to the hw struct 3879 * @cmd_details: pointer to command details structure or NULL 3880 * 3881 * EMP will return when the shared RPB settings have been 3882 * recomputed and modified. The retval field in the descriptor 3883 * will be set to 0 when RPB is modified. 
3884 **/ 3885 int i40e_aq_dcb_updated(struct i40e_hw *hw, 3886 struct i40e_asq_cmd_details *cmd_details) 3887 { 3888 struct i40e_aq_desc desc; 3889 int status; 3890 3891 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3892 3893 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3894 3895 return status; 3896 } 3897 3898 /** 3899 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3900 * @hw: pointer to the hw struct 3901 * @seid: seid for the physical port/switching component/vsi 3902 * @buff: Indirect buffer to hold data parameters and response 3903 * @buff_size: Indirect buffer size 3904 * @opcode: Tx scheduler AQ command opcode 3905 * @cmd_details: pointer to command details structure or NULL 3906 * 3907 * Generic command handler for Tx scheduler AQ commands 3908 **/ 3909 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3910 void *buff, u16 buff_size, 3911 enum i40e_admin_queue_opc opcode, 3912 struct i40e_asq_cmd_details *cmd_details) 3913 { 3914 struct i40e_aq_desc desc; 3915 struct i40e_aqc_tx_sched_ind *cmd = 3916 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3917 int status; 3918 bool cmd_param_flag = false; 3919 3920 switch (opcode) { 3921 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3922 case i40e_aqc_opc_configure_vsi_tc_bw: 3923 case i40e_aqc_opc_enable_switching_comp_ets: 3924 case i40e_aqc_opc_modify_switching_comp_ets: 3925 case i40e_aqc_opc_disable_switching_comp_ets: 3926 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3927 case i40e_aqc_opc_configure_switching_comp_bw_config: 3928 cmd_param_flag = true; 3929 break; 3930 case i40e_aqc_opc_query_vsi_bw_config: 3931 case i40e_aqc_opc_query_vsi_ets_sla_config: 3932 case i40e_aqc_opc_query_switching_comp_ets_config: 3933 case i40e_aqc_opc_query_port_ets_config: 3934 case i40e_aqc_opc_query_switching_comp_bw_config: 3935 cmd_param_flag = false; 3936 break; 3937 default: 3938 return -EINVAL; 3939 } 3940 3941 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3942 3943 /* Indirect command */ 3944 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3945 if (cmd_param_flag) 3946 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3947 if (buff_size > I40E_AQ_LARGE_BUF) 3948 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3949 3950 desc.datalen = cpu_to_le16(buff_size); 3951 3952 cmd->vsi_seid = cpu_to_le16(seid); 3953 3954 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3955 3956 return status; 3957 } 3958 3959 /** 3960 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3961 * @hw: pointer to the hw struct 3962 * @seid: VSI seid 3963 * @credit: BW limit credits (0 = disabled) 3964 * @max_credit: Max BW limit credits 3965 * @cmd_details: pointer to command details structure or NULL 3966 **/ 3967 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3968 u16 seid, u16 credit, u8 max_credit, 3969 struct i40e_asq_cmd_details *cmd_details) 3970 { 3971 struct i40e_aq_desc desc; 3972 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3973 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3974 int status; 3975 3976 i40e_fill_default_direct_cmd_desc(&desc, 3977 i40e_aqc_opc_configure_vsi_bw_limit); 3978 3979 cmd->vsi_seid = cpu_to_le16(seid); 3980 cmd->credit = cpu_to_le16(credit); 3981 cmd->max_credit = max_credit; 3982 3983 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3984 3985 return status; 3986 } 3987 3988 /** 3989 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 3990 * @hw: pointer to the hw struct 
3991 * @seid: VSI seid 3992 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 3993 * @cmd_details: pointer to command details structure or NULL 3994 **/ 3995 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 3996 u16 seid, 3997 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 3998 struct i40e_asq_cmd_details *cmd_details) 3999 { 4000 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4001 i40e_aqc_opc_configure_vsi_tc_bw, 4002 cmd_details); 4003 } 4004 4005 /** 4006 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4007 * @hw: pointer to the hw struct 4008 * @seid: seid of the switching component connected to Physical Port 4009 * @ets_data: Buffer holding ETS parameters 4010 * @opcode: Tx scheduler AQ command opcode 4011 * @cmd_details: pointer to command details structure or NULL 4012 **/ 4013 int 4014 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4015 u16 seid, 4016 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4017 enum i40e_admin_queue_opc opcode, 4018 struct i40e_asq_cmd_details *cmd_details) 4019 { 4020 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4021 sizeof(*ets_data), opcode, cmd_details); 4022 } 4023 4024 /** 4025 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4026 * @hw: pointer to the hw struct 4027 * @seid: seid of the switching component 4028 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4029 * @cmd_details: pointer to command details structure or NULL 4030 **/ 4031 int 4032 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4033 u16 seid, 4034 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4035 struct i40e_asq_cmd_details *cmd_details) 4036 { 4037 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4038 i40e_aqc_opc_configure_switching_comp_bw_config, 4039 cmd_details); 4040 } 4041 4042 /** 4043 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4044 * @hw: pointer to the hw struct 4045 * @seid: seid of the VSI 4046 * @bw_data: Buffer to hold VSI BW configuration 4047 * @cmd_details: pointer to command details structure or NULL 4048 **/ 4049 int 4050 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4051 u16 seid, 4052 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4053 struct i40e_asq_cmd_details *cmd_details) 4054 { 4055 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4056 i40e_aqc_opc_query_vsi_bw_config, 4057 cmd_details); 4058 } 4059 4060 /** 4061 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4062 * @hw: pointer to the hw struct 4063 * @seid: seid of the VSI 4064 * @bw_data: Buffer to hold VSI BW configuration per TC 4065 * @cmd_details: pointer to command details structure or NULL 4066 **/ 4067 int 4068 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4069 u16 seid, 4070 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4071 struct i40e_asq_cmd_details *cmd_details) 4072 { 4073 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4074 i40e_aqc_opc_query_vsi_ets_sla_config, 4075 cmd_details); 4076 } 4077 4078 /** 4079 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4080 * @hw: pointer to the hw struct 4081 * @seid: seid of the switching component 4082 * @bw_data: Buffer to hold switching component's per TC BW config 4083 * @cmd_details: pointer to command details structure or NULL 4084 **/ 4085 int 4086 i40e_aq_query_switch_comp_ets_config(struct i40e_hw 
*hw, 4087 u16 seid, 4088 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4089 struct i40e_asq_cmd_details *cmd_details) 4090 { 4091 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4092 i40e_aqc_opc_query_switching_comp_ets_config, 4093 cmd_details); 4094 } 4095 4096 /** 4097 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4098 * @hw: pointer to the hw struct 4099 * @seid: seid of the VSI or switching component connected to Physical Port 4100 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4101 * @cmd_details: pointer to command details structure or NULL 4102 **/ 4103 int 4104 i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4105 u16 seid, 4106 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4107 struct i40e_asq_cmd_details *cmd_details) 4108 { 4109 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4110 i40e_aqc_opc_query_port_ets_config, 4111 cmd_details); 4112 } 4113 4114 /** 4115 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4116 * @hw: pointer to the hw struct 4117 * @seid: seid of the switching component 4118 * @bw_data: Buffer to hold switching component's BW configuration 4119 * @cmd_details: pointer to command details structure or NULL 4120 **/ 4121 int 4122 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4123 u16 seid, 4124 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4125 struct i40e_asq_cmd_details *cmd_details) 4126 { 4127 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4128 i40e_aqc_opc_query_switching_comp_bw_config, 4129 cmd_details); 4130 } 4131 4132 /** 4133 * i40e_validate_filter_settings 4134 * @hw: pointer to the hardware structure 4135 * @settings: Filter control settings 4136 * 4137 * Check and validate the filter control settings passed. 4138 * The function checks for the valid filter/context sizes being 4139 * passed for FCoE and PE. 4140 * 4141 * Returns 0 if the values passed are valid and within 4142 * range else returns an error. 
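 *
 * The FCoE enums double as shift counts: the effective sizes are computed
 * below as I40E_HASH_FILTER_BASE_SIZE << fcoe_filt_num and
 * I40E_DMA_CNTX_BASE_SIZE << fcoe_cntx_num, and their sum is then checked
 * against the PMFCOEFMAX limit read from I40E_GLHMC_FCOEFMAX.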
4143 **/ 4144 static int 4145 i40e_validate_filter_settings(struct i40e_hw *hw, 4146 struct i40e_filter_control_settings *settings) 4147 { 4148 u32 fcoe_cntx_size, fcoe_filt_size; 4149 u32 fcoe_fmax; 4150 u32 val; 4151 4152 /* Validate FCoE settings passed */ 4153 switch (settings->fcoe_filt_num) { 4154 case I40E_HASH_FILTER_SIZE_1K: 4155 case I40E_HASH_FILTER_SIZE_2K: 4156 case I40E_HASH_FILTER_SIZE_4K: 4157 case I40E_HASH_FILTER_SIZE_8K: 4158 case I40E_HASH_FILTER_SIZE_16K: 4159 case I40E_HASH_FILTER_SIZE_32K: 4160 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4161 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4162 break; 4163 default: 4164 return -EINVAL; 4165 } 4166 4167 switch (settings->fcoe_cntx_num) { 4168 case I40E_DMA_CNTX_SIZE_512: 4169 case I40E_DMA_CNTX_SIZE_1K: 4170 case I40E_DMA_CNTX_SIZE_2K: 4171 case I40E_DMA_CNTX_SIZE_4K: 4172 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4173 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4174 break; 4175 default: 4176 return -EINVAL; 4177 } 4178 4179 /* Validate PE settings passed */ 4180 switch (settings->pe_filt_num) { 4181 case I40E_HASH_FILTER_SIZE_1K: 4182 case I40E_HASH_FILTER_SIZE_2K: 4183 case I40E_HASH_FILTER_SIZE_4K: 4184 case I40E_HASH_FILTER_SIZE_8K: 4185 case I40E_HASH_FILTER_SIZE_16K: 4186 case I40E_HASH_FILTER_SIZE_32K: 4187 case I40E_HASH_FILTER_SIZE_64K: 4188 case I40E_HASH_FILTER_SIZE_128K: 4189 case I40E_HASH_FILTER_SIZE_256K: 4190 case I40E_HASH_FILTER_SIZE_512K: 4191 case I40E_HASH_FILTER_SIZE_1M: 4192 break; 4193 default: 4194 return -EINVAL; 4195 } 4196 4197 switch (settings->pe_cntx_num) { 4198 case I40E_DMA_CNTX_SIZE_512: 4199 case I40E_DMA_CNTX_SIZE_1K: 4200 case I40E_DMA_CNTX_SIZE_2K: 4201 case I40E_DMA_CNTX_SIZE_4K: 4202 case I40E_DMA_CNTX_SIZE_8K: 4203 case I40E_DMA_CNTX_SIZE_16K: 4204 case I40E_DMA_CNTX_SIZE_32K: 4205 case I40E_DMA_CNTX_SIZE_64K: 4206 case I40E_DMA_CNTX_SIZE_128K: 4207 case I40E_DMA_CNTX_SIZE_256K: 4208 break; 4209 default: 4210 return -EINVAL; 4211 } 4212 4213 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4214 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4215 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4216 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4217 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4218 return -EINVAL; 4219 4220 return 0; 4221 } 4222 4223 /** 4224 * i40e_set_filter_control 4225 * @hw: pointer to the hardware structure 4226 * @settings: Filter control settings 4227 * 4228 * Set the Queue Filters for PE/FCoE and enable filters required 4229 * for a single PF. It is expected that these settings are programmed 4230 * at the driver initialization time. 
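 *
 * Illustrative init-time sketch, not taken from the driver; the specific
 * field choices below are assumptions for the example only:
 *
 *	struct i40e_filter_control_settings settings;
 *	int err;
 *
 *	memset(&settings, 0, sizeof(settings));
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	err = i40e_set_filter_control(hw, &settings);
 *	if (err)
 *		return err;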
4231 **/ 4232 int i40e_set_filter_control(struct i40e_hw *hw, 4233 struct i40e_filter_control_settings *settings) 4234 { 4235 u32 hash_lut_size = 0; 4236 int ret = 0; 4237 u32 val; 4238 4239 if (!settings) 4240 return -EINVAL; 4241 4242 /* Validate the input settings */ 4243 ret = i40e_validate_filter_settings(hw, settings); 4244 if (ret) 4245 return ret; 4246 4247 /* Read the PF Queue Filter control register */ 4248 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4249 4250 /* Program required PE hash buckets for the PF */ 4251 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4252 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4253 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4254 /* Program required PE contexts for the PF */ 4255 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4256 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4257 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4258 4259 /* Program required FCoE hash buckets for the PF */ 4260 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4261 val |= ((u32)settings->fcoe_filt_num << 4262 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4263 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4264 /* Program required FCoE DDP contexts for the PF */ 4265 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4266 val |= ((u32)settings->fcoe_cntx_num << 4267 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4268 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4269 4270 /* Program Hash LUT size for the PF */ 4271 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4272 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4273 hash_lut_size = 1; 4274 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4275 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4276 4277 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4278 if (settings->enable_fdir) 4279 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4280 if (settings->enable_ethtype) 4281 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4282 if (settings->enable_macvlan) 4283 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4284 4285 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4286 4287 return 0; 4288 } 4289 4290 /** 4291 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4292 * @hw: pointer to the hw struct 4293 * @mac_addr: MAC address to use in the filter 4294 * @ethtype: Ethertype to use in the filter 4295 * @flags: Flags that needs to be applied to the filter 4296 * @vsi_seid: seid of the control VSI 4297 * @queue: VSI queue number to send the packet to 4298 * @is_add: Add control packet filter if True else remove 4299 * @stats: Structure to hold information on control filter counts 4300 * @cmd_details: pointer to command details structure or NULL 4301 * 4302 * This command will Add or Remove control packet filter for a control VSI. 4303 * In return it will update the total number of perfect filter count in 4304 * the stats member. 
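 *
 * For an in-driver usage example see
 * i40e_add_filter_to_drop_tx_flow_control_frames() below, which installs a
 * MAC-agnostic Tx drop filter for PAUSE frames (ethertype 0x8808) by passing
 * the IGNORE_MAC, DROP and TX flags with is_add set to true.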
4305 **/ 4306 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4307 u8 *mac_addr, u16 ethtype, u16 flags, 4308 u16 vsi_seid, u16 queue, bool is_add, 4309 struct i40e_control_filter_stats *stats, 4310 struct i40e_asq_cmd_details *cmd_details) 4311 { 4312 struct i40e_aq_desc desc; 4313 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4314 (struct i40e_aqc_add_remove_control_packet_filter *) 4315 &desc.params.raw; 4316 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4317 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4318 &desc.params.raw; 4319 int status; 4320 4321 if (vsi_seid == 0) 4322 return -EINVAL; 4323 4324 if (is_add) { 4325 i40e_fill_default_direct_cmd_desc(&desc, 4326 i40e_aqc_opc_add_control_packet_filter); 4327 cmd->queue = cpu_to_le16(queue); 4328 } else { 4329 i40e_fill_default_direct_cmd_desc(&desc, 4330 i40e_aqc_opc_remove_control_packet_filter); 4331 } 4332 4333 if (mac_addr) 4334 ether_addr_copy(cmd->mac, mac_addr); 4335 4336 cmd->etype = cpu_to_le16(ethtype); 4337 cmd->flags = cpu_to_le16(flags); 4338 cmd->seid = cpu_to_le16(vsi_seid); 4339 4340 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4341 4342 if (!status && stats) { 4343 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4344 stats->etype_used = le16_to_cpu(resp->etype_used); 4345 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4346 stats->etype_free = le16_to_cpu(resp->etype_free); 4347 } 4348 4349 return status; 4350 } 4351 4352 /** 4353 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4354 * @hw: pointer to the hw struct 4355 * @seid: VSI seid to add ethertype filter from 4356 **/ 4357 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4358 u16 seid) 4359 { 4360 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4361 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4362 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4363 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4364 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4365 int status; 4366 4367 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4368 seid, 0, true, NULL, 4369 NULL); 4370 if (status) 4371 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4372 } 4373 4374 /** 4375 * i40e_aq_alternate_read 4376 * @hw: pointer to the hardware structure 4377 * @reg_addr0: address of first dword to be read 4378 * @reg_val0: pointer for data read from 'reg_addr0' 4379 * @reg_addr1: address of second dword to be read 4380 * @reg_val1: pointer for data read from 'reg_addr1' 4381 * 4382 * Read one or two dwords from alternate structure. Fields are indicated 4383 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4384 * is not passed then only register at 'reg_addr0' is read. 
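 *
 * The in-file caller is i40e_read_bw_from_alt_ram() below, which fetches the
 * per-PF max and min bandwidth dwords in a single command by passing both
 * register addresses and both value pointers.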
4385 * 4386 **/ 4387 static int i40e_aq_alternate_read(struct i40e_hw *hw, 4388 u32 reg_addr0, u32 *reg_val0, 4389 u32 reg_addr1, u32 *reg_val1) 4390 { 4391 struct i40e_aq_desc desc; 4392 struct i40e_aqc_alternate_write *cmd_resp = 4393 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4394 int status; 4395 4396 if (!reg_val0) 4397 return -EINVAL; 4398 4399 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4400 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4401 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4402 4403 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4404 4405 if (!status) { 4406 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4407 4408 if (reg_val1) 4409 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4410 } 4411 4412 return status; 4413 } 4414 4415 /** 4416 * i40e_aq_suspend_port_tx 4417 * @hw: pointer to the hardware structure 4418 * @seid: port seid 4419 * @cmd_details: pointer to command details structure or NULL 4420 * 4421 * Suspend port's Tx traffic 4422 **/ 4423 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4424 struct i40e_asq_cmd_details *cmd_details) 4425 { 4426 struct i40e_aqc_tx_sched_ind *cmd; 4427 struct i40e_aq_desc desc; 4428 int status; 4429 4430 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4431 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4432 cmd->vsi_seid = cpu_to_le16(seid); 4433 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4434 4435 return status; 4436 } 4437 4438 /** 4439 * i40e_aq_resume_port_tx 4440 * @hw: pointer to the hardware structure 4441 * @cmd_details: pointer to command details structure or NULL 4442 * 4443 * Resume port's Tx traffic 4444 **/ 4445 int i40e_aq_resume_port_tx(struct i40e_hw *hw, 4446 struct i40e_asq_cmd_details *cmd_details) 4447 { 4448 struct i40e_aq_desc desc; 4449 int status; 4450 4451 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4452 4453 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4454 4455 return status; 4456 } 4457 4458 /** 4459 * i40e_set_pci_config_data - store PCI bus info 4460 * @hw: pointer to hardware structure 4461 * @link_status: the link status word from PCI config space 4462 * 4463 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4464 **/ 4465 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4466 { 4467 hw->bus.type = i40e_bus_type_pci_express; 4468 4469 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4470 case PCI_EXP_LNKSTA_NLW_X1: 4471 hw->bus.width = i40e_bus_width_pcie_x1; 4472 break; 4473 case PCI_EXP_LNKSTA_NLW_X2: 4474 hw->bus.width = i40e_bus_width_pcie_x2; 4475 break; 4476 case PCI_EXP_LNKSTA_NLW_X4: 4477 hw->bus.width = i40e_bus_width_pcie_x4; 4478 break; 4479 case PCI_EXP_LNKSTA_NLW_X8: 4480 hw->bus.width = i40e_bus_width_pcie_x8; 4481 break; 4482 default: 4483 hw->bus.width = i40e_bus_width_unknown; 4484 break; 4485 } 4486 4487 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4488 case PCI_EXP_LNKSTA_CLS_2_5GB: 4489 hw->bus.speed = i40e_bus_speed_2500; 4490 break; 4491 case PCI_EXP_LNKSTA_CLS_5_0GB: 4492 hw->bus.speed = i40e_bus_speed_5000; 4493 break; 4494 case PCI_EXP_LNKSTA_CLS_8_0GB: 4495 hw->bus.speed = i40e_bus_speed_8000; 4496 break; 4497 default: 4498 hw->bus.speed = i40e_bus_speed_unknown; 4499 break; 4500 } 4501 } 4502 4503 /** 4504 * i40e_aq_debug_dump 4505 * @hw: pointer to the hardware structure 4506 * @cluster_id: specific cluster to dump 4507 * @table_id: table id within cluster 4508 * @start_index: index of line 
in the block to read 4509 * @buff_size: dump buffer size 4510 * @buff: dump buffer 4511 * @ret_buff_size: actual buffer size returned 4512 * @ret_next_table: next block to read 4513 * @ret_next_index: next index to read 4514 * @cmd_details: pointer to command details structure or NULL 4515 * 4516 * Dump internal FW/HW data for debug purposes. 4517 * 4518 **/ 4519 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4520 u8 table_id, u32 start_index, u16 buff_size, 4521 void *buff, u16 *ret_buff_size, 4522 u8 *ret_next_table, u32 *ret_next_index, 4523 struct i40e_asq_cmd_details *cmd_details) 4524 { 4525 struct i40e_aq_desc desc; 4526 struct i40e_aqc_debug_dump_internals *cmd = 4527 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4528 struct i40e_aqc_debug_dump_internals *resp = 4529 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4530 int status; 4531 4532 if (buff_size == 0 || !buff) 4533 return -EINVAL; 4534 4535 i40e_fill_default_direct_cmd_desc(&desc, 4536 i40e_aqc_opc_debug_dump_internals); 4537 /* Indirect Command */ 4538 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4539 if (buff_size > I40E_AQ_LARGE_BUF) 4540 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4541 4542 cmd->cluster_id = cluster_id; 4543 cmd->table_id = table_id; 4544 cmd->idx = cpu_to_le32(start_index); 4545 4546 desc.datalen = cpu_to_le16(buff_size); 4547 4548 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4549 if (!status) { 4550 if (ret_buff_size) 4551 *ret_buff_size = le16_to_cpu(desc.datalen); 4552 if (ret_next_table) 4553 *ret_next_table = resp->table_id; 4554 if (ret_next_index) 4555 *ret_next_index = le32_to_cpu(resp->idx); 4556 } 4557 4558 return status; 4559 } 4560 4561 /** 4562 * i40e_read_bw_from_alt_ram 4563 * @hw: pointer to the hardware structure 4564 * @max_bw: pointer for max_bw read 4565 * @min_bw: pointer for min_bw read 4566 * @min_valid: pointer for bool that is true if min_bw is a valid value 4567 * @max_valid: pointer for bool that is true if max_bw is a valid value 4568 * 4569 * Read bw from the alternate ram for the given pf 4570 **/ 4571 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4572 u32 *max_bw, u32 *min_bw, 4573 bool *min_valid, bool *max_valid) 4574 { 4575 u32 max_bw_addr, min_bw_addr; 4576 int status; 4577 4578 /* Calculate the address of the min/max bw registers */ 4579 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4580 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4581 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4582 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4583 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4584 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4585 4586 /* Read the bandwidths from alt ram */ 4587 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4588 min_bw_addr, min_bw); 4589 4590 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4591 *min_valid = true; 4592 else 4593 *min_valid = false; 4594 4595 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4596 *max_valid = true; 4597 else 4598 *max_valid = false; 4599 4600 return status; 4601 } 4602 4603 /** 4604 * i40e_aq_configure_partition_bw 4605 * @hw: pointer to the hardware structure 4606 * @bw_data: Buffer holding valid pfs and bw limits 4607 * @cmd_details: pointer to command details 4608 * 4609 * Configure partitions guaranteed/max bw 4610 **/ 4611 int 4612 i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4613 struct i40e_aqc_configure_partition_bw_data *bw_data, 4614 struct i40e_asq_cmd_details *cmd_details) 4615 { 4616 u16 bwd_size = sizeof(*bw_data); 4617 struct i40e_aq_desc desc; 4618 
int status; 4619 4620 i40e_fill_default_direct_cmd_desc(&desc, 4621 i40e_aqc_opc_configure_partition_bw); 4622 4623 /* Indirect command */ 4624 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4625 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4626 4627 if (bwd_size > I40E_AQ_LARGE_BUF) 4628 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4629 4630 desc.datalen = cpu_to_le16(bwd_size); 4631 4632 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4633 cmd_details); 4634 4635 return status; 4636 } 4637 4638 /** 4639 * i40e_read_phy_register_clause22 4640 * @hw: pointer to the HW structure 4641 * @reg: register address in the page 4642 * @phy_addr: PHY address on MDIO interface 4643 * @value: PHY register value 4644 * 4645 * Reads specified PHY register value 4646 **/ 4647 int i40e_read_phy_register_clause22(struct i40e_hw *hw, 4648 u16 reg, u8 phy_addr, u16 *value) 4649 { 4650 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4651 int status = -EIO; 4652 u32 command = 0; 4653 u16 retry = 1000; 4654 4655 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4656 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4657 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4658 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4659 (I40E_GLGEN_MSCA_MDICMD_MASK); 4660 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4661 do { 4662 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4663 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4664 status = 0; 4665 break; 4666 } 4667 udelay(10); 4668 retry--; 4669 } while (retry); 4670 4671 if (status) { 4672 i40e_debug(hw, I40E_DEBUG_PHY, 4673 "PHY: Can't write command to external PHY.\n"); 4674 } else { 4675 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4676 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4677 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4678 } 4679 4680 return status; 4681 } 4682 4683 /** 4684 * i40e_write_phy_register_clause22 4685 * @hw: pointer to the HW structure 4686 * @reg: register address in the page 4687 * @phy_addr: PHY address on MDIO interface 4688 * @value: PHY register value 4689 * 4690 * Writes specified PHY register value 4691 **/ 4692 int i40e_write_phy_register_clause22(struct i40e_hw *hw, 4693 u16 reg, u8 phy_addr, u16 value) 4694 { 4695 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4696 int status = -EIO; 4697 u32 command = 0; 4698 u16 retry = 1000; 4699 4700 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4701 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4702 4703 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4704 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4705 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4706 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4707 (I40E_GLGEN_MSCA_MDICMD_MASK); 4708 4709 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4710 do { 4711 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4712 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4713 status = 0; 4714 break; 4715 } 4716 udelay(10); 4717 retry--; 4718 } while (retry); 4719 4720 return status; 4721 } 4722 4723 /** 4724 * i40e_read_phy_register_clause45 4725 * @hw: pointer to the HW structure 4726 * @page: registers page number 4727 * @reg: register address in the page 4728 * @phy_addr: PHY address on MDIO interface 4729 * @value: PHY register value 4730 * 4731 * Reads specified PHY register value 4732 **/ 4733 int i40e_read_phy_register_clause45(struct i40e_hw *hw, 4734 u8 page, u16 reg, u8 phy_addr, u16 *value) 4735 { 4736 u8 port_num = hw->func_caps.mdio_port_num; 4737 int status = -EIO; 4738 u32 command = 0; 4739 u16 retry = 1000; 4740 4741 command = (reg << 
I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4742 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4743 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4744 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4745 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4746 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4747 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4748 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4749 do { 4750 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4751 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4752 status = 0; 4753 break; 4754 } 4755 usleep_range(10, 20); 4756 retry--; 4757 } while (retry); 4758 4759 if (status) { 4760 i40e_debug(hw, I40E_DEBUG_PHY, 4761 "PHY: Can't write command to external PHY.\n"); 4762 goto phy_read_end; 4763 } 4764 4765 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4766 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4767 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4768 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4769 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4770 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4771 status = -EIO; 4772 retry = 1000; 4773 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4774 do { 4775 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4776 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4777 status = 0; 4778 break; 4779 } 4780 usleep_range(10, 20); 4781 retry--; 4782 } while (retry); 4783 4784 if (!status) { 4785 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4786 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4787 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4788 } else { 4789 i40e_debug(hw, I40E_DEBUG_PHY, 4790 "PHY: Can't read register value from external PHY.\n"); 4791 } 4792 4793 phy_read_end: 4794 return status; 4795 } 4796 4797 /** 4798 * i40e_write_phy_register_clause45 4799 * @hw: pointer to the HW structure 4800 * @page: registers page number 4801 * @reg: register address in the page 4802 * @phy_addr: PHY address on MDIO interface 4803 * @value: PHY register value 4804 * 4805 * Writes value to specified PHY register 4806 **/ 4807 int i40e_write_phy_register_clause45(struct i40e_hw *hw, 4808 u8 page, u16 reg, u8 phy_addr, u16 value) 4809 { 4810 u8 port_num = hw->func_caps.mdio_port_num; 4811 int status = -EIO; 4812 u16 retry = 1000; 4813 u32 command = 0; 4814 4815 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4816 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4817 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4818 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4819 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4820 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4821 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4822 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4823 do { 4824 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4825 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4826 status = 0; 4827 break; 4828 } 4829 usleep_range(10, 20); 4830 retry--; 4831 } while (retry); 4832 if (status) { 4833 i40e_debug(hw, I40E_DEBUG_PHY, 4834 "PHY: Can't write command to external PHY.\n"); 4835 goto phy_write_end; 4836 } 4837 4838 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4839 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4840 4841 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4842 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4843 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4844 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4845 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4846 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4847 status = -EIO; 4848 retry = 1000; 4849 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4850 do { 4851 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4852 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4853 status = 0; 4854 break; 4855 } 4856 
usleep_range(10, 20); 4857 retry--; 4858 } while (retry); 4859 4860 phy_write_end: 4861 return status; 4862 } 4863 4864 /** 4865 * i40e_write_phy_register 4866 * @hw: pointer to the HW structure 4867 * @page: registers page number 4868 * @reg: register address in the page 4869 * @phy_addr: PHY address on MDIO interface 4870 * @value: PHY register value 4871 * 4872 * Writes value to specified PHY register 4873 **/ 4874 int i40e_write_phy_register(struct i40e_hw *hw, 4875 u8 page, u16 reg, u8 phy_addr, u16 value) 4876 { 4877 int status; 4878 4879 switch (hw->device_id) { 4880 case I40E_DEV_ID_1G_BASE_T_X722: 4881 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4882 value); 4883 break; 4884 case I40E_DEV_ID_1G_BASE_T_BC: 4885 case I40E_DEV_ID_5G_BASE_T_BC: 4886 case I40E_DEV_ID_10G_BASE_T: 4887 case I40E_DEV_ID_10G_BASE_T4: 4888 case I40E_DEV_ID_10G_BASE_T_BC: 4889 case I40E_DEV_ID_10G_BASE_T_X722: 4890 case I40E_DEV_ID_25G_B: 4891 case I40E_DEV_ID_25G_SFP28: 4892 status = i40e_write_phy_register_clause45(hw, page, reg, 4893 phy_addr, value); 4894 break; 4895 default: 4896 status = -EIO; 4897 break; 4898 } 4899 4900 return status; 4901 } 4902 4903 /** 4904 * i40e_read_phy_register 4905 * @hw: pointer to the HW structure 4906 * @page: registers page number 4907 * @reg: register address in the page 4908 * @phy_addr: PHY address on MDIO interface 4909 * @value: PHY register value 4910 * 4911 * Reads specified PHY register value 4912 **/ 4913 int i40e_read_phy_register(struct i40e_hw *hw, 4914 u8 page, u16 reg, u8 phy_addr, u16 *value) 4915 { 4916 int status; 4917 4918 switch (hw->device_id) { 4919 case I40E_DEV_ID_1G_BASE_T_X722: 4920 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4921 value); 4922 break; 4923 case I40E_DEV_ID_1G_BASE_T_BC: 4924 case I40E_DEV_ID_5G_BASE_T_BC: 4925 case I40E_DEV_ID_10G_BASE_T: 4926 case I40E_DEV_ID_10G_BASE_T4: 4927 case I40E_DEV_ID_10G_BASE_T_BC: 4928 case I40E_DEV_ID_10G_BASE_T_X722: 4929 case I40E_DEV_ID_25G_B: 4930 case I40E_DEV_ID_25G_SFP28: 4931 status = i40e_read_phy_register_clause45(hw, page, reg, 4932 phy_addr, value); 4933 break; 4934 default: 4935 status = -EIO; 4936 break; 4937 } 4938 4939 return status; 4940 } 4941 4942 /** 4943 * i40e_get_phy_address 4944 * @hw: pointer to the HW structure 4945 * @dev_num: PHY port num that address we want 4946 * 4947 * Gets PHY address for current port 4948 **/ 4949 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4950 { 4951 u8 port_num = hw->func_caps.mdio_port_num; 4952 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4953 4954 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4955 } 4956 4957 /** 4958 * i40e_blink_phy_link_led 4959 * @hw: pointer to the HW structure 4960 * @time: time how long led will blinks in secs 4961 * @interval: gap between LED on and off in msecs 4962 * 4963 * Blinks PHY link LED 4964 **/ 4965 int i40e_blink_phy_link_led(struct i40e_hw *hw, 4966 u32 time, u32 interval) 4967 { 4968 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4969 u16 gpio_led_port; 4970 u8 phy_addr = 0; 4971 int status = 0; 4972 u16 led_ctl; 4973 u8 port_num; 4974 u16 led_reg; 4975 u32 i; 4976 4977 i = rd32(hw, I40E_PFGEN_PORTNUM); 4978 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4979 phy_addr = i40e_get_phy_address(hw, port_num); 4980 4981 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4982 led_addr++) { 4983 status = i40e_read_phy_register_clause45(hw, 4984 I40E_PHY_COM_REG_PAGE, 4985 led_addr, phy_addr, 4986 &led_reg); 4987 if (status) 4988 goto 
phy_blinking_end; 4989 led_ctl = led_reg; 4990 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4991 led_reg = 0; 4992 status = i40e_write_phy_register_clause45(hw, 4993 I40E_PHY_COM_REG_PAGE, 4994 led_addr, phy_addr, 4995 led_reg); 4996 if (status) 4997 goto phy_blinking_end; 4998 break; 4999 } 5000 } 5001 5002 if (time > 0 && interval > 0) { 5003 for (i = 0; i < time * 1000; i += interval) { 5004 status = i40e_read_phy_register_clause45(hw, 5005 I40E_PHY_COM_REG_PAGE, 5006 led_addr, phy_addr, &led_reg); 5007 if (status) 5008 goto restore_config; 5009 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5010 led_reg = 0; 5011 else 5012 led_reg = I40E_PHY_LED_MANUAL_ON; 5013 status = i40e_write_phy_register_clause45(hw, 5014 I40E_PHY_COM_REG_PAGE, 5015 led_addr, phy_addr, led_reg); 5016 if (status) 5017 goto restore_config; 5018 msleep(interval); 5019 } 5020 } 5021 5022 restore_config: 5023 status = i40e_write_phy_register_clause45(hw, 5024 I40E_PHY_COM_REG_PAGE, 5025 led_addr, phy_addr, led_ctl); 5026 5027 phy_blinking_end: 5028 return status; 5029 } 5030 5031 /** 5032 * i40e_led_get_reg - read LED register 5033 * @hw: pointer to the HW structure 5034 * @led_addr: LED register address 5035 * @reg_val: read register value 5036 **/ 5037 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5038 u32 *reg_val) 5039 { 5040 u8 phy_addr = 0; 5041 u8 port_num; 5042 int status; 5043 u32 i; 5044 5045 *reg_val = 0; 5046 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5047 status = 5048 i40e_aq_get_phy_register(hw, 5049 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5050 I40E_PHY_COM_REG_PAGE, true, 5051 I40E_PHY_LED_PROV_REG_1, 5052 reg_val, NULL); 5053 } else { 5054 i = rd32(hw, I40E_PFGEN_PORTNUM); 5055 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5056 phy_addr = i40e_get_phy_address(hw, port_num); 5057 status = i40e_read_phy_register_clause45(hw, 5058 I40E_PHY_COM_REG_PAGE, 5059 led_addr, phy_addr, 5060 (u16 *)reg_val); 5061 } 5062 return status; 5063 } 5064 5065 /** 5066 * i40e_led_set_reg - write LED register 5067 * @hw: pointer to the HW structure 5068 * @led_addr: LED register address 5069 * @reg_val: register value to write 5070 **/ 5071 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5072 u32 reg_val) 5073 { 5074 u8 phy_addr = 0; 5075 u8 port_num; 5076 int status; 5077 u32 i; 5078 5079 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5080 status = 5081 i40e_aq_set_phy_register(hw, 5082 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5083 I40E_PHY_COM_REG_PAGE, true, 5084 I40E_PHY_LED_PROV_REG_1, 5085 reg_val, NULL); 5086 } else { 5087 i = rd32(hw, I40E_PFGEN_PORTNUM); 5088 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5089 phy_addr = i40e_get_phy_address(hw, port_num); 5090 status = i40e_write_phy_register_clause45(hw, 5091 I40E_PHY_COM_REG_PAGE, 5092 led_addr, phy_addr, 5093 (u16)reg_val); 5094 } 5095 5096 return status; 5097 } 5098 5099 /** 5100 * i40e_led_get_phy - return current on/off mode 5101 * @hw: pointer to the hw struct 5102 * @led_addr: address of led register to use 5103 * @val: original value of register to use 5104 * 5105 **/ 5106 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5107 u16 *val) 5108 { 5109 u16 gpio_led_port; 5110 u8 phy_addr = 0; 5111 u32 reg_val_aq; 5112 int status = 0; 5113 u16 temp_addr; 5114 u16 reg_val; 5115 u8 port_num; 5116 u32 i; 5117 5118 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5119 status = 5120 i40e_aq_get_phy_register(hw, 5121 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5122 I40E_PHY_COM_REG_PAGE, true, 5123 I40E_PHY_LED_PROV_REG_1, 
5124 &reg_val_aq, NULL); 5125 if (status == 0) 5126 *val = (u16)reg_val_aq; 5127 return status; 5128 } 5129 temp_addr = I40E_PHY_LED_PROV_REG_1; 5130 i = rd32(hw, I40E_PFGEN_PORTNUM); 5131 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5132 phy_addr = i40e_get_phy_address(hw, port_num); 5133 5134 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5135 temp_addr++) { 5136 status = i40e_read_phy_register_clause45(hw, 5137 I40E_PHY_COM_REG_PAGE, 5138 temp_addr, phy_addr, 5139 &reg_val); 5140 if (status) 5141 return status; 5142 *val = reg_val; 5143 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5144 *led_addr = temp_addr; 5145 break; 5146 } 5147 } 5148 return status; 5149 } 5150 5151 /** 5152 * i40e_led_set_phy 5153 * @hw: pointer to the HW structure 5154 * @on: true to turn the LED on, false to turn it off 5155 * @led_addr: address of led register to use 5156 * @mode: original val plus bit for set or ignore 5157 * 5158 * Set LEDs on or off when controlled by the PHY 5159 * 5160 **/ 5161 int i40e_led_set_phy(struct i40e_hw *hw, bool on, 5162 u16 led_addr, u32 mode) 5163 { 5164 u32 led_ctl = 0; 5165 u32 led_reg = 0; 5166 int status = 0; 5167 5168 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5169 if (status) 5170 return status; 5171 led_ctl = led_reg; 5172 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5173 led_reg = 0; 5174 status = i40e_led_set_reg(hw, led_addr, led_reg); 5175 if (status) 5176 return status; 5177 } 5178 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5179 if (status) 5180 goto restore_config; 5181 if (on) 5182 led_reg = I40E_PHY_LED_MANUAL_ON; 5183 else 5184 led_reg = 0; 5185 5186 status = i40e_led_set_reg(hw, led_addr, led_reg); 5187 if (status) 5188 goto restore_config; 5189 if (mode & I40E_PHY_LED_MODE_ORIG) { 5190 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5191 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5192 } 5193 return status; 5194 5195 restore_config: 5196 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5197 return status; 5198 } 5199 5200 /** 5201 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5202 * @hw: pointer to the hw struct 5203 * @reg_addr: register address 5204 * @reg_val: ptr to register value 5205 * @cmd_details: pointer to command details structure or NULL 5206 * 5207 * Use the firmware to read the Rx control register, 5208 * especially useful if the Rx unit is under heavy pressure 5209 **/ 5210 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5211 u32 reg_addr, u32 *reg_val, 5212 struct i40e_asq_cmd_details *cmd_details) 5213 { 5214 struct i40e_aq_desc desc; 5215 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5216 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5217 int status; 5218 5219 if (!reg_val) 5220 return -EINVAL; 5221 5222 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5223 5224 cmd_resp->address = cpu_to_le32(reg_addr); 5225 5226 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5227 5228 if (status == 0) 5229 *reg_val = le32_to_cpu(cmd_resp->value); 5230 5231 return status; 5232 } 5233 5234 /** 5235 * i40e_read_rx_ctl - read from an Rx control register 5236 * @hw: pointer to the hw struct 5237 * @reg_addr: register address 5238 **/ 5239 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5240 { 5241 bool use_register; 5242 int status = 0; 5243 int retry = 5; 5244 u32 val = 0; 5245 5246 use_register = (((hw->aq.api_maj_ver == 1) && 5247 (hw->aq.api_min_ver < 5)) || 5248 (hw->mac.type == I40E_MAC_X722)); 5249 if (!use_register) { 5250 do_retry: 5251 status =
i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5252 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5253 usleep_range(1000, 2000); 5254 retry--; 5255 goto do_retry; 5256 } 5257 } 5258 5259 /* if the AQ access failed, try the old-fashioned way */ 5260 if (status || use_register) 5261 val = rd32(hw, reg_addr); 5262 5263 return val; 5264 } 5265 5266 /** 5267 * i40e_aq_rx_ctl_write_register 5268 * @hw: pointer to the hw struct 5269 * @reg_addr: register address 5270 * @reg_val: register value 5271 * @cmd_details: pointer to command details structure or NULL 5272 * 5273 * Use the firmware to write to an Rx control register, 5274 * especially useful if the Rx unit is under heavy pressure 5275 **/ 5276 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5277 u32 reg_addr, u32 reg_val, 5278 struct i40e_asq_cmd_details *cmd_details) 5279 { 5280 struct i40e_aq_desc desc; 5281 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5282 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5283 int status; 5284 5285 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5286 5287 cmd->address = cpu_to_le32(reg_addr); 5288 cmd->value = cpu_to_le32(reg_val); 5289 5290 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5291 5292 return status; 5293 } 5294 5295 /** 5296 * i40e_write_rx_ctl - write to an Rx control register 5297 * @hw: pointer to the hw struct 5298 * @reg_addr: register address 5299 * @reg_val: register value 5300 **/ 5301 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5302 { 5303 bool use_register; 5304 int status = 0; 5305 int retry = 5; 5306 5307 use_register = (((hw->aq.api_maj_ver == 1) && 5308 (hw->aq.api_min_ver < 5)) || 5309 (hw->mac.type == I40E_MAC_X722)); 5310 if (!use_register) { 5311 do_retry: 5312 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5313 reg_val, NULL); 5314 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5315 usleep_range(1000, 2000); 5316 retry--; 5317 goto do_retry; 5318 } 5319 } 5320 5321 /* if the AQ access failed, try the old-fashioned way */ 5322 if (status || use_register) 5323 wr32(hw, reg_addr, reg_val); 5324 } 5325 5326 /** 5327 * i40e_mdio_if_number_selection - MDIO I/F number selection 5328 * @hw: pointer to the hw struct 5329 * @set_mdio: use MDIO I/F number specified by mdio_num 5330 * @mdio_num: MDIO I/F number 5331 * @cmd: pointer to PHY Register command structure 5332 **/ 5333 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5334 u8 mdio_num, 5335 struct i40e_aqc_phy_register_access *cmd) 5336 { 5337 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { 5338 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) 5339 cmd->cmd_flags |= 5340 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5341 ((mdio_num << 5342 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & 5343 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); 5344 else 5345 i40e_debug(hw, I40E_DEBUG_PHY, 5346 "MDIO I/F number selection not supported by current FW version.\n"); 5347 } 5348 } 5349 5350 /** 5351 * i40e_aq_set_phy_register_ext 5352 * @hw: pointer to the hw struct 5353 * @phy_select: select which phy should be accessed 5354 * @dev_addr: PHY device address 5355 * @page_change: flag to indicate if phy page should be updated 5356 * @set_mdio: use MDIO I/F number specified by mdio_num 5357 * @mdio_num: MDIO I/F number 5358 * @reg_addr: PHY register address 5359 * @reg_val: new register value 5360 * @cmd_details: pointer to command details structure or NULL 5361 * 

/**
 * i40e_aq_set_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_set_phy_register.
 **/
int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
				 u8 phy_select, u8 dev_addr, bool page_change,
				 bool set_mdio, u8 mdio_num,
				 u32 reg_addr, u32 reg_val,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_get_phy_register.
 **/
int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
				 u8 phy_select, u8 dev_addr, bool page_change,
				 bool set_mdio, u8 mdio_num,
				 u32 reg_addr, u32 *reg_val,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}
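
/* Illustrative sketch (not part of the driver): reading one register from the
 * external PHY with the _ext variant above.  MDIO I/F selection and QSFP page
 * changes are left at their defaults (set_mdio = false, page_change = false),
 * which is the common case covered by the simple i40e_aq_get_phy_register
 * wrapper mentioned in the kernel-doc.  The device address and register
 * offset below are placeholders, not values required by the hardware.
 */
static inline int i40e_example_read_ext_phy_reg(struct i40e_hw *hw, u32 *val)
{
	return i40e_aq_get_phy_register_ext(hw,
					    I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					    0x1e /* placeholder dev_addr */,
					    false, false, 0,
					    0x0100 /* placeholder reg_addr */,
					    val, NULL);
}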

/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
		      u16 buff_size, u32 track_id,
		      u32 *error_offset, u32 *error_info,
		      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
			 u16 buff_size, u8 flags,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
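
/* Illustrative sketch (not part of the driver): locating the i40e profile
 * segment inside a DDP package image that has already been copied into
 * memory.  i40e_find_segment_in_package() walks the package header's segment
 * table; the cast to struct i40e_profile_segment assumes the caller has
 * already validated the package length.
 */
static inline struct i40e_profile_segment *
i40e_example_get_profile_segment(struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *seg;

	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!seg)
		return NULL;	/* package carries no i40e segment */

	return (struct i40e_profile_segment *)seg;
}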

/**
 * i40e_find_section_in_profile
 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches an i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
				    struct i40e_profile_aq_section *aq)
{
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}
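
/* Illustrative sketch (not part of the driver): checking whether a profile
 * segment carries a NOTE section before applying it.  SECTION_TYPE_NOTE is
 * the example type named in the i40e_find_section_in_profile() kernel-doc
 * above; what a caller does with the note contents is up to that caller.
 */
static inline bool
i40e_example_profile_has_note(struct i40e_profile_segment *profile)
{
	return i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile) != NULL;
}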

/**
 * i40e_validate_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	int status = 0;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return -EOPNOTSUPP;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return -ENODEV;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return -EOPNOTSUPP;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return -EOPNOTSUPP;
			}
		}
	}

	return status;
}

/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	int status = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back a previously loaded package.
 */
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	int status = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}
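
/* Illustrative sketch (not part of the driver): the overall apply flow for a
 * DDP package, assuming the package image has already been copied into memory
 * and that i40e_add_pinfo_to_list() (defined below) is visible here through
 * its prototype in i40e_prototype.h.  @pinfo_sec is a caller-provided scratch
 * buffer large enough for the INFO section built by i40e_add_pinfo_to_list().
 * A real caller would also size-check the image and obtain the track id from
 * the package metadata; both steps are skipped in this sketch.
 */
static inline int i40e_example_apply_ddp(struct i40e_hw *hw,
					 struct i40e_package_header *pkg_hdr,
					 u32 track_id, u8 *pinfo_sec)
{
	struct i40e_profile_segment *profile;
	struct i40e_generic_seg_header *seg;
	int status;

	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!seg)
		return -EINVAL;
	profile = (struct i40e_profile_segment *)seg;

	/* validate the profile and download its AQ/MMIO sections */
	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* register the profile so it appears in the applied-profile list */
	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}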

/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;
	int status = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
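
/* Illustrative sketch (not part of the driver): submitting a single
 * caller-filled cloud filter element.  Only the tunnel-type bits of 'flags'
 * and the tenant id are shown; a real filter also needs the match-type flag
 * bits and the corresponding MAC/VLAN/IP fields of
 * struct i40e_aqc_cloud_filters_element_data (defined in i40e_adminq_cmd.h),
 * which are omitted here.  Note that this basic (non big-buffer) command
 * sends the element exactly as the caller filled it in.
 */
static inline int i40e_example_add_one_cloud_filter(struct i40e_hw *hw,
						    u16 vsi_seid, u32 vni)
{
	struct i40e_aqc_cloud_filters_element_data filter = {};

	filter.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE <<
				   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	filter.tenant_id = cpu_to_le32(vni);

	return i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);
}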

/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is
		 * shifted one byte further than the Tenant ID used by other
		 * tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
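
/* Illustrative sketch (not part of the driver): the Geneve tenant-id quirk
 * handled by the big-buffer add/remove helpers in this file.  Callers store
 * the raw VNI in element.tenant_id; for Geneve filters the helpers shift it
 * left by one byte before handing the buffer to firmware, so e.g. VNI
 * 0x001234 is sent to firmware as 0x00123400.
 */
static inline __le32 i40e_example_geneve_tenant_id(u32 vni)
{
	/* same transformation as the per-filter loop in
	 * i40e_aq_add_cloud_filters_bb() / i40e_aq_rem_cloud_filters_bb()
	 */
	return cpu_to_le32(vni << 8);
}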

/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	u16 buff_len;
	int status;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is
		 * shifted one byte further than the Tenant ID used by other
		 * tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}