// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "i40e_adminq_cmd.h"
#include "i40e_devids.h"
#include "i40e_prototype.h"
#include "i40e_register.h"

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
int i40e_set_mac_type(struct i40e_hw *hw)
{
	int status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = -ENODEV;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}
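
/*
 * Illustrative usage note (editorial sketch, not part of the original file):
 * callers typically pair i40e_aq_str() with the last AQ return code that the
 * admin queue layer leaves in hw->aq.asq_last_status, e.g.
 *
 *	hw_dbg(hw, "AQ command failed, aq_err %s\n",
 *	       i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * The exact logging helper used by a given caller may differ.
 */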

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	/* Check if the queue is initialized */
	if (!hw->aq.asq.count)
		return false;

	return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
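
/*
 * Illustrative usage (editorial sketch): i40e_check_asq_alive() is used as a
 * guard before issuing admin queue commands when the queue may already have
 * been torn down, e.g. as done by i40e_clear_pxe_mode() later in this file:
 *
 *	if (i40e_check_asq_alive(hw))
 *		i40e_aq_clear_pxe_mode(hw, NULL);
 */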

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
			   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
				   u16 vsi_id, bool pf_lut,
				   u8 *lut, u16 lut_size,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
	int status;
	u16 flags;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	if (pf_lut)
		flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
	else
		flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);

	cmd_resp->flags = cpu_to_le16(flags);
	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get or set the RSS key per VSI
 **/
static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				   u16 vsi_id,
				   struct i40e_aqc_get_set_rss_key_data *key,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
int i40e_aq_get_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
int i40e_aq_set_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
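
/*
 * Illustrative usage (editorial sketch; identifiers other than the wrapper
 * functions above are hypothetical): a caller owning a VSI would program the
 * RSS key and then the lookup table for that VSI, checking each return value:
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, &key_data);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *
 * Passing pf_lut == true selects the PF-wide table instead of the per-VSI
 * table, as described in the kernel-doc above.
 */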

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
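
/*
 * Illustrative decode sketch (editorial, mirrors the "Typical work flow"
 * comment above the table; the local variable names are hypothetical):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;				// unknown packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		// inspect the tunnel/inner protocol fields of 'decoded'
 *	else
 *		// decode via enum i40e_rx_l2_ptype
 */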

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
int i40e_init_shared_code(struct i40e_hw *hw)
{
	u32 port, ari, func_rid;
	int status = 0;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return -ENODEV;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
			 rd32(hw, I40E_PFGEN_PORTNUM));
	hw->port = (u8)port;
	ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
			rd32(hw, I40E_GLPCI_CAPSUP));
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static int
i40e_aq_mac_address_read(struct i40e_hw *hw,
			 u16 *flags,
			 struct i40e_aqc_mac_address_read_data *addrs,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_mac_address_write(struct i40e_hw *hw,
			      u16 flags, u8 *mac_addr,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = -EINVAL;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}
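
/*
 * Worked example (editorial note): the GLLAN_TXPRE_QDIS registers cover the
 * queues in blocks of 128, so for hw->func_caps.base_queue + queue == 200 the
 * code above writes register block 200 / 128 = 1 with a queue index of
 * 200 % 128 = 72.
 */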

/**
 * i40e_get_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 *
 * Reads the part number string from the EEPROM and stores it
 * into newly allocated buffer and saves resulting pointer
 * to i40e_hw->pba_id field.
 **/
void i40e_get_pba_string(struct i40e_hw *hw)
{
#define I40E_NVM_PBA_FLAGS_BLK_PRESENT	0xFAFA
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	int status;
	char *ptr;
	u16 i;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status) {
		hw_dbg(hw, "Failed to read PBA flags.\n");
		return;
	}
	if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) {
		hw_dbg(hw, "PBA block is not present.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size) and advance pointer to first PBA word.
	 */
	pba_size--;
	pba_ptr++;
	if (!pba_size) {
		hw_dbg(hw, "PBA ID is empty.\n");
		return;
	}

	ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL);
	if (!ptr)
		return;
	hw->pba_id = ptr;

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			devm_kfree(i40e_hw_to_dev(hw), hw->pba_id);
			hw->pba_id = NULL;
			return;
		}

		*ptr++ = (pba_word >> 8) & 0xFF;
		*ptr++ = pba_word & 0xFF;
	}
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static int i40e_poll_globr(struct i40e_hw *hw,
			   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return -EIO;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
int i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
			     rd32(hw, I40E_GLGEN_RSTCTL));

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return -EIO;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timedout\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return -EIO;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return -EIO;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return -EIO;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
	num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
	j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
	j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}
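
/*
 * Worked example (editorial note): if I40E_PFLAN_QALLOC reports FIRSTQ = 64
 * and LASTQ = 79 with the VALID bit set, this PF owns LASTQ - FIRSTQ + 1 = 16
 * queues starting at absolute queue 64; the VF range from I40E_PF_VT_PFALLOC
 * is computed the same way.
 */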

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED		BIT(4)
#define I40E_LED_MODE_VALID	(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
				 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0		22

#define I40E_PIN_FUNC_SDP	0x0
#define I40E_PIN_FUNC_LED	0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
		break;
	}

	return mode;
}
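
/*
 * Illustrative usage (editorial sketch): a port-identify style caller would
 * save the current mode, force the LED on with blink, and later restore the
 * saved mode with blink disabled, per the note in the i40e_led_set()
 * kernel-doc below (orig_mode is a hypothetical local):
 *
 *	orig_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);
 *	// ... identify period ...
 *	i40e_led_set(hw, orig_mode, false);
 */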

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |=
				FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
					   pin_func);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
				       mode);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
int
i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			     bool qualified_modules, bool report_init,
			     struct i40e_aq_get_phy_abilities_resp *abilities,
			     struct i40e_asq_cmd_details *cmd_details)
{
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
	struct i40e_aq_desc desc;
	int status;

	if (!abilities)
		return -EINVAL;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
				cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
				cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = -EIO;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = -EIO;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
				((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
int i40e_aq_set_phy_config(struct i40e_hw *hw,
			   struct i40e_aq_set_phy_config *config,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	int status;

	if (!config)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}
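
/*
 * Illustrative usage (editorial sketch): callers of the i40e_set_fc() wrapper
 * below first record the desired mode in hw->fc.requested_mode, which the
 * helper above translates into the PHY pause abilities:
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	err = i40e_set_fc(hw, &aq_failures, false);
 *
 * 'aq_failures' reports which AdminQ step failed (get, set or update), per
 * the I40E_SET_FC_AQ_FAIL_* flags used in i40e_set_fc(); 'err' and
 * 'aq_failures' here are hypothetical caller locals.
 */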

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
		bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				bool enable_link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
 **/
int i40e_aq_get_link_info(struct i40e_hw *hw,
			  bool enable_lse, struct i40e_link_status *link,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool tx_pause, rx_pause;
	u16 command_flags;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	hw->phy.link_info_old = *hw_link_info;

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw->phy.media_type = i40e_get_media_type(hw);
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
						 I40E_AQ_CONFIG_FEC_RS_ENA);
	hw_link_info->ext_info = resp->ext_info;
	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;

	/* update fc info */
	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause & rx_pause)
		hw->fc.current_mode = I40E_FC_FULL;
	else if (tx_pause)
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
	else if (rx_pause)
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
	else
		hw->fc.current_mode = I40E_FC_NONE;

	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
		hw_link_info->crc_enable = true;
	else
		hw_link_info->crc_enable = false;

	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
	    hw_link_info->phy_type == 0xE)
		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;

	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
	    hw->mac.type != I40E_MAC_X722) {
		__le32 tmp;

		memcpy(&tmp, resp->link_type, sizeof(tmp));
		hw->phy.phy_types = le32_to_cpu(tmp);
		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
	}

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so helper functions don't call AQ again */
	hw->phy.get_link_info = false;

aq_get_link_info_exit:
	return status;
}
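
/*
 * Illustrative usage (editorial sketch): helpers that need fresh link data
 * check the hw->phy.get_link_info flag (set in i40e_init_shared_code() and
 * cleared at the end of the function above) and only then re-query firmware;
 * 'ret' and 'speed' are hypothetical locals:
 *
 *	if (hw->phy.get_link_info) {
 *		ret = i40e_aq_get_link_info(hw, true, NULL, NULL);
 *		if (ret)
 *			return ret;
 *	}
 *	speed = hw->phy.link_info.link_speed;
 */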

/**
 * i40e_aq_set_phy_int_mask
 * @hw: pointer to the hw struct
 * @mask: interrupt mask to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set link interrupt mask.
 **/
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
			     u16 mask,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_lb_mode *cmd =
		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
	if (ena_lpbk) {
		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
		else
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Reset the external PHY.
 **/
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware.
1744 **/ 1745 int i40e_aq_add_vsi(struct i40e_hw *hw, 1746 struct i40e_vsi_context *vsi_ctx, 1747 struct i40e_asq_cmd_details *cmd_details) 1748 { 1749 struct i40e_aq_desc desc; 1750 struct i40e_aqc_add_get_update_vsi *cmd = 1751 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1752 struct i40e_aqc_add_get_update_vsi_completion *resp = 1753 (struct i40e_aqc_add_get_update_vsi_completion *) 1754 &desc.params.raw; 1755 int status; 1756 1757 i40e_fill_default_direct_cmd_desc(&desc, 1758 i40e_aqc_opc_add_vsi); 1759 1760 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1761 cmd->connection_type = vsi_ctx->connection_type; 1762 cmd->vf_id = vsi_ctx->vf_num; 1763 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1764 1765 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1766 1767 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1768 sizeof(vsi_ctx->info), 1769 cmd_details, true); 1770 1771 if (status) 1772 goto aq_add_vsi_exit; 1773 1774 vsi_ctx->seid = le16_to_cpu(resp->seid); 1775 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1776 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1777 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1778 1779 aq_add_vsi_exit: 1780 return status; 1781 } 1782 1783 /** 1784 * i40e_aq_set_default_vsi 1785 * @hw: pointer to the hw struct 1786 * @seid: vsi number 1787 * @cmd_details: pointer to command details structure or NULL 1788 **/ 1789 int i40e_aq_set_default_vsi(struct i40e_hw *hw, 1790 u16 seid, 1791 struct i40e_asq_cmd_details *cmd_details) 1792 { 1793 struct i40e_aq_desc desc; 1794 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1795 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1796 &desc.params.raw; 1797 int status; 1798 1799 i40e_fill_default_direct_cmd_desc(&desc, 1800 i40e_aqc_opc_set_vsi_promiscuous_modes); 1801 1802 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1803 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1804 cmd->seid = cpu_to_le16(seid); 1805 1806 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1807 1808 return status; 1809 } 1810 1811 /** 1812 * i40e_aq_clear_default_vsi 1813 * @hw: pointer to the hw struct 1814 * @seid: vsi number 1815 * @cmd_details: pointer to command details structure or NULL 1816 **/ 1817 int i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1818 u16 seid, 1819 struct i40e_asq_cmd_details *cmd_details) 1820 { 1821 struct i40e_aq_desc desc; 1822 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1823 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1824 &desc.params.raw; 1825 int status; 1826 1827 i40e_fill_default_direct_cmd_desc(&desc, 1828 i40e_aqc_opc_set_vsi_promiscuous_modes); 1829 1830 cmd->promiscuous_flags = cpu_to_le16(0); 1831 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1832 cmd->seid = cpu_to_le16(seid); 1833 1834 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1835 1836 return status; 1837 } 1838 1839 /** 1840 * i40e_aq_set_vsi_unicast_promiscuous 1841 * @hw: pointer to the hw struct 1842 * @seid: vsi number 1843 * @set: set unicast promiscuous enable/disable 1844 * @cmd_details: pointer to command details structure or NULL 1845 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 1846 **/ 1847 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 1848 u16 seid, bool set, 1849 struct i40e_asq_cmd_details *cmd_details, 1850 bool rx_only_promisc) 1851 { 1852 struct i40e_aq_desc desc; 1853 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1854 
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1855 	u16 flags = 0;
1856 	int status;
1857 
1858 	i40e_fill_default_direct_cmd_desc(&desc,
1859 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1860 
1861 	if (set) {
1862 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1863 		if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
1864 			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1865 	}
1866 
1867 	cmd->promiscuous_flags = cpu_to_le16(flags);
1868 
1869 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1870 	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
1871 		cmd->valid_flags |=
1872 			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1873 
1874 	cmd->seid = cpu_to_le16(seid);
1875 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1876 
1877 	return status;
1878 }
1879 
1880 /**
1881  * i40e_aq_set_vsi_multicast_promiscuous
1882  * @hw: pointer to the hw struct
1883  * @seid: vsi number
1884  * @set: set multicast promiscuous enable/disable
1885  * @cmd_details: pointer to command details structure or NULL
1886  **/
1887 int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
1888 					  u16 seid, bool set,
1889 					  struct i40e_asq_cmd_details *cmd_details)
1890 {
1891 	struct i40e_aq_desc desc;
1892 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1893 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1894 	u16 flags = 0;
1895 	int status;
1896 
1897 	i40e_fill_default_direct_cmd_desc(&desc,
1898 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1899 
1900 	if (set)
1901 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1902 
1903 	cmd->promiscuous_flags = cpu_to_le16(flags);
1904 
1905 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1906 
1907 	cmd->seid = cpu_to_le16(seid);
1908 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1909 
1910 	return status;
1911 }
1912 
1913 /**
1914  * i40e_aq_set_vsi_mc_promisc_on_vlan
1915  * @hw: pointer to the hw struct
1916  * @seid: vsi number
1917  * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
1918  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
1919  * @cmd_details: pointer to command details structure or NULL
1920  **/
1921 int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
1922 				       u16 seid, bool enable,
1923 				       u16 vid,
1924 				       struct i40e_asq_cmd_details *cmd_details)
1925 {
1926 	struct i40e_aq_desc desc;
1927 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1928 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1929 	u16 flags = 0;
1930 	int status;
1931 
1932 	i40e_fill_default_direct_cmd_desc(&desc,
1933 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1934 
1935 	if (enable)
1936 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1937 
1938 	cmd->promiscuous_flags = cpu_to_le16(flags);
1939 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1940 	cmd->seid = cpu_to_le16(seid);
1941 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1942 
1943 	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1944 					      cmd_details, true);
1945 
1946 	return status;
1947 }
1948 
1949 /**
1950  * i40e_aq_set_vsi_uc_promisc_on_vlan
1951  * @hw: pointer to the hw struct
1952  * @seid: vsi number
1953  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
1954  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
1955  * @cmd_details: pointer to command details structure or NULL
1956  **/
1957 int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1958 				       u16 seid, bool enable,
1959 				       u16 vid,
1960 				       struct i40e_asq_cmd_details *cmd_details)
1961 { 1962 struct i40e_aq_desc desc; 1963 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1964 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1965 u16 flags = 0; 1966 int status; 1967 1968 i40e_fill_default_direct_cmd_desc(&desc, 1969 i40e_aqc_opc_set_vsi_promiscuous_modes); 1970 1971 if (enable) { 1972 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 1973 if (i40e_is_aq_api_ver_ge(hw, 1, 5)) 1974 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 1975 } 1976 1977 cmd->promiscuous_flags = cpu_to_le16(flags); 1978 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 1979 if (i40e_is_aq_api_ver_ge(hw, 1, 5)) 1980 cmd->valid_flags |= 1981 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 1982 cmd->seid = cpu_to_le16(seid); 1983 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 1984 1985 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 1986 cmd_details, true); 1987 1988 return status; 1989 } 1990 1991 /** 1992 * i40e_aq_set_vsi_bc_promisc_on_vlan 1993 * @hw: pointer to the hw struct 1994 * @seid: vsi number 1995 * @enable: set broadcast promiscuous enable/disable for a given VLAN 1996 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 1997 * @cmd_details: pointer to command details structure or NULL 1998 **/ 1999 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2000 u16 seid, bool enable, u16 vid, 2001 struct i40e_asq_cmd_details *cmd_details) 2002 { 2003 struct i40e_aq_desc desc; 2004 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2005 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2006 u16 flags = 0; 2007 int status; 2008 2009 i40e_fill_default_direct_cmd_desc(&desc, 2010 i40e_aqc_opc_set_vsi_promiscuous_modes); 2011 2012 if (enable) 2013 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2014 2015 cmd->promiscuous_flags = cpu_to_le16(flags); 2016 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2017 cmd->seid = cpu_to_le16(seid); 2018 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2019 2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2021 2022 return status; 2023 } 2024 2025 /** 2026 * i40e_aq_set_vsi_broadcast 2027 * @hw: pointer to the hw struct 2028 * @seid: vsi number 2029 * @set_filter: true to set filter, false to clear filter 2030 * @cmd_details: pointer to command details structure or NULL 2031 * 2032 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
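 *
 * Minimal usage sketch (illustrative only; "vsi_seid" is a placeholder for
 * the caller's VSI SEID):
 *
 *	status = i40e_aq_set_vsi_broadcast(hw, vsi_seid, true, NULL);
 *	if (status)
 *		hw_dbg(hw, "set broadcast failed, aq_err %s\n",
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));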
2033 **/ 2034 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2035 u16 seid, bool set_filter, 2036 struct i40e_asq_cmd_details *cmd_details) 2037 { 2038 struct i40e_aq_desc desc; 2039 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2040 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2041 int status; 2042 2043 i40e_fill_default_direct_cmd_desc(&desc, 2044 i40e_aqc_opc_set_vsi_promiscuous_modes); 2045 2046 if (set_filter) 2047 cmd->promiscuous_flags 2048 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2049 else 2050 cmd->promiscuous_flags 2051 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2052 2053 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2054 cmd->seid = cpu_to_le16(seid); 2055 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2056 2057 return status; 2058 } 2059 2060 /** 2061 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2062 * @hw: pointer to the hw struct 2063 * @seid: vsi number 2064 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2065 * @cmd_details: pointer to command details structure or NULL 2066 **/ 2067 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2068 u16 seid, bool enable, 2069 struct i40e_asq_cmd_details *cmd_details) 2070 { 2071 struct i40e_aq_desc desc; 2072 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2073 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2074 u16 flags = 0; 2075 int status; 2076 2077 i40e_fill_default_direct_cmd_desc(&desc, 2078 i40e_aqc_opc_set_vsi_promiscuous_modes); 2079 if (enable) 2080 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2081 2082 cmd->promiscuous_flags = cpu_to_le16(flags); 2083 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2084 cmd->seid = cpu_to_le16(seid); 2085 2086 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2087 2088 return status; 2089 } 2090 2091 /** 2092 * i40e_aq_get_vsi_params - get VSI configuration info 2093 * @hw: pointer to the hw struct 2094 * @vsi_ctx: pointer to a vsi context struct 2095 * @cmd_details: pointer to command details structure or NULL 2096 **/ 2097 int i40e_aq_get_vsi_params(struct i40e_hw *hw, 2098 struct i40e_vsi_context *vsi_ctx, 2099 struct i40e_asq_cmd_details *cmd_details) 2100 { 2101 struct i40e_aq_desc desc; 2102 struct i40e_aqc_add_get_update_vsi *cmd = 2103 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2104 struct i40e_aqc_add_get_update_vsi_completion *resp = 2105 (struct i40e_aqc_add_get_update_vsi_completion *) 2106 &desc.params.raw; 2107 int status; 2108 2109 i40e_fill_default_direct_cmd_desc(&desc, 2110 i40e_aqc_opc_get_vsi_parameters); 2111 2112 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2113 2114 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2115 2116 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2117 sizeof(vsi_ctx->info), NULL); 2118 2119 if (status) 2120 goto aq_get_vsi_params_exit; 2121 2122 vsi_ctx->seid = le16_to_cpu(resp->seid); 2123 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2124 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2125 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2126 2127 aq_get_vsi_params_exit: 2128 return status; 2129 } 2130 2131 /** 2132 * i40e_aq_update_vsi_params 2133 * @hw: pointer to the hw struct 2134 * @vsi_ctx: pointer to a vsi context struct 2135 * @cmd_details: pointer to command details structure or NULL 2136 * 2137 * Update a VSI context. 
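 *
 * Typical read-modify-write sketch (illustrative only; "vsi_seid" is a
 * placeholder and the valid_sections bit name is an assumed constant from
 * i40e_adminq_cmd.h):
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.seid = vsi_seid;
 *	status = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *	if (!status) {
 *		ctx.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
 *		status = i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}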
2138  **/
2139 int i40e_aq_update_vsi_params(struct i40e_hw *hw,
2140 			      struct i40e_vsi_context *vsi_ctx,
2141 			      struct i40e_asq_cmd_details *cmd_details)
2142 {
2143 	struct i40e_aq_desc desc;
2144 	struct i40e_aqc_add_get_update_vsi *cmd =
2145 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2146 	struct i40e_aqc_add_get_update_vsi_completion *resp =
2147 		(struct i40e_aqc_add_get_update_vsi_completion *)
2148 		&desc.params.raw;
2149 	int status;
2150 
2151 	i40e_fill_default_direct_cmd_desc(&desc,
2152 					  i40e_aqc_opc_update_vsi_parameters);
2153 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2154 
2155 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2156 
2157 	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2158 					      sizeof(vsi_ctx->info),
2159 					      cmd_details, true);
2160 
2161 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2162 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2163 
2164 	return status;
2165 }
2166 
2167 /**
2168  * i40e_aq_get_switch_config
2169  * @hw: pointer to the hardware structure
2170  * @buf: pointer to the result buffer
2171  * @buf_size: length of input buffer
2172  * @start_seid: seid to start for the report, 0 == beginning
2173  * @cmd_details: pointer to command details structure or NULL
2174  *
2175  * Fill the buf with switch configuration returned from AdminQ command
2176  **/
2177 int i40e_aq_get_switch_config(struct i40e_hw *hw,
2178 			      struct i40e_aqc_get_switch_config_resp *buf,
2179 			      u16 buf_size, u16 *start_seid,
2180 			      struct i40e_asq_cmd_details *cmd_details)
2181 {
2182 	struct i40e_aq_desc desc;
2183 	struct i40e_aqc_switch_seid *scfg =
2184 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2185 	int status;
2186 
2187 	i40e_fill_default_direct_cmd_desc(&desc,
2188 					  i40e_aqc_opc_get_switch_config);
2189 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2190 	if (buf_size > I40E_AQ_LARGE_BUF)
2191 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2192 	scfg->seid = cpu_to_le16(*start_seid);
2193 
2194 	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2195 	*start_seid = le16_to_cpu(scfg->seid);
2196 
2197 	return status;
2198 }
2199 
2200 /**
2201  * i40e_aq_set_switch_config
2202  * @hw: pointer to the hardware structure
2203  * @flags: bit flag values to set
2205  * @valid_flags: which bit flags to set
2206  * @mode: cloud filter mode
2207  * @cmd_details: pointer to command details structure or NULL
2208  *
2209  * Set switch configuration bits
2210  **/
2211 int i40e_aq_set_switch_config(struct i40e_hw *hw,
2212 			      u16 flags,
2213 			      u16 valid_flags, u8 mode,
2214 			      struct i40e_asq_cmd_details *cmd_details)
2215 {
2216 	struct i40e_aq_desc desc;
2217 	struct i40e_aqc_set_switch_config *scfg =
2218 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2219 	int status;
2220 
2221 	i40e_fill_default_direct_cmd_desc(&desc,
2222 					  i40e_aqc_opc_set_switch_config);
2223 	scfg->flags = cpu_to_le16(flags);
2224 	scfg->valid_flags = cpu_to_le16(valid_flags);
2225 	scfg->mode = mode;
2226 	if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
2227 		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2228 		scfg->first_tag = cpu_to_le16(hw->first_tag);
2229 		scfg->second_tag = cpu_to_le16(hw->second_tag);
2230 	}
2231 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2232 
2233 	return status;
2234 }
2235 
2236 /**
2237  * i40e_aq_get_firmware_version
2238  * @hw: pointer to the hw struct
2239  * @fw_major_version: firmware major version
2240  * @fw_minor_version: firmware minor version
2241  * @fw_build: firmware build number
2242 * @api_major_version: major queue version 2243 * @api_minor_version: minor queue version 2244 * @cmd_details: pointer to command details structure or NULL 2245 * 2246 * Get the firmware version from the admin queue commands 2247 **/ 2248 int i40e_aq_get_firmware_version(struct i40e_hw *hw, 2249 u16 *fw_major_version, u16 *fw_minor_version, 2250 u32 *fw_build, 2251 u16 *api_major_version, u16 *api_minor_version, 2252 struct i40e_asq_cmd_details *cmd_details) 2253 { 2254 struct i40e_aq_desc desc; 2255 struct i40e_aqc_get_version *resp = 2256 (struct i40e_aqc_get_version *)&desc.params.raw; 2257 int status; 2258 2259 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2260 2261 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2262 2263 if (!status) { 2264 if (fw_major_version) 2265 *fw_major_version = le16_to_cpu(resp->fw_major); 2266 if (fw_minor_version) 2267 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2268 if (fw_build) 2269 *fw_build = le32_to_cpu(resp->fw_build); 2270 if (api_major_version) 2271 *api_major_version = le16_to_cpu(resp->api_major); 2272 if (api_minor_version) 2273 *api_minor_version = le16_to_cpu(resp->api_minor); 2274 } 2275 2276 return status; 2277 } 2278 2279 /** 2280 * i40e_aq_send_driver_version 2281 * @hw: pointer to the hw struct 2282 * @dv: driver's major, minor version 2283 * @cmd_details: pointer to command details structure or NULL 2284 * 2285 * Send the driver version to the firmware 2286 **/ 2287 int i40e_aq_send_driver_version(struct i40e_hw *hw, 2288 struct i40e_driver_version *dv, 2289 struct i40e_asq_cmd_details *cmd_details) 2290 { 2291 struct i40e_aq_desc desc; 2292 struct i40e_aqc_driver_version *cmd = 2293 (struct i40e_aqc_driver_version *)&desc.params.raw; 2294 int status; 2295 u16 len; 2296 2297 if (dv == NULL) 2298 return -EINVAL; 2299 2300 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2301 2302 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2303 cmd->driver_major_ver = dv->major_version; 2304 cmd->driver_minor_ver = dv->minor_version; 2305 cmd->driver_build_ver = dv->build_version; 2306 cmd->driver_subbuild_ver = dv->subbuild_version; 2307 2308 len = 0; 2309 while (len < sizeof(dv->driver_string) && 2310 (dv->driver_string[len] < 0x80) && 2311 dv->driver_string[len]) 2312 len++; 2313 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2314 len, cmd_details); 2315 2316 return status; 2317 } 2318 2319 /** 2320 * i40e_get_link_status - get status of the HW network link 2321 * @hw: pointer to the hw struct 2322 * @link_up: pointer to bool (true/false = linkup/linkdown) 2323 * 2324 * Variable link_up true if link is up, false if link is down. 
2325 * The variable link_up is invalid if returned value of status != 0 2326 * 2327 * Side effect: LinkStatusEvent reporting becomes enabled 2328 **/ 2329 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2330 { 2331 int status = 0; 2332 2333 if (hw->phy.get_link_info) { 2334 status = i40e_update_link_info(hw); 2335 2336 if (status) 2337 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2338 status); 2339 } 2340 2341 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2342 2343 return status; 2344 } 2345 2346 /** 2347 * i40e_update_link_info - update status of the HW network link 2348 * @hw: pointer to the hw struct 2349 **/ 2350 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw) 2351 { 2352 struct i40e_aq_get_phy_abilities_resp abilities; 2353 int status = 0; 2354 2355 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2356 if (status) 2357 return status; 2358 2359 /* extra checking needed to ensure link info to user is timely */ 2360 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2361 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2362 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2363 status = i40e_aq_get_phy_capabilities(hw, false, false, 2364 &abilities, NULL); 2365 if (status) 2366 return status; 2367 2368 if (abilities.fec_cfg_curr_mod_ext_info & 2369 I40E_AQ_ENABLE_FEC_AUTO) 2370 hw->phy.link_info.req_fec_info = 2371 (I40E_AQ_REQUEST_FEC_KR | 2372 I40E_AQ_REQUEST_FEC_RS); 2373 else 2374 hw->phy.link_info.req_fec_info = 2375 abilities.fec_cfg_curr_mod_ext_info & 2376 (I40E_AQ_REQUEST_FEC_KR | 2377 I40E_AQ_REQUEST_FEC_RS); 2378 2379 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2380 sizeof(hw->phy.link_info.module_type)); 2381 } 2382 2383 return status; 2384 } 2385 2386 /** 2387 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2388 * @hw: pointer to the hw struct 2389 * @uplink_seid: the MAC or other gizmo SEID 2390 * @downlink_seid: the VSI SEID 2391 * @enabled_tc: bitmap of TCs to be enabled 2392 * @default_port: true for default port VSI, false for control port 2393 * @veb_seid: pointer to where to put the resulting VEB SEID 2394 * @enable_stats: true to turn on VEB stats 2395 * @cmd_details: pointer to command details structure or NULL 2396 * 2397 * This asks the FW to add a VEB between the uplink and downlink 2398 * elements. If the uplink SEID is 0, this will be a floating VEB. 
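 *
 * Minimal usage sketch (illustrative only; "mac_seid" and "vsi_seid" are
 * placeholders for the uplink MAC SEID and the downlink VSI SEID, and the
 * 0x1 TC bitmap enables TC0 only):
 *
 *	u16 veb_seid = 0;
 *
 *	status = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, true,
 *				 &veb_seid, true, NULL);
 *	if (!status)
 *		hw_dbg(hw, "new VEB seid %d\n", veb_seid);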
2399 **/ 2400 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2401 u16 downlink_seid, u8 enabled_tc, 2402 bool default_port, u16 *veb_seid, 2403 bool enable_stats, 2404 struct i40e_asq_cmd_details *cmd_details) 2405 { 2406 struct i40e_aq_desc desc; 2407 struct i40e_aqc_add_veb *cmd = 2408 (struct i40e_aqc_add_veb *)&desc.params.raw; 2409 struct i40e_aqc_add_veb_completion *resp = 2410 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2411 u16 veb_flags = 0; 2412 int status; 2413 2414 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2415 if (!!uplink_seid != !!downlink_seid) 2416 return -EINVAL; 2417 2418 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2419 2420 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2421 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2422 cmd->enable_tcs = enabled_tc; 2423 if (!uplink_seid) 2424 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2425 if (default_port) 2426 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2427 else 2428 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2429 2430 /* reverse logic here: set the bitflag to disable the stats */ 2431 if (!enable_stats) 2432 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2433 2434 cmd->veb_flags = cpu_to_le16(veb_flags); 2435 2436 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2437 2438 if (!status && veb_seid) 2439 *veb_seid = le16_to_cpu(resp->veb_seid); 2440 2441 return status; 2442 } 2443 2444 /** 2445 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2446 * @hw: pointer to the hw struct 2447 * @veb_seid: the SEID of the VEB to query 2448 * @switch_id: the uplink switch id 2449 * @floating: set to true if the VEB is floating 2450 * @statistic_index: index of the stats counter block for this VEB 2451 * @vebs_used: number of VEB's used by function 2452 * @vebs_free: total VEB's not reserved by any function 2453 * @cmd_details: pointer to command details structure or NULL 2454 * 2455 * This retrieves the parameters for a particular VEB, specified by 2456 * uplink_seid, and returns them to the caller. 
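 *
 * Minimal usage sketch (illustrative only; "veb_seid" is a placeholder for a
 * SEID previously returned by i40e_aq_add_veb()):
 *
 *	u16 switch_id, stats_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	status = i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id,
 *					    &floating, &stats_idx,
 *					    &vebs_used, &vebs_free, NULL);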
2457 **/ 2458 int i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2459 u16 veb_seid, u16 *switch_id, 2460 bool *floating, u16 *statistic_index, 2461 u16 *vebs_used, u16 *vebs_free, 2462 struct i40e_asq_cmd_details *cmd_details) 2463 { 2464 struct i40e_aq_desc desc; 2465 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2466 (struct i40e_aqc_get_veb_parameters_completion *) 2467 &desc.params.raw; 2468 int status; 2469 2470 if (veb_seid == 0) 2471 return -EINVAL; 2472 2473 i40e_fill_default_direct_cmd_desc(&desc, 2474 i40e_aqc_opc_get_veb_parameters); 2475 cmd_resp->seid = cpu_to_le16(veb_seid); 2476 2477 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2478 if (status) 2479 goto get_veb_exit; 2480 2481 if (switch_id) 2482 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2483 if (statistic_index) 2484 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2485 if (vebs_used) 2486 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2487 if (vebs_free) 2488 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2489 if (floating) { 2490 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2491 2492 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2493 *floating = true; 2494 else 2495 *floating = false; 2496 } 2497 2498 get_veb_exit: 2499 return status; 2500 } 2501 2502 /** 2503 * i40e_prepare_add_macvlan 2504 * @mv_list: list of macvlans to be added 2505 * @desc: pointer to AQ descriptor structure 2506 * @count: length of the list 2507 * @seid: VSI for the mac address 2508 * 2509 * Internal helper function that prepares the add macvlan request 2510 * and returns the buffer size. 2511 **/ 2512 static u16 2513 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2514 struct i40e_aq_desc *desc, u16 count, u16 seid) 2515 { 2516 struct i40e_aqc_macvlan *cmd = 2517 (struct i40e_aqc_macvlan *)&desc->params.raw; 2518 u16 buf_size; 2519 int i; 2520 2521 buf_size = count * sizeof(*mv_list); 2522 2523 /* prep the rest of the request */ 2524 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2525 cmd->num_addresses = cpu_to_le16(count); 2526 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2527 cmd->seid[1] = 0; 2528 cmd->seid[2] = 0; 2529 2530 for (i = 0; i < count; i++) 2531 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2532 mv_list[i].flags |= 2533 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2534 2535 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2536 if (buf_size > I40E_AQ_LARGE_BUF) 2537 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2538 2539 return buf_size; 2540 } 2541 2542 /** 2543 * i40e_aq_add_macvlan 2544 * @hw: pointer to the hw struct 2545 * @seid: VSI for the mac address 2546 * @mv_list: list of macvlans to be added 2547 * @count: length of the list 2548 * @cmd_details: pointer to command details structure or NULL 2549 * 2550 * Add MAC/VLAN addresses to the HW filtering 2551 **/ 2552 int 2553 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2554 struct i40e_aqc_add_macvlan_element_data *mv_list, 2555 u16 count, struct i40e_asq_cmd_details *cmd_details) 2556 { 2557 struct i40e_aq_desc desc; 2558 u16 buf_size; 2559 2560 if (count == 0 || !mv_list || !hw) 2561 return -EINVAL; 2562 2563 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2564 2565 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2566 cmd_details, true); 2567 } 2568 2569 /** 2570 * i40e_aq_add_macvlan_v2 2571 * @hw: pointer to the hw struct 2572 * @seid: VSI for the mac address 2573 * @mv_list: list of 
macvlans to be added 2574 * @count: length of the list 2575 * @cmd_details: pointer to command details structure or NULL 2576 * @aq_status: pointer to Admin Queue status return value 2577 * 2578 * Add MAC/VLAN addresses to the HW filtering. 2579 * The _v2 version returns the last Admin Queue status in aq_status 2580 * to avoid race conditions in access to hw->aq.asq_last_status. 2581 * It also calls _v2 versions of asq_send_command functions to 2582 * get the aq_status on the stack. 2583 **/ 2584 int 2585 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2586 struct i40e_aqc_add_macvlan_element_data *mv_list, 2587 u16 count, struct i40e_asq_cmd_details *cmd_details, 2588 enum i40e_admin_queue_err *aq_status) 2589 { 2590 struct i40e_aq_desc desc; 2591 u16 buf_size; 2592 2593 if (count == 0 || !mv_list || !hw) 2594 return -EINVAL; 2595 2596 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2597 2598 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2599 cmd_details, true, aq_status); 2600 } 2601 2602 /** 2603 * i40e_aq_remove_macvlan 2604 * @hw: pointer to the hw struct 2605 * @seid: VSI for the mac address 2606 * @mv_list: list of macvlans to be removed 2607 * @count: length of the list 2608 * @cmd_details: pointer to command details structure or NULL 2609 * 2610 * Remove MAC/VLAN addresses from the HW filtering 2611 **/ 2612 int 2613 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2614 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2615 u16 count, struct i40e_asq_cmd_details *cmd_details) 2616 { 2617 struct i40e_aq_desc desc; 2618 struct i40e_aqc_macvlan *cmd = 2619 (struct i40e_aqc_macvlan *)&desc.params.raw; 2620 u16 buf_size; 2621 int status; 2622 2623 if (count == 0 || !mv_list || !hw) 2624 return -EINVAL; 2625 2626 buf_size = count * sizeof(*mv_list); 2627 2628 /* prep the rest of the request */ 2629 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2630 cmd->num_addresses = cpu_to_le16(count); 2631 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2632 cmd->seid[1] = 0; 2633 cmd->seid[2] = 0; 2634 2635 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2636 if (buf_size > I40E_AQ_LARGE_BUF) 2637 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2638 2639 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2640 cmd_details, true); 2641 2642 return status; 2643 } 2644 2645 /** 2646 * i40e_aq_remove_macvlan_v2 2647 * @hw: pointer to the hw struct 2648 * @seid: VSI for the mac address 2649 * @mv_list: list of macvlans to be removed 2650 * @count: length of the list 2651 * @cmd_details: pointer to command details structure or NULL 2652 * @aq_status: pointer to Admin Queue status return value 2653 * 2654 * Remove MAC/VLAN addresses from the HW filtering. 2655 * The _v2 version returns the last Admin Queue status in aq_status 2656 * to avoid race conditions in access to hw->aq.asq_last_status. 2657 * It also calls _v2 versions of asq_send_command functions to 2658 * get the aq_status on the stack. 
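 *
 * Minimal usage sketch (illustrative only; "mac" and "vsi_seid" are
 * placeholders, and the DEL_PERFECT_MATCH flag name is an assumed constant
 * from i40e_adminq_cmd.h):
 *
 *	struct i40e_aqc_remove_macvlan_element_data el = {};
 *	enum i40e_admin_queue_err aq_err;
 *
 *	ether_addr_copy(el.mac_addr, mac);
 *	el.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *	status = i40e_aq_remove_macvlan_v2(hw, vsi_seid, &el, 1, NULL,
 *					   &aq_err);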
2659 **/ 2660 int 2661 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2662 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2663 u16 count, struct i40e_asq_cmd_details *cmd_details, 2664 enum i40e_admin_queue_err *aq_status) 2665 { 2666 struct i40e_aqc_macvlan *cmd; 2667 struct i40e_aq_desc desc; 2668 u16 buf_size; 2669 2670 if (count == 0 || !mv_list || !hw) 2671 return -EINVAL; 2672 2673 buf_size = count * sizeof(*mv_list); 2674 2675 /* prep the rest of the request */ 2676 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2677 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2678 cmd->num_addresses = cpu_to_le16(count); 2679 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2680 cmd->seid[1] = 0; 2681 cmd->seid[2] = 0; 2682 2683 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2684 if (buf_size > I40E_AQ_LARGE_BUF) 2685 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2686 2687 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2688 cmd_details, true, aq_status); 2689 } 2690 2691 /** 2692 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2693 * @hw: pointer to the hw struct 2694 * @opcode: AQ opcode for add or delete mirror rule 2695 * @sw_seid: Switch SEID (to which rule refers) 2696 * @rule_type: Rule Type (ingress/egress/VLAN) 2697 * @id: Destination VSI SEID or Rule ID 2698 * @count: length of the list 2699 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2700 * @cmd_details: pointer to command details structure or NULL 2701 * @rule_id: Rule ID returned from FW 2702 * @rules_used: Number of rules used in internal switch 2703 * @rules_free: Number of rules free in internal switch 2704 * 2705 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2706 * VEBs/VEPA elements only 2707 **/ 2708 static int i40e_mirrorrule_op(struct i40e_hw *hw, 2709 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2710 u16 count, __le16 *mr_list, 2711 struct i40e_asq_cmd_details *cmd_details, 2712 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2713 { 2714 struct i40e_aq_desc desc; 2715 struct i40e_aqc_add_delete_mirror_rule *cmd = 2716 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2717 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2718 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2719 u16 buf_size; 2720 int status; 2721 2722 buf_size = count * sizeof(*mr_list); 2723 2724 /* prep the rest of the request */ 2725 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2726 cmd->seid = cpu_to_le16(sw_seid); 2727 cmd->rule_type = cpu_to_le16(rule_type & 2728 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2729 cmd->num_entries = cpu_to_le16(count); 2730 /* Dest VSI for add, rule_id for delete */ 2731 cmd->destination = cpu_to_le16(id); 2732 if (mr_list) { 2733 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2734 I40E_AQ_FLAG_RD)); 2735 if (buf_size > I40E_AQ_LARGE_BUF) 2736 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2737 } 2738 2739 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2740 cmd_details); 2741 if (!status || 2742 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2743 if (rule_id) 2744 *rule_id = le16_to_cpu(resp->rule_id); 2745 if (rules_used) 2746 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2747 if (rules_free) 2748 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2749 } 2750 return status; 2751 } 2752 2753 /** 2754 * i40e_aq_add_mirrorrule - add a mirror rule 2755 * @hw: pointer to the hw struct 2756 * @sw_seid: Switch SEID (to which rule refers) 2757 * @rule_type: Rule Type (ingress/egress/VLAN) 2758 * @dest_vsi: SEID of VSI to which packets will be mirrored 2759 * @count: length of the list 2760 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2761 * @cmd_details: pointer to command details structure or NULL 2762 * @rule_id: Rule ID returned from FW 2763 * @rules_used: Number of rules used in internal switch 2764 * @rules_free: Number of rules free in internal switch 2765 * 2766 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2767 **/ 2768 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2769 u16 rule_type, u16 dest_vsi, u16 count, 2770 __le16 *mr_list, 2771 struct i40e_asq_cmd_details *cmd_details, 2772 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2773 { 2774 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2775 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2776 if (count == 0 || !mr_list) 2777 return -EINVAL; 2778 } 2779 2780 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2781 rule_type, dest_vsi, count, mr_list, 2782 cmd_details, rule_id, rules_used, rules_free); 2783 } 2784 2785 /** 2786 * i40e_aq_delete_mirrorrule - delete a mirror rule 2787 * @hw: pointer to the hw struct 2788 * @sw_seid: Switch SEID (to which rule refers) 2789 * @rule_type: Rule Type (ingress/egress/VLAN) 2790 * @count: length of the list 2791 * @rule_id: Rule ID that is returned in the receive desc as part of 2792 * add_mirrorrule. 
2793 * @mr_list: list of mirrored VLAN IDs to be removed 2794 * @cmd_details: pointer to command details structure or NULL 2795 * @rules_used: Number of rules used in internal switch 2796 * @rules_free: Number of rules free in internal switch 2797 * 2798 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2799 **/ 2800 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2801 u16 rule_type, u16 rule_id, u16 count, 2802 __le16 *mr_list, 2803 struct i40e_asq_cmd_details *cmd_details, 2804 u16 *rules_used, u16 *rules_free) 2805 { 2806 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2807 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2808 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2809 * mirroring. For other rule_type, count and rule_type should 2810 * not matter. 2811 */ 2812 if (count == 0 || !mr_list) 2813 return -EINVAL; 2814 } 2815 2816 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2817 rule_type, rule_id, count, mr_list, 2818 cmd_details, NULL, rules_used, rules_free); 2819 } 2820 2821 /** 2822 * i40e_aq_send_msg_to_vf 2823 * @hw: pointer to the hardware structure 2824 * @vfid: VF id to send msg 2825 * @v_opcode: opcodes for VF-PF communication 2826 * @v_retval: return error code 2827 * @msg: pointer to the msg buffer 2828 * @msglen: msg length 2829 * @cmd_details: pointer to command details 2830 * 2831 * send msg to vf 2832 **/ 2833 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2834 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2835 struct i40e_asq_cmd_details *cmd_details) 2836 { 2837 struct i40e_aq_desc desc; 2838 struct i40e_aqc_pf_vf_message *cmd = 2839 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2840 int status; 2841 2842 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2843 cmd->id = cpu_to_le32(vfid); 2844 desc.cookie_high = cpu_to_le32(v_opcode); 2845 desc.cookie_low = cpu_to_le32(v_retval); 2846 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2847 if (msglen) { 2848 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2849 I40E_AQ_FLAG_RD)); 2850 if (msglen > I40E_AQ_LARGE_BUF) 2851 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2852 desc.datalen = cpu_to_le16(msglen); 2853 } 2854 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2855 2856 return status; 2857 } 2858 2859 /** 2860 * i40e_aq_debug_read_register 2861 * @hw: pointer to the hw struct 2862 * @reg_addr: register address 2863 * @reg_val: register value 2864 * @cmd_details: pointer to command details structure or NULL 2865 * 2866 * Read the register using the admin queue commands 2867 **/ 2868 int i40e_aq_debug_read_register(struct i40e_hw *hw, 2869 u32 reg_addr, u64 *reg_val, 2870 struct i40e_asq_cmd_details *cmd_details) 2871 { 2872 struct i40e_aq_desc desc; 2873 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2874 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2875 int status; 2876 2877 if (reg_val == NULL) 2878 return -EINVAL; 2879 2880 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2881 2882 cmd_resp->address = cpu_to_le32(reg_addr); 2883 2884 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2885 2886 if (!status) { 2887 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2888 (u64)le32_to_cpu(cmd_resp->value_low); 2889 } 2890 2891 return status; 2892 } 2893 2894 /** 2895 * i40e_aq_debug_write_register 2896 * @hw: pointer to the hw struct 2897 * @reg_addr: register address 2898 * 
@reg_val: register value 2899 * @cmd_details: pointer to command details structure or NULL 2900 * 2901 * Write to a register using the admin queue commands 2902 **/ 2903 int i40e_aq_debug_write_register(struct i40e_hw *hw, 2904 u32 reg_addr, u64 reg_val, 2905 struct i40e_asq_cmd_details *cmd_details) 2906 { 2907 struct i40e_aq_desc desc; 2908 struct i40e_aqc_debug_reg_read_write *cmd = 2909 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2910 int status; 2911 2912 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 2913 2914 cmd->address = cpu_to_le32(reg_addr); 2915 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 2916 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 2917 2918 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2919 2920 return status; 2921 } 2922 2923 /** 2924 * i40e_aq_request_resource 2925 * @hw: pointer to the hw struct 2926 * @resource: resource id 2927 * @access: access type 2928 * @sdp_number: resource number 2929 * @timeout: the maximum time in ms that the driver may hold the resource 2930 * @cmd_details: pointer to command details structure or NULL 2931 * 2932 * requests common resource using the admin queue commands 2933 **/ 2934 int i40e_aq_request_resource(struct i40e_hw *hw, 2935 enum i40e_aq_resources_ids resource, 2936 enum i40e_aq_resource_access_type access, 2937 u8 sdp_number, u64 *timeout, 2938 struct i40e_asq_cmd_details *cmd_details) 2939 { 2940 struct i40e_aq_desc desc; 2941 struct i40e_aqc_request_resource *cmd_resp = 2942 (struct i40e_aqc_request_resource *)&desc.params.raw; 2943 int status; 2944 2945 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 2946 2947 cmd_resp->resource_id = cpu_to_le16(resource); 2948 cmd_resp->access_type = cpu_to_le16(access); 2949 cmd_resp->resource_number = cpu_to_le32(sdp_number); 2950 2951 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2952 /* The completion specifies the maximum time in ms that the driver 2953 * may hold the resource in the Timeout field. 2954 * If the resource is held by someone else, the command completes with 2955 * busy return value and the timeout field indicates the maximum time 2956 * the current owner of the resource has to free it. 
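	 * A caller is expected to pair a successful request with a matching
	 * i40e_aq_release_resource() call once it is done, for example
	 * (illustrative sketch; I40E_NVM_RESOURCE_ID is the assumed resource
	 * id name, I40E_RESOURCE_READ the access type):
	 *	u64 timeout = 0;
	 *
	 *	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
	 *					  I40E_RESOURCE_READ, 0,
	 *					  &timeout, NULL);
	 *	if (!status) {
	 *		... access the resource ...
	 *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID,
	 *					 0, NULL);
	 *	}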
2957 */ 2958 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 2959 *timeout = le32_to_cpu(cmd_resp->timeout); 2960 2961 return status; 2962 } 2963 2964 /** 2965 * i40e_aq_release_resource 2966 * @hw: pointer to the hw struct 2967 * @resource: resource id 2968 * @sdp_number: resource number 2969 * @cmd_details: pointer to command details structure or NULL 2970 * 2971 * release common resource using the admin queue commands 2972 **/ 2973 int i40e_aq_release_resource(struct i40e_hw *hw, 2974 enum i40e_aq_resources_ids resource, 2975 u8 sdp_number, 2976 struct i40e_asq_cmd_details *cmd_details) 2977 { 2978 struct i40e_aq_desc desc; 2979 struct i40e_aqc_request_resource *cmd = 2980 (struct i40e_aqc_request_resource *)&desc.params.raw; 2981 int status; 2982 2983 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 2984 2985 cmd->resource_id = cpu_to_le16(resource); 2986 cmd->resource_number = cpu_to_le32(sdp_number); 2987 2988 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2989 2990 return status; 2991 } 2992 2993 /** 2994 * i40e_aq_read_nvm 2995 * @hw: pointer to the hw struct 2996 * @module_pointer: module pointer location in words from the NVM beginning 2997 * @offset: byte offset from the module beginning 2998 * @length: length of the section to be read (in bytes from the offset) 2999 * @data: command buffer (size [bytes] = length) 3000 * @last_command: tells if this is the last command in a series 3001 * @cmd_details: pointer to command details structure or NULL 3002 * 3003 * Read the NVM using the admin queue commands 3004 **/ 3005 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3006 u32 offset, u16 length, void *data, 3007 bool last_command, 3008 struct i40e_asq_cmd_details *cmd_details) 3009 { 3010 struct i40e_aq_desc desc; 3011 struct i40e_aqc_nvm_update *cmd = 3012 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3013 int status; 3014 3015 /* In offset the highest byte must be zeroed. */ 3016 if (offset & 0xFF000000) { 3017 status = -EINVAL; 3018 goto i40e_aq_read_nvm_exit; 3019 } 3020 3021 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3022 3023 /* If this is the last command in a series, set the proper flag. 
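	 * A multi-part read issues several commands against the same module
	 * and sets last_command only on the final one, for example
	 * (illustrative sketch; the 4 KB chunk size is arbitrary):
	 *	i40e_aq_read_nvm(hw, 0, 0, 4096, buf, false, NULL);
	 *	i40e_aq_read_nvm(hw, 0, 4096, 4096, buf + 4096, true, NULL);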
*/ 3024 if (last_command) 3025 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3026 cmd->module_pointer = module_pointer; 3027 cmd->offset = cpu_to_le32(offset); 3028 cmd->length = cpu_to_le16(length); 3029 3030 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3031 if (length > I40E_AQ_LARGE_BUF) 3032 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3033 3034 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3035 3036 i40e_aq_read_nvm_exit: 3037 return status; 3038 } 3039 3040 /** 3041 * i40e_aq_erase_nvm 3042 * @hw: pointer to the hw struct 3043 * @module_pointer: module pointer location in words from the NVM beginning 3044 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3045 * @length: length of the section to be erased (expressed in 4 KB) 3046 * @last_command: tells if this is the last command in a series 3047 * @cmd_details: pointer to command details structure or NULL 3048 * 3049 * Erase the NVM sector using the admin queue commands 3050 **/ 3051 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3052 u32 offset, u16 length, bool last_command, 3053 struct i40e_asq_cmd_details *cmd_details) 3054 { 3055 struct i40e_aq_desc desc; 3056 struct i40e_aqc_nvm_update *cmd = 3057 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3058 int status; 3059 3060 /* In offset the highest byte must be zeroed. */ 3061 if (offset & 0xFF000000) { 3062 status = -EINVAL; 3063 goto i40e_aq_erase_nvm_exit; 3064 } 3065 3066 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3067 3068 /* If this is the last command in a series, set the proper flag. */ 3069 if (last_command) 3070 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3071 cmd->module_pointer = module_pointer; 3072 cmd->offset = cpu_to_le32(offset); 3073 cmd->length = cpu_to_le16(length); 3074 3075 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3076 3077 i40e_aq_erase_nvm_exit: 3078 return status; 3079 } 3080 3081 /** 3082 * i40e_parse_discover_capabilities 3083 * @hw: pointer to the hw struct 3084 * @buff: pointer to a buffer containing device/function capability records 3085 * @cap_count: number of capability records in the list 3086 * @list_type_opc: type of capabilities list to parse 3087 * 3088 * Parse the device/function capabilities list. 
3089 **/ 3090 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3091 u32 cap_count, 3092 enum i40e_admin_queue_opc list_type_opc) 3093 { 3094 struct i40e_aqc_list_capabilities_element_resp *cap; 3095 u32 valid_functions, num_functions; 3096 u32 number, logical_id, phys_id; 3097 struct i40e_hw_capabilities *p; 3098 u16 id, ocp_cfg_word0; 3099 u8 major_rev; 3100 int status; 3101 u32 i = 0; 3102 3103 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3104 3105 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3106 p = &hw->dev_caps; 3107 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3108 p = &hw->func_caps; 3109 else 3110 return; 3111 3112 for (i = 0; i < cap_count; i++, cap++) { 3113 id = le16_to_cpu(cap->id); 3114 number = le32_to_cpu(cap->number); 3115 logical_id = le32_to_cpu(cap->logical_id); 3116 phys_id = le32_to_cpu(cap->phys_id); 3117 major_rev = cap->major_rev; 3118 3119 switch (id) { 3120 case I40E_AQ_CAP_ID_SWITCH_MODE: 3121 p->switch_mode = number; 3122 break; 3123 case I40E_AQ_CAP_ID_MNG_MODE: 3124 p->management_mode = number; 3125 if (major_rev > 1) { 3126 p->mng_protocols_over_mctp = logical_id; 3127 i40e_debug(hw, I40E_DEBUG_INIT, 3128 "HW Capability: Protocols over MCTP = %d\n", 3129 p->mng_protocols_over_mctp); 3130 } else { 3131 p->mng_protocols_over_mctp = 0; 3132 } 3133 break; 3134 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3135 p->npar_enable = number; 3136 break; 3137 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3138 p->os2bmc = number; 3139 break; 3140 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3141 p->valid_functions = number; 3142 break; 3143 case I40E_AQ_CAP_ID_SRIOV: 3144 if (number == 1) 3145 p->sr_iov_1_1 = true; 3146 break; 3147 case I40E_AQ_CAP_ID_VF: 3148 p->num_vfs = number; 3149 p->vf_base_id = logical_id; 3150 break; 3151 case I40E_AQ_CAP_ID_VMDQ: 3152 if (number == 1) 3153 p->vmdq = true; 3154 break; 3155 case I40E_AQ_CAP_ID_8021QBG: 3156 if (number == 1) 3157 p->evb_802_1_qbg = true; 3158 break; 3159 case I40E_AQ_CAP_ID_8021QBR: 3160 if (number == 1) 3161 p->evb_802_1_qbh = true; 3162 break; 3163 case I40E_AQ_CAP_ID_VSI: 3164 p->num_vsis = number; 3165 break; 3166 case I40E_AQ_CAP_ID_DCB: 3167 if (number == 1) { 3168 p->dcb = true; 3169 p->enabled_tcmap = logical_id; 3170 p->maxtc = phys_id; 3171 } 3172 break; 3173 case I40E_AQ_CAP_ID_FCOE: 3174 if (number == 1) 3175 p->fcoe = true; 3176 break; 3177 case I40E_AQ_CAP_ID_ISCSI: 3178 if (number == 1) 3179 p->iscsi = true; 3180 break; 3181 case I40E_AQ_CAP_ID_RSS: 3182 p->rss = true; 3183 p->rss_table_size = number; 3184 p->rss_table_entry_width = logical_id; 3185 break; 3186 case I40E_AQ_CAP_ID_RXQ: 3187 p->num_rx_qp = number; 3188 p->base_queue = phys_id; 3189 break; 3190 case I40E_AQ_CAP_ID_TXQ: 3191 p->num_tx_qp = number; 3192 p->base_queue = phys_id; 3193 break; 3194 case I40E_AQ_CAP_ID_MSIX: 3195 p->num_msix_vectors = number; 3196 i40e_debug(hw, I40E_DEBUG_INIT, 3197 "HW Capability: MSIX vector count = %d\n", 3198 p->num_msix_vectors); 3199 break; 3200 case I40E_AQ_CAP_ID_VF_MSIX: 3201 p->num_msix_vectors_vf = number; 3202 break; 3203 case I40E_AQ_CAP_ID_FLEX10: 3204 if (major_rev == 1) { 3205 if (number == 1) { 3206 p->flex10_enable = true; 3207 p->flex10_capable = true; 3208 } 3209 } else { 3210 /* Capability revision >= 2 */ 3211 if (number & 1) 3212 p->flex10_enable = true; 3213 if (number & 2) 3214 p->flex10_capable = true; 3215 } 3216 p->flex10_mode = logical_id; 3217 p->flex10_status = phys_id; 3218 break; 3219 case I40E_AQ_CAP_ID_CEM: 3220 if (number == 1) 3221 
p->mgmt_cem = true; 3222 break; 3223 case I40E_AQ_CAP_ID_IWARP: 3224 if (number == 1) 3225 p->iwarp = true; 3226 break; 3227 case I40E_AQ_CAP_ID_LED: 3228 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3229 p->led[phys_id] = true; 3230 break; 3231 case I40E_AQ_CAP_ID_SDP: 3232 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3233 p->sdp[phys_id] = true; 3234 break; 3235 case I40E_AQ_CAP_ID_MDIO: 3236 if (number == 1) { 3237 p->mdio_port_num = phys_id; 3238 p->mdio_port_mode = logical_id; 3239 } 3240 break; 3241 case I40E_AQ_CAP_ID_1588: 3242 if (number == 1) 3243 p->ieee_1588 = true; 3244 break; 3245 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3246 p->fd = true; 3247 p->fd_filters_guaranteed = number; 3248 p->fd_filters_best_effort = logical_id; 3249 break; 3250 case I40E_AQ_CAP_ID_WSR_PROT: 3251 p->wr_csr_prot = (u64)number; 3252 p->wr_csr_prot |= (u64)logical_id << 32; 3253 break; 3254 case I40E_AQ_CAP_ID_NVM_MGMT: 3255 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3256 p->sec_rev_disabled = true; 3257 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3258 p->update_disabled = true; 3259 break; 3260 default: 3261 break; 3262 } 3263 } 3264 3265 if (p->fcoe) 3266 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3267 3268 /* Software override ensuring FCoE is disabled if npar or mfp 3269 * mode because it is not supported in these modes. 3270 */ 3271 if (p->npar_enable || p->flex10_enable) 3272 p->fcoe = false; 3273 3274 /* count the enabled ports (aka the "not disabled" ports) */ 3275 hw->num_ports = 0; 3276 for (i = 0; i < 4; i++) { 3277 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3278 u64 port_cfg = 0; 3279 3280 /* use AQ read to get the physical register offset instead 3281 * of the port relative offset 3282 */ 3283 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3284 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3285 hw->num_ports++; 3286 } 3287 3288 /* OCP cards case: if a mezz is removed the Ethernet port is at 3289 * disabled state in PRTGEN_CNF register. Additional NVM read is 3290 * needed in order to check if we are dealing with OCP card. 3291 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3292 * physical ports results in wrong partition id calculation and thus 3293 * not supporting WoL. 
3294 */ 3295 if (hw->mac.type == I40E_MAC_X722) { 3296 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3297 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3298 2 * I40E_SR_OCP_CFG_WORD0, 3299 sizeof(ocp_cfg_word0), 3300 &ocp_cfg_word0, true, NULL); 3301 if (!status && 3302 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3303 hw->num_ports = 4; 3304 i40e_release_nvm(hw); 3305 } 3306 } 3307 3308 valid_functions = p->valid_functions; 3309 num_functions = 0; 3310 while (valid_functions) { 3311 if (valid_functions & 1) 3312 num_functions++; 3313 valid_functions >>= 1; 3314 } 3315 3316 /* partition id is 1-based, and functions are evenly spread 3317 * across the ports as partitions 3318 */ 3319 if (hw->num_ports != 0) { 3320 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3321 hw->num_partitions = num_functions / hw->num_ports; 3322 } 3323 3324 /* additional HW specific goodies that might 3325 * someday be HW version specific 3326 */ 3327 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3328 } 3329 3330 /** 3331 * i40e_aq_discover_capabilities 3332 * @hw: pointer to the hw struct 3333 * @buff: a virtual buffer to hold the capabilities 3334 * @buff_size: Size of the virtual buffer 3335 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3336 * @list_type_opc: capabilities type to discover - pass in the command opcode 3337 * @cmd_details: pointer to command details structure or NULL 3338 * 3339 * Get the device capabilities descriptions from the firmware 3340 **/ 3341 int i40e_aq_discover_capabilities(struct i40e_hw *hw, 3342 void *buff, u16 buff_size, u16 *data_size, 3343 enum i40e_admin_queue_opc list_type_opc, 3344 struct i40e_asq_cmd_details *cmd_details) 3345 { 3346 struct i40e_aqc_list_capabilites *cmd; 3347 struct i40e_aq_desc desc; 3348 int status = 0; 3349 3350 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3351 3352 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3353 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3354 status = -EINVAL; 3355 goto exit; 3356 } 3357 3358 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3359 3360 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3361 if (buff_size > I40E_AQ_LARGE_BUF) 3362 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3363 3364 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3365 *data_size = le16_to_cpu(desc.datalen); 3366 3367 if (status) 3368 goto exit; 3369 3370 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3371 list_type_opc); 3372 3373 exit: 3374 return status; 3375 } 3376 3377 /** 3378 * i40e_aq_update_nvm 3379 * @hw: pointer to the hw struct 3380 * @module_pointer: module pointer location in words from the NVM beginning 3381 * @offset: byte offset from the module beginning 3382 * @length: length of the section to be written (in bytes from the offset) 3383 * @data: command buffer (size [bytes] = length) 3384 * @last_command: tells if this is the last command in a series 3385 * @preservation_flags: Preservation mode flags 3386 * @cmd_details: pointer to command details structure or NULL 3387 * 3388 * Update the NVM using the admin queue commands 3389 **/ 3390 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3391 u32 offset, u16 length, void *data, 3392 bool last_command, u8 preservation_flags, 3393 struct i40e_asq_cmd_details *cmd_details) 3394 { 3395 struct i40e_aq_desc desc; 3396 struct i40e_aqc_nvm_update *cmd = 3397 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3398 int status; 3399 3400 /* In 
offset the highest byte must be zeroed. */ 3401 if (offset & 0xFF000000) { 3402 status = -EINVAL; 3403 goto i40e_aq_update_nvm_exit; 3404 } 3405 3406 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3407 3408 /* If this is the last command in a series, set the proper flag. */ 3409 if (last_command) 3410 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3411 if (hw->mac.type == I40E_MAC_X722) { 3412 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3413 cmd->command_flags |= 3414 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3415 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3416 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3417 cmd->command_flags |= 3418 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3419 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3420 } 3421 cmd->module_pointer = module_pointer; 3422 cmd->offset = cpu_to_le32(offset); 3423 cmd->length = cpu_to_le16(length); 3424 3425 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3426 if (length > I40E_AQ_LARGE_BUF) 3427 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3428 3429 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3430 3431 i40e_aq_update_nvm_exit: 3432 return status; 3433 } 3434 3435 /** 3436 * i40e_aq_rearrange_nvm 3437 * @hw: pointer to the hw struct 3438 * @rearrange_nvm: defines direction of rearrangement 3439 * @cmd_details: pointer to command details structure or NULL 3440 * 3441 * Rearrange NVM structure, available only for transition FW 3442 **/ 3443 int i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3444 u8 rearrange_nvm, 3445 struct i40e_asq_cmd_details *cmd_details) 3446 { 3447 struct i40e_aqc_nvm_update *cmd; 3448 struct i40e_aq_desc desc; 3449 int status; 3450 3451 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3452 3453 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3454 3455 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3456 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3457 3458 if (!rearrange_nvm) { 3459 status = -EINVAL; 3460 goto i40e_aq_rearrange_nvm_exit; 3461 } 3462 3463 cmd->command_flags |= rearrange_nvm; 3464 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3465 3466 i40e_aq_rearrange_nvm_exit: 3467 return status; 3468 } 3469 3470 /** 3471 * i40e_aq_get_lldp_mib 3472 * @hw: pointer to the hw struct 3473 * @bridge_type: type of bridge requested 3474 * @mib_type: Local, Remote or both Local and Remote MIBs 3475 * @buff: pointer to a user supplied buffer to store the MIB block 3476 * @buff_size: size of the buffer (in bytes) 3477 * @local_len : length of the returned Local LLDP MIB 3478 * @remote_len: length of the returned Remote LLDP MIB 3479 * @cmd_details: pointer to command details structure or NULL 3480 * 3481 * Requests the complete LLDP MIB (entire packet). 
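 *
 * Minimal usage sketch (illustrative only; the bridge and MIB type constants
 * are assumed to come from i40e_adminq_cmd.h and the 1024-byte buffer size
 * is arbitrary):
 *
 *	u8 mib[1024];
 *	u16 local_len = 0, remote_len = 0;
 *
 *	status = i40e_aq_get_lldp_mib(hw,
 *				      I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				      I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				      &local_len, &remote_len, NULL);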
3482 **/ 3483 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3484 u8 mib_type, void *buff, u16 buff_size, 3485 u16 *local_len, u16 *remote_len, 3486 struct i40e_asq_cmd_details *cmd_details) 3487 { 3488 struct i40e_aq_desc desc; 3489 struct i40e_aqc_lldp_get_mib *cmd = 3490 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3491 struct i40e_aqc_lldp_get_mib *resp = 3492 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3493 int status; 3494 3495 if (buff_size == 0 || !buff) 3496 return -EINVAL; 3497 3498 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3499 /* Indirect Command */ 3500 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3501 3502 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3503 cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type); 3504 3505 desc.datalen = cpu_to_le16(buff_size); 3506 3507 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3508 if (buff_size > I40E_AQ_LARGE_BUF) 3509 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3510 3511 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3512 if (!status) { 3513 if (local_len != NULL) 3514 *local_len = le16_to_cpu(resp->local_len); 3515 if (remote_len != NULL) 3516 *remote_len = le16_to_cpu(resp->remote_len); 3517 } 3518 3519 return status; 3520 } 3521 3522 /** 3523 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3524 * @hw: pointer to the hw struct 3525 * @mib_type: Local, Remote or both Local and Remote MIBs 3526 * @buff: pointer to a user supplied buffer to store the MIB block 3527 * @buff_size: size of the buffer (in bytes) 3528 * @cmd_details: pointer to command details structure or NULL 3529 * 3530 * Set the LLDP MIB. 3531 **/ 3532 int 3533 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3534 u8 mib_type, void *buff, u16 buff_size, 3535 struct i40e_asq_cmd_details *cmd_details) 3536 { 3537 struct i40e_aqc_lldp_set_local_mib *cmd; 3538 struct i40e_aq_desc desc; 3539 int status; 3540 3541 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3542 if (buff_size == 0 || !buff) 3543 return -EINVAL; 3544 3545 i40e_fill_default_direct_cmd_desc(&desc, 3546 i40e_aqc_opc_lldp_set_local_mib); 3547 /* Indirect Command */ 3548 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3549 if (buff_size > I40E_AQ_LARGE_BUF) 3550 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3551 desc.datalen = cpu_to_le16(buff_size); 3552 3553 cmd->type = mib_type; 3554 cmd->length = cpu_to_le16(buff_size); 3555 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3556 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3557 3558 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3559 return status; 3560 } 3561 3562 /** 3563 * i40e_aq_cfg_lldp_mib_change_event 3564 * @hw: pointer to the hw struct 3565 * @enable_update: Enable or Disable event posting 3566 * @cmd_details: pointer to command details structure or NULL 3567 * 3568 * Enable or Disable posting of an event on ARQ when LLDP MIB 3569 * associated with the interface changes 3570 **/ 3571 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3572 bool enable_update, 3573 struct i40e_asq_cmd_details *cmd_details) 3574 { 3575 struct i40e_aq_desc desc; 3576 struct i40e_aqc_lldp_update_mib *cmd = 3577 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3578 int status; 3579 3580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3581 3582 if (!enable_update) 3583 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3584 3585 status = 
i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3586 3587 return status; 3588 } 3589 3590 /** 3591 * i40e_aq_restore_lldp 3592 * @hw: pointer to the hw struct 3593 * @setting: pointer to factory setting variable or NULL 3594 * @restore: True if factory settings should be restored 3595 * @cmd_details: pointer to command details structure or NULL 3596 * 3597 * Restore LLDP Agent factory settings if @restore set to True. In other case 3598 * only returns factory setting in AQ response. 3599 **/ 3600 int 3601 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3602 struct i40e_asq_cmd_details *cmd_details) 3603 { 3604 struct i40e_aq_desc desc; 3605 struct i40e_aqc_lldp_restore *cmd = 3606 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3607 int status; 3608 3609 if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { 3610 i40e_debug(hw, I40E_DEBUG_ALL, 3611 "Restore LLDP not supported by current FW version.\n"); 3612 return -ENODEV; 3613 } 3614 3615 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3616 3617 if (restore) 3618 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3619 3620 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3621 3622 if (setting) 3623 *setting = cmd->command & 1; 3624 3625 return status; 3626 } 3627 3628 /** 3629 * i40e_aq_stop_lldp 3630 * @hw: pointer to the hw struct 3631 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3632 * @persist: True if stop of LLDP should be persistent across power cycles 3633 * @cmd_details: pointer to command details structure or NULL 3634 * 3635 * Stop or Shutdown the embedded LLDP Agent 3636 **/ 3637 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3638 bool persist, 3639 struct i40e_asq_cmd_details *cmd_details) 3640 { 3641 struct i40e_aq_desc desc; 3642 struct i40e_aqc_lldp_stop *cmd = 3643 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3644 int status; 3645 3646 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3647 3648 if (shutdown_agent) 3649 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3650 3651 if (persist) { 3652 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) 3653 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3654 else 3655 i40e_debug(hw, I40E_DEBUG_ALL, 3656 "Persistent Stop LLDP not supported by current FW version.\n"); 3657 } 3658 3659 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3660 3661 return status; 3662 } 3663 3664 /** 3665 * i40e_aq_start_lldp 3666 * @hw: pointer to the hw struct 3667 * @persist: True if start of LLDP should be persistent across power cycles 3668 * @cmd_details: pointer to command details structure or NULL 3669 * 3670 * Start the embedded LLDP Agent on all ports. 
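 *
 * Illustrative call requesting a persistent start (the function below
 * downgrades this to a non-persistent start and logs a debug message when
 * the FW lacks I40E_HW_CAP_FW_LLDP_PERSISTENT):
 *	status = i40e_aq_start_lldp(hw, true, NULL);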
3671 **/ 3672 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3673 struct i40e_asq_cmd_details *cmd_details) 3674 { 3675 struct i40e_aq_desc desc; 3676 struct i40e_aqc_lldp_start *cmd = 3677 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3678 int status; 3679 3680 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3681 3682 cmd->command = I40E_AQ_LLDP_AGENT_START; 3683 3684 if (persist) { 3685 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) 3686 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3687 else 3688 i40e_debug(hw, I40E_DEBUG_ALL, 3689 "Persistent Start LLDP not supported by current FW version.\n"); 3690 } 3691 3692 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3693 3694 return status; 3695 } 3696 3697 /** 3698 * i40e_aq_set_dcb_parameters 3699 * @hw: pointer to the hw struct 3700 * @cmd_details: pointer to command details structure or NULL 3701 * @dcb_enable: True if DCB configuration needs to be applied 3702 * 3703 **/ 3704 int 3705 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3706 struct i40e_asq_cmd_details *cmd_details) 3707 { 3708 struct i40e_aq_desc desc; 3709 struct i40e_aqc_set_dcb_parameters *cmd = 3710 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3711 int status; 3712 3713 if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps)) 3714 return -ENODEV; 3715 3716 i40e_fill_default_direct_cmd_desc(&desc, 3717 i40e_aqc_opc_set_dcb_parameters); 3718 3719 if (dcb_enable) { 3720 cmd->valid_flags = I40E_DCB_VALID; 3721 cmd->command = I40E_AQ_DCB_SET_AGENT; 3722 } 3723 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3724 3725 return status; 3726 } 3727 3728 /** 3729 * i40e_aq_get_cee_dcb_config 3730 * @hw: pointer to the hw struct 3731 * @buff: response buffer that stores CEE operational configuration 3732 * @buff_size: size of the buffer passed 3733 * @cmd_details: pointer to command details structure or NULL 3734 * 3735 * Get CEE DCBX mode operational configuration from firmware 3736 **/ 3737 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3738 void *buff, u16 buff_size, 3739 struct i40e_asq_cmd_details *cmd_details) 3740 { 3741 struct i40e_aq_desc desc; 3742 int status; 3743 3744 if (buff_size == 0 || !buff) 3745 return -EINVAL; 3746 3747 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3748 3749 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3750 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3751 cmd_details); 3752 3753 return status; 3754 } 3755 3756 /** 3757 * i40e_aq_add_udp_tunnel 3758 * @hw: pointer to the hw struct 3759 * @udp_port: the UDP port to add in Host byte order 3760 * @protocol_index: protocol index type 3761 * @filter_index: pointer to filter index 3762 * @cmd_details: pointer to command details structure or NULL 3763 * 3764 * Note: Firmware expects the udp_port value to be in Little Endian format, 3765 * and this function will call cpu_to_le16 to convert from Host byte order to 3766 * Little Endian order. 
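 *
 * Illustrative call adding the standard VXLAN port (the port number and the
 * tunnel type constant are example values chosen for this sketch):
 *	u8 filter_idx;
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					&filter_idx, NULL);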
3767 **/ 3768 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3769 u16 udp_port, u8 protocol_index, 3770 u8 *filter_index, 3771 struct i40e_asq_cmd_details *cmd_details) 3772 { 3773 struct i40e_aq_desc desc; 3774 struct i40e_aqc_add_udp_tunnel *cmd = 3775 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3776 struct i40e_aqc_del_udp_tunnel_completion *resp = 3777 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3778 int status; 3779 3780 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3781 3782 cmd->udp_port = cpu_to_le16(udp_port); 3783 cmd->protocol_type = protocol_index; 3784 3785 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3786 3787 if (!status && filter_index) 3788 *filter_index = resp->index; 3789 3790 return status; 3791 } 3792 3793 /** 3794 * i40e_aq_del_udp_tunnel 3795 * @hw: pointer to the hw struct 3796 * @index: filter index 3797 * @cmd_details: pointer to command details structure or NULL 3798 **/ 3799 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3800 struct i40e_asq_cmd_details *cmd_details) 3801 { 3802 struct i40e_aq_desc desc; 3803 struct i40e_aqc_remove_udp_tunnel *cmd = 3804 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3805 int status; 3806 3807 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3808 3809 cmd->index = index; 3810 3811 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3812 3813 return status; 3814 } 3815 3816 /** 3817 * i40e_aq_delete_element - Delete switch element 3818 * @hw: pointer to the hw struct 3819 * @seid: the SEID to delete from the switch 3820 * @cmd_details: pointer to command details structure or NULL 3821 * 3822 * This deletes a switch element from the switch. 3823 **/ 3824 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3825 struct i40e_asq_cmd_details *cmd_details) 3826 { 3827 struct i40e_aq_desc desc; 3828 struct i40e_aqc_switch_seid *cmd = 3829 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3830 int status; 3831 3832 if (seid == 0) 3833 return -EINVAL; 3834 3835 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3836 3837 cmd->seid = cpu_to_le16(seid); 3838 3839 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3840 cmd_details, true); 3841 3842 return status; 3843 } 3844 3845 /** 3846 * i40e_aq_dcb_updated - DCB Updated Command 3847 * @hw: pointer to the hw struct 3848 * @cmd_details: pointer to command details structure or NULL 3849 * 3850 * EMP will return when the shared RPB settings have been 3851 * recomputed and modified. The retval field in the descriptor 3852 * will be set to 0 when RPB is modified. 
3853 **/ 3854 int i40e_aq_dcb_updated(struct i40e_hw *hw, 3855 struct i40e_asq_cmd_details *cmd_details) 3856 { 3857 struct i40e_aq_desc desc; 3858 int status; 3859 3860 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3861 3862 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3863 3864 return status; 3865 } 3866 3867 /** 3868 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3869 * @hw: pointer to the hw struct 3870 * @seid: seid for the physical port/switching component/vsi 3871 * @buff: Indirect buffer to hold data parameters and response 3872 * @buff_size: Indirect buffer size 3873 * @opcode: Tx scheduler AQ command opcode 3874 * @cmd_details: pointer to command details structure or NULL 3875 * 3876 * Generic command handler for Tx scheduler AQ commands 3877 **/ 3878 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3879 void *buff, u16 buff_size, 3880 enum i40e_admin_queue_opc opcode, 3881 struct i40e_asq_cmd_details *cmd_details) 3882 { 3883 struct i40e_aq_desc desc; 3884 struct i40e_aqc_tx_sched_ind *cmd = 3885 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3886 int status; 3887 bool cmd_param_flag = false; 3888 3889 switch (opcode) { 3890 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3891 case i40e_aqc_opc_configure_vsi_tc_bw: 3892 case i40e_aqc_opc_enable_switching_comp_ets: 3893 case i40e_aqc_opc_modify_switching_comp_ets: 3894 case i40e_aqc_opc_disable_switching_comp_ets: 3895 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3896 case i40e_aqc_opc_configure_switching_comp_bw_config: 3897 cmd_param_flag = true; 3898 break; 3899 case i40e_aqc_opc_query_vsi_bw_config: 3900 case i40e_aqc_opc_query_vsi_ets_sla_config: 3901 case i40e_aqc_opc_query_switching_comp_ets_config: 3902 case i40e_aqc_opc_query_port_ets_config: 3903 case i40e_aqc_opc_query_switching_comp_bw_config: 3904 cmd_param_flag = false; 3905 break; 3906 default: 3907 return -EINVAL; 3908 } 3909 3910 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3911 3912 /* Indirect command */ 3913 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3914 if (cmd_param_flag) 3915 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3916 if (buff_size > I40E_AQ_LARGE_BUF) 3917 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3918 3919 desc.datalen = cpu_to_le16(buff_size); 3920 3921 cmd->vsi_seid = cpu_to_le16(seid); 3922 3923 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3924 3925 return status; 3926 } 3927 3928 /** 3929 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3930 * @hw: pointer to the hw struct 3931 * @seid: VSI seid 3932 * @credit: BW limit credits (0 = disabled) 3933 * @max_credit: Max BW limit credits 3934 * @cmd_details: pointer to command details structure or NULL 3935 **/ 3936 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3937 u16 seid, u16 credit, u8 max_credit, 3938 struct i40e_asq_cmd_details *cmd_details) 3939 { 3940 struct i40e_aq_desc desc; 3941 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3942 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3943 int status; 3944 3945 i40e_fill_default_direct_cmd_desc(&desc, 3946 i40e_aqc_opc_configure_vsi_bw_limit); 3947 3948 cmd->vsi_seid = cpu_to_le16(seid); 3949 cmd->credit = cpu_to_le16(credit); 3950 cmd->max_credit = max_credit; 3951 3952 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3953 3954 return status; 3955 } 3956 3957 /** 3958 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 3959 * @hw: pointer to the hw struct 
3960 * @seid: VSI seid 3961 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 3962 * @cmd_details: pointer to command details structure or NULL 3963 **/ 3964 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 3965 u16 seid, 3966 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 3967 struct i40e_asq_cmd_details *cmd_details) 3968 { 3969 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3970 i40e_aqc_opc_configure_vsi_tc_bw, 3971 cmd_details); 3972 } 3973 3974 /** 3975 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 3976 * @hw: pointer to the hw struct 3977 * @seid: seid of the switching component connected to Physical Port 3978 * @ets_data: Buffer holding ETS parameters 3979 * @opcode: Tx scheduler AQ command opcode 3980 * @cmd_details: pointer to command details structure or NULL 3981 **/ 3982 int 3983 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 3984 u16 seid, 3985 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 3986 enum i40e_admin_queue_opc opcode, 3987 struct i40e_asq_cmd_details *cmd_details) 3988 { 3989 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 3990 sizeof(*ets_data), opcode, cmd_details); 3991 } 3992 3993 /** 3994 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 3995 * @hw: pointer to the hw struct 3996 * @seid: seid of the switching component 3997 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 3998 * @cmd_details: pointer to command details structure or NULL 3999 **/ 4000 int 4001 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4002 u16 seid, 4003 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4004 struct i40e_asq_cmd_details *cmd_details) 4005 { 4006 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4007 i40e_aqc_opc_configure_switching_comp_bw_config, 4008 cmd_details); 4009 } 4010 4011 /** 4012 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4013 * @hw: pointer to the hw struct 4014 * @seid: seid of the VSI 4015 * @bw_data: Buffer to hold VSI BW configuration 4016 * @cmd_details: pointer to command details structure or NULL 4017 **/ 4018 int 4019 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4020 u16 seid, 4021 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4022 struct i40e_asq_cmd_details *cmd_details) 4023 { 4024 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4025 i40e_aqc_opc_query_vsi_bw_config, 4026 cmd_details); 4027 } 4028 4029 /** 4030 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4031 * @hw: pointer to the hw struct 4032 * @seid: seid of the VSI 4033 * @bw_data: Buffer to hold VSI BW configuration per TC 4034 * @cmd_details: pointer to command details structure or NULL 4035 **/ 4036 int 4037 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4038 u16 seid, 4039 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4040 struct i40e_asq_cmd_details *cmd_details) 4041 { 4042 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4043 i40e_aqc_opc_query_vsi_ets_sla_config, 4044 cmd_details); 4045 } 4046 4047 /** 4048 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4049 * @hw: pointer to the hw struct 4050 * @seid: seid of the switching component 4051 * @bw_data: Buffer to hold switching component's per TC BW config 4052 * @cmd_details: pointer to command details structure or NULL 4053 **/ 4054 int 4055 i40e_aq_query_switch_comp_ets_config(struct i40e_hw 
*hw, 4056 u16 seid, 4057 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4058 struct i40e_asq_cmd_details *cmd_details) 4059 { 4060 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4061 i40e_aqc_opc_query_switching_comp_ets_config, 4062 cmd_details); 4063 } 4064 4065 /** 4066 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4067 * @hw: pointer to the hw struct 4068 * @seid: seid of the VSI or switching component connected to Physical Port 4069 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4070 * @cmd_details: pointer to command details structure or NULL 4071 **/ 4072 int 4073 i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4074 u16 seid, 4075 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4076 struct i40e_asq_cmd_details *cmd_details) 4077 { 4078 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4079 i40e_aqc_opc_query_port_ets_config, 4080 cmd_details); 4081 } 4082 4083 /** 4084 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4085 * @hw: pointer to the hw struct 4086 * @seid: seid of the switching component 4087 * @bw_data: Buffer to hold switching component's BW configuration 4088 * @cmd_details: pointer to command details structure or NULL 4089 **/ 4090 int 4091 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4092 u16 seid, 4093 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4094 struct i40e_asq_cmd_details *cmd_details) 4095 { 4096 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4097 i40e_aqc_opc_query_switching_comp_bw_config, 4098 cmd_details); 4099 } 4100 4101 /** 4102 * i40e_validate_filter_settings 4103 * @hw: pointer to the hardware structure 4104 * @settings: Filter control settings 4105 * 4106 * Check and validate the filter control settings passed. 4107 * The function checks for the valid filter/context sizes being 4108 * passed for FCoE and PE. 4109 * 4110 * Returns 0 if the values passed are valid and within 4111 * range else returns an error. 
4112 **/ 4113 static int 4114 i40e_validate_filter_settings(struct i40e_hw *hw, 4115 struct i40e_filter_control_settings *settings) 4116 { 4117 u32 fcoe_cntx_size, fcoe_filt_size; 4118 u32 fcoe_fmax; 4119 u32 val; 4120 4121 /* Validate FCoE settings passed */ 4122 switch (settings->fcoe_filt_num) { 4123 case I40E_HASH_FILTER_SIZE_1K: 4124 case I40E_HASH_FILTER_SIZE_2K: 4125 case I40E_HASH_FILTER_SIZE_4K: 4126 case I40E_HASH_FILTER_SIZE_8K: 4127 case I40E_HASH_FILTER_SIZE_16K: 4128 case I40E_HASH_FILTER_SIZE_32K: 4129 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4130 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4131 break; 4132 default: 4133 return -EINVAL; 4134 } 4135 4136 switch (settings->fcoe_cntx_num) { 4137 case I40E_DMA_CNTX_SIZE_512: 4138 case I40E_DMA_CNTX_SIZE_1K: 4139 case I40E_DMA_CNTX_SIZE_2K: 4140 case I40E_DMA_CNTX_SIZE_4K: 4141 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4142 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4143 break; 4144 default: 4145 return -EINVAL; 4146 } 4147 4148 /* Validate PE settings passed */ 4149 switch (settings->pe_filt_num) { 4150 case I40E_HASH_FILTER_SIZE_1K: 4151 case I40E_HASH_FILTER_SIZE_2K: 4152 case I40E_HASH_FILTER_SIZE_4K: 4153 case I40E_HASH_FILTER_SIZE_8K: 4154 case I40E_HASH_FILTER_SIZE_16K: 4155 case I40E_HASH_FILTER_SIZE_32K: 4156 case I40E_HASH_FILTER_SIZE_64K: 4157 case I40E_HASH_FILTER_SIZE_128K: 4158 case I40E_HASH_FILTER_SIZE_256K: 4159 case I40E_HASH_FILTER_SIZE_512K: 4160 case I40E_HASH_FILTER_SIZE_1M: 4161 break; 4162 default: 4163 return -EINVAL; 4164 } 4165 4166 switch (settings->pe_cntx_num) { 4167 case I40E_DMA_CNTX_SIZE_512: 4168 case I40E_DMA_CNTX_SIZE_1K: 4169 case I40E_DMA_CNTX_SIZE_2K: 4170 case I40E_DMA_CNTX_SIZE_4K: 4171 case I40E_DMA_CNTX_SIZE_8K: 4172 case I40E_DMA_CNTX_SIZE_16K: 4173 case I40E_DMA_CNTX_SIZE_32K: 4174 case I40E_DMA_CNTX_SIZE_64K: 4175 case I40E_DMA_CNTX_SIZE_128K: 4176 case I40E_DMA_CNTX_SIZE_256K: 4177 break; 4178 default: 4179 return -EINVAL; 4180 } 4181 4182 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4183 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4184 fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val); 4185 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4186 return -EINVAL; 4187 4188 return 0; 4189 } 4190 4191 /** 4192 * i40e_set_filter_control 4193 * @hw: pointer to the hardware structure 4194 * @settings: Filter control settings 4195 * 4196 * Set the Queue Filters for PE/FCoE and enable filters required 4197 * for a single PF. It is expected that these settings are programmed 4198 * at the driver initialization time. 
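 *
 * Sketch of a typical settings block (the individual sizes are illustrative;
 * they must still satisfy i40e_validate_filter_settings() above and the FCoE
 * limit read from I40E_GLHMC_FCOEFMAX):
 *	struct i40e_filter_control_settings s = {
 *		.fcoe_filt_num = I40E_HASH_FILTER_SIZE_4K,
 *		.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_2K,
 *		.pe_filt_num = I40E_HASH_FILTER_SIZE_16K,
 *		.pe_cntx_num = I40E_DMA_CNTX_SIZE_16K,
 *		.hash_lut_size = I40E_HASH_LUT_SIZE_512,
 *		.enable_fdir = true,
 *		.enable_ethtype = true,
 *		.enable_macvlan = true,
 *	};
 *	ret = i40e_set_filter_control(hw, &s);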
4199 **/ 4200 int i40e_set_filter_control(struct i40e_hw *hw, 4201 struct i40e_filter_control_settings *settings) 4202 { 4203 u32 hash_lut_size = 0; 4204 int ret = 0; 4205 u32 val; 4206 4207 if (!settings) 4208 return -EINVAL; 4209 4210 /* Validate the input settings */ 4211 ret = i40e_validate_filter_settings(hw, settings); 4212 if (ret) 4213 return ret; 4214 4215 /* Read the PF Queue Filter control register */ 4216 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4217 4218 /* Program required PE hash buckets for the PF */ 4219 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4220 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num); 4221 /* Program required PE contexts for the PF */ 4222 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4223 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num); 4224 4225 /* Program required FCoE hash buckets for the PF */ 4226 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4227 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK, 4228 settings->fcoe_filt_num); 4229 /* Program required FCoE DDP contexts for the PF */ 4230 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4231 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK, 4232 settings->fcoe_cntx_num); 4233 4234 /* Program Hash LUT size for the PF */ 4235 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4236 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4237 hash_lut_size = 1; 4238 val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size); 4239 4240 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4241 if (settings->enable_fdir) 4242 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4243 if (settings->enable_ethtype) 4244 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4245 if (settings->enable_macvlan) 4246 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4247 4248 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4249 4250 return 0; 4251 } 4252 4253 /** 4254 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4255 * @hw: pointer to the hw struct 4256 * @mac_addr: MAC address to use in the filter 4257 * @ethtype: Ethertype to use in the filter 4258 * @flags: Flags that needs to be applied to the filter 4259 * @vsi_seid: seid of the control VSI 4260 * @queue: VSI queue number to send the packet to 4261 * @is_add: Add control packet filter if True else remove 4262 * @stats: Structure to hold information on control filter counts 4263 * @cmd_details: pointer to command details structure or NULL 4264 * 4265 * This command will Add or Remove control packet filter for a control VSI. 4266 * In return it will update the total number of perfect filter count in 4267 * the stats member. 
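 *
 * For a concrete caller see i40e_add_filter_to_drop_tx_flow_control_frames()
 * below, which adds a Tx drop filter for the flow control ethertype.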
4268 **/ 4269 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4270 u8 *mac_addr, u16 ethtype, u16 flags, 4271 u16 vsi_seid, u16 queue, bool is_add, 4272 struct i40e_control_filter_stats *stats, 4273 struct i40e_asq_cmd_details *cmd_details) 4274 { 4275 struct i40e_aq_desc desc; 4276 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4277 (struct i40e_aqc_add_remove_control_packet_filter *) 4278 &desc.params.raw; 4279 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4280 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4281 &desc.params.raw; 4282 int status; 4283 4284 if (vsi_seid == 0) 4285 return -EINVAL; 4286 4287 if (is_add) { 4288 i40e_fill_default_direct_cmd_desc(&desc, 4289 i40e_aqc_opc_add_control_packet_filter); 4290 cmd->queue = cpu_to_le16(queue); 4291 } else { 4292 i40e_fill_default_direct_cmd_desc(&desc, 4293 i40e_aqc_opc_remove_control_packet_filter); 4294 } 4295 4296 if (mac_addr) 4297 ether_addr_copy(cmd->mac, mac_addr); 4298 4299 cmd->etype = cpu_to_le16(ethtype); 4300 cmd->flags = cpu_to_le16(flags); 4301 cmd->seid = cpu_to_le16(vsi_seid); 4302 4303 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4304 4305 if (!status && stats) { 4306 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4307 stats->etype_used = le16_to_cpu(resp->etype_used); 4308 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4309 stats->etype_free = le16_to_cpu(resp->etype_free); 4310 } 4311 4312 return status; 4313 } 4314 4315 /** 4316 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4317 * @hw: pointer to the hw struct 4318 * @seid: VSI seid to add ethertype filter from 4319 **/ 4320 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4321 u16 seid) 4322 { 4323 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4324 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4325 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4326 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4327 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4328 int status; 4329 4330 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4331 seid, 0, true, NULL, 4332 NULL); 4333 if (status) 4334 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4335 } 4336 4337 /** 4338 * i40e_aq_alternate_read 4339 * @hw: pointer to the hardware structure 4340 * @reg_addr0: address of first dword to be read 4341 * @reg_val0: pointer for data read from 'reg_addr0' 4342 * @reg_addr1: address of second dword to be read 4343 * @reg_val1: pointer for data read from 'reg_addr1' 4344 * 4345 * Read one or two dwords from alternate structure. Fields are indicated 4346 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4347 * is not passed then only register at 'reg_addr0' is read. 
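 *
 * i40e_read_bw_from_alt_ram() below uses this helper to fetch the per-PF
 * min/max bandwidth words in a single command.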
4348 * 4349 **/ 4350 static int i40e_aq_alternate_read(struct i40e_hw *hw, 4351 u32 reg_addr0, u32 *reg_val0, 4352 u32 reg_addr1, u32 *reg_val1) 4353 { 4354 struct i40e_aq_desc desc; 4355 struct i40e_aqc_alternate_write *cmd_resp = 4356 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4357 int status; 4358 4359 if (!reg_val0) 4360 return -EINVAL; 4361 4362 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4363 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4364 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4365 4366 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4367 4368 if (!status) { 4369 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4370 4371 if (reg_val1) 4372 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4373 } 4374 4375 return status; 4376 } 4377 4378 /** 4379 * i40e_aq_suspend_port_tx 4380 * @hw: pointer to the hardware structure 4381 * @seid: port seid 4382 * @cmd_details: pointer to command details structure or NULL 4383 * 4384 * Suspend port's Tx traffic 4385 **/ 4386 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4387 struct i40e_asq_cmd_details *cmd_details) 4388 { 4389 struct i40e_aqc_tx_sched_ind *cmd; 4390 struct i40e_aq_desc desc; 4391 int status; 4392 4393 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4394 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4395 cmd->vsi_seid = cpu_to_le16(seid); 4396 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4397 4398 return status; 4399 } 4400 4401 /** 4402 * i40e_aq_resume_port_tx 4403 * @hw: pointer to the hardware structure 4404 * @cmd_details: pointer to command details structure or NULL 4405 * 4406 * Resume port's Tx traffic 4407 **/ 4408 int i40e_aq_resume_port_tx(struct i40e_hw *hw, 4409 struct i40e_asq_cmd_details *cmd_details) 4410 { 4411 struct i40e_aq_desc desc; 4412 int status; 4413 4414 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4415 4416 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4417 4418 return status; 4419 } 4420 4421 /** 4422 * i40e_set_pci_config_data - store PCI bus info 4423 * @hw: pointer to hardware structure 4424 * @link_status: the link status word from PCI config space 4425 * 4426 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4427 **/ 4428 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4429 { 4430 hw->bus.type = i40e_bus_type_pci_express; 4431 4432 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4433 case PCI_EXP_LNKSTA_NLW_X1: 4434 hw->bus.width = i40e_bus_width_pcie_x1; 4435 break; 4436 case PCI_EXP_LNKSTA_NLW_X2: 4437 hw->bus.width = i40e_bus_width_pcie_x2; 4438 break; 4439 case PCI_EXP_LNKSTA_NLW_X4: 4440 hw->bus.width = i40e_bus_width_pcie_x4; 4441 break; 4442 case PCI_EXP_LNKSTA_NLW_X8: 4443 hw->bus.width = i40e_bus_width_pcie_x8; 4444 break; 4445 default: 4446 hw->bus.width = i40e_bus_width_unknown; 4447 break; 4448 } 4449 4450 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4451 case PCI_EXP_LNKSTA_CLS_2_5GB: 4452 hw->bus.speed = i40e_bus_speed_2500; 4453 break; 4454 case PCI_EXP_LNKSTA_CLS_5_0GB: 4455 hw->bus.speed = i40e_bus_speed_5000; 4456 break; 4457 case PCI_EXP_LNKSTA_CLS_8_0GB: 4458 hw->bus.speed = i40e_bus_speed_8000; 4459 break; 4460 default: 4461 hw->bus.speed = i40e_bus_speed_unknown; 4462 break; 4463 } 4464 } 4465 4466 /** 4467 * i40e_aq_debug_dump 4468 * @hw: pointer to the hardware structure 4469 * @cluster_id: specific cluster to dump 4470 * @table_id: table id within cluster 4471 * @start_index: index of line 
in the block to read 4472 * @buff_size: dump buffer size 4473 * @buff: dump buffer 4474 * @ret_buff_size: actual buffer size returned 4475 * @ret_next_table: next block to read 4476 * @ret_next_index: next index to read 4477 * @cmd_details: pointer to command details structure or NULL 4478 * 4479 * Dump internal FW/HW data for debug purposes. 4480 * 4481 **/ 4482 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4483 u8 table_id, u32 start_index, u16 buff_size, 4484 void *buff, u16 *ret_buff_size, 4485 u8 *ret_next_table, u32 *ret_next_index, 4486 struct i40e_asq_cmd_details *cmd_details) 4487 { 4488 struct i40e_aq_desc desc; 4489 struct i40e_aqc_debug_dump_internals *cmd = 4490 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4491 struct i40e_aqc_debug_dump_internals *resp = 4492 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4493 int status; 4494 4495 if (buff_size == 0 || !buff) 4496 return -EINVAL; 4497 4498 i40e_fill_default_direct_cmd_desc(&desc, 4499 i40e_aqc_opc_debug_dump_internals); 4500 /* Indirect Command */ 4501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4502 if (buff_size > I40E_AQ_LARGE_BUF) 4503 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4504 4505 cmd->cluster_id = cluster_id; 4506 cmd->table_id = table_id; 4507 cmd->idx = cpu_to_le32(start_index); 4508 4509 desc.datalen = cpu_to_le16(buff_size); 4510 4511 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4512 if (!status) { 4513 if (ret_buff_size) 4514 *ret_buff_size = le16_to_cpu(desc.datalen); 4515 if (ret_next_table) 4516 *ret_next_table = resp->table_id; 4517 if (ret_next_index) 4518 *ret_next_index = le32_to_cpu(resp->idx); 4519 } 4520 4521 return status; 4522 } 4523 4524 /** 4525 * i40e_read_bw_from_alt_ram 4526 * @hw: pointer to the hardware structure 4527 * @max_bw: pointer for max_bw read 4528 * @min_bw: pointer for min_bw read 4529 * @min_valid: pointer for bool that is true if min_bw is a valid value 4530 * @max_valid: pointer for bool that is true if max_bw is a valid value 4531 * 4532 * Read bw from the alternate ram for the given pf 4533 **/ 4534 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4535 u32 *max_bw, u32 *min_bw, 4536 bool *min_valid, bool *max_valid) 4537 { 4538 u32 max_bw_addr, min_bw_addr; 4539 int status; 4540 4541 /* Calculate the address of the min/max bw registers */ 4542 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4543 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4544 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4545 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4546 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4547 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4548 4549 /* Read the bandwidths from alt ram */ 4550 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4551 min_bw_addr, min_bw); 4552 4553 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4554 *min_valid = true; 4555 else 4556 *min_valid = false; 4557 4558 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4559 *max_valid = true; 4560 else 4561 *max_valid = false; 4562 4563 return status; 4564 } 4565 4566 /** 4567 * i40e_aq_configure_partition_bw 4568 * @hw: pointer to the hardware structure 4569 * @bw_data: Buffer holding valid pfs and bw limits 4570 * @cmd_details: pointer to command details 4571 * 4572 * Configure partitions guaranteed/max bw 4573 **/ 4574 int 4575 i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4576 struct i40e_aqc_configure_partition_bw_data *bw_data, 4577 struct i40e_asq_cmd_details *cmd_details) 4578 { 4579 u16 bwd_size = sizeof(*bw_data); 4580 struct i40e_aq_desc desc; 4581 
int status; 4582 4583 i40e_fill_default_direct_cmd_desc(&desc, 4584 i40e_aqc_opc_configure_partition_bw); 4585 4586 /* Indirect command */ 4587 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4588 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4589 4590 if (bwd_size > I40E_AQ_LARGE_BUF) 4591 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4592 4593 desc.datalen = cpu_to_le16(bwd_size); 4594 4595 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4596 cmd_details); 4597 4598 return status; 4599 } 4600 4601 /** 4602 * i40e_read_phy_register_clause22 4603 * @hw: pointer to the HW structure 4604 * @reg: register address in the page 4605 * @phy_addr: PHY address on MDIO interface 4606 * @value: PHY register value 4607 * 4608 * Reads specified PHY register value 4609 **/ 4610 int i40e_read_phy_register_clause22(struct i40e_hw *hw, 4611 u16 reg, u8 phy_addr, u16 *value) 4612 { 4613 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4614 int status = -EIO; 4615 u32 command = 0; 4616 u16 retry = 1000; 4617 4618 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4619 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4620 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4621 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4622 (I40E_GLGEN_MSCA_MDICMD_MASK); 4623 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4624 do { 4625 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4626 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4627 status = 0; 4628 break; 4629 } 4630 udelay(10); 4631 retry--; 4632 } while (retry); 4633 4634 if (status) { 4635 i40e_debug(hw, I40E_DEBUG_PHY, 4636 "PHY: Can't write command to external PHY.\n"); 4637 } else { 4638 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4639 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command); 4640 } 4641 4642 return status; 4643 } 4644 4645 /** 4646 * i40e_write_phy_register_clause22 4647 * @hw: pointer to the HW structure 4648 * @reg: register address in the page 4649 * @phy_addr: PHY address on MDIO interface 4650 * @value: PHY register value 4651 * 4652 * Writes specified PHY register value 4653 **/ 4654 int i40e_write_phy_register_clause22(struct i40e_hw *hw, 4655 u16 reg, u8 phy_addr, u16 value) 4656 { 4657 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4658 int status = -EIO; 4659 u32 command = 0; 4660 u16 retry = 1000; 4661 4662 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4663 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4664 4665 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4666 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4667 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4668 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4669 (I40E_GLGEN_MSCA_MDICMD_MASK); 4670 4671 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4672 do { 4673 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4674 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4675 status = 0; 4676 break; 4677 } 4678 udelay(10); 4679 retry--; 4680 } while (retry); 4681 4682 return status; 4683 } 4684 4685 /** 4686 * i40e_read_phy_register_clause45 4687 * @hw: pointer to the HW structure 4688 * @page: registers page number 4689 * @reg: register address in the page 4690 * @phy_addr: PHY address on MDIO interface 4691 * @value: PHY register value 4692 * 4693 * Reads specified PHY register value 4694 **/ 4695 int i40e_read_phy_register_clause45(struct i40e_hw *hw, 4696 u8 page, u16 reg, u8 phy_addr, u16 *value) 4697 { 4698 u8 port_num = hw->func_caps.mdio_port_num; 4699 int status = -EIO; 4700 u32 command = 0; 4701 u16 retry = 1000; 4702 4703 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4704 (page << 
I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4705 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4706 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4707 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4708 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4709 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4710 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4711 do { 4712 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4713 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4714 status = 0; 4715 break; 4716 } 4717 usleep_range(10, 20); 4718 retry--; 4719 } while (retry); 4720 4721 if (status) { 4722 i40e_debug(hw, I40E_DEBUG_PHY, 4723 "PHY: Can't write command to external PHY.\n"); 4724 goto phy_read_end; 4725 } 4726 4727 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4728 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4729 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4730 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4731 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4732 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4733 status = -EIO; 4734 retry = 1000; 4735 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4736 do { 4737 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4738 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4739 status = 0; 4740 break; 4741 } 4742 usleep_range(10, 20); 4743 retry--; 4744 } while (retry); 4745 4746 if (!status) { 4747 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4748 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command); 4749 } else { 4750 i40e_debug(hw, I40E_DEBUG_PHY, 4751 "PHY: Can't read register value from external PHY.\n"); 4752 } 4753 4754 phy_read_end: 4755 return status; 4756 } 4757 4758 /** 4759 * i40e_write_phy_register_clause45 4760 * @hw: pointer to the HW structure 4761 * @page: registers page number 4762 * @reg: register address in the page 4763 * @phy_addr: PHY address on MDIO interface 4764 * @value: PHY register value 4765 * 4766 * Writes value to specified PHY register 4767 **/ 4768 int i40e_write_phy_register_clause45(struct i40e_hw *hw, 4769 u8 page, u16 reg, u8 phy_addr, u16 value) 4770 { 4771 u8 port_num = hw->func_caps.mdio_port_num; 4772 int status = -EIO; 4773 u16 retry = 1000; 4774 u32 command = 0; 4775 4776 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4777 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4778 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4779 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4780 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4781 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4782 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4783 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4784 do { 4785 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4786 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4787 status = 0; 4788 break; 4789 } 4790 usleep_range(10, 20); 4791 retry--; 4792 } while (retry); 4793 if (status) { 4794 i40e_debug(hw, I40E_DEBUG_PHY, 4795 "PHY: Can't write command to external PHY.\n"); 4796 goto phy_write_end; 4797 } 4798 4799 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4800 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4801 4802 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4803 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4804 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4805 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4806 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4807 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4808 status = -EIO; 4809 retry = 1000; 4810 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4811 do { 4812 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4813 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4814 status = 0; 4815 break; 4816 } 4817 usleep_range(10, 20); 4818 retry--; 4819 } while (retry); 4820 4821 phy_write_end: 4822 
return status; 4823 } 4824 4825 /** 4826 * i40e_write_phy_register 4827 * @hw: pointer to the HW structure 4828 * @page: registers page number 4829 * @reg: register address in the page 4830 * @phy_addr: PHY address on MDIO interface 4831 * @value: PHY register value 4832 * 4833 * Writes value to specified PHY register 4834 **/ 4835 int i40e_write_phy_register(struct i40e_hw *hw, 4836 u8 page, u16 reg, u8 phy_addr, u16 value) 4837 { 4838 int status; 4839 4840 switch (hw->device_id) { 4841 case I40E_DEV_ID_1G_BASE_T_X722: 4842 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4843 value); 4844 break; 4845 case I40E_DEV_ID_1G_BASE_T_BC: 4846 case I40E_DEV_ID_5G_BASE_T_BC: 4847 case I40E_DEV_ID_10G_BASE_T: 4848 case I40E_DEV_ID_10G_BASE_T4: 4849 case I40E_DEV_ID_10G_BASE_T_BC: 4850 case I40E_DEV_ID_10G_BASE_T_X722: 4851 case I40E_DEV_ID_25G_B: 4852 case I40E_DEV_ID_25G_SFP28: 4853 status = i40e_write_phy_register_clause45(hw, page, reg, 4854 phy_addr, value); 4855 break; 4856 default: 4857 status = -EIO; 4858 break; 4859 } 4860 4861 return status; 4862 } 4863 4864 /** 4865 * i40e_read_phy_register 4866 * @hw: pointer to the HW structure 4867 * @page: registers page number 4868 * @reg: register address in the page 4869 * @phy_addr: PHY address on MDIO interface 4870 * @value: PHY register value 4871 * 4872 * Reads specified PHY register value 4873 **/ 4874 int i40e_read_phy_register(struct i40e_hw *hw, 4875 u8 page, u16 reg, u8 phy_addr, u16 *value) 4876 { 4877 int status; 4878 4879 switch (hw->device_id) { 4880 case I40E_DEV_ID_1G_BASE_T_X722: 4881 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4882 value); 4883 break; 4884 case I40E_DEV_ID_1G_BASE_T_BC: 4885 case I40E_DEV_ID_5G_BASE_T_BC: 4886 case I40E_DEV_ID_10G_BASE_T: 4887 case I40E_DEV_ID_10G_BASE_T4: 4888 case I40E_DEV_ID_10G_BASE_T_BC: 4889 case I40E_DEV_ID_10G_BASE_T_X722: 4890 case I40E_DEV_ID_25G_B: 4891 case I40E_DEV_ID_25G_SFP28: 4892 status = i40e_read_phy_register_clause45(hw, page, reg, 4893 phy_addr, value); 4894 break; 4895 default: 4896 status = -EIO; 4897 break; 4898 } 4899 4900 return status; 4901 } 4902 4903 /** 4904 * i40e_get_phy_address 4905 * @hw: pointer to the HW structure 4906 * @dev_num: PHY port num that address we want 4907 * 4908 * Gets PHY address for current port 4909 **/ 4910 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4911 { 4912 u8 port_num = hw->func_caps.mdio_port_num; 4913 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4914 4915 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4916 } 4917 4918 /** 4919 * i40e_blink_phy_link_led 4920 * @hw: pointer to the HW structure 4921 * @time: time how long led will blinks in secs 4922 * @interval: gap between LED on and off in msecs 4923 * 4924 * Blinks PHY link LED 4925 **/ 4926 int i40e_blink_phy_link_led(struct i40e_hw *hw, 4927 u32 time, u32 interval) 4928 { 4929 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4930 u16 gpio_led_port; 4931 u8 phy_addr = 0; 4932 int status = 0; 4933 u16 led_ctl; 4934 u8 port_num; 4935 u16 led_reg; 4936 u32 i; 4937 4938 i = rd32(hw, I40E_PFGEN_PORTNUM); 4939 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4940 phy_addr = i40e_get_phy_address(hw, port_num); 4941 4942 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4943 led_addr++) { 4944 status = i40e_read_phy_register_clause45(hw, 4945 I40E_PHY_COM_REG_PAGE, 4946 led_addr, phy_addr, 4947 &led_reg); 4948 if (status) 4949 goto phy_blinking_end; 4950 led_ctl = led_reg; 4951 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4952 
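			/* This LED is in link mode: clear it so it can be
			 * driven manually below; led_ctl keeps the original
			 * value for restore_config.
			 */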
led_reg = 0; 4953 status = i40e_write_phy_register_clause45(hw, 4954 I40E_PHY_COM_REG_PAGE, 4955 led_addr, phy_addr, 4956 led_reg); 4957 if (status) 4958 goto phy_blinking_end; 4959 break; 4960 } 4961 } 4962 4963 if (time > 0 && interval > 0) { 4964 for (i = 0; i < time * 1000; i += interval) { 4965 status = i40e_read_phy_register_clause45(hw, 4966 I40E_PHY_COM_REG_PAGE, 4967 led_addr, phy_addr, &led_reg); 4968 if (status) 4969 goto restore_config; 4970 if (led_reg & I40E_PHY_LED_MANUAL_ON) 4971 led_reg = 0; 4972 else 4973 led_reg = I40E_PHY_LED_MANUAL_ON; 4974 status = i40e_write_phy_register_clause45(hw, 4975 I40E_PHY_COM_REG_PAGE, 4976 led_addr, phy_addr, led_reg); 4977 if (status) 4978 goto restore_config; 4979 msleep(interval); 4980 } 4981 } 4982 4983 restore_config: 4984 status = i40e_write_phy_register_clause45(hw, 4985 I40E_PHY_COM_REG_PAGE, 4986 led_addr, phy_addr, led_ctl); 4987 4988 phy_blinking_end: 4989 return status; 4990 } 4991 4992 /** 4993 * i40e_led_get_reg - read LED register 4994 * @hw: pointer to the HW structure 4995 * @led_addr: LED register address 4996 * @reg_val: read register value 4997 **/ 4998 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 4999 u32 *reg_val) 5000 { 5001 u8 phy_addr = 0; 5002 u8 port_num; 5003 int status; 5004 u32 i; 5005 5006 *reg_val = 0; 5007 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5008 status = 5009 i40e_aq_get_phy_register(hw, 5010 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5011 I40E_PHY_COM_REG_PAGE, true, 5012 I40E_PHY_LED_PROV_REG_1, 5013 reg_val, NULL); 5014 } else { 5015 i = rd32(hw, I40E_PFGEN_PORTNUM); 5016 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5017 phy_addr = i40e_get_phy_address(hw, port_num); 5018 status = i40e_read_phy_register_clause45(hw, 5019 I40E_PHY_COM_REG_PAGE, 5020 led_addr, phy_addr, 5021 (u16 *)reg_val); 5022 } 5023 return status; 5024 } 5025 5026 /** 5027 * i40e_led_set_reg - write LED register 5028 * @hw: pointer to the HW structure 5029 * @led_addr: LED register address 5030 * @reg_val: register value to write 5031 **/ 5032 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5033 u32 reg_val) 5034 { 5035 u8 phy_addr = 0; 5036 u8 port_num; 5037 int status; 5038 u32 i; 5039 5040 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5041 status = 5042 i40e_aq_set_phy_register(hw, 5043 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5044 I40E_PHY_COM_REG_PAGE, true, 5045 I40E_PHY_LED_PROV_REG_1, 5046 reg_val, NULL); 5047 } else { 5048 i = rd32(hw, I40E_PFGEN_PORTNUM); 5049 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5050 phy_addr = i40e_get_phy_address(hw, port_num); 5051 status = i40e_write_phy_register_clause45(hw, 5052 I40E_PHY_COM_REG_PAGE, 5053 led_addr, phy_addr, 5054 (u16)reg_val); 5055 } 5056 5057 return status; 5058 } 5059 5060 /** 5061 * i40e_led_get_phy - return current on/off mode 5062 * @hw: pointer to the hw struct 5063 * @led_addr: address of led register to use 5064 * @val: original value of register to use 5065 * 5066 **/ 5067 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5068 u16 *val) 5069 { 5070 u16 gpio_led_port; 5071 u8 phy_addr = 0; 5072 u32 reg_val_aq; 5073 int status = 0; 5074 u16 temp_addr; 5075 u16 reg_val; 5076 u8 port_num; 5077 u32 i; 5078 5079 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5080 status = 5081 i40e_aq_get_phy_register(hw, 5082 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5083 I40E_PHY_COM_REG_PAGE, true, 5084 I40E_PHY_LED_PROV_REG_1, 5085 &reg_val_aq, NULL); 5086 if (status == 0) 5087 *val = (u16)reg_val_aq; 5088 return status; 5089 }
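	/* No AQ PHY register access: fall back to MDIO (clause 45) and scan
	 * the LED provisioning registers for the one in link mode.
	 */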
5090 temp_addr = I40E_PHY_LED_PROV_REG_1; 5091 i = rd32(hw, I40E_PFGEN_PORTNUM); 5092 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5093 phy_addr = i40e_get_phy_address(hw, port_num); 5094 5095 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5096 temp_addr++) { 5097 status = i40e_read_phy_register_clause45(hw, 5098 I40E_PHY_COM_REG_PAGE, 5099 temp_addr, phy_addr, 5100 &reg_val); 5101 if (status) 5102 return status; 5103 *val = reg_val; 5104 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5105 *led_addr = temp_addr; 5106 break; 5107 } 5108 } 5109 return status; 5110 } 5111 5112 /** 5113 * i40e_led_set_phy 5114 * @hw: pointer to the HW structure 5115 * @on: true or false 5116 * @led_addr: address of led register to use 5117 * @mode: original val plus bit for set or ignore 5118 * 5119 * Set LEDs on or off when controlled by the PHY 5120 * 5121 **/ 5122 int i40e_led_set_phy(struct i40e_hw *hw, bool on, 5123 u16 led_addr, u32 mode) 5124 { 5125 u32 led_ctl = 0; 5126 u32 led_reg = 0; 5127 int status = 0; 5128 5129 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5130 if (status) 5131 return status; 5132 led_ctl = led_reg; 5133 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5134 led_reg = 0; 5135 status = i40e_led_set_reg(hw, led_addr, led_reg); 5136 if (status) 5137 return status; 5138 } 5139 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5140 if (status) 5141 goto restore_config; 5142 if (on) 5143 led_reg = I40E_PHY_LED_MANUAL_ON; 5144 else 5145 led_reg = 0; 5146 5147 status = i40e_led_set_reg(hw, led_addr, led_reg); 5148 if (status) 5149 goto restore_config; 5150 if (mode & I40E_PHY_LED_MODE_ORIG) { 5151 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5152 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5153 } 5154 return status; 5155 5156 restore_config: 5157 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5158 return status; 5159 } 5160 5161 /** 5162 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5163 * @hw: pointer to the hw struct 5164 * @reg_addr: register address 5165 * @reg_val: ptr to register value 5166 * @cmd_details: pointer to command details structure or NULL 5167 * 5168 * Use the firmware to read the Rx control register, 5169 * especially useful if the Rx unit is under heavy pressure 5170 **/ 5171 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5172 u32 reg_addr, u32 *reg_val, 5173 struct i40e_asq_cmd_details *cmd_details) 5174 { 5175 struct i40e_aq_desc desc; 5176 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5177 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5178 int status; 5179 5180 if (!reg_val) 5181 return -EINVAL; 5182 5183 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5184 5185 cmd_resp->address = cpu_to_le32(reg_addr); 5186 5187 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5188 5189 if (status == 0) 5190 *reg_val = le32_to_cpu(cmd_resp->value); 5191 5192 return status; 5193 } 5194 5195 /** 5196 * i40e_read_rx_ctl - read from an Rx control register 5197 * @hw: pointer to the hw struct 5198 * @reg_addr: register address 5199 **/ 5200 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5201 { 5202 bool use_register = false; 5203 int status = 0; 5204 int retry = 5; 5205 u32 val = 0; 5206 5207 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) 5208 use_register = true; 5209 5210 if (!use_register) { 5211 do_retry: 5212 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5213 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN &&
retry) { 5214 usleep_range(1000, 2000); 5215 retry--; 5216 goto do_retry; 5217 } 5218 } 5219 5220 /* if the AQ access failed, try the old-fashioned way */ 5221 if (status || use_register) 5222 val = rd32(hw, reg_addr); 5223 5224 return val; 5225 } 5226 5227 /** 5228 * i40e_aq_rx_ctl_write_register 5229 * @hw: pointer to the hw struct 5230 * @reg_addr: register address 5231 * @reg_val: register value 5232 * @cmd_details: pointer to command details structure or NULL 5233 * 5234 * Use the firmware to write to an Rx control register, 5235 * especially useful if the Rx unit is under heavy pressure 5236 **/ 5237 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5238 u32 reg_addr, u32 reg_val, 5239 struct i40e_asq_cmd_details *cmd_details) 5240 { 5241 struct i40e_aq_desc desc; 5242 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5243 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5244 int status; 5245 5246 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5247 5248 cmd->address = cpu_to_le32(reg_addr); 5249 cmd->value = cpu_to_le32(reg_val); 5250 5251 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5252 5253 return status; 5254 } 5255 5256 /** 5257 * i40e_write_rx_ctl - write to an Rx control register 5258 * @hw: pointer to the hw struct 5259 * @reg_addr: register address 5260 * @reg_val: register value 5261 **/ 5262 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5263 { 5264 bool use_register = false; 5265 int status = 0; 5266 int retry = 5; 5267 5268 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) 5269 use_register = true; 5270 5271 if (!use_register) { 5272 do_retry: 5273 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5274 reg_val, NULL); 5275 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5276 usleep_range(1000, 2000); 5277 retry--; 5278 goto do_retry; 5279 } 5280 } 5281 5282 /* if the AQ access failed, try the old-fashioned way */ 5283 if (status || use_register) 5284 wr32(hw, reg_addr, reg_val); 5285 } 5286 5287 /** 5288 * i40e_mdio_if_number_selection - MDIO I/F number selection 5289 * @hw: pointer to the hw struct 5290 * @set_mdio: use MDIO I/F number specified by mdio_num 5291 * @mdio_num: MDIO I/F number 5292 * @cmd: pointer to PHY Register command structure 5293 **/ 5294 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5295 u8 mdio_num, 5296 struct i40e_aqc_phy_register_access *cmd) 5297 { 5298 if (!set_mdio || 5299 cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL) 5300 return; 5301 5302 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) { 5303 cmd->cmd_flags |= 5304 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5305 FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK, 5306 mdio_num); 5307 } else { 5308 i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n"); 5309 } 5310 } 5311 5312 /** 5313 * i40e_aq_set_phy_register_ext 5314 * @hw: pointer to the hw struct 5315 * @phy_select: select which phy should be accessed 5316 * @dev_addr: PHY device address 5317 * @page_change: flag to indicate if phy page should be updated 5318 * @set_mdio: use MDIO I/F number specified by mdio_num 5319 * @mdio_num: MDIO I/F number 5320 * @reg_addr: PHY register address 5321 * @reg_val: new register value 5322 * @cmd_details: pointer to command details structure or NULL 5323 * 5324 * Write the external PHY register. 
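 *
 * Illustrative call (external PHY, no page change, default MDIO I/F;
 * dev_addr, reg_addr and reg_val stand for caller-supplied values in this
 * sketch):
 *	status = i40e_aq_set_phy_register_ext(hw,
 *			I40E_AQ_PHY_REG_ACCESS_EXTERNAL, dev_addr, false,
 *			false, 0, reg_addr, reg_val, NULL);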
5325 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
5326 * you may use the simple wrapper i40e_aq_set_phy_register.
5327 **/
5328 int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5329 u8 phy_select, u8 dev_addr, bool page_change,
5330 bool set_mdio, u8 mdio_num,
5331 u32 reg_addr, u32 reg_val,
5332 struct i40e_asq_cmd_details *cmd_details)
5333 {
5334 struct i40e_aq_desc desc;
5335 struct i40e_aqc_phy_register_access *cmd =
5336 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5337 int status;
5338
5339 i40e_fill_default_direct_cmd_desc(&desc,
5340 i40e_aqc_opc_set_phy_register);
5341
5342 cmd->phy_interface = phy_select;
5343 cmd->dev_address = dev_addr;
5344 cmd->reg_address = cpu_to_le32(reg_addr);
5345 cmd->reg_value = cpu_to_le32(reg_val);
5346
5347 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5348
5349 if (!page_change)
5350 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5351
5352 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5353
5354 return status;
5355 }
5356
5357 /**
5358 * i40e_aq_get_phy_register_ext
5359 * @hw: pointer to the hw struct
5360 * @phy_select: select which phy should be accessed
5361 * @dev_addr: PHY device address
5362 * @page_change: flag to indicate if phy page should be updated
5363 * @set_mdio: use MDIO I/F number specified by mdio_num
5364 * @mdio_num: MDIO I/F number
5365 * @reg_addr: PHY register address
5366 * @reg_val: read register value
5367 * @cmd_details: pointer to command details structure or NULL
5368 *
5369 * Read the external PHY register.
5370 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
5371 * you may use the simple wrapper i40e_aq_get_phy_register.
5372 **/
5373 int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5374 u8 phy_select, u8 dev_addr, bool page_change,
5375 bool set_mdio, u8 mdio_num,
5376 u32 reg_addr, u32 *reg_val,
5377 struct i40e_asq_cmd_details *cmd_details)
5378 {
5379 struct i40e_aq_desc desc;
5380 struct i40e_aqc_phy_register_access *cmd =
5381 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5382 int status;
5383
5384 i40e_fill_default_direct_cmd_desc(&desc,
5385 i40e_aqc_opc_get_phy_register);
5386
5387 cmd->phy_interface = phy_select;
5388 cmd->dev_address = dev_addr;
5389 cmd->reg_address = cpu_to_le32(reg_addr);
5390
5391 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5392
5393 if (!page_change)
5394 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5395
5396 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5397 if (!status)
5398 *reg_val = le32_to_cpu(cmd->reg_value);
5399
5400 return status;
5401 }
5402
5403 /**
5404 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5405 * @hw: pointer to the hw struct
5406 * @buff: command buffer (size in bytes = buff_size)
5407 * @buff_size: buffer size in bytes
5408 * @track_id: package tracking id
5409 * @error_offset: returns error offset
5410 * @error_info: returns error information
5411 * @cmd_details: pointer to command details structure or NULL
5412 **/
5413 int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5414 u16 buff_size, u32 track_id,
5415 u32 *error_offset, u32 *error_info,
5416 struct i40e_asq_cmd_details *cmd_details)
5417 {
5418 struct i40e_aq_desc desc;
5419 struct i40e_aqc_write_personalization_profile *cmd =
5420 (struct i40e_aqc_write_personalization_profile *)
5421 &desc.params.raw;
5422 struct i40e_aqc_write_ddp_resp *resp;
5423 int status;
5424
5425
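/* The section is sent as an indirect admin queue command: the descriptor
 * below carries the tracking id, while the section bytes travel in the
 * attached buffer (flags BUF|RD, plus LB for buffers larger than
 * I40E_AQ_LARGE_BUF).
 */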
i40e_fill_default_direct_cmd_desc(&desc, 5426 i40e_aqc_opc_write_personalization_profile); 5427 5428 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5429 if (buff_size > I40E_AQ_LARGE_BUF) 5430 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5431 5432 desc.datalen = cpu_to_le16(buff_size); 5433 5434 cmd->profile_track_id = cpu_to_le32(track_id); 5435 5436 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5437 if (!status) { 5438 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5439 if (error_offset) 5440 *error_offset = le32_to_cpu(resp->error_offset); 5441 if (error_info) 5442 *error_info = le32_to_cpu(resp->error_info); 5443 } 5444 5445 return status; 5446 } 5447 5448 /** 5449 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5450 * @hw: pointer to the hw struct 5451 * @buff: command buffer (size in bytes = buff_size) 5452 * @buff_size: buffer size in bytes 5453 * @flags: AdminQ command flags 5454 * @cmd_details: pointer to command details structure or NULL 5455 **/ 5456 int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5457 u16 buff_size, u8 flags, 5458 struct i40e_asq_cmd_details *cmd_details) 5459 { 5460 struct i40e_aq_desc desc; 5461 struct i40e_aqc_get_applied_profiles *cmd = 5462 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5463 int status; 5464 5465 i40e_fill_default_direct_cmd_desc(&desc, 5466 i40e_aqc_opc_get_personalization_profile_list); 5467 5468 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5469 if (buff_size > I40E_AQ_LARGE_BUF) 5470 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5471 desc.datalen = cpu_to_le16(buff_size); 5472 5473 cmd->flags = flags; 5474 5475 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5476 5477 return status; 5478 } 5479 5480 /** 5481 * i40e_find_segment_in_package 5482 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) 5483 * @pkg_hdr: pointer to the package header to be searched 5484 * 5485 * This function searches a package file for a particular segment type. On 5486 * success it returns a pointer to the segment header, otherwise it will 5487 * return NULL. 5488 **/ 5489 struct i40e_generic_seg_header * 5490 i40e_find_segment_in_package(u32 segment_type, 5491 struct i40e_package_header *pkg_hdr) 5492 { 5493 struct i40e_generic_seg_header *segment; 5494 u32 i; 5495 5496 /* Search all package segments for the requested segment type */ 5497 for (i = 0; i < pkg_hdr->segment_count; i++) { 5498 segment = 5499 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5500 pkg_hdr->segment_offset[i]); 5501 5502 if (segment->type == segment_type) 5503 return segment; 5504 } 5505 5506 return NULL; 5507 } 5508 5509 /* Get section table in profile */ 5510 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5511 do { \ 5512 struct i40e_profile_segment *p = (profile); \ 5513 u32 count; \ 5514 u32 *nvm; \ 5515 count = p->device_table_count; \ 5516 nvm = (u32 *)&p->device_table[count]; \ 5517 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5518 } while (0) 5519 5520 /* Get section header in profile */ 5521 #define I40E_SECTION_HEADER(profile, offset) \ 5522 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5523 5524 /** 5525 * i40e_find_section_in_profile 5526 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5527 * @profile: pointer to the i40e segment header to be searched 5528 * 5529 * This function searches i40e segment for a particular section type. 
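 * Sections are located through the segment's section table, which the
 * I40E_SECTION_TABLE() helper above derives from the data that follows the
 * device table and the NVM word list.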
On 5530 * success it returns a pointer to the section header, otherwise it will 5531 * return NULL. 5532 **/ 5533 struct i40e_profile_section_header * 5534 i40e_find_section_in_profile(u32 section_type, 5535 struct i40e_profile_segment *profile) 5536 { 5537 struct i40e_profile_section_header *sec; 5538 struct i40e_section_table *sec_tbl; 5539 u32 sec_off; 5540 u32 i; 5541 5542 if (profile->header.type != SEGMENT_TYPE_I40E) 5543 return NULL; 5544 5545 I40E_SECTION_TABLE(profile, sec_tbl); 5546 5547 for (i = 0; i < sec_tbl->section_count; i++) { 5548 sec_off = sec_tbl->section_offset[i]; 5549 sec = I40E_SECTION_HEADER(profile, sec_off); 5550 if (sec->section.type == section_type) 5551 return sec; 5552 } 5553 5554 return NULL; 5555 } 5556 5557 /** 5558 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5559 * @hw: pointer to the hw struct 5560 * @aq: command buffer containing all data to execute AQ 5561 **/ 5562 static int i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5563 struct i40e_profile_aq_section *aq) 5564 { 5565 struct i40e_aq_desc desc; 5566 u8 *msg = NULL; 5567 u16 msglen; 5568 int status; 5569 5570 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5571 desc.flags |= cpu_to_le16(aq->flags); 5572 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5573 5574 msglen = aq->datalen; 5575 if (msglen) { 5576 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5577 I40E_AQ_FLAG_RD)); 5578 if (msglen > I40E_AQ_LARGE_BUF) 5579 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5580 desc.datalen = cpu_to_le16(msglen); 5581 msg = &aq->data[0]; 5582 } 5583 5584 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5585 5586 if (status) { 5587 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5588 "unable to exec DDP AQ opcode %u, error %d\n", 5589 aq->opcode, status); 5590 return status; 5591 } 5592 5593 /* copy returned desc to aq_buf */ 5594 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); 5595 5596 return 0; 5597 } 5598 5599 /** 5600 * i40e_validate_profile 5601 * @hw: pointer to the hardware structure 5602 * @profile: pointer to the profile segment of the package to be validated 5603 * @track_id: package tracking id 5604 * @rollback: flag if the profile is for rollback. 5605 * 5606 * Validates supported devices and profile's sections. 
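 * Returns 0 when the profile can be applied, -EOPNOTSUPP when the track id
 * is invalid or when a section type does not match the requested direction
 * (original package vs. roll-back), and -ENODEV when the profile's device
 * table is non-empty but does not list this adapter.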
5607 */ 5608 static int 5609 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5610 u32 track_id, bool rollback) 5611 { 5612 struct i40e_profile_section_header *sec = NULL; 5613 struct i40e_section_table *sec_tbl; 5614 u32 vendor_dev_id; 5615 int status = 0; 5616 u32 dev_cnt; 5617 u32 sec_off; 5618 u32 i; 5619 5620 if (track_id == I40E_DDP_TRACKID_INVALID) { 5621 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5622 return -EOPNOTSUPP; 5623 } 5624 5625 dev_cnt = profile->device_table_count; 5626 for (i = 0; i < dev_cnt; i++) { 5627 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5628 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5629 hw->device_id == (vendor_dev_id & 0xFFFF)) 5630 break; 5631 } 5632 if (dev_cnt && i == dev_cnt) { 5633 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5634 "Device doesn't support DDP\n"); 5635 return -ENODEV; 5636 } 5637 5638 I40E_SECTION_TABLE(profile, sec_tbl); 5639 5640 /* Validate sections types */ 5641 for (i = 0; i < sec_tbl->section_count; i++) { 5642 sec_off = sec_tbl->section_offset[i]; 5643 sec = I40E_SECTION_HEADER(profile, sec_off); 5644 if (rollback) { 5645 if (sec->section.type == SECTION_TYPE_MMIO || 5646 sec->section.type == SECTION_TYPE_AQ || 5647 sec->section.type == SECTION_TYPE_RB_AQ) { 5648 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5649 "Not a roll-back package\n"); 5650 return -EOPNOTSUPP; 5651 } 5652 } else { 5653 if (sec->section.type == SECTION_TYPE_RB_AQ || 5654 sec->section.type == SECTION_TYPE_RB_MMIO) { 5655 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5656 "Not an original package\n"); 5657 return -EOPNOTSUPP; 5658 } 5659 } 5660 } 5661 5662 return status; 5663 } 5664 5665 /** 5666 * i40e_write_profile 5667 * @hw: pointer to the hardware structure 5668 * @profile: pointer to the profile segment of the package to be downloaded 5669 * @track_id: package tracking id 5670 * 5671 * Handles the download of a complete package. 
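 * The profile is validated first; any SECTION_TYPE_AQ sections are executed
 * as admin queue commands (and re-tagged as SECTION_TYPE_RB_AQ so a later
 * roll-back can find them), then every SECTION_TYPE_MMIO section is
 * downloaded with i40e_aq_write_ddp().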
5672 */ 5673 int 5674 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5675 u32 track_id) 5676 { 5677 struct i40e_profile_section_header *sec = NULL; 5678 struct i40e_profile_aq_section *ddp_aq; 5679 struct i40e_section_table *sec_tbl; 5680 u32 offset = 0, info = 0; 5681 u32 section_size = 0; 5682 int status = 0; 5683 u32 sec_off; 5684 u32 i; 5685 5686 status = i40e_validate_profile(hw, profile, track_id, false); 5687 if (status) 5688 return status; 5689 5690 I40E_SECTION_TABLE(profile, sec_tbl); 5691 5692 for (i = 0; i < sec_tbl->section_count; i++) { 5693 sec_off = sec_tbl->section_offset[i]; 5694 sec = I40E_SECTION_HEADER(profile, sec_off); 5695 /* Process generic admin command */ 5696 if (sec->section.type == SECTION_TYPE_AQ) { 5697 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5698 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5699 if (status) { 5700 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5701 "Failed to execute aq: section %d, opcode %u\n", 5702 i, ddp_aq->opcode); 5703 break; 5704 } 5705 sec->section.type = SECTION_TYPE_RB_AQ; 5706 } 5707 5708 /* Skip any non-mmio sections */ 5709 if (sec->section.type != SECTION_TYPE_MMIO) 5710 continue; 5711 5712 section_size = sec->section.size + 5713 sizeof(struct i40e_profile_section_header); 5714 5715 /* Write MMIO section */ 5716 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5717 track_id, &offset, &info, NULL); 5718 if (status) { 5719 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5720 "Failed to write profile: section %d, offset %d, info %d\n", 5721 i, offset, info); 5722 break; 5723 } 5724 } 5725 return status; 5726 } 5727 5728 /** 5729 * i40e_rollback_profile 5730 * @hw: pointer to the hardware structure 5731 * @profile: pointer to the profile segment of the package to be removed 5732 * @track_id: package tracking id 5733 * 5734 * Rolls back previously loaded package. 5735 */ 5736 int 5737 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5738 u32 track_id) 5739 { 5740 struct i40e_profile_section_header *sec = NULL; 5741 struct i40e_section_table *sec_tbl; 5742 u32 offset = 0, info = 0; 5743 u32 section_size = 0; 5744 int status = 0; 5745 u32 sec_off; 5746 int i; 5747 5748 status = i40e_validate_profile(hw, profile, track_id, true); 5749 if (status) 5750 return status; 5751 5752 I40E_SECTION_TABLE(profile, sec_tbl); 5753 5754 /* For rollback write sections in reverse */ 5755 for (i = sec_tbl->section_count - 1; i >= 0; i--) { 5756 sec_off = sec_tbl->section_offset[i]; 5757 sec = I40E_SECTION_HEADER(profile, sec_off); 5758 5759 /* Skip any non-rollback sections */ 5760 if (sec->section.type != SECTION_TYPE_RB_MMIO) 5761 continue; 5762 5763 section_size = sec->section.size + 5764 sizeof(struct i40e_profile_section_header); 5765 5766 /* Write roll-back MMIO section */ 5767 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5768 track_id, &offset, &info, NULL); 5769 if (status) { 5770 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5771 "Failed to write profile: section %d, offset %d, info %d\n", 5772 i, offset, info); 5773 break; 5774 } 5775 } 5776 return status; 5777 } 5778 5779 /** 5780 * i40e_add_pinfo_to_list 5781 * @hw: pointer to the hardware structure 5782 * @profile: pointer to the profile segment of the package 5783 * @profile_info_sec: buffer for information section 5784 * @track_id: package tracking id 5785 * 5786 * Register a profile to the list of loaded profiles. 
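 * The caller provides @profile_info_sec, a scratch buffer of at least
 * sizeof(struct i40e_profile_section_header) +
 * sizeof(struct i40e_profile_info) bytes; it is formatted here as a
 * SECTION_TYPE_INFO section and written to the device with
 * i40e_aq_write_ddp().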
5787 */ 5788 int 5789 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5790 struct i40e_profile_segment *profile, 5791 u8 *profile_info_sec, u32 track_id) 5792 { 5793 struct i40e_profile_section_header *sec = NULL; 5794 struct i40e_profile_info *pinfo; 5795 u32 offset = 0, info = 0; 5796 int status = 0; 5797 5798 sec = (struct i40e_profile_section_header *)profile_info_sec; 5799 sec->tbl_size = 1; 5800 sec->data_end = sizeof(struct i40e_profile_section_header) + 5801 sizeof(struct i40e_profile_info); 5802 sec->section.type = SECTION_TYPE_INFO; 5803 sec->section.offset = sizeof(struct i40e_profile_section_header); 5804 sec->section.size = sizeof(struct i40e_profile_info); 5805 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5806 sec->section.offset); 5807 pinfo->track_id = track_id; 5808 pinfo->version = profile->version; 5809 pinfo->op = I40E_DDP_ADD_TRACKID; 5810 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5811 5812 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5813 track_id, &offset, &info, NULL); 5814 5815 return status; 5816 } 5817 5818 /** 5819 * i40e_aq_add_cloud_filters 5820 * @hw: pointer to the hardware structure 5821 * @seid: VSI seid to add cloud filters from 5822 * @filters: Buffer which contains the filters to be added 5823 * @filter_count: number of filters contained in the buffer 5824 * 5825 * Set the cloud filters for a given VSI. The contents of the 5826 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5827 * of the function. 5828 * 5829 **/ 5830 int 5831 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5832 struct i40e_aqc_cloud_filters_element_data *filters, 5833 u8 filter_count) 5834 { 5835 struct i40e_aq_desc desc; 5836 struct i40e_aqc_add_remove_cloud_filters *cmd = 5837 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5838 u16 buff_len; 5839 int status; 5840 5841 i40e_fill_default_direct_cmd_desc(&desc, 5842 i40e_aqc_opc_add_cloud_filters); 5843 5844 buff_len = filter_count * sizeof(*filters); 5845 desc.datalen = cpu_to_le16(buff_len); 5846 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5847 cmd->num_filters = filter_count; 5848 cmd->seid = cpu_to_le16(seid); 5849 5850 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5851 5852 return status; 5853 } 5854 5855 /** 5856 * i40e_aq_add_cloud_filters_bb 5857 * @hw: pointer to the hardware structure 5858 * @seid: VSI seid to add cloud filters from 5859 * @filters: Buffer which contains the filters in big buffer to be added 5860 * @filter_count: number of filters contained in the buffer 5861 * 5862 * Set the big buffer cloud filters for a given VSI. The contents of the 5863 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5864 * function. 
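 * Note that Geneve filters are adjusted in place before the command is sent:
 * the 24-bit VNI is shifted up by one byte in the tenant id field, so e.g.
 * VNI 0x000102 is handed to firmware as 0x00010200.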
5865 * 5866 **/ 5867 int 5868 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5869 struct i40e_aqc_cloud_filters_element_bb *filters, 5870 u8 filter_count) 5871 { 5872 struct i40e_aq_desc desc; 5873 struct i40e_aqc_add_remove_cloud_filters *cmd = 5874 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5875 u16 buff_len; 5876 int status; 5877 int i; 5878 5879 i40e_fill_default_direct_cmd_desc(&desc, 5880 i40e_aqc_opc_add_cloud_filters); 5881 5882 buff_len = filter_count * sizeof(*filters); 5883 desc.datalen = cpu_to_le16(buff_len); 5884 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5885 cmd->num_filters = filter_count; 5886 cmd->seid = cpu_to_le16(seid); 5887 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5888 5889 for (i = 0; i < filter_count; i++) { 5890 u16 tnl_type; 5891 u32 ti; 5892 5893 tnl_type = le16_get_bits(filters[i].element.flags, 5894 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK); 5895 5896 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5897 * one more byte further than normally used for Tenant ID in 5898 * other tunnel types. 5899 */ 5900 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5901 ti = le32_to_cpu(filters[i].element.tenant_id); 5902 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5903 } 5904 } 5905 5906 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5907 5908 return status; 5909 } 5910 5911 /** 5912 * i40e_aq_rem_cloud_filters 5913 * @hw: pointer to the hardware structure 5914 * @seid: VSI seid to remove cloud filters from 5915 * @filters: Buffer which contains the filters to be removed 5916 * @filter_count: number of filters contained in the buffer 5917 * 5918 * Remove the cloud filters for a given VSI. The contents of the 5919 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5920 * of the function. 5921 * 5922 **/ 5923 int 5924 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 5925 struct i40e_aqc_cloud_filters_element_data *filters, 5926 u8 filter_count) 5927 { 5928 struct i40e_aq_desc desc; 5929 struct i40e_aqc_add_remove_cloud_filters *cmd = 5930 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5931 u16 buff_len; 5932 int status; 5933 5934 i40e_fill_default_direct_cmd_desc(&desc, 5935 i40e_aqc_opc_remove_cloud_filters); 5936 5937 buff_len = filter_count * sizeof(*filters); 5938 desc.datalen = cpu_to_le16(buff_len); 5939 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5940 cmd->num_filters = filter_count; 5941 cmd->seid = cpu_to_le16(seid); 5942 5943 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5944 5945 return status; 5946 } 5947 5948 /** 5949 * i40e_aq_rem_cloud_filters_bb 5950 * @hw: pointer to the hardware structure 5951 * @seid: VSI seid to remove cloud filters from 5952 * @filters: Buffer which contains the filters in big buffer to be removed 5953 * @filter_count: number of filters contained in the buffer 5954 * 5955 * Remove the big buffer cloud filters for a given VSI. The contents of the 5956 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5957 * function. 
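 * As in i40e_aq_add_cloud_filters_bb(), Geneve entries have their tenant id
 * shifted up by one byte in place before the buffer is sent to firmware.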
5958 * 5959 **/ 5960 int 5961 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5962 struct i40e_aqc_cloud_filters_element_bb *filters, 5963 u8 filter_count) 5964 { 5965 struct i40e_aq_desc desc; 5966 struct i40e_aqc_add_remove_cloud_filters *cmd = 5967 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5968 u16 buff_len; 5969 int status; 5970 int i; 5971 5972 i40e_fill_default_direct_cmd_desc(&desc, 5973 i40e_aqc_opc_remove_cloud_filters); 5974 5975 buff_len = filter_count * sizeof(*filters); 5976 desc.datalen = cpu_to_le16(buff_len); 5977 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5978 cmd->num_filters = filter_count; 5979 cmd->seid = cpu_to_le16(seid); 5980 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5981 5982 for (i = 0; i < filter_count; i++) { 5983 u16 tnl_type; 5984 u32 ti; 5985 5986 tnl_type = le16_get_bits(filters[i].element.flags, 5987 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK); 5988 5989 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5990 * one more byte further than normally used for Tenant ID in 5991 * other tunnel types. 5992 */ 5993 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5994 ti = le32_to_cpu(filters[i].element.tenant_id); 5995 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5996 } 5997 } 5998 5999 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 6000 6001 return status; 6002 } 6003
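/*
 * Illustrative sketch only, not driver code: how the DDP helpers above are
 * typically combined. Every name below other than the i40e_* types and
 * functions is made up, and buffer management and error handling are reduced
 * to the minimum.
 */
#if 0	/* example, never compiled */
static int example_apply_ddp_package(struct i40e_hw *hw,
				     struct i40e_package_header *pkg_hdr,
				     u32 track_id)
{
	u8 pinfo[sizeof(struct i40e_profile_section_header) +
		 sizeof(struct i40e_profile_info)];
	struct i40e_generic_seg_header *seg;
	struct i40e_profile_segment *profile;
	int status;

	/* Locate the i40e-specific segment inside the package image */
	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!seg)
		return -EINVAL;
	profile = (struct i40e_profile_segment *)seg;

	/* Download every AQ/MMIO section of the profile to the device */
	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* Record the applied profile so it appears in the DDP profile list */
	return i40e_add_pinfo_to_list(hw, profile, pinfo, track_id);
}
#endif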