1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2021 Intel Corporation. */ 3 4 #include <linux/avf/virtchnl.h> 5 #include <linux/delay.h> 6 #include <linux/etherdevice.h> 7 #include <linux/pci.h> 8 #include "i40e_adminq_cmd.h" 9 #include "i40e_devids.h" 10 #include "i40e_prototype.h" 11 #include "i40e_register.h" 12 13 /** 14 * i40e_set_mac_type - Sets MAC type 15 * @hw: pointer to the HW structure 16 * 17 * This function sets the mac type of the adapter based on the 18 * vendor ID and device ID stored in the hw structure. 19 **/ 20 int i40e_set_mac_type(struct i40e_hw *hw) 21 { 22 int status = 0; 23 24 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 25 switch (hw->device_id) { 26 case I40E_DEV_ID_SFP_XL710: 27 case I40E_DEV_ID_QEMU: 28 case I40E_DEV_ID_KX_B: 29 case I40E_DEV_ID_KX_C: 30 case I40E_DEV_ID_QSFP_A: 31 case I40E_DEV_ID_QSFP_B: 32 case I40E_DEV_ID_QSFP_C: 33 case I40E_DEV_ID_1G_BASE_T_BC: 34 case I40E_DEV_ID_5G_BASE_T_BC: 35 case I40E_DEV_ID_10G_BASE_T: 36 case I40E_DEV_ID_10G_BASE_T4: 37 case I40E_DEV_ID_10G_BASE_T_BC: 38 case I40E_DEV_ID_10G_B: 39 case I40E_DEV_ID_10G_SFP: 40 case I40E_DEV_ID_20G_KR2: 41 case I40E_DEV_ID_20G_KR2_A: 42 case I40E_DEV_ID_25G_B: 43 case I40E_DEV_ID_25G_SFP28: 44 case I40E_DEV_ID_X710_N3000: 45 case I40E_DEV_ID_XXV710_N3000: 46 hw->mac.type = I40E_MAC_XL710; 47 break; 48 case I40E_DEV_ID_KX_X722: 49 case I40E_DEV_ID_QSFP_X722: 50 case I40E_DEV_ID_SFP_X722: 51 case I40E_DEV_ID_1G_BASE_T_X722: 52 case I40E_DEV_ID_10G_BASE_T_X722: 53 case I40E_DEV_ID_SFP_I_X722: 54 case I40E_DEV_ID_SFP_X722_A: 55 hw->mac.type = I40E_MAC_X722; 56 break; 57 default: 58 hw->mac.type = I40E_MAC_GENERIC; 59 break; 60 } 61 } else { 62 status = -ENODEV; 63 } 64 65 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", 66 hw->mac.type, status); 67 return status; 68 } 69 70 /** 71 * i40e_aq_str - convert AQ err code to a string 72 * @hw: pointer to the HW structure 73 * @aq_err: the AQ error code to convert 74 **/ 75 const char 
*i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) 76 { 77 switch (aq_err) { 78 case I40E_AQ_RC_OK: 79 return "OK"; 80 case I40E_AQ_RC_EPERM: 81 return "I40E_AQ_RC_EPERM"; 82 case I40E_AQ_RC_ENOENT: 83 return "I40E_AQ_RC_ENOENT"; 84 case I40E_AQ_RC_ESRCH: 85 return "I40E_AQ_RC_ESRCH"; 86 case I40E_AQ_RC_EINTR: 87 return "I40E_AQ_RC_EINTR"; 88 case I40E_AQ_RC_EIO: 89 return "I40E_AQ_RC_EIO"; 90 case I40E_AQ_RC_ENXIO: 91 return "I40E_AQ_RC_ENXIO"; 92 case I40E_AQ_RC_E2BIG: 93 return "I40E_AQ_RC_E2BIG"; 94 case I40E_AQ_RC_EAGAIN: 95 return "I40E_AQ_RC_EAGAIN"; 96 case I40E_AQ_RC_ENOMEM: 97 return "I40E_AQ_RC_ENOMEM"; 98 case I40E_AQ_RC_EACCES: 99 return "I40E_AQ_RC_EACCES"; 100 case I40E_AQ_RC_EFAULT: 101 return "I40E_AQ_RC_EFAULT"; 102 case I40E_AQ_RC_EBUSY: 103 return "I40E_AQ_RC_EBUSY"; 104 case I40E_AQ_RC_EEXIST: 105 return "I40E_AQ_RC_EEXIST"; 106 case I40E_AQ_RC_EINVAL: 107 return "I40E_AQ_RC_EINVAL"; 108 case I40E_AQ_RC_ENOTTY: 109 return "I40E_AQ_RC_ENOTTY"; 110 case I40E_AQ_RC_ENOSPC: 111 return "I40E_AQ_RC_ENOSPC"; 112 case I40E_AQ_RC_ENOSYS: 113 return "I40E_AQ_RC_ENOSYS"; 114 case I40E_AQ_RC_ERANGE: 115 return "I40E_AQ_RC_ERANGE"; 116 case I40E_AQ_RC_EFLUSHED: 117 return "I40E_AQ_RC_EFLUSHED"; 118 case I40E_AQ_RC_BAD_ADDR: 119 return "I40E_AQ_RC_BAD_ADDR"; 120 case I40E_AQ_RC_EMODE: 121 return "I40E_AQ_RC_EMODE"; 122 case I40E_AQ_RC_EFBIG: 123 return "I40E_AQ_RC_EFBIG"; 124 } 125 126 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); 127 return hw->err_str; 128 } 129 130 /** 131 * i40e_debug_aq 132 * @hw: debug mask related to admin queue 133 * @mask: debug mask 134 * @desc: pointer to admin queue descriptor 135 * @buffer: pointer to command buffer 136 * @buf_len: max length of buffer 137 * 138 * Dumps debug log about adminq command with descriptor contents. 
139 **/ 140 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, 141 void *buffer, u16 buf_len) 142 { 143 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; 144 u32 effective_mask = hw->debug_mask & mask; 145 char prefix[27]; 146 u16 len; 147 u8 *buf = (u8 *)buffer; 148 149 if (!effective_mask || !desc) 150 return; 151 152 len = le16_to_cpu(aq_desc->datalen); 153 154 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 155 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", 156 le16_to_cpu(aq_desc->opcode), 157 le16_to_cpu(aq_desc->flags), 158 le16_to_cpu(aq_desc->datalen), 159 le16_to_cpu(aq_desc->retval)); 160 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 161 "\tcookie (h,l) 0x%08X 0x%08X\n", 162 le32_to_cpu(aq_desc->cookie_high), 163 le32_to_cpu(aq_desc->cookie_low)); 164 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 165 "\tparam (0,1) 0x%08X 0x%08X\n", 166 le32_to_cpu(aq_desc->params.internal.param0), 167 le32_to_cpu(aq_desc->params.internal.param1)); 168 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 169 "\taddr (h,l) 0x%08X 0x%08X\n", 170 le32_to_cpu(aq_desc->params.external.addr_high), 171 le32_to_cpu(aq_desc->params.external.addr_low)); 172 173 if (buffer && buf_len != 0 && len != 0 && 174 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { 175 i40e_debug(hw, mask, "AQ CMD Buffer:\n"); 176 if (buf_len < len) 177 len = buf_len; 178 179 snprintf(prefix, sizeof(prefix), 180 "i40e %02x:%02x.%x: \t0x", 181 hw->bus.bus_id, 182 hw->bus.device, 183 hw->bus.func); 184 185 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 186 16, 1, buf, len, false); 187 } 188 } 189 190 /** 191 * i40e_check_asq_alive 192 * @hw: pointer to the hw struct 193 * 194 * Returns true if Queue is enabled else false. 
195 **/ 196 bool i40e_check_asq_alive(struct i40e_hw *hw) 197 { 198 /* Check if the queue is initialized */ 199 if (!hw->aq.asq.count) 200 return false; 201 202 return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK); 203 } 204 205 /** 206 * i40e_aq_queue_shutdown 207 * @hw: pointer to the hw struct 208 * @unloading: is the driver unloading itself 209 * 210 * Tell the Firmware that we're shutting down the AdminQ and whether 211 * or not the driver is unloading as well. 212 **/ 213 int i40e_aq_queue_shutdown(struct i40e_hw *hw, 214 bool unloading) 215 { 216 struct i40e_aq_desc desc; 217 struct i40e_aqc_queue_shutdown *cmd = 218 (struct i40e_aqc_queue_shutdown *)&desc.params.raw; 219 int status; 220 221 i40e_fill_default_direct_cmd_desc(&desc, 222 i40e_aqc_opc_queue_shutdown); 223 224 if (unloading) 225 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); 226 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 227 228 return status; 229 } 230 231 /** 232 * i40e_aq_get_set_rss_lut 233 * @hw: pointer to the hardware structure 234 * @vsi_id: vsi fw index 235 * @pf_lut: for PF table set true, for VSI table set false 236 * @lut: pointer to the lut buffer provided by the caller 237 * @lut_size: size of the lut buffer 238 * @set: set true to set the table, false to get the table 239 * 240 * Internal function to get or set RSS look up table 241 **/ 242 static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw, 243 u16 vsi_id, bool pf_lut, 244 u8 *lut, u16 lut_size, 245 bool set) 246 { 247 struct i40e_aq_desc desc; 248 struct i40e_aqc_get_set_rss_lut *cmd_resp = 249 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; 250 int status; 251 252 if (set) 253 i40e_fill_default_direct_cmd_desc(&desc, 254 i40e_aqc_opc_set_rss_lut); 255 else 256 i40e_fill_default_direct_cmd_desc(&desc, 257 i40e_aqc_opc_get_rss_lut); 258 259 /* Indirect command */ 260 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 261 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 262 263 
cmd_resp->vsi_id = 264 cpu_to_le16((u16)((vsi_id << 265 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & 266 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); 267 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); 268 269 if (pf_lut) 270 cmd_resp->flags |= cpu_to_le16((u16) 271 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << 272 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 273 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 274 else 275 cmd_resp->flags |= cpu_to_le16((u16) 276 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << 277 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 278 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 279 280 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); 281 282 return status; 283 } 284 285 /** 286 * i40e_aq_get_rss_lut 287 * @hw: pointer to the hardware structure 288 * @vsi_id: vsi fw index 289 * @pf_lut: for PF table set true, for VSI table set false 290 * @lut: pointer to the lut buffer provided by the caller 291 * @lut_size: size of the lut buffer 292 * 293 * get the RSS lookup table, PF or VSI type 294 **/ 295 int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, 296 bool pf_lut, u8 *lut, u16 lut_size) 297 { 298 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, 299 false); 300 } 301 302 /** 303 * i40e_aq_set_rss_lut 304 * @hw: pointer to the hardware structure 305 * @vsi_id: vsi fw index 306 * @pf_lut: for PF table set true, for VSI table set false 307 * @lut: pointer to the lut buffer provided by the caller 308 * @lut_size: size of the lut buffer 309 * 310 * set the RSS lookup table, PF or VSI type 311 **/ 312 int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, 313 bool pf_lut, u8 *lut, u16 lut_size) 314 { 315 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); 316 } 317 318 /** 319 * i40e_aq_get_set_rss_key 320 * @hw: pointer to the hw struct 321 * @vsi_id: vsi fw index 322 * @key: pointer to key info struct 323 * @set: set true to set the key, false to get the key 324 * 325 * get the RSS key per VSI 326 **/ 327 static 
int i40e_aq_get_set_rss_key(struct i40e_hw *hw, 328 u16 vsi_id, 329 struct i40e_aqc_get_set_rss_key_data *key, 330 bool set) 331 { 332 struct i40e_aq_desc desc; 333 struct i40e_aqc_get_set_rss_key *cmd_resp = 334 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; 335 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); 336 int status; 337 338 if (set) 339 i40e_fill_default_direct_cmd_desc(&desc, 340 i40e_aqc_opc_set_rss_key); 341 else 342 i40e_fill_default_direct_cmd_desc(&desc, 343 i40e_aqc_opc_get_rss_key); 344 345 /* Indirect command */ 346 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 347 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 348 349 cmd_resp->vsi_id = 350 cpu_to_le16((u16)((vsi_id << 351 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & 352 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); 353 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); 354 355 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); 356 357 return status; 358 } 359 360 /** 361 * i40e_aq_get_rss_key 362 * @hw: pointer to the hw struct 363 * @vsi_id: vsi fw index 364 * @key: pointer to key info struct 365 * 366 **/ 367 int i40e_aq_get_rss_key(struct i40e_hw *hw, 368 u16 vsi_id, 369 struct i40e_aqc_get_set_rss_key_data *key) 370 { 371 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); 372 } 373 374 /** 375 * i40e_aq_set_rss_key 376 * @hw: pointer to the hw struct 377 * @vsi_id: vsi fw index 378 * @key: pointer to key info struct 379 * 380 * set the RSS key per VSI 381 **/ 382 int i40e_aq_set_rss_key(struct i40e_hw *hw, 383 u16 vsi_id, 384 struct i40e_aqc_get_set_rss_key_data *key) 385 { 386 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); 387 } 388 389 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the 390 * hardware to a bit-field that can be used by SW to more easily determine the 391 * packet type. 392 * 393 * Macros are used to shorten the table lines and make this table human 394 * readable. 
395 * 396 * We store the PTYPE in the top byte of the bit field - this is just so that 397 * we can check that the table doesn't have a row missing, as the index into 398 * the table should be the PTYPE. 399 * 400 * Typical work flow: 401 * 402 * IF NOT i40e_ptype_lookup[ptype].known 403 * THEN 404 * Packet is unknown 405 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP 406 * Use the rest of the fields to look at the tunnels, inner protocols, etc 407 * ELSE 408 * Use the enum i40e_rx_l2_ptype to decode the packet type 409 * ENDIF 410 */ 411 412 /* macro to make the table lines short, use explicit indexing with [PTYPE] */ 413 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ 414 [PTYPE] = { \ 415 1, \ 416 I40E_RX_PTYPE_OUTER_##OUTER_IP, \ 417 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ 418 I40E_RX_PTYPE_##OUTER_FRAG, \ 419 I40E_RX_PTYPE_TUNNEL_##T, \ 420 I40E_RX_PTYPE_TUNNEL_END_##TE, \ 421 I40E_RX_PTYPE_##TEF, \ 422 I40E_RX_PTYPE_INNER_PROT_##I, \ 423 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } 424 425 #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } 426 427 /* shorter macros makes the table fit but are terse */ 428 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG 429 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG 430 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC 431 432 /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */ 433 struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { 434 /* L2 Packet types */ 435 I40E_PTT_UNUSED_ENTRY(0), 436 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 437 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), 438 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 439 I40E_PTT_UNUSED_ENTRY(4), 440 I40E_PTT_UNUSED_ENTRY(5), 441 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 442 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 443 I40E_PTT_UNUSED_ENTRY(8), 444 I40E_PTT_UNUSED_ENTRY(9), 
445 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 446 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), 447 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 448 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 449 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 450 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 451 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 452 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 453 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 454 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 455 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 456 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 457 458 /* Non Tunneled IPv4 */ 459 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), 460 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), 461 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), 462 I40E_PTT_UNUSED_ENTRY(25), 463 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), 464 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), 465 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), 466 467 /* IPv4 --> IPv4 */ 468 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 469 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), 470 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 471 I40E_PTT_UNUSED_ENTRY(32), 472 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 473 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 474 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 475 476 /* IPv4 --> IPv6 */ 477 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 478 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 479 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), 480 I40E_PTT_UNUSED_ENTRY(39), 481 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 482 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 483 
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 484 485 /* IPv4 --> GRE/NAT */ 486 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 487 488 /* IPv4 --> GRE/NAT --> IPv4 */ 489 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 490 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 491 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 492 I40E_PTT_UNUSED_ENTRY(47), 493 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 494 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 495 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 496 497 /* IPv4 --> GRE/NAT --> IPv6 */ 498 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 499 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 500 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 501 I40E_PTT_UNUSED_ENTRY(54), 502 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 503 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 504 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 505 506 /* IPv4 --> GRE/NAT --> MAC */ 507 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 508 509 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ 510 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 511 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 512 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 513 I40E_PTT_UNUSED_ENTRY(62), 514 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 515 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 516 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 517 518 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ 519 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 520 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 521 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 522 
I40E_PTT_UNUSED_ENTRY(69), 523 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 524 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 525 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 526 527 /* IPv4 --> GRE/NAT --> MAC/VLAN */ 528 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 529 530 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ 531 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 532 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 533 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 534 I40E_PTT_UNUSED_ENTRY(77), 535 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 536 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 537 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 538 539 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ 540 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 541 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 542 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 543 I40E_PTT_UNUSED_ENTRY(84), 544 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 545 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 546 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 547 548 /* Non Tunneled IPv6 */ 549 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), 550 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), 551 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), 552 I40E_PTT_UNUSED_ENTRY(91), 553 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), 554 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), 555 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), 556 557 /* IPv6 --> IPv4 */ 558 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 559 I40E_PTT(96, IP, IPV6, NOF, IP_IP, 
IPV4, NOF, NONE, PAY3), 560 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 561 I40E_PTT_UNUSED_ENTRY(98), 562 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 563 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 564 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 565 566 /* IPv6 --> IPv6 */ 567 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 568 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 569 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), 570 I40E_PTT_UNUSED_ENTRY(105), 571 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 572 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 573 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 574 575 /* IPv6 --> GRE/NAT */ 576 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 577 578 /* IPv6 --> GRE/NAT -> IPv4 */ 579 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 580 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 581 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 582 I40E_PTT_UNUSED_ENTRY(113), 583 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 584 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 585 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 586 587 /* IPv6 --> GRE/NAT -> IPv6 */ 588 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 589 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 590 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 591 I40E_PTT_UNUSED_ENTRY(120), 592 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 593 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 594 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 595 596 /* IPv6 --> GRE/NAT -> MAC */ 597 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 598 599 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ 600 I40E_PTT(125, IP, IPV6, NOF, 
IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 601 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 602 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 603 I40E_PTT_UNUSED_ENTRY(128), 604 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 605 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 606 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 607 608 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ 609 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 610 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 611 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 612 I40E_PTT_UNUSED_ENTRY(135), 613 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 614 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 615 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 616 617 /* IPv6 --> GRE/NAT -> MAC/VLAN */ 618 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 619 620 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ 621 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 622 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 623 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 624 I40E_PTT_UNUSED_ENTRY(143), 625 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 626 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 627 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 628 629 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ 630 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 631 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 632 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 633 I40E_PTT_UNUSED_ENTRY(150), 634 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 635 
I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 636 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 637 638 /* unused entries */ 639 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } 640 }; 641 642 /** 643 * i40e_init_shared_code - Initialize the shared code 644 * @hw: pointer to hardware structure 645 * 646 * This assigns the MAC type and PHY code and inits the NVM. 647 * Does not touch the hardware. This function must be called prior to any 648 * other function in the shared code. The i40e_hw structure should be 649 * memset to 0 prior to calling this function. The following fields in 650 * hw structure should be filled in prior to calling this function: 651 * hw_addr, back, device_id, vendor_id, subsystem_device_id, 652 * subsystem_vendor_id, and revision_id 653 **/ 654 int i40e_init_shared_code(struct i40e_hw *hw) 655 { 656 u32 port, ari, func_rid; 657 int status = 0; 658 659 i40e_set_mac_type(hw); 660 661 switch (hw->mac.type) { 662 case I40E_MAC_XL710: 663 case I40E_MAC_X722: 664 break; 665 default: 666 return -ENODEV; 667 } 668 669 hw->phy.get_link_info = true; 670 671 /* Determine port number and PF number*/ 672 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) 673 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 674 hw->port = (u8)port; 675 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> 676 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 677 func_rid = rd32(hw, I40E_PF_FUNC_RID); 678 if (ari) 679 hw->pf_id = (u8)(func_rid & 0xff); 680 else 681 hw->pf_id = (u8)(func_rid & 0x7); 682 683 status = i40e_init_nvm(hw); 684 return status; 685 } 686 687 /** 688 * i40e_aq_mac_address_read - Retrieve the MAC addresses 689 * @hw: pointer to the hw struct 690 * @flags: a return indicator of what addresses were added to the addr store 691 * @addrs: the requestor's mac addr store 692 * @cmd_details: pointer to command details structure or NULL 693 **/ 694 static int 695 i40e_aq_mac_address_read(struct 
i40e_hw *hw, 696 u16 *flags, 697 struct i40e_aqc_mac_address_read_data *addrs, 698 struct i40e_asq_cmd_details *cmd_details) 699 { 700 struct i40e_aq_desc desc; 701 struct i40e_aqc_mac_address_read *cmd_data = 702 (struct i40e_aqc_mac_address_read *)&desc.params.raw; 703 int status; 704 705 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); 706 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); 707 708 status = i40e_asq_send_command(hw, &desc, addrs, 709 sizeof(*addrs), cmd_details); 710 *flags = le16_to_cpu(cmd_data->command_flags); 711 712 return status; 713 } 714 715 /** 716 * i40e_aq_mac_address_write - Change the MAC addresses 717 * @hw: pointer to the hw struct 718 * @flags: indicates which MAC to be written 719 * @mac_addr: address to write 720 * @cmd_details: pointer to command details structure or NULL 721 **/ 722 int i40e_aq_mac_address_write(struct i40e_hw *hw, 723 u16 flags, u8 *mac_addr, 724 struct i40e_asq_cmd_details *cmd_details) 725 { 726 struct i40e_aq_desc desc; 727 struct i40e_aqc_mac_address_write *cmd_data = 728 (struct i40e_aqc_mac_address_write *)&desc.params.raw; 729 int status; 730 731 i40e_fill_default_direct_cmd_desc(&desc, 732 i40e_aqc_opc_mac_address_write); 733 cmd_data->command_flags = cpu_to_le16(flags); 734 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); 735 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | 736 ((u32)mac_addr[3] << 16) | 737 ((u32)mac_addr[4] << 8) | 738 mac_addr[5]); 739 740 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 741 742 return status; 743 } 744 745 /** 746 * i40e_get_mac_addr - get MAC address 747 * @hw: pointer to the HW structure 748 * @mac_addr: pointer to MAC address 749 * 750 * Reads the adapter's MAC address from register 751 **/ 752 int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 753 { 754 struct i40e_aqc_mac_address_read_data addrs; 755 u16 flags = 0; 756 int status; 757 758 status = i40e_aq_mac_address_read(hw, &flags, 
&addrs, NULL); 759 760 if (flags & I40E_AQC_LAN_ADDR_VALID) 761 ether_addr_copy(mac_addr, addrs.pf_lan_mac); 762 763 return status; 764 } 765 766 /** 767 * i40e_get_port_mac_addr - get Port MAC address 768 * @hw: pointer to the HW structure 769 * @mac_addr: pointer to Port MAC address 770 * 771 * Reads the adapter's Port MAC address 772 **/ 773 int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 774 { 775 struct i40e_aqc_mac_address_read_data addrs; 776 u16 flags = 0; 777 int status; 778 779 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); 780 if (status) 781 return status; 782 783 if (flags & I40E_AQC_PORT_ADDR_VALID) 784 ether_addr_copy(mac_addr, addrs.port_mac); 785 else 786 status = -EINVAL; 787 788 return status; 789 } 790 791 /** 792 * i40e_pre_tx_queue_cfg - pre tx queue configure 793 * @hw: pointer to the HW structure 794 * @queue: target PF queue index 795 * @enable: state change request 796 * 797 * Handles hw requirement to indicate intention to enable 798 * or disable target queue. 799 **/ 800 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) 801 { 802 u32 abs_queue_idx = hw->func_caps.base_queue + queue; 803 u32 reg_block = 0; 804 u32 reg_val; 805 806 if (abs_queue_idx >= 128) { 807 reg_block = abs_queue_idx / 128; 808 abs_queue_idx %= 128; 809 } 810 811 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); 812 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 813 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 814 815 if (enable) 816 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; 817 else 818 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 819 820 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); 821 } 822 823 /** 824 * i40e_get_pba_string - Reads part number string from EEPROM 825 * @hw: pointer to hardware structure 826 * 827 * Reads the part number string from the EEPROM and stores it 828 * into newly allocated buffer and saves resulting pointer 829 * to i40e_hw->pba_id field. 
830 **/ 831 void i40e_get_pba_string(struct i40e_hw *hw) 832 { 833 #define I40E_NVM_PBA_FLAGS_BLK_PRESENT 0xFAFA 834 u16 pba_word = 0; 835 u16 pba_size = 0; 836 u16 pba_ptr = 0; 837 int status; 838 char *ptr; 839 u16 i; 840 841 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); 842 if (status) { 843 hw_dbg(hw, "Failed to read PBA flags.\n"); 844 return; 845 } 846 if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) { 847 hw_dbg(hw, "PBA block is not present.\n"); 848 return; 849 } 850 851 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); 852 if (status) { 853 hw_dbg(hw, "Failed to read PBA Block pointer.\n"); 854 return; 855 } 856 857 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); 858 if (status) { 859 hw_dbg(hw, "Failed to read PBA Block size.\n"); 860 return; 861 } 862 863 /* Subtract one to get PBA word count (PBA Size word is included in 864 * total size) and advance pointer to first PBA word. 865 */ 866 pba_size--; 867 pba_ptr++; 868 if (!pba_size) { 869 hw_dbg(hw, "PBA ID is empty.\n"); 870 return; 871 } 872 873 ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL); 874 if (!ptr) 875 return; 876 hw->pba_id = ptr; 877 878 for (i = 0; i < pba_size; i++) { 879 status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word); 880 if (status) { 881 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); 882 devm_kfree(i40e_hw_to_dev(hw), hw->pba_id); 883 hw->pba_id = NULL; 884 return; 885 } 886 887 *ptr++ = (pba_word >> 8) & 0xFF; 888 *ptr++ = pba_word & 0xFF; 889 } 890 } 891 892 /** 893 * i40e_get_media_type - Gets media type 894 * @hw: pointer to the hardware structure 895 **/ 896 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) 897 { 898 enum i40e_media_type media; 899 900 switch (hw->phy.link_info.phy_type) { 901 case I40E_PHY_TYPE_10GBASE_SR: 902 case I40E_PHY_TYPE_10GBASE_LR: 903 case I40E_PHY_TYPE_1000BASE_SX: 904 case I40E_PHY_TYPE_1000BASE_LX: 905 case I40E_PHY_TYPE_40GBASE_SR4: 906 case 
I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		/* attached-media cannot be inferred for these PHY types */
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 *
 * Returns 0 once GLGEN_RSTAT reports the device out of reset,
 * -EIO if the DEVSTATE bits are still set after @retry_limit polls
 * (100 ms apart).
 **/
static int i40e_poll_globr(struct i40e_hw *hw,
			   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return -EIO;
}

/* PFGEN_CTRL.PFSWR poll counts (1-2 ms per iteration below) */
#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
int i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take upto 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return -EIO;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timedout\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return -EIO;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			/* a new global reset can race with the PF reset;
			 * if one starts, stop polling PFSWR and wait for
			 * the global reset instead
			 */
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return -EIO;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return -EIO;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;	/* "end of list" sentinel for FIRSTQ_INDX */

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	/* queue range owned by this PF: [base_queue, j] when VALID is set */
	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	/* VF range owned by this PF: [i, j] when VALID is set */
	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		/* GLLAN_TXPRE_QDIS is banked; 128 queues per register block */
		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	/* only tell firmware if the AdminQ is actually up */
	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	/* X710-T*L exposes LEDs regardless of the per-function led caps */
	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

/* mode bit asking firmware (rather than SW) to drive the LED */
#define I40E_FW_LED BIT(4)
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

/* first GPIO index that maps to an LED pin (LED0..LED7 = GPIO 22..29) */
#define I40E_LED0 22

#define I40E_PIN_FUNC_SDP 0x0
#define I40E_PIN_FUNC_LED 0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		/* first LED owned by this port wins */
		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			/* I40E_FW_LED selects firmware (SDP) control of the
			 * pin; otherwise drive it as a plain LED
			 */
			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
int
i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			     bool qualified_modules, bool report_init,
			     struct i40e_aq_get_phy_abilities_resp *abilities,
			     struct i40e_asq_cmd_details *cmd_details)
{
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
	struct i40e_aq_desc desc;
	int status;

	if (!abilities)
		return -EINVAL;

	/* retry while firmware reports EAGAIN, up to max_delay attempts */
	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = -EIO;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = -EIO;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		/* newer XL710 firmware reports phy_types via get_link_info;
		 * otherwise take them straight from the abilities response
		 */
		if (hw->mac.type == I40E_MAC_XL710 &&
		    i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port.One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
int i40e_aq_set_phy_config(struct i40e_hw *hw,
			   struct i40e_aq_set_phy_config *config,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	int status;

	if (!config)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	/* the config struct is laid out to match the descriptor params */
	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/* Apply hw->fc.requested_mode to the PHY pause abilities; no-op (returns 0)
 * when the requested pause bits already match @abilities.
 */
static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
		bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	/* mirror the mode change in the receive control register */
	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				bool enable_link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
1584 **/ 1585 int i40e_aq_get_link_info(struct i40e_hw *hw, 1586 bool enable_lse, struct i40e_link_status *link, 1587 struct i40e_asq_cmd_details *cmd_details) 1588 { 1589 struct i40e_aq_desc desc; 1590 struct i40e_aqc_get_link_status *resp = 1591 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1592 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1593 bool tx_pause, rx_pause; 1594 u16 command_flags; 1595 int status; 1596 1597 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1598 1599 if (enable_lse) 1600 command_flags = I40E_AQ_LSE_ENABLE; 1601 else 1602 command_flags = I40E_AQ_LSE_DISABLE; 1603 resp->command_flags = cpu_to_le16(command_flags); 1604 1605 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1606 1607 if (status) 1608 goto aq_get_link_info_exit; 1609 1610 /* save off old link status information */ 1611 hw->phy.link_info_old = *hw_link_info; 1612 1613 /* update link status */ 1614 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1615 hw->phy.media_type = i40e_get_media_type(hw); 1616 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1617 hw_link_info->link_info = resp->link_info; 1618 hw_link_info->an_info = resp->an_info; 1619 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1620 I40E_AQ_CONFIG_FEC_RS_ENA); 1621 hw_link_info->ext_info = resp->ext_info; 1622 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1623 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1624 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1625 1626 /* update fc info */ 1627 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1628 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1629 if (tx_pause & rx_pause) 1630 hw->fc.current_mode = I40E_FC_FULL; 1631 else if (tx_pause) 1632 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1633 else if (rx_pause) 1634 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1635 else 1636 hw->fc.current_mode 
= I40E_FC_NONE; 1637 1638 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1639 hw_link_info->crc_enable = true; 1640 else 1641 hw_link_info->crc_enable = false; 1642 1643 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1644 hw_link_info->lse_enable = true; 1645 else 1646 hw_link_info->lse_enable = false; 1647 1648 if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) && 1649 hw_link_info->phy_type == 0xE) 1650 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1651 1652 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) && 1653 hw->mac.type != I40E_MAC_X722) { 1654 __le32 tmp; 1655 1656 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1657 hw->phy.phy_types = le32_to_cpu(tmp); 1658 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1659 } 1660 1661 /* save link status information */ 1662 if (link) 1663 *link = *hw_link_info; 1664 1665 /* flag cleared so helper functions don't call AQ again */ 1666 hw->phy.get_link_info = false; 1667 1668 aq_get_link_info_exit: 1669 return status; 1670 } 1671 1672 /** 1673 * i40e_aq_set_phy_int_mask 1674 * @hw: pointer to the hw struct 1675 * @mask: interrupt mask to be set 1676 * @cmd_details: pointer to command details structure or NULL 1677 * 1678 * Set link interrupt mask. 
 **/
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
			     u16 mask,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_lb_mode *cmd =
		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
	if (ena_lpbk) {
		/* old NVM images only understand the legacy loopback mode */
		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
		else
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Pass PHY debug command flags (e.g. an external PHY reset request)
 * through to firmware.
 **/
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware.
 **/
int i40e_aq_add_vsi(struct i40e_hw *hw,
		    struct i40e_vsi_context *vsi_ctx,
		    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	/* request and response views of the same descriptor params */
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_vsi);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
	cmd->connection_type = vsi_ctx->connection_type;
	cmd->vf_id = vsi_ctx->vf_num;
	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
					      sizeof(vsi_ctx->info),
					      cmd_details, true);

	if (status)
		goto aq_add_vsi_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_add_vsi_exit:
	return status;
}

/**
 * i40e_aq_set_default_vsi
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_default_vsi(struct i40e_hw *hw,
			    u16 seid,
			    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_clear_default_vsi
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
			      u16 seid,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	/* clearing: DEFAULT bit valid but not set */
	cmd->promiscuous_flags = cpu_to_le16(0);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_unicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set unicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 * @rx_only_promisc: flag to decide if egress
traffic gets mirrored in promisc
 **/
int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
					u16 seid, bool set,
					struct i40e_asq_cmd_details *cmd_details,
					bool rx_only_promisc)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		/* RX_ONLY exists only from AQ API 1.5 onwards */
		if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_multicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set multicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
					  u16 seid, bool set,
					  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable,
				       u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable,
				       u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		/* RX_ONLY exists only from AQ API 1.5 onwards */
		if (i40e_is_aq_api_ver_ge(hw, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_bc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set broadcast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable, u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_broadcast
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set_filter: true to set filter, false to clear filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
 **/
int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
			      u16 seid, bool set_filter,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set_filter)
		cmd->promiscuous_flags
			|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			&= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
				 u16 seid, bool enable,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);
	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_vsi_params - get VSI configuration info
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
			   struct i40e_vsi_context *vsi_ctx,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_vsi_parameters);

	/* the uplink_seid field carries the VSI seid for this opcode */
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);

	/* NOTE(review): cmd_details parameter is accepted but NULL is
	 * passed to the send - confirm whether that is intentional
	 */
	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), NULL);

	if (status)
		goto aq_get_vsi_params_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_get_vsi_params_exit:
	return status;
}

/**
 * i40e_aq_update_vsi_params
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Update a VSI context.
2152 **/ 2153 int i40e_aq_update_vsi_params(struct i40e_hw *hw, 2154 struct i40e_vsi_context *vsi_ctx, 2155 struct i40e_asq_cmd_details *cmd_details) 2156 { 2157 struct i40e_aq_desc desc; 2158 struct i40e_aqc_add_get_update_vsi *cmd = 2159 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2160 struct i40e_aqc_add_get_update_vsi_completion *resp = 2161 (struct i40e_aqc_add_get_update_vsi_completion *) 2162 &desc.params.raw; 2163 int status; 2164 2165 i40e_fill_default_direct_cmd_desc(&desc, 2166 i40e_aqc_opc_update_vsi_parameters); 2167 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2168 2169 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2170 2171 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 2172 sizeof(vsi_ctx->info), 2173 cmd_details, true); 2174 2175 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2176 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2177 2178 return status; 2179 } 2180 2181 /** 2182 * i40e_aq_get_switch_config 2183 * @hw: pointer to the hardware structure 2184 * @buf: pointer to the result buffer 2185 * @buf_size: length of input buffer 2186 * @start_seid: seid to start for the report, 0 == beginning 2187 * @cmd_details: pointer to command details structure or NULL 2188 * 2189 * Fill the buf with switch configuration returned from AdminQ command 2190 **/ 2191 int i40e_aq_get_switch_config(struct i40e_hw *hw, 2192 struct i40e_aqc_get_switch_config_resp *buf, 2193 u16 buf_size, u16 *start_seid, 2194 struct i40e_asq_cmd_details *cmd_details) 2195 { 2196 struct i40e_aq_desc desc; 2197 struct i40e_aqc_switch_seid *scfg = 2198 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2199 int status; 2200 2201 i40e_fill_default_direct_cmd_desc(&desc, 2202 i40e_aqc_opc_get_switch_config); 2203 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2204 if (buf_size > I40E_AQ_LARGE_BUF) 2205 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2206 scfg->seid = cpu_to_le16(*start_seid); 2207 2208 status = 
i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
int i40e_aq_set_switch_config(struct i40e_hw *hw,
			      u16 flags,
			      u16 valid_flags, u8 mode,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);
	scfg->mode = mode;
	/* 802.1ad-capable HW also carries the current switch/first/second
	 * tag protocol IDs in this command.
	 */
	if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
		scfg->first_tag = cpu_to_le16(hw->first_tag);
		scfg->second_tag = cpu_to_le16(hw->second_tag);
	}
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_firmware_version
 * @hw: pointer to the hw struct
 * @fw_major_version: firmware major version
 * @fw_minor_version: firmware minor version
 * @fw_build: firmware build number
 * @api_major_version: major queue version
 * @api_minor_version: minor queue version
 * @cmd_details: pointer to command details structure or NULL
 *
 * Get the firmware version from the admin queue commands
 **/
int i40e_aq_get_firmware_version(struct i40e_hw *hw,
				 u16 *fw_major_version, u16 *fw_minor_version,
				 u32 *fw_build,
				 u16 *api_major_version, u16 *api_minor_version,
				 struct
i40e_asq_cmd_details *cmd_details) 2267 { 2268 struct i40e_aq_desc desc; 2269 struct i40e_aqc_get_version *resp = 2270 (struct i40e_aqc_get_version *)&desc.params.raw; 2271 int status; 2272 2273 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2274 2275 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2276 2277 if (!status) { 2278 if (fw_major_version) 2279 *fw_major_version = le16_to_cpu(resp->fw_major); 2280 if (fw_minor_version) 2281 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2282 if (fw_build) 2283 *fw_build = le32_to_cpu(resp->fw_build); 2284 if (api_major_version) 2285 *api_major_version = le16_to_cpu(resp->api_major); 2286 if (api_minor_version) 2287 *api_minor_version = le16_to_cpu(resp->api_minor); 2288 } 2289 2290 return status; 2291 } 2292 2293 /** 2294 * i40e_aq_send_driver_version 2295 * @hw: pointer to the hw struct 2296 * @dv: driver's major, minor version 2297 * @cmd_details: pointer to command details structure or NULL 2298 * 2299 * Send the driver version to the firmware 2300 **/ 2301 int i40e_aq_send_driver_version(struct i40e_hw *hw, 2302 struct i40e_driver_version *dv, 2303 struct i40e_asq_cmd_details *cmd_details) 2304 { 2305 struct i40e_aq_desc desc; 2306 struct i40e_aqc_driver_version *cmd = 2307 (struct i40e_aqc_driver_version *)&desc.params.raw; 2308 int status; 2309 u16 len; 2310 2311 if (dv == NULL) 2312 return -EINVAL; 2313 2314 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2315 2316 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2317 cmd->driver_major_ver = dv->major_version; 2318 cmd->driver_minor_ver = dv->minor_version; 2319 cmd->driver_build_ver = dv->build_version; 2320 cmd->driver_subbuild_ver = dv->subbuild_version; 2321 2322 len = 0; 2323 while (len < sizeof(dv->driver_string) && 2324 (dv->driver_string[len] < 0x80) && 2325 dv->driver_string[len]) 2326 len++; 2327 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2328 len, cmd_details); 
2329 2330 return status; 2331 } 2332 2333 /** 2334 * i40e_get_link_status - get status of the HW network link 2335 * @hw: pointer to the hw struct 2336 * @link_up: pointer to bool (true/false = linkup/linkdown) 2337 * 2338 * Variable link_up true if link is up, false if link is down. 2339 * The variable link_up is invalid if returned value of status != 0 2340 * 2341 * Side effect: LinkStatusEvent reporting becomes enabled 2342 **/ 2343 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2344 { 2345 int status = 0; 2346 2347 if (hw->phy.get_link_info) { 2348 status = i40e_update_link_info(hw); 2349 2350 if (status) 2351 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2352 status); 2353 } 2354 2355 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2356 2357 return status; 2358 } 2359 2360 /** 2361 * i40e_update_link_info - update status of the HW network link 2362 * @hw: pointer to the hw struct 2363 **/ 2364 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw) 2365 { 2366 struct i40e_aq_get_phy_abilities_resp abilities; 2367 int status = 0; 2368 2369 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2370 if (status) 2371 return status; 2372 2373 /* extra checking needed to ensure link info to user is timely */ 2374 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2375 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2376 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2377 status = i40e_aq_get_phy_capabilities(hw, false, false, 2378 &abilities, NULL); 2379 if (status) 2380 return status; 2381 2382 if (abilities.fec_cfg_curr_mod_ext_info & 2383 I40E_AQ_ENABLE_FEC_AUTO) 2384 hw->phy.link_info.req_fec_info = 2385 (I40E_AQ_REQUEST_FEC_KR | 2386 I40E_AQ_REQUEST_FEC_RS); 2387 else 2388 hw->phy.link_info.req_fec_info = 2389 abilities.fec_cfg_curr_mod_ext_info & 2390 (I40E_AQ_REQUEST_FEC_KR | 2391 I40E_AQ_REQUEST_FEC_RS); 2392 2393 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2394 
sizeof(hw->phy.link_info.module_type)); 2395 } 2396 2397 return status; 2398 } 2399 2400 /** 2401 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2402 * @hw: pointer to the hw struct 2403 * @uplink_seid: the MAC or other gizmo SEID 2404 * @downlink_seid: the VSI SEID 2405 * @enabled_tc: bitmap of TCs to be enabled 2406 * @default_port: true for default port VSI, false for control port 2407 * @veb_seid: pointer to where to put the resulting VEB SEID 2408 * @enable_stats: true to turn on VEB stats 2409 * @cmd_details: pointer to command details structure or NULL 2410 * 2411 * This asks the FW to add a VEB between the uplink and downlink 2412 * elements. If the uplink SEID is 0, this will be a floating VEB. 2413 **/ 2414 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2415 u16 downlink_seid, u8 enabled_tc, 2416 bool default_port, u16 *veb_seid, 2417 bool enable_stats, 2418 struct i40e_asq_cmd_details *cmd_details) 2419 { 2420 struct i40e_aq_desc desc; 2421 struct i40e_aqc_add_veb *cmd = 2422 (struct i40e_aqc_add_veb *)&desc.params.raw; 2423 struct i40e_aqc_add_veb_completion *resp = 2424 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2425 u16 veb_flags = 0; 2426 int status; 2427 2428 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2429 if (!!uplink_seid != !!downlink_seid) 2430 return -EINVAL; 2431 2432 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2433 2434 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2435 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2436 cmd->enable_tcs = enabled_tc; 2437 if (!uplink_seid) 2438 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2439 if (default_port) 2440 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2441 else 2442 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2443 2444 /* reverse logic here: set the bitflag to disable the stats */ 2445 if (!enable_stats) 2446 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2447 2448 cmd->veb_flags = cpu_to_le16(veb_flags); 2449 
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (!status && veb_seid)
		*veb_seid = le16_to_cpu(resp->veb_seid);

	return status;
}

/**
 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
 * @hw: pointer to the hw struct
 * @veb_seid: the SEID of the VEB to query
 * @switch_id: the uplink switch id
 * @floating: set to true if the VEB is floating
 * @statistic_index: index of the stats counter block for this VEB
 * @vebs_used: number of VEB's used by function
 * @vebs_free: total VEB's not reserved by any function
 * @cmd_details: pointer to command details structure or NULL
 *
 * This retrieves the parameters for a particular VEB, specified by
 * veb_seid, and returns them to the caller.
 **/
int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
			       u16 veb_seid, u16 *switch_id,
			       bool *floating, u16 *statistic_index,
			       u16 *vebs_used, u16 *vebs_free,
			       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
		(struct i40e_aqc_get_veb_parameters_completion *)
		&desc.params.raw;
	int status;

	if (veb_seid == 0)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_veb_parameters);
	cmd_resp->seid = cpu_to_le16(veb_seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (status)
		goto get_veb_exit;

	/* each output pointer is optional; NULL means "don't care" */
	if (switch_id)
		*switch_id = le16_to_cpu(cmd_resp->switch_id);
	if (statistic_index)
		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
	if (vebs_used)
		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
	if (vebs_free)
		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
	if (floating) {
		u16 flags = le16_to_cpu(cmd_resp->veb_flags);

		if (flags & I40E_AQC_ADD_VEB_FLOATING)
			*floating = true;
else 2509 *floating = false; 2510 } 2511 2512 get_veb_exit: 2513 return status; 2514 } 2515 2516 /** 2517 * i40e_prepare_add_macvlan 2518 * @mv_list: list of macvlans to be added 2519 * @desc: pointer to AQ descriptor structure 2520 * @count: length of the list 2521 * @seid: VSI for the mac address 2522 * 2523 * Internal helper function that prepares the add macvlan request 2524 * and returns the buffer size. 2525 **/ 2526 static u16 2527 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2528 struct i40e_aq_desc *desc, u16 count, u16 seid) 2529 { 2530 struct i40e_aqc_macvlan *cmd = 2531 (struct i40e_aqc_macvlan *)&desc->params.raw; 2532 u16 buf_size; 2533 int i; 2534 2535 buf_size = count * sizeof(*mv_list); 2536 2537 /* prep the rest of the request */ 2538 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2539 cmd->num_addresses = cpu_to_le16(count); 2540 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2541 cmd->seid[1] = 0; 2542 cmd->seid[2] = 0; 2543 2544 for (i = 0; i < count; i++) 2545 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2546 mv_list[i].flags |= 2547 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2548 2549 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2550 if (buf_size > I40E_AQ_LARGE_BUF) 2551 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2552 2553 return buf_size; 2554 } 2555 2556 /** 2557 * i40e_aq_add_macvlan 2558 * @hw: pointer to the hw struct 2559 * @seid: VSI for the mac address 2560 * @mv_list: list of macvlans to be added 2561 * @count: length of the list 2562 * @cmd_details: pointer to command details structure or NULL 2563 * 2564 * Add MAC/VLAN addresses to the HW filtering 2565 **/ 2566 int 2567 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2568 struct i40e_aqc_add_macvlan_element_data *mv_list, 2569 u16 count, struct i40e_asq_cmd_details *cmd_details) 2570 { 2571 struct i40e_aq_desc desc; 2572 u16 buf_size; 2573 2574 if (count == 0 || 
!mv_list || !hw) 2575 return -EINVAL; 2576 2577 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2578 2579 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2580 cmd_details, true); 2581 } 2582 2583 /** 2584 * i40e_aq_add_macvlan_v2 2585 * @hw: pointer to the hw struct 2586 * @seid: VSI for the mac address 2587 * @mv_list: list of macvlans to be added 2588 * @count: length of the list 2589 * @cmd_details: pointer to command details structure or NULL 2590 * @aq_status: pointer to Admin Queue status return value 2591 * 2592 * Add MAC/VLAN addresses to the HW filtering. 2593 * The _v2 version returns the last Admin Queue status in aq_status 2594 * to avoid race conditions in access to hw->aq.asq_last_status. 2595 * It also calls _v2 versions of asq_send_command functions to 2596 * get the aq_status on the stack. 2597 **/ 2598 int 2599 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2600 struct i40e_aqc_add_macvlan_element_data *mv_list, 2601 u16 count, struct i40e_asq_cmd_details *cmd_details, 2602 enum i40e_admin_queue_err *aq_status) 2603 { 2604 struct i40e_aq_desc desc; 2605 u16 buf_size; 2606 2607 if (count == 0 || !mv_list || !hw) 2608 return -EINVAL; 2609 2610 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2611 2612 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2613 cmd_details, true, aq_status); 2614 } 2615 2616 /** 2617 * i40e_aq_remove_macvlan 2618 * @hw: pointer to the hw struct 2619 * @seid: VSI for the mac address 2620 * @mv_list: list of macvlans to be removed 2621 * @count: length of the list 2622 * @cmd_details: pointer to command details structure or NULL 2623 * 2624 * Remove MAC/VLAN addresses from the HW filtering 2625 **/ 2626 int 2627 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2628 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2629 u16 count, struct i40e_asq_cmd_details *cmd_details) 2630 { 2631 struct i40e_aq_desc desc; 2632 struct i40e_aqc_macvlan 
*cmd = 2633 (struct i40e_aqc_macvlan *)&desc.params.raw; 2634 u16 buf_size; 2635 int status; 2636 2637 if (count == 0 || !mv_list || !hw) 2638 return -EINVAL; 2639 2640 buf_size = count * sizeof(*mv_list); 2641 2642 /* prep the rest of the request */ 2643 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2644 cmd->num_addresses = cpu_to_le16(count); 2645 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2646 cmd->seid[1] = 0; 2647 cmd->seid[2] = 0; 2648 2649 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2650 if (buf_size > I40E_AQ_LARGE_BUF) 2651 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2652 2653 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2654 cmd_details, true); 2655 2656 return status; 2657 } 2658 2659 /** 2660 * i40e_aq_remove_macvlan_v2 2661 * @hw: pointer to the hw struct 2662 * @seid: VSI for the mac address 2663 * @mv_list: list of macvlans to be removed 2664 * @count: length of the list 2665 * @cmd_details: pointer to command details structure or NULL 2666 * @aq_status: pointer to Admin Queue status return value 2667 * 2668 * Remove MAC/VLAN addresses from the HW filtering. 2669 * The _v2 version returns the last Admin Queue status in aq_status 2670 * to avoid race conditions in access to hw->aq.asq_last_status. 2671 * It also calls _v2 versions of asq_send_command functions to 2672 * get the aq_status on the stack. 
2673 **/ 2674 int 2675 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2676 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2677 u16 count, struct i40e_asq_cmd_details *cmd_details, 2678 enum i40e_admin_queue_err *aq_status) 2679 { 2680 struct i40e_aqc_macvlan *cmd; 2681 struct i40e_aq_desc desc; 2682 u16 buf_size; 2683 2684 if (count == 0 || !mv_list || !hw) 2685 return -EINVAL; 2686 2687 buf_size = count * sizeof(*mv_list); 2688 2689 /* prep the rest of the request */ 2690 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2691 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2692 cmd->num_addresses = cpu_to_le16(count); 2693 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2694 cmd->seid[1] = 0; 2695 cmd->seid[2] = 0; 2696 2697 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2698 if (buf_size > I40E_AQ_LARGE_BUF) 2699 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2700 2701 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2702 cmd_details, true, aq_status); 2703 } 2704 2705 /** 2706 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2707 * @hw: pointer to the hw struct 2708 * @opcode: AQ opcode for add or delete mirror rule 2709 * @sw_seid: Switch SEID (to which rule refers) 2710 * @rule_type: Rule Type (ingress/egress/VLAN) 2711 * @id: Destination VSI SEID or Rule ID 2712 * @count: length of the list 2713 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2714 * @cmd_details: pointer to command details structure or NULL 2715 * @rule_id: Rule ID returned from FW 2716 * @rules_used: Number of rules used in internal switch 2717 * @rules_free: Number of rules free in internal switch 2718 * 2719 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2720 * VEBs/VEPA elements only 2721 **/ 2722 static int i40e_mirrorrule_op(struct i40e_hw *hw, 2723 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2724 u16 count, __le16 *mr_list, 2725 struct i40e_asq_cmd_details *cmd_details, 2726 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2727 { 2728 struct i40e_aq_desc desc; 2729 struct i40e_aqc_add_delete_mirror_rule *cmd = 2730 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2731 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2732 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2733 u16 buf_size; 2734 int status; 2735 2736 buf_size = count * sizeof(*mr_list); 2737 2738 /* prep the rest of the request */ 2739 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2740 cmd->seid = cpu_to_le16(sw_seid); 2741 cmd->rule_type = cpu_to_le16(rule_type & 2742 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2743 cmd->num_entries = cpu_to_le16(count); 2744 /* Dest VSI for add, rule_id for delete */ 2745 cmd->destination = cpu_to_le16(id); 2746 if (mr_list) { 2747 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2748 I40E_AQ_FLAG_RD)); 2749 if (buf_size > I40E_AQ_LARGE_BUF) 2750 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2751 } 2752 2753 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2754 cmd_details); 2755 if (!status || 2756 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2757 if (rule_id) 2758 *rule_id = le16_to_cpu(resp->rule_id); 2759 if (rules_used) 2760 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2761 if (rules_free) 2762 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2763 } 2764 return status; 2765 } 2766 2767 /** 2768 * i40e_aq_add_mirrorrule - add a mirror rule 2769 * @hw: pointer to the hw struct 2770 * @sw_seid: Switch SEID (to which rule refers) 2771 * @rule_type: Rule Type (ingress/egress/VLAN) 2772 * @dest_vsi: SEID of VSI to which packets will be mirrored 2773 * @count: length of the list 2774 * @mr_list: list of mirrored VSI 
SEIDs or VLAN IDs 2775 * @cmd_details: pointer to command details structure or NULL 2776 * @rule_id: Rule ID returned from FW 2777 * @rules_used: Number of rules used in internal switch 2778 * @rules_free: Number of rules free in internal switch 2779 * 2780 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2781 **/ 2782 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2783 u16 rule_type, u16 dest_vsi, u16 count, 2784 __le16 *mr_list, 2785 struct i40e_asq_cmd_details *cmd_details, 2786 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2787 { 2788 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2789 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2790 if (count == 0 || !mr_list) 2791 return -EINVAL; 2792 } 2793 2794 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2795 rule_type, dest_vsi, count, mr_list, 2796 cmd_details, rule_id, rules_used, rules_free); 2797 } 2798 2799 /** 2800 * i40e_aq_delete_mirrorrule - delete a mirror rule 2801 * @hw: pointer to the hw struct 2802 * @sw_seid: Switch SEID (to which rule refers) 2803 * @rule_type: Rule Type (ingress/egress/VLAN) 2804 * @count: length of the list 2805 * @rule_id: Rule ID that is returned in the receive desc as part of 2806 * add_mirrorrule. 2807 * @mr_list: list of mirrored VLAN IDs to be removed 2808 * @cmd_details: pointer to command details structure or NULL 2809 * @rules_used: Number of rules used in internal switch 2810 * @rules_free: Number of rules free in internal switch 2811 * 2812 * Delete a mirror rule. 
Mirror rules are supported for VEBs/VEPA elements only 2813 **/ 2814 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2815 u16 rule_type, u16 rule_id, u16 count, 2816 __le16 *mr_list, 2817 struct i40e_asq_cmd_details *cmd_details, 2818 u16 *rules_used, u16 *rules_free) 2819 { 2820 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2821 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2822 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2823 * mirroring. For other rule_type, count and rule_type should 2824 * not matter. 2825 */ 2826 if (count == 0 || !mr_list) 2827 return -EINVAL; 2828 } 2829 2830 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2831 rule_type, rule_id, count, mr_list, 2832 cmd_details, NULL, rules_used, rules_free); 2833 } 2834 2835 /** 2836 * i40e_aq_send_msg_to_vf 2837 * @hw: pointer to the hardware structure 2838 * @vfid: VF id to send msg 2839 * @v_opcode: opcodes for VF-PF communication 2840 * @v_retval: return error code 2841 * @msg: pointer to the msg buffer 2842 * @msglen: msg length 2843 * @cmd_details: pointer to command details 2844 * 2845 * send msg to vf 2846 **/ 2847 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2848 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2849 struct i40e_asq_cmd_details *cmd_details) 2850 { 2851 struct i40e_aq_desc desc; 2852 struct i40e_aqc_pf_vf_message *cmd = 2853 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2854 int status; 2855 2856 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2857 cmd->id = cpu_to_le32(vfid); 2858 desc.cookie_high = cpu_to_le32(v_opcode); 2859 desc.cookie_low = cpu_to_le32(v_retval); 2860 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2861 if (msglen) { 2862 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2863 I40E_AQ_FLAG_RD)); 2864 if (msglen > I40E_AQ_LARGE_BUF) 2865 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2866 desc.datalen = cpu_to_le16(msglen); 2867 } 
status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);

	return status;
}

/**
 * i40e_aq_debug_read_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: where to store the 64-bit register value that was read
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the register using the admin queue commands
 **/
int i40e_aq_debug_read_register(struct i40e_hw *hw,
				u32 reg_addr, u64 *reg_val,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_debug_reg_read_write *cmd_resp =
		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
	int status;

	if (reg_val == NULL)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (!status) {
		/* FW returns the value split across two little-endian
		 * 32-bit words; reassemble into the caller's u64.
		 */
		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
			   (u64)le32_to_cpu(cmd_resp->value_low);
	}

	return status;
}

/**
 * i40e_aq_debug_write_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: 64-bit register value to write
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write to a register using the admin queue commands
 **/
int i40e_aq_debug_write_register(struct i40e_hw *hw,
				 u32 reg_addr, u64 reg_val,
				 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_debug_reg_read_write *cmd =
		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);

	cmd->address = cpu_to_le32(reg_addr);
	/* split the 64-bit value into the two LE 32-bit descriptor words */
	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));

status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2933 2934 return status; 2935 } 2936 2937 /** 2938 * i40e_aq_request_resource 2939 * @hw: pointer to the hw struct 2940 * @resource: resource id 2941 * @access: access type 2942 * @sdp_number: resource number 2943 * @timeout: the maximum time in ms that the driver may hold the resource 2944 * @cmd_details: pointer to command details structure or NULL 2945 * 2946 * requests common resource using the admin queue commands 2947 **/ 2948 int i40e_aq_request_resource(struct i40e_hw *hw, 2949 enum i40e_aq_resources_ids resource, 2950 enum i40e_aq_resource_access_type access, 2951 u8 sdp_number, u64 *timeout, 2952 struct i40e_asq_cmd_details *cmd_details) 2953 { 2954 struct i40e_aq_desc desc; 2955 struct i40e_aqc_request_resource *cmd_resp = 2956 (struct i40e_aqc_request_resource *)&desc.params.raw; 2957 int status; 2958 2959 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 2960 2961 cmd_resp->resource_id = cpu_to_le16(resource); 2962 cmd_resp->access_type = cpu_to_le16(access); 2963 cmd_resp->resource_number = cpu_to_le32(sdp_number); 2964 2965 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2966 /* The completion specifies the maximum time in ms that the driver 2967 * may hold the resource in the Timeout field. 2968 * If the resource is held by someone else, the command completes with 2969 * busy return value and the timeout field indicates the maximum time 2970 * the current owner of the resource has to free it. 
2971 */ 2972 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 2973 *timeout = le32_to_cpu(cmd_resp->timeout); 2974 2975 return status; 2976 } 2977 2978 /** 2979 * i40e_aq_release_resource 2980 * @hw: pointer to the hw struct 2981 * @resource: resource id 2982 * @sdp_number: resource number 2983 * @cmd_details: pointer to command details structure or NULL 2984 * 2985 * release common resource using the admin queue commands 2986 **/ 2987 int i40e_aq_release_resource(struct i40e_hw *hw, 2988 enum i40e_aq_resources_ids resource, 2989 u8 sdp_number, 2990 struct i40e_asq_cmd_details *cmd_details) 2991 { 2992 struct i40e_aq_desc desc; 2993 struct i40e_aqc_request_resource *cmd = 2994 (struct i40e_aqc_request_resource *)&desc.params.raw; 2995 int status; 2996 2997 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 2998 2999 cmd->resource_id = cpu_to_le16(resource); 3000 cmd->resource_number = cpu_to_le32(sdp_number); 3001 3002 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3003 3004 return status; 3005 } 3006 3007 /** 3008 * i40e_aq_read_nvm 3009 * @hw: pointer to the hw struct 3010 * @module_pointer: module pointer location in words from the NVM beginning 3011 * @offset: byte offset from the module beginning 3012 * @length: length of the section to be read (in bytes from the offset) 3013 * @data: command buffer (size [bytes] = length) 3014 * @last_command: tells if this is the last command in a series 3015 * @cmd_details: pointer to command details structure or NULL 3016 * 3017 * Read the NVM using the admin queue commands 3018 **/ 3019 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3020 u32 offset, u16 length, void *data, 3021 bool last_command, 3022 struct i40e_asq_cmd_details *cmd_details) 3023 { 3024 struct i40e_aq_desc desc; 3025 struct i40e_aqc_nvm_update *cmd = 3026 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3027 int status; 3028 3029 /* In offset the highest byte must be zeroed. 
*/ 3030 if (offset & 0xFF000000) { 3031 status = -EINVAL; 3032 goto i40e_aq_read_nvm_exit; 3033 } 3034 3035 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3036 3037 /* If this is the last command in a series, set the proper flag. */ 3038 if (last_command) 3039 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3040 cmd->module_pointer = module_pointer; 3041 cmd->offset = cpu_to_le32(offset); 3042 cmd->length = cpu_to_le16(length); 3043 3044 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3045 if (length > I40E_AQ_LARGE_BUF) 3046 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3047 3048 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3049 3050 i40e_aq_read_nvm_exit: 3051 return status; 3052 } 3053 3054 /** 3055 * i40e_aq_erase_nvm 3056 * @hw: pointer to the hw struct 3057 * @module_pointer: module pointer location in words from the NVM beginning 3058 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3059 * @length: length of the section to be erased (expressed in 4 KB) 3060 * @last_command: tells if this is the last command in a series 3061 * @cmd_details: pointer to command details structure or NULL 3062 * 3063 * Erase the NVM sector using the admin queue commands 3064 **/ 3065 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3066 u32 offset, u16 length, bool last_command, 3067 struct i40e_asq_cmd_details *cmd_details) 3068 { 3069 struct i40e_aq_desc desc; 3070 struct i40e_aqc_nvm_update *cmd = 3071 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3072 int status; 3073 3074 /* In offset the highest byte must be zeroed. */ 3075 if (offset & 0xFF000000) { 3076 status = -EINVAL; 3077 goto i40e_aq_erase_nvm_exit; 3078 } 3079 3080 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3081 3082 /* If this is the last command in a series, set the proper flag. 
*/ 3083 if (last_command) 3084 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3085 cmd->module_pointer = module_pointer; 3086 cmd->offset = cpu_to_le32(offset); 3087 cmd->length = cpu_to_le16(length); 3088 3089 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3090 3091 i40e_aq_erase_nvm_exit: 3092 return status; 3093 } 3094 3095 /** 3096 * i40e_parse_discover_capabilities 3097 * @hw: pointer to the hw struct 3098 * @buff: pointer to a buffer containing device/function capability records 3099 * @cap_count: number of capability records in the list 3100 * @list_type_opc: type of capabilities list to parse 3101 * 3102 * Parse the device/function capabilities list. 3103 **/ 3104 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3105 u32 cap_count, 3106 enum i40e_admin_queue_opc list_type_opc) 3107 { 3108 struct i40e_aqc_list_capabilities_element_resp *cap; 3109 u32 valid_functions, num_functions; 3110 u32 number, logical_id, phys_id; 3111 struct i40e_hw_capabilities *p; 3112 u16 id, ocp_cfg_word0; 3113 u8 major_rev; 3114 int status; 3115 u32 i = 0; 3116 3117 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3118 3119 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3120 p = &hw->dev_caps; 3121 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3122 p = &hw->func_caps; 3123 else 3124 return; 3125 3126 for (i = 0; i < cap_count; i++, cap++) { 3127 id = le16_to_cpu(cap->id); 3128 number = le32_to_cpu(cap->number); 3129 logical_id = le32_to_cpu(cap->logical_id); 3130 phys_id = le32_to_cpu(cap->phys_id); 3131 major_rev = cap->major_rev; 3132 3133 switch (id) { 3134 case I40E_AQ_CAP_ID_SWITCH_MODE: 3135 p->switch_mode = number; 3136 break; 3137 case I40E_AQ_CAP_ID_MNG_MODE: 3138 p->management_mode = number; 3139 if (major_rev > 1) { 3140 p->mng_protocols_over_mctp = logical_id; 3141 i40e_debug(hw, I40E_DEBUG_INIT, 3142 "HW Capability: Protocols over MCTP = %d\n", 3143 p->mng_protocols_over_mctp); 3144 } 
else { 3145 p->mng_protocols_over_mctp = 0; 3146 } 3147 break; 3148 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3149 p->npar_enable = number; 3150 break; 3151 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3152 p->os2bmc = number; 3153 break; 3154 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3155 p->valid_functions = number; 3156 break; 3157 case I40E_AQ_CAP_ID_SRIOV: 3158 if (number == 1) 3159 p->sr_iov_1_1 = true; 3160 break; 3161 case I40E_AQ_CAP_ID_VF: 3162 p->num_vfs = number; 3163 p->vf_base_id = logical_id; 3164 break; 3165 case I40E_AQ_CAP_ID_VMDQ: 3166 if (number == 1) 3167 p->vmdq = true; 3168 break; 3169 case I40E_AQ_CAP_ID_8021QBG: 3170 if (number == 1) 3171 p->evb_802_1_qbg = true; 3172 break; 3173 case I40E_AQ_CAP_ID_8021QBR: 3174 if (number == 1) 3175 p->evb_802_1_qbh = true; 3176 break; 3177 case I40E_AQ_CAP_ID_VSI: 3178 p->num_vsis = number; 3179 break; 3180 case I40E_AQ_CAP_ID_DCB: 3181 if (number == 1) { 3182 p->dcb = true; 3183 p->enabled_tcmap = logical_id; 3184 p->maxtc = phys_id; 3185 } 3186 break; 3187 case I40E_AQ_CAP_ID_FCOE: 3188 if (number == 1) 3189 p->fcoe = true; 3190 break; 3191 case I40E_AQ_CAP_ID_ISCSI: 3192 if (number == 1) 3193 p->iscsi = true; 3194 break; 3195 case I40E_AQ_CAP_ID_RSS: 3196 p->rss = true; 3197 p->rss_table_size = number; 3198 p->rss_table_entry_width = logical_id; 3199 break; 3200 case I40E_AQ_CAP_ID_RXQ: 3201 p->num_rx_qp = number; 3202 p->base_queue = phys_id; 3203 break; 3204 case I40E_AQ_CAP_ID_TXQ: 3205 p->num_tx_qp = number; 3206 p->base_queue = phys_id; 3207 break; 3208 case I40E_AQ_CAP_ID_MSIX: 3209 p->num_msix_vectors = number; 3210 i40e_debug(hw, I40E_DEBUG_INIT, 3211 "HW Capability: MSIX vector count = %d\n", 3212 p->num_msix_vectors); 3213 break; 3214 case I40E_AQ_CAP_ID_VF_MSIX: 3215 p->num_msix_vectors_vf = number; 3216 break; 3217 case I40E_AQ_CAP_ID_FLEX10: 3218 if (major_rev == 1) { 3219 if (number == 1) { 3220 p->flex10_enable = true; 3221 p->flex10_capable = true; 3222 } 3223 } else { 3224 /* Capability revision >= 2 */ 3225 if 
(number & 1) 3226 p->flex10_enable = true; 3227 if (number & 2) 3228 p->flex10_capable = true; 3229 } 3230 p->flex10_mode = logical_id; 3231 p->flex10_status = phys_id; 3232 break; 3233 case I40E_AQ_CAP_ID_CEM: 3234 if (number == 1) 3235 p->mgmt_cem = true; 3236 break; 3237 case I40E_AQ_CAP_ID_IWARP: 3238 if (number == 1) 3239 p->iwarp = true; 3240 break; 3241 case I40E_AQ_CAP_ID_LED: 3242 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3243 p->led[phys_id] = true; 3244 break; 3245 case I40E_AQ_CAP_ID_SDP: 3246 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3247 p->sdp[phys_id] = true; 3248 break; 3249 case I40E_AQ_CAP_ID_MDIO: 3250 if (number == 1) { 3251 p->mdio_port_num = phys_id; 3252 p->mdio_port_mode = logical_id; 3253 } 3254 break; 3255 case I40E_AQ_CAP_ID_1588: 3256 if (number == 1) 3257 p->ieee_1588 = true; 3258 break; 3259 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3260 p->fd = true; 3261 p->fd_filters_guaranteed = number; 3262 p->fd_filters_best_effort = logical_id; 3263 break; 3264 case I40E_AQ_CAP_ID_WSR_PROT: 3265 p->wr_csr_prot = (u64)number; 3266 p->wr_csr_prot |= (u64)logical_id << 32; 3267 break; 3268 case I40E_AQ_CAP_ID_NVM_MGMT: 3269 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3270 p->sec_rev_disabled = true; 3271 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3272 p->update_disabled = true; 3273 break; 3274 default: 3275 break; 3276 } 3277 } 3278 3279 if (p->fcoe) 3280 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3281 3282 /* Software override ensuring FCoE is disabled if npar or mfp 3283 * mode because it is not supported in these modes. 
3284 */ 3285 if (p->npar_enable || p->flex10_enable) 3286 p->fcoe = false; 3287 3288 /* count the enabled ports (aka the "not disabled" ports) */ 3289 hw->num_ports = 0; 3290 for (i = 0; i < 4; i++) { 3291 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3292 u64 port_cfg = 0; 3293 3294 /* use AQ read to get the physical register offset instead 3295 * of the port relative offset 3296 */ 3297 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3298 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3299 hw->num_ports++; 3300 } 3301 3302 /* OCP cards case: if a mezz is removed the Ethernet port is at 3303 * disabled state in PRTGEN_CNF register. Additional NVM read is 3304 * needed in order to check if we are dealing with OCP card. 3305 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3306 * physical ports results in wrong partition id calculation and thus 3307 * not supporting WoL. 3308 */ 3309 if (hw->mac.type == I40E_MAC_X722) { 3310 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3311 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3312 2 * I40E_SR_OCP_CFG_WORD0, 3313 sizeof(ocp_cfg_word0), 3314 &ocp_cfg_word0, true, NULL); 3315 if (!status && 3316 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3317 hw->num_ports = 4; 3318 i40e_release_nvm(hw); 3319 } 3320 } 3321 3322 valid_functions = p->valid_functions; 3323 num_functions = 0; 3324 while (valid_functions) { 3325 if (valid_functions & 1) 3326 num_functions++; 3327 valid_functions >>= 1; 3328 } 3329 3330 /* partition id is 1-based, and functions are evenly spread 3331 * across the ports as partitions 3332 */ 3333 if (hw->num_ports != 0) { 3334 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3335 hw->num_partitions = num_functions / hw->num_ports; 3336 } 3337 3338 /* additional HW specific goodies that might 3339 * someday be HW version specific 3340 */ 3341 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3342 } 3343 3344 /** 3345 * i40e_aq_discover_capabilities 3346 * @hw: 
pointer to the hw struct 3347 * @buff: a virtual buffer to hold the capabilities 3348 * @buff_size: Size of the virtual buffer 3349 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3350 * @list_type_opc: capabilities type to discover - pass in the command opcode 3351 * @cmd_details: pointer to command details structure or NULL 3352 * 3353 * Get the device capabilities descriptions from the firmware 3354 **/ 3355 int i40e_aq_discover_capabilities(struct i40e_hw *hw, 3356 void *buff, u16 buff_size, u16 *data_size, 3357 enum i40e_admin_queue_opc list_type_opc, 3358 struct i40e_asq_cmd_details *cmd_details) 3359 { 3360 struct i40e_aqc_list_capabilites *cmd; 3361 struct i40e_aq_desc desc; 3362 int status = 0; 3363 3364 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3365 3366 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3367 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3368 status = -EINVAL; 3369 goto exit; 3370 } 3371 3372 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3373 3374 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3375 if (buff_size > I40E_AQ_LARGE_BUF) 3376 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3377 3378 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3379 *data_size = le16_to_cpu(desc.datalen); 3380 3381 if (status) 3382 goto exit; 3383 3384 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3385 list_type_opc); 3386 3387 exit: 3388 return status; 3389 } 3390 3391 /** 3392 * i40e_aq_update_nvm 3393 * @hw: pointer to the hw struct 3394 * @module_pointer: module pointer location in words from the NVM beginning 3395 * @offset: byte offset from the module beginning 3396 * @length: length of the section to be written (in bytes from the offset) 3397 * @data: command buffer (size [bytes] = length) 3398 * @last_command: tells if this is the last command in a series 3399 * @preservation_flags: Preservation mode flags 3400 * 
@cmd_details: pointer to command details structure or NULL 3401 * 3402 * Update the NVM using the admin queue commands 3403 **/ 3404 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3405 u32 offset, u16 length, void *data, 3406 bool last_command, u8 preservation_flags, 3407 struct i40e_asq_cmd_details *cmd_details) 3408 { 3409 struct i40e_aq_desc desc; 3410 struct i40e_aqc_nvm_update *cmd = 3411 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3412 int status; 3413 3414 /* In offset the highest byte must be zeroed. */ 3415 if (offset & 0xFF000000) { 3416 status = -EINVAL; 3417 goto i40e_aq_update_nvm_exit; 3418 } 3419 3420 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3421 3422 /* If this is the last command in a series, set the proper flag. */ 3423 if (last_command) 3424 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3425 if (hw->mac.type == I40E_MAC_X722) { 3426 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3427 cmd->command_flags |= 3428 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3429 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3430 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3431 cmd->command_flags |= 3432 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3433 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3434 } 3435 cmd->module_pointer = module_pointer; 3436 cmd->offset = cpu_to_le32(offset); 3437 cmd->length = cpu_to_le16(length); 3438 3439 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3440 if (length > I40E_AQ_LARGE_BUF) 3441 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3442 3443 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3444 3445 i40e_aq_update_nvm_exit: 3446 return status; 3447 } 3448 3449 /** 3450 * i40e_aq_rearrange_nvm 3451 * @hw: pointer to the hw struct 3452 * @rearrange_nvm: defines direction of rearrangement 3453 * @cmd_details: pointer to command details structure or NULL 3454 * 3455 * Rearrange NVM structure, available only for transition FW 3456 
**/ 3457 int i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3458 u8 rearrange_nvm, 3459 struct i40e_asq_cmd_details *cmd_details) 3460 { 3461 struct i40e_aqc_nvm_update *cmd; 3462 struct i40e_aq_desc desc; 3463 int status; 3464 3465 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3466 3467 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3468 3469 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3470 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3471 3472 if (!rearrange_nvm) { 3473 status = -EINVAL; 3474 goto i40e_aq_rearrange_nvm_exit; 3475 } 3476 3477 cmd->command_flags |= rearrange_nvm; 3478 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3479 3480 i40e_aq_rearrange_nvm_exit: 3481 return status; 3482 } 3483 3484 /** 3485 * i40e_aq_get_lldp_mib 3486 * @hw: pointer to the hw struct 3487 * @bridge_type: type of bridge requested 3488 * @mib_type: Local, Remote or both Local and Remote MIBs 3489 * @buff: pointer to a user supplied buffer to store the MIB block 3490 * @buff_size: size of the buffer (in bytes) 3491 * @local_len : length of the returned Local LLDP MIB 3492 * @remote_len: length of the returned Remote LLDP MIB 3493 * @cmd_details: pointer to command details structure or NULL 3494 * 3495 * Requests the complete LLDP MIB (entire packet). 
3496 **/ 3497 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3498 u8 mib_type, void *buff, u16 buff_size, 3499 u16 *local_len, u16 *remote_len, 3500 struct i40e_asq_cmd_details *cmd_details) 3501 { 3502 struct i40e_aq_desc desc; 3503 struct i40e_aqc_lldp_get_mib *cmd = 3504 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3505 struct i40e_aqc_lldp_get_mib *resp = 3506 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3507 int status; 3508 3509 if (buff_size == 0 || !buff) 3510 return -EINVAL; 3511 3512 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3513 /* Indirect Command */ 3514 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3515 3516 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3517 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3518 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3519 3520 desc.datalen = cpu_to_le16(buff_size); 3521 3522 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3523 if (buff_size > I40E_AQ_LARGE_BUF) 3524 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3525 3526 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3527 if (!status) { 3528 if (local_len != NULL) 3529 *local_len = le16_to_cpu(resp->local_len); 3530 if (remote_len != NULL) 3531 *remote_len = le16_to_cpu(resp->remote_len); 3532 } 3533 3534 return status; 3535 } 3536 3537 /** 3538 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3539 * @hw: pointer to the hw struct 3540 * @mib_type: Local, Remote or both Local and Remote MIBs 3541 * @buff: pointer to a user supplied buffer to store the MIB block 3542 * @buff_size: size of the buffer (in bytes) 3543 * @cmd_details: pointer to command details structure or NULL 3544 * 3545 * Set the LLDP MIB. 
 **/
int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
		     u8 mib_type, void *buff, u16 buff_size,
		     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aqc_lldp_set_local_mib *cmd;
	struct i40e_aq_desc desc;
	int status;

	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
	if (buff_size == 0 || !buff)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_lldp_set_local_mib);
	/* Indirect Command */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buff_size);
	/* NOTE(review): these fields receive the *virtual* address halves
	 * of @buff; presumably i40e_asq_send_command() overwrites them with
	 * the DMA address before posting - confirm against the AQ send path.
	 */
	cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
	cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	return status;
}

/**
 * i40e_aq_cfg_lldp_mib_change_event
 * @hw: pointer to the hw struct
 * @enable_update: Enable or Disable event posting
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable or Disable posting of an event on ARQ when LLDP MIB
 * associated with the interface changes
 **/
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
				      bool enable_update,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_lldp_update_mib *cmd =
		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);

	/* Event posting is the default; only disabling needs a flag. */
	if (!enable_update)
		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_restore_lldp
 * @hw:
pointer to the hw struct 3608 * @setting: pointer to factory setting variable or NULL 3609 * @restore: True if factory settings should be restored 3610 * @cmd_details: pointer to command details structure or NULL 3611 * 3612 * Restore LLDP Agent factory settings if @restore set to True. In other case 3613 * only returns factory setting in AQ response. 3614 **/ 3615 int 3616 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3617 struct i40e_asq_cmd_details *cmd_details) 3618 { 3619 struct i40e_aq_desc desc; 3620 struct i40e_aqc_lldp_restore *cmd = 3621 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3622 int status; 3623 3624 if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) { 3625 i40e_debug(hw, I40E_DEBUG_ALL, 3626 "Restore LLDP not supported by current FW version.\n"); 3627 return -ENODEV; 3628 } 3629 3630 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3631 3632 if (restore) 3633 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3634 3635 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3636 3637 if (setting) 3638 *setting = cmd->command & 1; 3639 3640 return status; 3641 } 3642 3643 /** 3644 * i40e_aq_stop_lldp 3645 * @hw: pointer to the hw struct 3646 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3647 * @persist: True if stop of LLDP should be persistent across power cycles 3648 * @cmd_details: pointer to command details structure or NULL 3649 * 3650 * Stop or Shutdown the embedded LLDP Agent 3651 **/ 3652 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3653 bool persist, 3654 struct i40e_asq_cmd_details *cmd_details) 3655 { 3656 struct i40e_aq_desc desc; 3657 struct i40e_aqc_lldp_stop *cmd = 3658 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3659 int status; 3660 3661 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3662 3663 if (shutdown_agent) 3664 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3665 3666 if (persist) { 3667 if 
(test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) 3668 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3669 else 3670 i40e_debug(hw, I40E_DEBUG_ALL, 3671 "Persistent Stop LLDP not supported by current FW version.\n"); 3672 } 3673 3674 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3675 3676 return status; 3677 } 3678 3679 /** 3680 * i40e_aq_start_lldp 3681 * @hw: pointer to the hw struct 3682 * @persist: True if start of LLDP should be persistent across power cycles 3683 * @cmd_details: pointer to command details structure or NULL 3684 * 3685 * Start the embedded LLDP Agent on all ports. 3686 **/ 3687 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3688 struct i40e_asq_cmd_details *cmd_details) 3689 { 3690 struct i40e_aq_desc desc; 3691 struct i40e_aqc_lldp_start *cmd = 3692 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3693 int status; 3694 3695 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3696 3697 cmd->command = I40E_AQ_LLDP_AGENT_START; 3698 3699 if (persist) { 3700 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) 3701 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3702 else 3703 i40e_debug(hw, I40E_DEBUG_ALL, 3704 "Persistent Start LLDP not supported by current FW version.\n"); 3705 } 3706 3707 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3708 3709 return status; 3710 } 3711 3712 /** 3713 * i40e_aq_set_dcb_parameters 3714 * @hw: pointer to the hw struct 3715 * @cmd_details: pointer to command details structure or NULL 3716 * @dcb_enable: True if DCB configuration needs to be applied 3717 * 3718 **/ 3719 int 3720 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3721 struct i40e_asq_cmd_details *cmd_details) 3722 { 3723 struct i40e_aq_desc desc; 3724 struct i40e_aqc_set_dcb_parameters *cmd = 3725 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3726 int status; 3727 3728 if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps)) 3729 return -ENODEV; 3730 3731 
i40e_fill_default_direct_cmd_desc(&desc, 3732 i40e_aqc_opc_set_dcb_parameters); 3733 3734 if (dcb_enable) { 3735 cmd->valid_flags = I40E_DCB_VALID; 3736 cmd->command = I40E_AQ_DCB_SET_AGENT; 3737 } 3738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3739 3740 return status; 3741 } 3742 3743 /** 3744 * i40e_aq_get_cee_dcb_config 3745 * @hw: pointer to the hw struct 3746 * @buff: response buffer that stores CEE operational configuration 3747 * @buff_size: size of the buffer passed 3748 * @cmd_details: pointer to command details structure or NULL 3749 * 3750 * Get CEE DCBX mode operational configuration from firmware 3751 **/ 3752 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3753 void *buff, u16 buff_size, 3754 struct i40e_asq_cmd_details *cmd_details) 3755 { 3756 struct i40e_aq_desc desc; 3757 int status; 3758 3759 if (buff_size == 0 || !buff) 3760 return -EINVAL; 3761 3762 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3763 3764 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3765 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3766 cmd_details); 3767 3768 return status; 3769 } 3770 3771 /** 3772 * i40e_aq_add_udp_tunnel 3773 * @hw: pointer to the hw struct 3774 * @udp_port: the UDP port to add in Host byte order 3775 * @protocol_index: protocol index type 3776 * @filter_index: pointer to filter index 3777 * @cmd_details: pointer to command details structure or NULL 3778 * 3779 * Note: Firmware expects the udp_port value to be in Little Endian format, 3780 * and this function will call cpu_to_le16 to convert from Host byte order to 3781 * Little Endian order. 
3782 **/ 3783 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3784 u16 udp_port, u8 protocol_index, 3785 u8 *filter_index, 3786 struct i40e_asq_cmd_details *cmd_details) 3787 { 3788 struct i40e_aq_desc desc; 3789 struct i40e_aqc_add_udp_tunnel *cmd = 3790 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3791 struct i40e_aqc_del_udp_tunnel_completion *resp = 3792 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3793 int status; 3794 3795 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3796 3797 cmd->udp_port = cpu_to_le16(udp_port); 3798 cmd->protocol_type = protocol_index; 3799 3800 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3801 3802 if (!status && filter_index) 3803 *filter_index = resp->index; 3804 3805 return status; 3806 } 3807 3808 /** 3809 * i40e_aq_del_udp_tunnel 3810 * @hw: pointer to the hw struct 3811 * @index: filter index 3812 * @cmd_details: pointer to command details structure or NULL 3813 **/ 3814 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3815 struct i40e_asq_cmd_details *cmd_details) 3816 { 3817 struct i40e_aq_desc desc; 3818 struct i40e_aqc_remove_udp_tunnel *cmd = 3819 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3820 int status; 3821 3822 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3823 3824 cmd->index = index; 3825 3826 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3827 3828 return status; 3829 } 3830 3831 /** 3832 * i40e_aq_delete_element - Delete switch element 3833 * @hw: pointer to the hw struct 3834 * @seid: the SEID to delete from the switch 3835 * @cmd_details: pointer to command details structure or NULL 3836 * 3837 * This deletes a switch element from the switch. 
3838 **/ 3839 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3840 struct i40e_asq_cmd_details *cmd_details) 3841 { 3842 struct i40e_aq_desc desc; 3843 struct i40e_aqc_switch_seid *cmd = 3844 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3845 int status; 3846 3847 if (seid == 0) 3848 return -EINVAL; 3849 3850 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3851 3852 cmd->seid = cpu_to_le16(seid); 3853 3854 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3855 cmd_details, true); 3856 3857 return status; 3858 } 3859 3860 /** 3861 * i40e_aq_dcb_updated - DCB Updated Command 3862 * @hw: pointer to the hw struct 3863 * @cmd_details: pointer to command details structure or NULL 3864 * 3865 * EMP will return when the shared RPB settings have been 3866 * recomputed and modified. The retval field in the descriptor 3867 * will be set to 0 when RPB is modified. 3868 **/ 3869 int i40e_aq_dcb_updated(struct i40e_hw *hw, 3870 struct i40e_asq_cmd_details *cmd_details) 3871 { 3872 struct i40e_aq_desc desc; 3873 int status; 3874 3875 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3876 3877 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3878 3879 return status; 3880 } 3881 3882 /** 3883 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3884 * @hw: pointer to the hw struct 3885 * @seid: seid for the physical port/switching component/vsi 3886 * @buff: Indirect buffer to hold data parameters and response 3887 * @buff_size: Indirect buffer size 3888 * @opcode: Tx scheduler AQ command opcode 3889 * @cmd_details: pointer to command details structure or NULL 3890 * 3891 * Generic command handler for Tx scheduler AQ commands 3892 **/ 3893 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3894 void *buff, u16 buff_size, 3895 enum i40e_admin_queue_opc opcode, 3896 struct i40e_asq_cmd_details *cmd_details) 3897 { 3898 struct i40e_aq_desc desc; 3899 struct i40e_aqc_tx_sched_ind *cmd = 
3900 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3901 int status; 3902 bool cmd_param_flag = false; 3903 3904 switch (opcode) { 3905 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3906 case i40e_aqc_opc_configure_vsi_tc_bw: 3907 case i40e_aqc_opc_enable_switching_comp_ets: 3908 case i40e_aqc_opc_modify_switching_comp_ets: 3909 case i40e_aqc_opc_disable_switching_comp_ets: 3910 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3911 case i40e_aqc_opc_configure_switching_comp_bw_config: 3912 cmd_param_flag = true; 3913 break; 3914 case i40e_aqc_opc_query_vsi_bw_config: 3915 case i40e_aqc_opc_query_vsi_ets_sla_config: 3916 case i40e_aqc_opc_query_switching_comp_ets_config: 3917 case i40e_aqc_opc_query_port_ets_config: 3918 case i40e_aqc_opc_query_switching_comp_bw_config: 3919 cmd_param_flag = false; 3920 break; 3921 default: 3922 return -EINVAL; 3923 } 3924 3925 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3926 3927 /* Indirect command */ 3928 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3929 if (cmd_param_flag) 3930 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3931 if (buff_size > I40E_AQ_LARGE_BUF) 3932 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3933 3934 desc.datalen = cpu_to_le16(buff_size); 3935 3936 cmd->vsi_seid = cpu_to_le16(seid); 3937 3938 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3939 3940 return status; 3941 } 3942 3943 /** 3944 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3945 * @hw: pointer to the hw struct 3946 * @seid: VSI seid 3947 * @credit: BW limit credits (0 = disabled) 3948 * @max_credit: Max BW limit credits 3949 * @cmd_details: pointer to command details structure or NULL 3950 **/ 3951 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3952 u16 seid, u16 credit, u8 max_credit, 3953 struct i40e_asq_cmd_details *cmd_details) 3954 { 3955 struct i40e_aq_desc desc; 3956 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3957 (struct i40e_aqc_configure_vsi_bw_limit 
*)&desc.params.raw; 3958 int status; 3959 3960 i40e_fill_default_direct_cmd_desc(&desc, 3961 i40e_aqc_opc_configure_vsi_bw_limit); 3962 3963 cmd->vsi_seid = cpu_to_le16(seid); 3964 cmd->credit = cpu_to_le16(credit); 3965 cmd->max_credit = max_credit; 3966 3967 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3968 3969 return status; 3970 } 3971 3972 /** 3973 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 3974 * @hw: pointer to the hw struct 3975 * @seid: VSI seid 3976 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 3977 * @cmd_details: pointer to command details structure or NULL 3978 **/ 3979 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 3980 u16 seid, 3981 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 3982 struct i40e_asq_cmd_details *cmd_details) 3983 { 3984 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3985 i40e_aqc_opc_configure_vsi_tc_bw, 3986 cmd_details); 3987 } 3988 3989 /** 3990 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 3991 * @hw: pointer to the hw struct 3992 * @seid: seid of the switching component connected to Physical Port 3993 * @ets_data: Buffer holding ETS parameters 3994 * @opcode: Tx scheduler AQ command opcode 3995 * @cmd_details: pointer to command details structure or NULL 3996 **/ 3997 int 3998 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 3999 u16 seid, 4000 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4001 enum i40e_admin_queue_opc opcode, 4002 struct i40e_asq_cmd_details *cmd_details) 4003 { 4004 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4005 sizeof(*ets_data), opcode, cmd_details); 4006 } 4007 4008 /** 4009 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4010 * @hw: pointer to the hw struct 4011 * @seid: seid of the switching component 4012 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4013 * @cmd_details: pointer to command 
details structure or NULL 4014 **/ 4015 int 4016 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4017 u16 seid, 4018 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4019 struct i40e_asq_cmd_details *cmd_details) 4020 { 4021 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4022 i40e_aqc_opc_configure_switching_comp_bw_config, 4023 cmd_details); 4024 } 4025 4026 /** 4027 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4028 * @hw: pointer to the hw struct 4029 * @seid: seid of the VSI 4030 * @bw_data: Buffer to hold VSI BW configuration 4031 * @cmd_details: pointer to command details structure or NULL 4032 **/ 4033 int 4034 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4035 u16 seid, 4036 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4037 struct i40e_asq_cmd_details *cmd_details) 4038 { 4039 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4040 i40e_aqc_opc_query_vsi_bw_config, 4041 cmd_details); 4042 } 4043 4044 /** 4045 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4046 * @hw: pointer to the hw struct 4047 * @seid: seid of the VSI 4048 * @bw_data: Buffer to hold VSI BW configuration per TC 4049 * @cmd_details: pointer to command details structure or NULL 4050 **/ 4051 int 4052 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4053 u16 seid, 4054 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4055 struct i40e_asq_cmd_details *cmd_details) 4056 { 4057 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4058 i40e_aqc_opc_query_vsi_ets_sla_config, 4059 cmd_details); 4060 } 4061 4062 /** 4063 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4064 * @hw: pointer to the hw struct 4065 * @seid: seid of the switching component 4066 * @bw_data: Buffer to hold switching component's per TC BW config 4067 * @cmd_details: pointer to command details structure or NULL 4068 **/ 4069 int 4070 
i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4071 u16 seid, 4072 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4073 struct i40e_asq_cmd_details *cmd_details) 4074 { 4075 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4076 i40e_aqc_opc_query_switching_comp_ets_config, 4077 cmd_details); 4078 } 4079 4080 /** 4081 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4082 * @hw: pointer to the hw struct 4083 * @seid: seid of the VSI or switching component connected to Physical Port 4084 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4085 * @cmd_details: pointer to command details structure or NULL 4086 **/ 4087 int 4088 i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4089 u16 seid, 4090 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4091 struct i40e_asq_cmd_details *cmd_details) 4092 { 4093 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4094 i40e_aqc_opc_query_port_ets_config, 4095 cmd_details); 4096 } 4097 4098 /** 4099 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4100 * @hw: pointer to the hw struct 4101 * @seid: seid of the switching component 4102 * @bw_data: Buffer to hold switching component's BW configuration 4103 * @cmd_details: pointer to command details structure or NULL 4104 **/ 4105 int 4106 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4107 u16 seid, 4108 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4109 struct i40e_asq_cmd_details *cmd_details) 4110 { 4111 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4112 i40e_aqc_opc_query_switching_comp_bw_config, 4113 cmd_details); 4114 } 4115 4116 /** 4117 * i40e_validate_filter_settings 4118 * @hw: pointer to the hardware structure 4119 * @settings: Filter control settings 4120 * 4121 * Check and validate the filter control settings passed. 
4122 * The function checks for the valid filter/context sizes being 4123 * passed for FCoE and PE. 4124 * 4125 * Returns 0 if the values passed are valid and within 4126 * range else returns an error. 4127 **/ 4128 static int 4129 i40e_validate_filter_settings(struct i40e_hw *hw, 4130 struct i40e_filter_control_settings *settings) 4131 { 4132 u32 fcoe_cntx_size, fcoe_filt_size; 4133 u32 fcoe_fmax; 4134 u32 val; 4135 4136 /* Validate FCoE settings passed */ 4137 switch (settings->fcoe_filt_num) { 4138 case I40E_HASH_FILTER_SIZE_1K: 4139 case I40E_HASH_FILTER_SIZE_2K: 4140 case I40E_HASH_FILTER_SIZE_4K: 4141 case I40E_HASH_FILTER_SIZE_8K: 4142 case I40E_HASH_FILTER_SIZE_16K: 4143 case I40E_HASH_FILTER_SIZE_32K: 4144 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4145 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4146 break; 4147 default: 4148 return -EINVAL; 4149 } 4150 4151 switch (settings->fcoe_cntx_num) { 4152 case I40E_DMA_CNTX_SIZE_512: 4153 case I40E_DMA_CNTX_SIZE_1K: 4154 case I40E_DMA_CNTX_SIZE_2K: 4155 case I40E_DMA_CNTX_SIZE_4K: 4156 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4157 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4158 break; 4159 default: 4160 return -EINVAL; 4161 } 4162 4163 /* Validate PE settings passed */ 4164 switch (settings->pe_filt_num) { 4165 case I40E_HASH_FILTER_SIZE_1K: 4166 case I40E_HASH_FILTER_SIZE_2K: 4167 case I40E_HASH_FILTER_SIZE_4K: 4168 case I40E_HASH_FILTER_SIZE_8K: 4169 case I40E_HASH_FILTER_SIZE_16K: 4170 case I40E_HASH_FILTER_SIZE_32K: 4171 case I40E_HASH_FILTER_SIZE_64K: 4172 case I40E_HASH_FILTER_SIZE_128K: 4173 case I40E_HASH_FILTER_SIZE_256K: 4174 case I40E_HASH_FILTER_SIZE_512K: 4175 case I40E_HASH_FILTER_SIZE_1M: 4176 break; 4177 default: 4178 return -EINVAL; 4179 } 4180 4181 switch (settings->pe_cntx_num) { 4182 case I40E_DMA_CNTX_SIZE_512: 4183 case I40E_DMA_CNTX_SIZE_1K: 4184 case I40E_DMA_CNTX_SIZE_2K: 4185 case I40E_DMA_CNTX_SIZE_4K: 4186 case I40E_DMA_CNTX_SIZE_8K: 4187 case I40E_DMA_CNTX_SIZE_16K: 4188 
case I40E_DMA_CNTX_SIZE_32K: 4189 case I40E_DMA_CNTX_SIZE_64K: 4190 case I40E_DMA_CNTX_SIZE_128K: 4191 case I40E_DMA_CNTX_SIZE_256K: 4192 break; 4193 default: 4194 return -EINVAL; 4195 } 4196 4197 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4198 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4199 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4200 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4201 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4202 return -EINVAL; 4203 4204 return 0; 4205 } 4206 4207 /** 4208 * i40e_set_filter_control 4209 * @hw: pointer to the hardware structure 4210 * @settings: Filter control settings 4211 * 4212 * Set the Queue Filters for PE/FCoE and enable filters required 4213 * for a single PF. It is expected that these settings are programmed 4214 * at the driver initialization time. 4215 **/ 4216 int i40e_set_filter_control(struct i40e_hw *hw, 4217 struct i40e_filter_control_settings *settings) 4218 { 4219 u32 hash_lut_size = 0; 4220 int ret = 0; 4221 u32 val; 4222 4223 if (!settings) 4224 return -EINVAL; 4225 4226 /* Validate the input settings */ 4227 ret = i40e_validate_filter_settings(hw, settings); 4228 if (ret) 4229 return ret; 4230 4231 /* Read the PF Queue Filter control register */ 4232 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4233 4234 /* Program required PE hash buckets for the PF */ 4235 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4236 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4237 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4238 /* Program required PE contexts for the PF */ 4239 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4240 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4241 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4242 4243 /* Program required FCoE hash buckets for the PF */ 4244 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4245 val |= ((u32)settings->fcoe_filt_num << 4246 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4247 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4248 /* Program required FCoE DDP contexts for the 
PF */ 4249 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4250 val |= ((u32)settings->fcoe_cntx_num << 4251 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4252 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4253 4254 /* Program Hash LUT size for the PF */ 4255 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4256 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4257 hash_lut_size = 1; 4258 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4259 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4260 4261 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4262 if (settings->enable_fdir) 4263 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4264 if (settings->enable_ethtype) 4265 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4266 if (settings->enable_macvlan) 4267 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4268 4269 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4270 4271 return 0; 4272 } 4273 4274 /** 4275 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4276 * @hw: pointer to the hw struct 4277 * @mac_addr: MAC address to use in the filter 4278 * @ethtype: Ethertype to use in the filter 4279 * @flags: Flags that needs to be applied to the filter 4280 * @vsi_seid: seid of the control VSI 4281 * @queue: VSI queue number to send the packet to 4282 * @is_add: Add control packet filter if True else remove 4283 * @stats: Structure to hold information on control filter counts 4284 * @cmd_details: pointer to command details structure or NULL 4285 * 4286 * This command will Add or Remove control packet filter for a control VSI. 4287 * In return it will update the total number of perfect filter count in 4288 * the stats member. 
4289 **/ 4290 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4291 u8 *mac_addr, u16 ethtype, u16 flags, 4292 u16 vsi_seid, u16 queue, bool is_add, 4293 struct i40e_control_filter_stats *stats, 4294 struct i40e_asq_cmd_details *cmd_details) 4295 { 4296 struct i40e_aq_desc desc; 4297 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4298 (struct i40e_aqc_add_remove_control_packet_filter *) 4299 &desc.params.raw; 4300 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4301 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4302 &desc.params.raw; 4303 int status; 4304 4305 if (vsi_seid == 0) 4306 return -EINVAL; 4307 4308 if (is_add) { 4309 i40e_fill_default_direct_cmd_desc(&desc, 4310 i40e_aqc_opc_add_control_packet_filter); 4311 cmd->queue = cpu_to_le16(queue); 4312 } else { 4313 i40e_fill_default_direct_cmd_desc(&desc, 4314 i40e_aqc_opc_remove_control_packet_filter); 4315 } 4316 4317 if (mac_addr) 4318 ether_addr_copy(cmd->mac, mac_addr); 4319 4320 cmd->etype = cpu_to_le16(ethtype); 4321 cmd->flags = cpu_to_le16(flags); 4322 cmd->seid = cpu_to_le16(vsi_seid); 4323 4324 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4325 4326 if (!status && stats) { 4327 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4328 stats->etype_used = le16_to_cpu(resp->etype_used); 4329 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4330 stats->etype_free = le16_to_cpu(resp->etype_free); 4331 } 4332 4333 return status; 4334 } 4335 4336 /** 4337 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4338 * @hw: pointer to the hw struct 4339 * @seid: VSI seid to add ethertype filter from 4340 **/ 4341 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4342 u16 seid) 4343 { 4344 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4345 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4346 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4347 
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4348 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4349 int status; 4350 4351 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4352 seid, 0, true, NULL, 4353 NULL); 4354 if (status) 4355 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4356 } 4357 4358 /** 4359 * i40e_aq_alternate_read 4360 * @hw: pointer to the hardware structure 4361 * @reg_addr0: address of first dword to be read 4362 * @reg_val0: pointer for data read from 'reg_addr0' 4363 * @reg_addr1: address of second dword to be read 4364 * @reg_val1: pointer for data read from 'reg_addr1' 4365 * 4366 * Read one or two dwords from alternate structure. Fields are indicated 4367 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4368 * is not passed then only register at 'reg_addr0' is read. 4369 * 4370 **/ 4371 static int i40e_aq_alternate_read(struct i40e_hw *hw, 4372 u32 reg_addr0, u32 *reg_val0, 4373 u32 reg_addr1, u32 *reg_val1) 4374 { 4375 struct i40e_aq_desc desc; 4376 struct i40e_aqc_alternate_write *cmd_resp = 4377 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4378 int status; 4379 4380 if (!reg_val0) 4381 return -EINVAL; 4382 4383 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4384 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4385 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4386 4387 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4388 4389 if (!status) { 4390 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4391 4392 if (reg_val1) 4393 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4394 } 4395 4396 return status; 4397 } 4398 4399 /** 4400 * i40e_aq_suspend_port_tx 4401 * @hw: pointer to the hardware structure 4402 * @seid: port seid 4403 * @cmd_details: pointer to command details structure or NULL 4404 * 4405 * Suspend port's Tx traffic 4406 **/ 4407 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4408 struct i40e_asq_cmd_details *cmd_details) 4409 { 
4410 struct i40e_aqc_tx_sched_ind *cmd; 4411 struct i40e_aq_desc desc; 4412 int status; 4413 4414 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4415 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4416 cmd->vsi_seid = cpu_to_le16(seid); 4417 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4418 4419 return status; 4420 } 4421 4422 /** 4423 * i40e_aq_resume_port_tx 4424 * @hw: pointer to the hardware structure 4425 * @cmd_details: pointer to command details structure or NULL 4426 * 4427 * Resume port's Tx traffic 4428 **/ 4429 int i40e_aq_resume_port_tx(struct i40e_hw *hw, 4430 struct i40e_asq_cmd_details *cmd_details) 4431 { 4432 struct i40e_aq_desc desc; 4433 int status; 4434 4435 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4436 4437 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4438 4439 return status; 4440 } 4441 4442 /** 4443 * i40e_set_pci_config_data - store PCI bus info 4444 * @hw: pointer to hardware structure 4445 * @link_status: the link status word from PCI config space 4446 * 4447 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4448 **/ 4449 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4450 { 4451 hw->bus.type = i40e_bus_type_pci_express; 4452 4453 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4454 case PCI_EXP_LNKSTA_NLW_X1: 4455 hw->bus.width = i40e_bus_width_pcie_x1; 4456 break; 4457 case PCI_EXP_LNKSTA_NLW_X2: 4458 hw->bus.width = i40e_bus_width_pcie_x2; 4459 break; 4460 case PCI_EXP_LNKSTA_NLW_X4: 4461 hw->bus.width = i40e_bus_width_pcie_x4; 4462 break; 4463 case PCI_EXP_LNKSTA_NLW_X8: 4464 hw->bus.width = i40e_bus_width_pcie_x8; 4465 break; 4466 default: 4467 hw->bus.width = i40e_bus_width_unknown; 4468 break; 4469 } 4470 4471 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4472 case PCI_EXP_LNKSTA_CLS_2_5GB: 4473 hw->bus.speed = i40e_bus_speed_2500; 4474 break; 4475 case PCI_EXP_LNKSTA_CLS_5_0GB: 4476 
hw->bus.speed = i40e_bus_speed_5000; 4477 break; 4478 case PCI_EXP_LNKSTA_CLS_8_0GB: 4479 hw->bus.speed = i40e_bus_speed_8000; 4480 break; 4481 default: 4482 hw->bus.speed = i40e_bus_speed_unknown; 4483 break; 4484 } 4485 } 4486 4487 /** 4488 * i40e_aq_debug_dump 4489 * @hw: pointer to the hardware structure 4490 * @cluster_id: specific cluster to dump 4491 * @table_id: table id within cluster 4492 * @start_index: index of line in the block to read 4493 * @buff_size: dump buffer size 4494 * @buff: dump buffer 4495 * @ret_buff_size: actual buffer size returned 4496 * @ret_next_table: next block to read 4497 * @ret_next_index: next index to read 4498 * @cmd_details: pointer to command details structure or NULL 4499 * 4500 * Dump internal FW/HW data for debug purposes. 4501 * 4502 **/ 4503 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4504 u8 table_id, u32 start_index, u16 buff_size, 4505 void *buff, u16 *ret_buff_size, 4506 u8 *ret_next_table, u32 *ret_next_index, 4507 struct i40e_asq_cmd_details *cmd_details) 4508 { 4509 struct i40e_aq_desc desc; 4510 struct i40e_aqc_debug_dump_internals *cmd = 4511 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4512 struct i40e_aqc_debug_dump_internals *resp = 4513 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4514 int status; 4515 4516 if (buff_size == 0 || !buff) 4517 return -EINVAL; 4518 4519 i40e_fill_default_direct_cmd_desc(&desc, 4520 i40e_aqc_opc_debug_dump_internals); 4521 /* Indirect Command */ 4522 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4523 if (buff_size > I40E_AQ_LARGE_BUF) 4524 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4525 4526 cmd->cluster_id = cluster_id; 4527 cmd->table_id = table_id; 4528 cmd->idx = cpu_to_le32(start_index); 4529 4530 desc.datalen = cpu_to_le16(buff_size); 4531 4532 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4533 if (!status) { 4534 if (ret_buff_size) 4535 *ret_buff_size = le16_to_cpu(desc.datalen); 4536 if 
(ret_next_table) 4537 *ret_next_table = resp->table_id; 4538 if (ret_next_index) 4539 *ret_next_index = le32_to_cpu(resp->idx); 4540 } 4541 4542 return status; 4543 } 4544 4545 /** 4546 * i40e_read_bw_from_alt_ram 4547 * @hw: pointer to the hardware structure 4548 * @max_bw: pointer for max_bw read 4549 * @min_bw: pointer for min_bw read 4550 * @min_valid: pointer for bool that is true if min_bw is a valid value 4551 * @max_valid: pointer for bool that is true if max_bw is a valid value 4552 * 4553 * Read bw from the alternate ram for the given pf 4554 **/ 4555 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4556 u32 *max_bw, u32 *min_bw, 4557 bool *min_valid, bool *max_valid) 4558 { 4559 u32 max_bw_addr, min_bw_addr; 4560 int status; 4561 4562 /* Calculate the address of the min/max bw registers */ 4563 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4564 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4565 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4566 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4567 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4568 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4569 4570 /* Read the bandwidths from alt ram */ 4571 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4572 min_bw_addr, min_bw); 4573 4574 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4575 *min_valid = true; 4576 else 4577 *min_valid = false; 4578 4579 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4580 *max_valid = true; 4581 else 4582 *max_valid = false; 4583 4584 return status; 4585 } 4586 4587 /** 4588 * i40e_aq_configure_partition_bw 4589 * @hw: pointer to the hardware structure 4590 * @bw_data: Buffer holding valid pfs and bw limits 4591 * @cmd_details: pointer to command details 4592 * 4593 * Configure partitions guaranteed/max bw 4594 **/ 4595 int 4596 i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4597 struct i40e_aqc_configure_partition_bw_data *bw_data, 4598 struct i40e_asq_cmd_details *cmd_details) 4599 { 4600 u16 bwd_size = sizeof(*bw_data); 4601 struct i40e_aq_desc desc; 4602 int 
status; 4603 4604 i40e_fill_default_direct_cmd_desc(&desc, 4605 i40e_aqc_opc_configure_partition_bw); 4606 4607 /* Indirect command */ 4608 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4609 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4610 4611 if (bwd_size > I40E_AQ_LARGE_BUF) 4612 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4613 4614 desc.datalen = cpu_to_le16(bwd_size); 4615 4616 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4617 cmd_details); 4618 4619 return status; 4620 } 4621 4622 /** 4623 * i40e_read_phy_register_clause22 4624 * @hw: pointer to the HW structure 4625 * @reg: register address in the page 4626 * @phy_addr: PHY address on MDIO interface 4627 * @value: PHY register value 4628 * 4629 * Reads specified PHY register value 4630 **/ 4631 int i40e_read_phy_register_clause22(struct i40e_hw *hw, 4632 u16 reg, u8 phy_addr, u16 *value) 4633 { 4634 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4635 int status = -EIO; 4636 u32 command = 0; 4637 u16 retry = 1000; 4638 4639 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4640 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4641 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4642 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4643 (I40E_GLGEN_MSCA_MDICMD_MASK); 4644 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4645 do { 4646 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4647 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4648 status = 0; 4649 break; 4650 } 4651 udelay(10); 4652 retry--; 4653 } while (retry); 4654 4655 if (status) { 4656 i40e_debug(hw, I40E_DEBUG_PHY, 4657 "PHY: Can't write command to external PHY.\n"); 4658 } else { 4659 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4660 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4661 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4662 } 4663 4664 return status; 4665 } 4666 4667 /** 4668 * i40e_write_phy_register_clause22 4669 * @hw: pointer to the HW structure 4670 * @reg: register address in the page 4671 * @phy_addr: PHY address on MDIO 
interface 4672 * @value: PHY register value 4673 * 4674 * Writes specified PHY register value 4675 **/ 4676 int i40e_write_phy_register_clause22(struct i40e_hw *hw, 4677 u16 reg, u8 phy_addr, u16 value) 4678 { 4679 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4680 int status = -EIO; 4681 u32 command = 0; 4682 u16 retry = 1000; 4683 4684 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4685 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4686 4687 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4688 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4689 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4690 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4691 (I40E_GLGEN_MSCA_MDICMD_MASK); 4692 4693 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4694 do { 4695 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4696 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4697 status = 0; 4698 break; 4699 } 4700 udelay(10); 4701 retry--; 4702 } while (retry); 4703 4704 return status; 4705 } 4706 4707 /** 4708 * i40e_read_phy_register_clause45 4709 * @hw: pointer to the HW structure 4710 * @page: registers page number 4711 * @reg: register address in the page 4712 * @phy_addr: PHY address on MDIO interface 4713 * @value: PHY register value 4714 * 4715 * Reads specified PHY register value 4716 **/ 4717 int i40e_read_phy_register_clause45(struct i40e_hw *hw, 4718 u8 page, u16 reg, u8 phy_addr, u16 *value) 4719 { 4720 u8 port_num = hw->func_caps.mdio_port_num; 4721 int status = -EIO; 4722 u32 command = 0; 4723 u16 retry = 1000; 4724 4725 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4726 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4727 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4728 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4729 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4730 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4731 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4732 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4733 do { 4734 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4735 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) 
{ 4736 status = 0; 4737 break; 4738 } 4739 usleep_range(10, 20); 4740 retry--; 4741 } while (retry); 4742 4743 if (status) { 4744 i40e_debug(hw, I40E_DEBUG_PHY, 4745 "PHY: Can't write command to external PHY.\n"); 4746 goto phy_read_end; 4747 } 4748 4749 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4750 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4751 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4752 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4753 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4754 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4755 status = -EIO; 4756 retry = 1000; 4757 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4758 do { 4759 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4760 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4761 status = 0; 4762 break; 4763 } 4764 usleep_range(10, 20); 4765 retry--; 4766 } while (retry); 4767 4768 if (!status) { 4769 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4770 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4771 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4772 } else { 4773 i40e_debug(hw, I40E_DEBUG_PHY, 4774 "PHY: Can't read register value from external PHY.\n"); 4775 } 4776 4777 phy_read_end: 4778 return status; 4779 } 4780 4781 /** 4782 * i40e_write_phy_register_clause45 4783 * @hw: pointer to the HW structure 4784 * @page: registers page number 4785 * @reg: register address in the page 4786 * @phy_addr: PHY address on MDIO interface 4787 * @value: PHY register value 4788 * 4789 * Writes value to specified PHY register 4790 **/ 4791 int i40e_write_phy_register_clause45(struct i40e_hw *hw, 4792 u8 page, u16 reg, u8 phy_addr, u16 value) 4793 { 4794 u8 port_num = hw->func_caps.mdio_port_num; 4795 int status = -EIO; 4796 u16 retry = 1000; 4797 u32 command = 0; 4798 4799 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4800 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4801 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4802 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4803 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4804 (I40E_GLGEN_MSCA_MDICMD_MASK) | 
4805 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4806 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4807 do { 4808 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4809 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4810 status = 0; 4811 break; 4812 } 4813 usleep_range(10, 20); 4814 retry--; 4815 } while (retry); 4816 if (status) { 4817 i40e_debug(hw, I40E_DEBUG_PHY, 4818 "PHY: Can't write command to external PHY.\n"); 4819 goto phy_write_end; 4820 } 4821 4822 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4823 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4824 4825 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4826 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4827 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4828 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4829 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4830 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4831 status = -EIO; 4832 retry = 1000; 4833 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4834 do { 4835 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4836 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4837 status = 0; 4838 break; 4839 } 4840 usleep_range(10, 20); 4841 retry--; 4842 } while (retry); 4843 4844 phy_write_end: 4845 return status; 4846 } 4847 4848 /** 4849 * i40e_write_phy_register 4850 * @hw: pointer to the HW structure 4851 * @page: registers page number 4852 * @reg: register address in the page 4853 * @phy_addr: PHY address on MDIO interface 4854 * @value: PHY register value 4855 * 4856 * Writes value to specified PHY register 4857 **/ 4858 int i40e_write_phy_register(struct i40e_hw *hw, 4859 u8 page, u16 reg, u8 phy_addr, u16 value) 4860 { 4861 int status; 4862 4863 switch (hw->device_id) { 4864 case I40E_DEV_ID_1G_BASE_T_X722: 4865 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4866 value); 4867 break; 4868 case I40E_DEV_ID_1G_BASE_T_BC: 4869 case I40E_DEV_ID_5G_BASE_T_BC: 4870 case I40E_DEV_ID_10G_BASE_T: 4871 case I40E_DEV_ID_10G_BASE_T4: 4872 case I40E_DEV_ID_10G_BASE_T_BC: 4873 case 
I40E_DEV_ID_10G_BASE_T_X722: 4874 case I40E_DEV_ID_25G_B: 4875 case I40E_DEV_ID_25G_SFP28: 4876 status = i40e_write_phy_register_clause45(hw, page, reg, 4877 phy_addr, value); 4878 break; 4879 default: 4880 status = -EIO; 4881 break; 4882 } 4883 4884 return status; 4885 } 4886 4887 /** 4888 * i40e_read_phy_register 4889 * @hw: pointer to the HW structure 4890 * @page: registers page number 4891 * @reg: register address in the page 4892 * @phy_addr: PHY address on MDIO interface 4893 * @value: PHY register value 4894 * 4895 * Reads specified PHY register value 4896 **/ 4897 int i40e_read_phy_register(struct i40e_hw *hw, 4898 u8 page, u16 reg, u8 phy_addr, u16 *value) 4899 { 4900 int status; 4901 4902 switch (hw->device_id) { 4903 case I40E_DEV_ID_1G_BASE_T_X722: 4904 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4905 value); 4906 break; 4907 case I40E_DEV_ID_1G_BASE_T_BC: 4908 case I40E_DEV_ID_5G_BASE_T_BC: 4909 case I40E_DEV_ID_10G_BASE_T: 4910 case I40E_DEV_ID_10G_BASE_T4: 4911 case I40E_DEV_ID_10G_BASE_T_BC: 4912 case I40E_DEV_ID_10G_BASE_T_X722: 4913 case I40E_DEV_ID_25G_B: 4914 case I40E_DEV_ID_25G_SFP28: 4915 status = i40e_read_phy_register_clause45(hw, page, reg, 4916 phy_addr, value); 4917 break; 4918 default: 4919 status = -EIO; 4920 break; 4921 } 4922 4923 return status; 4924 } 4925 4926 /** 4927 * i40e_get_phy_address 4928 * @hw: pointer to the HW structure 4929 * @dev_num: PHY port num that address we want 4930 * 4931 * Gets PHY address for current port 4932 **/ 4933 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4934 { 4935 u8 port_num = hw->func_caps.mdio_port_num; 4936 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4937 4938 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4939 } 4940 4941 /** 4942 * i40e_blink_phy_link_led 4943 * @hw: pointer to the HW structure 4944 * @time: time how long led will blinks in secs 4945 * @interval: gap between LED on and off in msecs 4946 * 4947 * Blinks PHY link LED 4948 **/ 4949 
int i40e_blink_phy_link_led(struct i40e_hw *hw, 4950 u32 time, u32 interval) 4951 { 4952 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4953 u16 gpio_led_port; 4954 u8 phy_addr = 0; 4955 int status = 0; 4956 u16 led_ctl; 4957 u8 port_num; 4958 u16 led_reg; 4959 u32 i; 4960 4961 i = rd32(hw, I40E_PFGEN_PORTNUM); 4962 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4963 phy_addr = i40e_get_phy_address(hw, port_num); 4964 4965 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4966 led_addr++) { 4967 status = i40e_read_phy_register_clause45(hw, 4968 I40E_PHY_COM_REG_PAGE, 4969 led_addr, phy_addr, 4970 &led_reg); 4971 if (status) 4972 goto phy_blinking_end; 4973 led_ctl = led_reg; 4974 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4975 led_reg = 0; 4976 status = i40e_write_phy_register_clause45(hw, 4977 I40E_PHY_COM_REG_PAGE, 4978 led_addr, phy_addr, 4979 led_reg); 4980 if (status) 4981 goto phy_blinking_end; 4982 break; 4983 } 4984 } 4985 4986 if (time > 0 && interval > 0) { 4987 for (i = 0; i < time * 1000; i += interval) { 4988 status = i40e_read_phy_register_clause45(hw, 4989 I40E_PHY_COM_REG_PAGE, 4990 led_addr, phy_addr, &led_reg); 4991 if (status) 4992 goto restore_config; 4993 if (led_reg & I40E_PHY_LED_MANUAL_ON) 4994 led_reg = 0; 4995 else 4996 led_reg = I40E_PHY_LED_MANUAL_ON; 4997 status = i40e_write_phy_register_clause45(hw, 4998 I40E_PHY_COM_REG_PAGE, 4999 led_addr, phy_addr, led_reg); 5000 if (status) 5001 goto restore_config; 5002 msleep(interval); 5003 } 5004 } 5005 5006 restore_config: 5007 status = i40e_write_phy_register_clause45(hw, 5008 I40E_PHY_COM_REG_PAGE, 5009 led_addr, phy_addr, led_ctl); 5010 5011 phy_blinking_end: 5012 return status; 5013 } 5014 5015 /** 5016 * i40e_led_get_reg - read LED register 5017 * @hw: pointer to the HW structure 5018 * @led_addr: LED register address 5019 * @reg_val: read register value 5020 **/ 5021 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5022 u32 *reg_val) 5023 { 5024 u8 phy_addr = 0; 
5025 u8 port_num; 5026 int status; 5027 u32 i; 5028 5029 *reg_val = 0; 5030 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5031 status = 5032 i40e_aq_get_phy_register(hw, 5033 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5034 I40E_PHY_COM_REG_PAGE, true, 5035 I40E_PHY_LED_PROV_REG_1, 5036 reg_val, NULL); 5037 } else { 5038 i = rd32(hw, I40E_PFGEN_PORTNUM); 5039 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5040 phy_addr = i40e_get_phy_address(hw, port_num); 5041 status = i40e_read_phy_register_clause45(hw, 5042 I40E_PHY_COM_REG_PAGE, 5043 led_addr, phy_addr, 5044 (u16 *)reg_val); 5045 } 5046 return status; 5047 } 5048 5049 /** 5050 * i40e_led_set_reg - write LED register 5051 * @hw: pointer to the HW structure 5052 * @led_addr: LED register address 5053 * @reg_val: register value to write 5054 **/ 5055 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5056 u32 reg_val) 5057 { 5058 u8 phy_addr = 0; 5059 u8 port_num; 5060 int status; 5061 u32 i; 5062 5063 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5064 status = 5065 i40e_aq_set_phy_register(hw, 5066 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5067 I40E_PHY_COM_REG_PAGE, true, 5068 I40E_PHY_LED_PROV_REG_1, 5069 reg_val, NULL); 5070 } else { 5071 i = rd32(hw, I40E_PFGEN_PORTNUM); 5072 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5073 phy_addr = i40e_get_phy_address(hw, port_num); 5074 status = i40e_write_phy_register_clause45(hw, 5075 I40E_PHY_COM_REG_PAGE, 5076 led_addr, phy_addr, 5077 (u16)reg_val); 5078 } 5079 5080 return status; 5081 } 5082 5083 /** 5084 * i40e_led_get_phy - return current on/off mode 5085 * @hw: pointer to the hw struct 5086 * @led_addr: address of led register to use 5087 * @val: original value of register to use 5088 * 5089 **/ 5090 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5091 u16 *val) 5092 { 5093 u16 gpio_led_port; 5094 u8 phy_addr = 0; 5095 u32 reg_val_aq; 5096 int status = 0; 5097 u16 temp_addr; 5098 u16 reg_val; 5099 u8 port_num; 5100 u32 i; 5101 5102 
if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { 5103 status = 5104 i40e_aq_get_phy_register(hw, 5105 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5106 I40E_PHY_COM_REG_PAGE, true, 5107 I40E_PHY_LED_PROV_REG_1, 5108 ®_val_aq, NULL); 5109 if (status == 0) 5110 *val = (u16)reg_val_aq; 5111 return status; 5112 } 5113 temp_addr = I40E_PHY_LED_PROV_REG_1; 5114 i = rd32(hw, I40E_PFGEN_PORTNUM); 5115 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5116 phy_addr = i40e_get_phy_address(hw, port_num); 5117 5118 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5119 temp_addr++) { 5120 status = i40e_read_phy_register_clause45(hw, 5121 I40E_PHY_COM_REG_PAGE, 5122 temp_addr, phy_addr, 5123 ®_val); 5124 if (status) 5125 return status; 5126 *val = reg_val; 5127 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5128 *led_addr = temp_addr; 5129 break; 5130 } 5131 } 5132 return status; 5133 } 5134 5135 /** 5136 * i40e_led_set_phy 5137 * @hw: pointer to the HW structure 5138 * @on: true or false 5139 * @led_addr: address of led register to use 5140 * @mode: original val plus bit for set or ignore 5141 * 5142 * Set led's on or off when controlled by the PHY 5143 * 5144 **/ 5145 int i40e_led_set_phy(struct i40e_hw *hw, bool on, 5146 u16 led_addr, u32 mode) 5147 { 5148 u32 led_ctl = 0; 5149 u32 led_reg = 0; 5150 int status = 0; 5151 5152 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5153 if (status) 5154 return status; 5155 led_ctl = led_reg; 5156 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5157 led_reg = 0; 5158 status = i40e_led_set_reg(hw, led_addr, led_reg); 5159 if (status) 5160 return status; 5161 } 5162 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5163 if (status) 5164 goto restore_config; 5165 if (on) 5166 led_reg = I40E_PHY_LED_MANUAL_ON; 5167 else 5168 led_reg = 0; 5169 5170 status = i40e_led_set_reg(hw, led_addr, led_reg); 5171 if (status) 5172 goto restore_config; 5173 if (mode & I40E_PHY_LED_MODE_ORIG) { 5174 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5175 
status = i40e_led_set_reg(hw, led_addr, led_ctl); 5176 } 5177 return status; 5178 5179 restore_config: 5180 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5181 return status; 5182 } 5183 5184 /** 5185 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5186 * @hw: pointer to the hw struct 5187 * @reg_addr: register address 5188 * @reg_val: ptr to register value 5189 * @cmd_details: pointer to command details structure or NULL 5190 * 5191 * Use the firmware to read the Rx control register, 5192 * especially useful if the Rx unit is under heavy pressure 5193 **/ 5194 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5195 u32 reg_addr, u32 *reg_val, 5196 struct i40e_asq_cmd_details *cmd_details) 5197 { 5198 struct i40e_aq_desc desc; 5199 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5200 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5201 int status; 5202 5203 if (!reg_val) 5204 return -EINVAL; 5205 5206 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5207 5208 cmd_resp->address = cpu_to_le32(reg_addr); 5209 5210 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5211 5212 if (status == 0) 5213 *reg_val = le32_to_cpu(cmd_resp->value); 5214 5215 return status; 5216 } 5217 5218 /** 5219 * i40e_read_rx_ctl - read from an Rx control register 5220 * @hw: pointer to the hw struct 5221 * @reg_addr: register address 5222 **/ 5223 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5224 { 5225 bool use_register = false; 5226 int status = 0; 5227 int retry = 5; 5228 u32 val = 0; 5229 5230 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) 5231 use_register = true; 5232 5233 if (!use_register) { 5234 do_retry: 5235 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5236 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5237 usleep_range(1000, 2000); 5238 retry--; 5239 goto do_retry; 5240 } 5241 } 5242 5243 /* if the AQ access failed, try the 
old-fashioned way */ 5244 if (status || use_register) 5245 val = rd32(hw, reg_addr); 5246 5247 return val; 5248 } 5249 5250 /** 5251 * i40e_aq_rx_ctl_write_register 5252 * @hw: pointer to the hw struct 5253 * @reg_addr: register address 5254 * @reg_val: register value 5255 * @cmd_details: pointer to command details structure or NULL 5256 * 5257 * Use the firmware to write to an Rx control register, 5258 * especially useful if the Rx unit is under heavy pressure 5259 **/ 5260 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5261 u32 reg_addr, u32 reg_val, 5262 struct i40e_asq_cmd_details *cmd_details) 5263 { 5264 struct i40e_aq_desc desc; 5265 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5266 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5267 int status; 5268 5269 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5270 5271 cmd->address = cpu_to_le32(reg_addr); 5272 cmd->value = cpu_to_le32(reg_val); 5273 5274 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5275 5276 return status; 5277 } 5278 5279 /** 5280 * i40e_write_rx_ctl - write to an Rx control register 5281 * @hw: pointer to the hw struct 5282 * @reg_addr: register address 5283 * @reg_val: register value 5284 **/ 5285 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5286 { 5287 bool use_register = false; 5288 int status = 0; 5289 int retry = 5; 5290 5291 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722) 5292 use_register = true; 5293 5294 if (!use_register) { 5295 do_retry: 5296 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5297 reg_val, NULL); 5298 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5299 usleep_range(1000, 2000); 5300 retry--; 5301 goto do_retry; 5302 } 5303 } 5304 5305 /* if the AQ access failed, try the old-fashioned way */ 5306 if (status || use_register) 5307 wr32(hw, reg_addr, reg_val); 5308 } 5309 5310 /** 5311 * i40e_mdio_if_number_selection - MDIO I/F number selection 
5312 * @hw: pointer to the hw struct 5313 * @set_mdio: use MDIO I/F number specified by mdio_num 5314 * @mdio_num: MDIO I/F number 5315 * @cmd: pointer to PHY Register command structure 5316 **/ 5317 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5318 u8 mdio_num, 5319 struct i40e_aqc_phy_register_access *cmd) 5320 { 5321 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { 5322 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) 5323 cmd->cmd_flags |= 5324 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5325 ((mdio_num << 5326 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & 5327 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); 5328 else 5329 i40e_debug(hw, I40E_DEBUG_PHY, 5330 "MDIO I/F number selection not supported by current FW version.\n"); 5331 } 5332 } 5333 5334 /** 5335 * i40e_aq_set_phy_register_ext 5336 * @hw: pointer to the hw struct 5337 * @phy_select: select which phy should be accessed 5338 * @dev_addr: PHY device address 5339 * @page_change: flag to indicate if phy page should be updated 5340 * @set_mdio: use MDIO I/F number specified by mdio_num 5341 * @mdio_num: MDIO I/F number 5342 * @reg_addr: PHY register address 5343 * @reg_val: new register value 5344 * @cmd_details: pointer to command details structure or NULL 5345 * 5346 * Write the external PHY register. 5347 * NOTE: In common cases MDIO I/F number should not be changed, thats why you 5348 * may use simple wrapper i40e_aq_set_phy_register. 
5349 **/ 5350 int i40e_aq_set_phy_register_ext(struct i40e_hw *hw, 5351 u8 phy_select, u8 dev_addr, bool page_change, 5352 bool set_mdio, u8 mdio_num, 5353 u32 reg_addr, u32 reg_val, 5354 struct i40e_asq_cmd_details *cmd_details) 5355 { 5356 struct i40e_aq_desc desc; 5357 struct i40e_aqc_phy_register_access *cmd = 5358 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5359 int status; 5360 5361 i40e_fill_default_direct_cmd_desc(&desc, 5362 i40e_aqc_opc_set_phy_register); 5363 5364 cmd->phy_interface = phy_select; 5365 cmd->dev_address = dev_addr; 5366 cmd->reg_address = cpu_to_le32(reg_addr); 5367 cmd->reg_value = cpu_to_le32(reg_val); 5368 5369 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); 5370 5371 if (!page_change) 5372 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; 5373 5374 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5375 5376 return status; 5377 } 5378 5379 /** 5380 * i40e_aq_get_phy_register_ext 5381 * @hw: pointer to the hw struct 5382 * @phy_select: select which phy should be accessed 5383 * @dev_addr: PHY device address 5384 * @page_change: flag to indicate if phy page should be updated 5385 * @set_mdio: use MDIO I/F number specified by mdio_num 5386 * @mdio_num: MDIO I/F number 5387 * @reg_addr: PHY register address 5388 * @reg_val: read register value 5389 * @cmd_details: pointer to command details structure or NULL 5390 * 5391 * Read the external PHY register. 5392 * NOTE: In common cases MDIO I/F number should not be changed, thats why you 5393 * may use simple wrapper i40e_aq_get_phy_register. 
5394 **/ 5395 int i40e_aq_get_phy_register_ext(struct i40e_hw *hw, 5396 u8 phy_select, u8 dev_addr, bool page_change, 5397 bool set_mdio, u8 mdio_num, 5398 u32 reg_addr, u32 *reg_val, 5399 struct i40e_asq_cmd_details *cmd_details) 5400 { 5401 struct i40e_aq_desc desc; 5402 struct i40e_aqc_phy_register_access *cmd = 5403 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5404 int status; 5405 5406 i40e_fill_default_direct_cmd_desc(&desc, 5407 i40e_aqc_opc_get_phy_register); 5408 5409 cmd->phy_interface = phy_select; 5410 cmd->dev_address = dev_addr; 5411 cmd->reg_address = cpu_to_le32(reg_addr); 5412 5413 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); 5414 5415 if (!page_change) 5416 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; 5417 5418 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5419 if (!status) 5420 *reg_val = le32_to_cpu(cmd->reg_value); 5421 5422 return status; 5423 } 5424 5425 /** 5426 * i40e_aq_write_ddp - Write dynamic device personalization (ddp) 5427 * @hw: pointer to the hw struct 5428 * @buff: command buffer (size in bytes = buff_size) 5429 * @buff_size: buffer size in bytes 5430 * @track_id: package tracking id 5431 * @error_offset: returns error offset 5432 * @error_info: returns error information 5433 * @cmd_details: pointer to command details structure or NULL 5434 **/ 5435 int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, 5436 u16 buff_size, u32 track_id, 5437 u32 *error_offset, u32 *error_info, 5438 struct i40e_asq_cmd_details *cmd_details) 5439 { 5440 struct i40e_aq_desc desc; 5441 struct i40e_aqc_write_personalization_profile *cmd = 5442 (struct i40e_aqc_write_personalization_profile *) 5443 &desc.params.raw; 5444 struct i40e_aqc_write_ddp_resp *resp; 5445 int status; 5446 5447 i40e_fill_default_direct_cmd_desc(&desc, 5448 i40e_aqc_opc_write_personalization_profile); 5449 5450 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5451 if (buff_size > I40E_AQ_LARGE_BUF) 5452 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5453 5454 desc.datalen = cpu_to_le16(buff_size); 5455 5456 cmd->profile_track_id = cpu_to_le32(track_id); 5457 5458 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5459 if (!status) { 5460 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5461 if (error_offset) 5462 *error_offset = le32_to_cpu(resp->error_offset); 5463 if (error_info) 5464 *error_info = le32_to_cpu(resp->error_info); 5465 } 5466 5467 return status; 5468 } 5469 5470 /** 5471 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5472 * @hw: pointer to the hw struct 5473 * @buff: command buffer (size in bytes = buff_size) 5474 * @buff_size: buffer size in bytes 5475 * @flags: AdminQ command flags 5476 * @cmd_details: pointer to command details structure or NULL 5477 **/ 5478 int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5479 u16 buff_size, u8 flags, 5480 struct i40e_asq_cmd_details *cmd_details) 5481 { 5482 struct i40e_aq_desc desc; 5483 struct i40e_aqc_get_applied_profiles *cmd = 5484 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5485 int status; 5486 5487 i40e_fill_default_direct_cmd_desc(&desc, 5488 i40e_aqc_opc_get_personalization_profile_list); 5489 5490 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5491 if (buff_size > I40E_AQ_LARGE_BUF) 5492 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5493 desc.datalen = cpu_to_le16(buff_size); 5494 5495 cmd->flags = flags; 5496 5497 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5498 5499 return status; 5500 } 5501 5502 /** 5503 * i40e_find_segment_in_package 5504 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) 5505 * @pkg_hdr: pointer to the package header to be searched 5506 * 5507 * This function searches a package file for a particular segment type. On 5508 * success it returns a pointer to the segment header, otherwise it will 5509 * return NULL. 
5510 **/ 5511 struct i40e_generic_seg_header * 5512 i40e_find_segment_in_package(u32 segment_type, 5513 struct i40e_package_header *pkg_hdr) 5514 { 5515 struct i40e_generic_seg_header *segment; 5516 u32 i; 5517 5518 /* Search all package segments for the requested segment type */ 5519 for (i = 0; i < pkg_hdr->segment_count; i++) { 5520 segment = 5521 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5522 pkg_hdr->segment_offset[i]); 5523 5524 if (segment->type == segment_type) 5525 return segment; 5526 } 5527 5528 return NULL; 5529 } 5530 5531 /* Get section table in profile */ 5532 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5533 do { \ 5534 struct i40e_profile_segment *p = (profile); \ 5535 u32 count; \ 5536 u32 *nvm; \ 5537 count = p->device_table_count; \ 5538 nvm = (u32 *)&p->device_table[count]; \ 5539 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5540 } while (0) 5541 5542 /* Get section header in profile */ 5543 #define I40E_SECTION_HEADER(profile, offset) \ 5544 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5545 5546 /** 5547 * i40e_find_section_in_profile 5548 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5549 * @profile: pointer to the i40e segment header to be searched 5550 * 5551 * This function searches i40e segment for a particular section type. On 5552 * success it returns a pointer to the section header, otherwise it will 5553 * return NULL. 
5554 **/ 5555 struct i40e_profile_section_header * 5556 i40e_find_section_in_profile(u32 section_type, 5557 struct i40e_profile_segment *profile) 5558 { 5559 struct i40e_profile_section_header *sec; 5560 struct i40e_section_table *sec_tbl; 5561 u32 sec_off; 5562 u32 i; 5563 5564 if (profile->header.type != SEGMENT_TYPE_I40E) 5565 return NULL; 5566 5567 I40E_SECTION_TABLE(profile, sec_tbl); 5568 5569 for (i = 0; i < sec_tbl->section_count; i++) { 5570 sec_off = sec_tbl->section_offset[i]; 5571 sec = I40E_SECTION_HEADER(profile, sec_off); 5572 if (sec->section.type == section_type) 5573 return sec; 5574 } 5575 5576 return NULL; 5577 } 5578 5579 /** 5580 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5581 * @hw: pointer to the hw struct 5582 * @aq: command buffer containing all data to execute AQ 5583 **/ 5584 static int i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5585 struct i40e_profile_aq_section *aq) 5586 { 5587 struct i40e_aq_desc desc; 5588 u8 *msg = NULL; 5589 u16 msglen; 5590 int status; 5591 5592 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5593 desc.flags |= cpu_to_le16(aq->flags); 5594 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5595 5596 msglen = aq->datalen; 5597 if (msglen) { 5598 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5599 I40E_AQ_FLAG_RD)); 5600 if (msglen > I40E_AQ_LARGE_BUF) 5601 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5602 desc.datalen = cpu_to_le16(msglen); 5603 msg = &aq->data[0]; 5604 } 5605 5606 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5607 5608 if (status) { 5609 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5610 "unable to exec DDP AQ opcode %u, error %d\n", 5611 aq->opcode, status); 5612 return status; 5613 } 5614 5615 /* copy returned desc to aq_buf */ 5616 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); 5617 5618 return 0; 5619 } 5620 5621 /** 5622 * i40e_validate_profile 5623 * @hw: pointer to the hardware structure 5624 * @profile: pointer to the profile 
segment of the package to be validated 5625 * @track_id: package tracking id 5626 * @rollback: flag if the profile is for rollback. 5627 * 5628 * Validates supported devices and profile's sections. 5629 */ 5630 static int 5631 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5632 u32 track_id, bool rollback) 5633 { 5634 struct i40e_profile_section_header *sec = NULL; 5635 struct i40e_section_table *sec_tbl; 5636 u32 vendor_dev_id; 5637 int status = 0; 5638 u32 dev_cnt; 5639 u32 sec_off; 5640 u32 i; 5641 5642 if (track_id == I40E_DDP_TRACKID_INVALID) { 5643 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5644 return -EOPNOTSUPP; 5645 } 5646 5647 dev_cnt = profile->device_table_count; 5648 for (i = 0; i < dev_cnt; i++) { 5649 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5650 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5651 hw->device_id == (vendor_dev_id & 0xFFFF)) 5652 break; 5653 } 5654 if (dev_cnt && i == dev_cnt) { 5655 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5656 "Device doesn't support DDP\n"); 5657 return -ENODEV; 5658 } 5659 5660 I40E_SECTION_TABLE(profile, sec_tbl); 5661 5662 /* Validate sections types */ 5663 for (i = 0; i < sec_tbl->section_count; i++) { 5664 sec_off = sec_tbl->section_offset[i]; 5665 sec = I40E_SECTION_HEADER(profile, sec_off); 5666 if (rollback) { 5667 if (sec->section.type == SECTION_TYPE_MMIO || 5668 sec->section.type == SECTION_TYPE_AQ || 5669 sec->section.type == SECTION_TYPE_RB_AQ) { 5670 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5671 "Not a roll-back package\n"); 5672 return -EOPNOTSUPP; 5673 } 5674 } else { 5675 if (sec->section.type == SECTION_TYPE_RB_AQ || 5676 sec->section.type == SECTION_TYPE_RB_MMIO) { 5677 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5678 "Not an original package\n"); 5679 return -EOPNOTSUPP; 5680 } 5681 } 5682 } 5683 5684 return status; 5685 } 5686 5687 /** 5688 * i40e_write_profile 5689 * @hw: pointer to the hardware structure 5690 * @profile: pointer to the profile 
segment of the package to be downloaded 5691 * @track_id: package tracking id 5692 * 5693 * Handles the download of a complete package. 5694 */ 5695 int 5696 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5697 u32 track_id) 5698 { 5699 struct i40e_profile_section_header *sec = NULL; 5700 struct i40e_profile_aq_section *ddp_aq; 5701 struct i40e_section_table *sec_tbl; 5702 u32 offset = 0, info = 0; 5703 u32 section_size = 0; 5704 int status = 0; 5705 u32 sec_off; 5706 u32 i; 5707 5708 status = i40e_validate_profile(hw, profile, track_id, false); 5709 if (status) 5710 return status; 5711 5712 I40E_SECTION_TABLE(profile, sec_tbl); 5713 5714 for (i = 0; i < sec_tbl->section_count; i++) { 5715 sec_off = sec_tbl->section_offset[i]; 5716 sec = I40E_SECTION_HEADER(profile, sec_off); 5717 /* Process generic admin command */ 5718 if (sec->section.type == SECTION_TYPE_AQ) { 5719 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5720 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5721 if (status) { 5722 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5723 "Failed to execute aq: section %d, opcode %u\n", 5724 i, ddp_aq->opcode); 5725 break; 5726 } 5727 sec->section.type = SECTION_TYPE_RB_AQ; 5728 } 5729 5730 /* Skip any non-mmio sections */ 5731 if (sec->section.type != SECTION_TYPE_MMIO) 5732 continue; 5733 5734 section_size = sec->section.size + 5735 sizeof(struct i40e_profile_section_header); 5736 5737 /* Write MMIO section */ 5738 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5739 track_id, &offset, &info, NULL); 5740 if (status) { 5741 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5742 "Failed to write profile: section %d, offset %d, info %d\n", 5743 i, offset, info); 5744 break; 5745 } 5746 } 5747 return status; 5748 } 5749 5750 /** 5751 * i40e_rollback_profile 5752 * @hw: pointer to the hardware structure 5753 * @profile: pointer to the profile segment of the package to be removed 5754 * @track_id: package tracking id 5755 * 5756 * Rolls back 
previously loaded package.
 */
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size;
	int status;
	u32 sec_off;
	int idx;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Rollback applies the sections in reverse order of the download */
	for (idx = sec_tbl->section_count - 1; idx >= 0; idx--) {
		sec_off = sec_tbl->section_offset[idx];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Only roll-back MMIO sections are written back */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   idx, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
5809 */ 5810 int 5811 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5812 struct i40e_profile_segment *profile, 5813 u8 *profile_info_sec, u32 track_id) 5814 { 5815 struct i40e_profile_section_header *sec = NULL; 5816 struct i40e_profile_info *pinfo; 5817 u32 offset = 0, info = 0; 5818 int status = 0; 5819 5820 sec = (struct i40e_profile_section_header *)profile_info_sec; 5821 sec->tbl_size = 1; 5822 sec->data_end = sizeof(struct i40e_profile_section_header) + 5823 sizeof(struct i40e_profile_info); 5824 sec->section.type = SECTION_TYPE_INFO; 5825 sec->section.offset = sizeof(struct i40e_profile_section_header); 5826 sec->section.size = sizeof(struct i40e_profile_info); 5827 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5828 sec->section.offset); 5829 pinfo->track_id = track_id; 5830 pinfo->version = profile->version; 5831 pinfo->op = I40E_DDP_ADD_TRACKID; 5832 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5833 5834 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5835 track_id, &offset, &info, NULL); 5836 5837 return status; 5838 } 5839 5840 /** 5841 * i40e_aq_add_cloud_filters 5842 * @hw: pointer to the hardware structure 5843 * @seid: VSI seid to add cloud filters from 5844 * @filters: Buffer which contains the filters to be added 5845 * @filter_count: number of filters contained in the buffer 5846 * 5847 * Set the cloud filters for a given VSI. The contents of the 5848 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5849 * of the function. 
5850 * 5851 **/ 5852 int 5853 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5854 struct i40e_aqc_cloud_filters_element_data *filters, 5855 u8 filter_count) 5856 { 5857 struct i40e_aq_desc desc; 5858 struct i40e_aqc_add_remove_cloud_filters *cmd = 5859 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5860 u16 buff_len; 5861 int status; 5862 5863 i40e_fill_default_direct_cmd_desc(&desc, 5864 i40e_aqc_opc_add_cloud_filters); 5865 5866 buff_len = filter_count * sizeof(*filters); 5867 desc.datalen = cpu_to_le16(buff_len); 5868 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5869 cmd->num_filters = filter_count; 5870 cmd->seid = cpu_to_le16(seid); 5871 5872 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5873 5874 return status; 5875 } 5876 5877 /** 5878 * i40e_aq_add_cloud_filters_bb 5879 * @hw: pointer to the hardware structure 5880 * @seid: VSI seid to add cloud filters from 5881 * @filters: Buffer which contains the filters in big buffer to be added 5882 * @filter_count: number of filters contained in the buffer 5883 * 5884 * Set the big buffer cloud filters for a given VSI. The contents of the 5885 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5886 * function. 
5887 * 5888 **/ 5889 int 5890 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5891 struct i40e_aqc_cloud_filters_element_bb *filters, 5892 u8 filter_count) 5893 { 5894 struct i40e_aq_desc desc; 5895 struct i40e_aqc_add_remove_cloud_filters *cmd = 5896 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5897 u16 buff_len; 5898 int status; 5899 int i; 5900 5901 i40e_fill_default_direct_cmd_desc(&desc, 5902 i40e_aqc_opc_add_cloud_filters); 5903 5904 buff_len = filter_count * sizeof(*filters); 5905 desc.datalen = cpu_to_le16(buff_len); 5906 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5907 cmd->num_filters = filter_count; 5908 cmd->seid = cpu_to_le16(seid); 5909 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5910 5911 for (i = 0; i < filter_count; i++) { 5912 u16 tnl_type; 5913 u32 ti; 5914 5915 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5916 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5917 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5918 5919 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5920 * one more byte further than normally used for Tenant ID in 5921 * other tunnel types. 5922 */ 5923 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5924 ti = le32_to_cpu(filters[i].element.tenant_id); 5925 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5926 } 5927 } 5928 5929 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5930 5931 return status; 5932 } 5933 5934 /** 5935 * i40e_aq_rem_cloud_filters 5936 * @hw: pointer to the hardware structure 5937 * @seid: VSI seid to remove cloud filters from 5938 * @filters: Buffer which contains the filters to be removed 5939 * @filter_count: number of filters contained in the buffer 5940 * 5941 * Remove the cloud filters for a given VSI. The contents of the 5942 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5943 * of the function. 
5944 * 5945 **/ 5946 int 5947 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 5948 struct i40e_aqc_cloud_filters_element_data *filters, 5949 u8 filter_count) 5950 { 5951 struct i40e_aq_desc desc; 5952 struct i40e_aqc_add_remove_cloud_filters *cmd = 5953 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5954 u16 buff_len; 5955 int status; 5956 5957 i40e_fill_default_direct_cmd_desc(&desc, 5958 i40e_aqc_opc_remove_cloud_filters); 5959 5960 buff_len = filter_count * sizeof(*filters); 5961 desc.datalen = cpu_to_le16(buff_len); 5962 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5963 cmd->num_filters = filter_count; 5964 cmd->seid = cpu_to_le16(seid); 5965 5966 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5967 5968 return status; 5969 } 5970 5971 /** 5972 * i40e_aq_rem_cloud_filters_bb 5973 * @hw: pointer to the hardware structure 5974 * @seid: VSI seid to remove cloud filters from 5975 * @filters: Buffer which contains the filters in big buffer to be removed 5976 * @filter_count: number of filters contained in the buffer 5977 * 5978 * Remove the big buffer cloud filters for a given VSI. The contents of the 5979 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5980 * function. 
5981 * 5982 **/ 5983 int 5984 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5985 struct i40e_aqc_cloud_filters_element_bb *filters, 5986 u8 filter_count) 5987 { 5988 struct i40e_aq_desc desc; 5989 struct i40e_aqc_add_remove_cloud_filters *cmd = 5990 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5991 u16 buff_len; 5992 int status; 5993 int i; 5994 5995 i40e_fill_default_direct_cmd_desc(&desc, 5996 i40e_aqc_opc_remove_cloud_filters); 5997 5998 buff_len = filter_count * sizeof(*filters); 5999 desc.datalen = cpu_to_le16(buff_len); 6000 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 6001 cmd->num_filters = filter_count; 6002 cmd->seid = cpu_to_le16(seid); 6003 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 6004 6005 for (i = 0; i < filter_count; i++) { 6006 u16 tnl_type; 6007 u32 ti; 6008 6009 tnl_type = (le16_to_cpu(filters[i].element.flags) & 6010 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 6011 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 6012 6013 /* Due to hardware eccentricities, the VNI for Geneve is shifted 6014 * one more byte further than normally used for Tenant ID in 6015 * other tunnel types. 6016 */ 6017 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 6018 ti = le32_to_cpu(filters[i].element.tenant_id); 6019 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 6020 } 6021 } 6022 6023 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 6024 6025 return status; 6026 } 6027