// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access. When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter. Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
	/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct rw_semaphore lock; /* Protects entries */
	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag	1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
	/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
}

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
}

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}
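
/* Worked example of the filter ID encoding above (illustrative only):
 * with HUNT_FILTER_TBL_ROWS = 8192, a filter stored at software table
 * index 5 and inserted at priority 1 gets the unsafe ID
 * 1 * 8192 * 2 + 5 = 16389; efx_ef10_filter_get_unsafe_id() then
 * recovers index 5 (16389 & 8191) and efx_ef10_filter_get_unsafe_pri()
 * recovers priority 1 (16389 / 16384).
 */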

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
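
/* Note on the check above (interpretation, not from the register spec in
 * this file): EFX_WORD_1 of ER_DZ_BIU_MC_SFT_STATUS is compared against
 * the magic value 0xb007; only when that marker is present is EFX_WORD_0
 * treated as a valid warm boot count, otherwise the MC is assumed not
 * ready and -EIO is returned.
 */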

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		switch (vi_window_mode) {
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
			efx->vi_stride = 8192;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
			efx->vi_stride = 16384;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
			efx->vi_stride = 65536;
			break;
		default:
			netif_err(efx, probe, efx->net_dev,
				  "Unrecognised VI window mode %d\n",
				  vi_window_mode);
			return -EIO;
		}
		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
			  efx->vi_stride);
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}
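
/* Illustrative note on vi_stride (not from the firmware documentation in
 * this file): it is the size of each VI's register window within the
 * memory BAR, so with the 8K mode selected above VI n starts at BAR
 * offset n * 8192; the 16K and 64K modes simply widen that per-VI window.
 * The memory-map sizing in efx_ef10_dimension_resources() scales with
 * this value.
 */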

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}
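
/* Worked example for the fallback path above (illustrative): the
 * "1536 cycles" computation implies GET_CLOCK reports the system clock
 * in MHz, so a 200 MHz sysclk would give quantum = 1536000 / 200 =
 * 7680 ns, and timer_max_ns = timer_period_max * 7680.
 */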

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function. We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need two TX queues
	 * per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (efx->vi_stride * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}
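
	/* Illustrative sizing example (numbers are hypothetical): a 16 MiB
	 * VI region with vi_stride = 8192 holds 2048 VIs; dividing by the
	 * EFX_TXQ_TYPES queues needed per channel and capping at
	 * EFX_MAX_CHANNELS yields the max_channels value computed above.
	 */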

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic. It is added automatically if the 8021q module is loaded,
	 * but we can't rely on that since the module may not be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio)
			continue;
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;
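
			/* Illustrative example (hypothetical sizes): with
			 * piobuf_size = 2048 and efx_piobuf_size = 256, the
			 * last TX channel maps to offset 0 of PIO buffer 0,
			 * the one before it to offset 256, and so on, so up
			 * to eight queues share each PIO buffer.
			 */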

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue. We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ?
0 : -ENOBUFS; 1037 } 1038 1039 static int efx_ef10_link_piobufs(struct efx_nic *efx) 1040 { 1041 return 0; 1042 } 1043 1044 static void efx_ef10_free_piobufs(struct efx_nic *efx) 1045 { 1046 } 1047 1048 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 1049 { 1050 } 1051 1052 #endif /* EFX_USE_PIO */ 1053 1054 static void efx_ef10_remove(struct efx_nic *efx) 1055 { 1056 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1057 int rc; 1058 1059 #ifdef CONFIG_SFC_SRIOV 1060 struct efx_ef10_nic_data *nic_data_pf; 1061 struct pci_dev *pci_dev_pf; 1062 struct efx_nic *efx_pf; 1063 struct ef10_vf *vf; 1064 1065 if (efx->pci_dev->is_virtfn) { 1066 pci_dev_pf = efx->pci_dev->physfn; 1067 if (pci_dev_pf) { 1068 efx_pf = pci_get_drvdata(pci_dev_pf); 1069 nic_data_pf = efx_pf->nic_data; 1070 vf = nic_data_pf->vf + nic_data->vf_index; 1071 vf->efx = NULL; 1072 } else 1073 netif_info(efx, drv, efx->net_dev, 1074 "Could not get the PF id from VF\n"); 1075 } 1076 #endif 1077 1078 efx_ef10_cleanup_vlans(efx); 1079 mutex_destroy(&nic_data->vlan_lock); 1080 1081 efx_ptp_remove(efx); 1082 1083 efx_mcdi_mon_remove(efx); 1084 1085 efx_ef10_rx_free_indir_table(efx); 1086 1087 if (nic_data->wc_membase) 1088 iounmap(nic_data->wc_membase); 1089 1090 rc = efx_ef10_free_vis(efx); 1091 WARN_ON(rc != 0); 1092 1093 if (!nic_data->must_restore_piobufs) 1094 efx_ef10_free_piobufs(efx); 1095 1096 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 1097 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 1098 1099 efx_mcdi_detach(efx); 1100 1101 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 1102 mutex_lock(&nic_data->udp_tunnels_lock); 1103 (void)efx_ef10_set_udp_tnl_ports(efx, true); 1104 mutex_unlock(&nic_data->udp_tunnels_lock); 1105 1106 mutex_destroy(&nic_data->udp_tunnels_lock); 1107 1108 efx_mcdi_fini(efx); 1109 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 1110 kfree(nic_data); 1111 } 1112 1113 static int efx_ef10_probe_pf(struct efx_nic *efx) 1114 { 1115 return efx_ef10_probe(efx); 1116 } 1117 1118 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, 1119 u32 *port_flags, u32 *vadaptor_flags, 1120 unsigned int *vlan_tags) 1121 { 1122 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1123 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); 1124 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); 1125 size_t outlen; 1126 int rc; 1127 1128 if (nic_data->datapath_caps & 1129 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { 1130 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, 1131 port_id); 1132 1133 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), 1134 outbuf, sizeof(outbuf), &outlen); 1135 if (rc) 1136 return rc; 1137 1138 if (outlen < sizeof(outbuf)) { 1139 rc = -EIO; 1140 return rc; 1141 } 1142 } 1143 1144 if (port_flags) 1145 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); 1146 if (vadaptor_flags) 1147 *vadaptor_flags = 1148 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); 1149 if (vlan_tags) 1150 *vlan_tags = 1151 MCDI_DWORD(outbuf, 1152 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); 1153 1154 return 0; 1155 } 1156 1157 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 1158 { 1159 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 1160 1161 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 1162 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 1163 NULL, 0, NULL); 1164 } 1165 1166 int 
efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 1167 { 1168 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 1169 1170 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 1171 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 1172 NULL, 0, NULL); 1173 } 1174 1175 int efx_ef10_vport_add_mac(struct efx_nic *efx, 1176 unsigned int port_id, u8 *mac) 1177 { 1178 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 1179 1180 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 1181 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 1182 1183 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 1184 sizeof(inbuf), NULL, 0, NULL); 1185 } 1186 1187 int efx_ef10_vport_del_mac(struct efx_nic *efx, 1188 unsigned int port_id, u8 *mac) 1189 { 1190 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 1191 1192 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 1193 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 1194 1195 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 1196 sizeof(inbuf), NULL, 0, NULL); 1197 } 1198 1199 #ifdef CONFIG_SFC_SRIOV 1200 static int efx_ef10_probe_vf(struct efx_nic *efx) 1201 { 1202 int rc; 1203 struct pci_dev *pci_dev_pf; 1204 1205 /* If the parent PF has no VF data structure, it doesn't know about this 1206 * VF so fail probe. The VF needs to be re-created. This can happen 1207 * if the PF driver is unloaded while the VF is assigned to a guest. 1208 */ 1209 pci_dev_pf = efx->pci_dev->physfn; 1210 if (pci_dev_pf) { 1211 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 1212 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; 1213 1214 if (!nic_data_pf->vf) { 1215 netif_info(efx, drv, efx->net_dev, 1216 "The VF cannot link to its parent PF; " 1217 "please destroy and re-create the VF\n"); 1218 return -EBUSY; 1219 } 1220 } 1221 1222 rc = efx_ef10_probe(efx); 1223 if (rc) 1224 return rc; 1225 1226 rc = efx_ef10_get_vf_index(efx); 1227 if (rc) 1228 goto fail; 1229 1230 if (efx->pci_dev->is_virtfn) { 1231 if (efx->pci_dev->physfn) { 1232 struct efx_nic *efx_pf = 1233 pci_get_drvdata(efx->pci_dev->physfn); 1234 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; 1235 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1236 1237 nic_data_p->vf[nic_data->vf_index].efx = efx; 1238 nic_data_p->vf[nic_data->vf_index].pci_dev = 1239 efx->pci_dev; 1240 } else 1241 netif_info(efx, drv, efx->net_dev, 1242 "Could not get the PF id from VF\n"); 1243 } 1244 1245 return 0; 1246 1247 fail: 1248 efx_ef10_remove(efx); 1249 return rc; 1250 } 1251 #else 1252 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) 1253 { 1254 return 0; 1255 } 1256 #endif 1257 1258 static int efx_ef10_alloc_vis(struct efx_nic *efx, 1259 unsigned int min_vis, unsigned int max_vis) 1260 { 1261 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); 1262 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); 1263 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1264 size_t outlen; 1265 int rc; 1266 1267 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); 1268 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); 1269 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), 1270 outbuf, sizeof(outbuf), &outlen); 1271 if (rc != 0) 1272 return rc; 1273 1274 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) 1275 return -EIO; 1276 1277 netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", 1278 MCDI_DWORD(outbuf, 
ALLOC_VIS_OUT_VI_BASE)); 1279 1280 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); 1281 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); 1282 return 0; 1283 } 1284 1285 /* Note that the failure path of this function does not free 1286 * resources, as this will be done by efx_ef10_remove(). 1287 */ 1288 static int efx_ef10_dimension_resources(struct efx_nic *efx) 1289 { 1290 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1291 unsigned int uc_mem_map_size, wc_mem_map_size; 1292 unsigned int min_vis = max(EFX_TXQ_TYPES, 1293 efx_separate_tx_channels ? 2 : 1); 1294 unsigned int channel_vis, pio_write_vi_base, max_vis; 1295 void __iomem *membase; 1296 int rc; 1297 1298 channel_vis = max(efx->n_channels, 1299 (efx->n_tx_channels + efx->n_extra_tx_channels) * 1300 EFX_TXQ_TYPES); 1301 1302 #ifdef EFX_USE_PIO 1303 /* Try to allocate PIO buffers if wanted and if the full 1304 * number of PIO buffers would be sufficient to allocate one 1305 * copy-buffer per TX channel. Failure is non-fatal, as there 1306 * are only a small number of PIO buffers shared between all 1307 * functions of the controller. 1308 */ 1309 if (efx_piobuf_size != 0 && 1310 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= 1311 efx->n_tx_channels) { 1312 unsigned int n_piobufs = 1313 DIV_ROUND_UP(efx->n_tx_channels, 1314 nic_data->piobuf_size / efx_piobuf_size); 1315 1316 rc = efx_ef10_alloc_piobufs(efx, n_piobufs); 1317 if (rc == -ENOSPC) 1318 netif_dbg(efx, probe, efx->net_dev, 1319 "out of PIO buffers; cannot allocate more\n"); 1320 else if (rc == -EPERM) 1321 netif_dbg(efx, probe, efx->net_dev, 1322 "not permitted to allocate PIO buffers\n"); 1323 else if (rc) 1324 netif_err(efx, probe, efx->net_dev, 1325 "failed to allocate PIO buffers (%d)\n", rc); 1326 else 1327 netif_dbg(efx, probe, efx->net_dev, 1328 "allocated %u PIO buffers\n", n_piobufs); 1329 } 1330 #else 1331 nic_data->n_piobufs = 0; 1332 #endif 1333 1334 /* PIO buffers should be mapped with write-combining enabled, 1335 * and we want to make single UC and WC mappings rather than 1336 * several of each (in fact that's the only option if host 1337 * page size is >4K). So we may allocate some extra VIs just 1338 * for writing PIO buffers through. 1339 * 1340 * The UC mapping contains (channel_vis - 1) complete VIs and the 1341 * first 4K of the next VI. Then the WC mapping begins with 1342 * the remainder of this last VI. 1343 */ 1344 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + 1345 ER_DZ_TX_PIOBUF); 1346 if (nic_data->n_piobufs) { 1347 /* pio_write_vi_base rounds down to give the number of complete 1348 * VIs inside the UC mapping. 1349 */ 1350 pio_write_vi_base = uc_mem_map_size / efx->vi_stride; 1351 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 1352 nic_data->n_piobufs) * 1353 efx->vi_stride) - 1354 uc_mem_map_size); 1355 max_vis = pio_write_vi_base + nic_data->n_piobufs; 1356 } else { 1357 pio_write_vi_base = 0; 1358 wc_mem_map_size = 0; 1359 max_vis = channel_vis; 1360 } 1361 1362 /* In case the last attached driver failed to free VIs, do it now */ 1363 rc = efx_ef10_free_vis(efx); 1364 if (rc != 0) 1365 return rc; 1366 1367 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); 1368 if (rc != 0) 1369 return rc; 1370 1371 if (nic_data->n_allocated_vis < channel_vis) { 1372 netif_info(efx, drv, efx->net_dev, 1373 "Could not allocate enough VIs to satisfy RSS" 1374 " requirements. Performance may not be optimal.\n"); 1375 /* We didn't get the VIs to populate our channels. 
1376 * We could keep what we got but then we'd have more 1377 * interrupts than we need. 1378 * Instead calculate new max_channels and restart 1379 */ 1380 efx->max_channels = nic_data->n_allocated_vis; 1381 efx->max_tx_channels = 1382 nic_data->n_allocated_vis / EFX_TXQ_TYPES; 1383 1384 efx_ef10_free_vis(efx); 1385 return -EAGAIN; 1386 } 1387 1388 /* If we didn't get enough VIs to map all the PIO buffers, free the 1389 * PIO buffers 1390 */ 1391 if (nic_data->n_piobufs && 1392 nic_data->n_allocated_vis < 1393 pio_write_vi_base + nic_data->n_piobufs) { 1394 netif_dbg(efx, probe, efx->net_dev, 1395 "%u VIs are not sufficient to map %u PIO buffers\n", 1396 nic_data->n_allocated_vis, nic_data->n_piobufs); 1397 efx_ef10_free_piobufs(efx); 1398 } 1399 1400 /* Shrink the original UC mapping of the memory BAR */ 1401 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); 1402 if (!membase) { 1403 netif_err(efx, probe, efx->net_dev, 1404 "could not shrink memory BAR to %x\n", 1405 uc_mem_map_size); 1406 return -ENOMEM; 1407 } 1408 iounmap(efx->membase); 1409 efx->membase = membase; 1410 1411 /* Set up the WC mapping if needed */ 1412 if (wc_mem_map_size) { 1413 nic_data->wc_membase = ioremap_wc(efx->membase_phys + 1414 uc_mem_map_size, 1415 wc_mem_map_size); 1416 if (!nic_data->wc_membase) { 1417 netif_err(efx, probe, efx->net_dev, 1418 "could not allocate WC mapping of size %x\n", 1419 wc_mem_map_size); 1420 return -ENOMEM; 1421 } 1422 nic_data->pio_write_vi_base = pio_write_vi_base; 1423 nic_data->pio_write_base = 1424 nic_data->wc_membase + 1425 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - 1426 uc_mem_map_size); 1427 1428 rc = efx_ef10_link_piobufs(efx); 1429 if (rc) 1430 efx_ef10_free_piobufs(efx); 1431 } 1432 1433 netif_dbg(efx, probe, efx->net_dev, 1434 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", 1435 &efx->membase_phys, efx->membase, uc_mem_map_size, 1436 nic_data->wc_membase, wc_mem_map_size); 1437 1438 return 0; 1439 } 1440 1441 static int efx_ef10_init_nic(struct efx_nic *efx) 1442 { 1443 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1444 int rc; 1445 1446 if (nic_data->must_check_datapath_caps) { 1447 rc = efx_ef10_init_datapath_caps(efx); 1448 if (rc) 1449 return rc; 1450 nic_data->must_check_datapath_caps = false; 1451 } 1452 1453 if (nic_data->must_realloc_vis) { 1454 /* We cannot let the number of VIs change now */ 1455 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 1456 nic_data->n_allocated_vis); 1457 if (rc) 1458 return rc; 1459 nic_data->must_realloc_vis = false; 1460 } 1461 1462 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { 1463 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); 1464 if (rc == 0) { 1465 rc = efx_ef10_link_piobufs(efx); 1466 if (rc) 1467 efx_ef10_free_piobufs(efx); 1468 } 1469 1470 /* Log an error on failure, but this is non-fatal. 1471 * Permission errors are less important - we've presumably 1472 * had the PIO buffer licence removed. 
1473 */ 1474 if (rc == -EPERM) 1475 netif_dbg(efx, drv, efx->net_dev, 1476 "not permitted to restore PIO buffers\n"); 1477 else if (rc) 1478 netif_err(efx, drv, efx->net_dev, 1479 "failed to restore PIO buffers (%d)\n", rc); 1480 nic_data->must_restore_piobufs = false; 1481 } 1482 1483 /* don't fail init if RSS setup doesn't work */ 1484 rc = efx->type->rx_push_rss_config(efx, false, 1485 efx->rss_context.rx_indir_table, NULL); 1486 1487 return 0; 1488 } 1489 1490 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) 1491 { 1492 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1493 #ifdef CONFIG_SFC_SRIOV 1494 unsigned int i; 1495 #endif 1496 1497 /* All our allocations have been reset */ 1498 nic_data->must_realloc_vis = true; 1499 nic_data->must_restore_rss_contexts = true; 1500 nic_data->must_restore_filters = true; 1501 nic_data->must_restore_piobufs = true; 1502 efx_ef10_forget_old_piobufs(efx); 1503 efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID; 1504 1505 /* Driver-created vswitches and vports must be re-created */ 1506 nic_data->must_probe_vswitching = true; 1507 nic_data->vport_id = EVB_PORT_ID_ASSIGNED; 1508 #ifdef CONFIG_SFC_SRIOV 1509 if (nic_data->vf) 1510 for (i = 0; i < efx->vf_count; i++) 1511 nic_data->vf[i].vport_id = 0; 1512 #endif 1513 } 1514 1515 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) 1516 { 1517 if (reason == RESET_TYPE_MC_FAILURE) 1518 return RESET_TYPE_DATAPATH; 1519 1520 return efx_mcdi_map_reset_reason(reason); 1521 } 1522 1523 static int efx_ef10_map_reset_flags(u32 *flags) 1524 { 1525 enum { 1526 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << 1527 ETH_RESET_SHARED_SHIFT), 1528 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 1529 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 1530 ETH_RESET_PHY | ETH_RESET_MGMT) << 1531 ETH_RESET_SHARED_SHIFT) 1532 }; 1533 1534 /* We assume for now that our PCI function is permitted to 1535 * reset everything. 1536 */ 1537 1538 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { 1539 *flags &= ~EF10_RESET_MC; 1540 return RESET_TYPE_WORLD; 1541 } 1542 1543 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { 1544 *flags &= ~EF10_RESET_PORT; 1545 return RESET_TYPE_ALL; 1546 } 1547 1548 /* no invisible reset implemented */ 1549 1550 return -EINVAL; 1551 } 1552 1553 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) 1554 { 1555 int rc = efx_mcdi_reset(efx, reset_type); 1556 1557 /* Unprivileged functions return -EPERM, but need to return success 1558 * here so that the datapath is brought back up. 1559 */ 1560 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) 1561 rc = 0; 1562 1563 /* If it was a port reset, trigger reallocation of MC resources. 1564 * Note that on an MC reset nothing needs to be done now because we'll 1565 * detect the MC reset later and handle it then. 1566 * For an FLR, we never get an MC reset event, but the MC has reset all 1567 * resources assigned to us, so we have to trigger reallocation now. 
1568 */ 1569 if ((reset_type == RESET_TYPE_ALL || 1570 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) 1571 efx_ef10_reset_mc_allocations(efx); 1572 return rc; 1573 } 1574 1575 #define EF10_DMA_STAT(ext_name, mcdi_name) \ 1576 [EF10_STAT_ ## ext_name] = \ 1577 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1578 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ 1579 [EF10_STAT_ ## int_name] = \ 1580 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1581 #define EF10_OTHER_STAT(ext_name) \ 1582 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1583 #define GENERIC_SW_STAT(ext_name) \ 1584 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1585 1586 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { 1587 EF10_DMA_STAT(port_tx_bytes, TX_BYTES), 1588 EF10_DMA_STAT(port_tx_packets, TX_PKTS), 1589 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), 1590 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), 1591 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), 1592 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), 1593 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), 1594 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), 1595 EF10_DMA_STAT(port_tx_64, TX_64_PKTS), 1596 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), 1597 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), 1598 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), 1599 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), 1600 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), 1601 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), 1602 EF10_DMA_STAT(port_rx_bytes, RX_BYTES), 1603 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), 1604 EF10_OTHER_STAT(port_rx_good_bytes), 1605 EF10_OTHER_STAT(port_rx_bad_bytes), 1606 EF10_DMA_STAT(port_rx_packets, RX_PKTS), 1607 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), 1608 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), 1609 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), 1610 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), 1611 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), 1612 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), 1613 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), 1614 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), 1615 EF10_DMA_STAT(port_rx_64, RX_64_PKTS), 1616 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), 1617 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), 1618 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), 1619 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), 1620 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), 1621 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), 1622 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), 1623 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), 1624 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), 1625 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), 1626 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), 1627 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), 1628 GENERIC_SW_STAT(rx_nodesc_trunc), 1629 GENERIC_SW_STAT(rx_noskb_drops), 1630 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 1631 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 1632 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 1633 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 1634 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), 1635 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), 1636 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), 1637 
EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 1638 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 1639 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 1640 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), 1641 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), 1642 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), 1643 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), 1644 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), 1645 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), 1646 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), 1647 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), 1648 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), 1649 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), 1650 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), 1651 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), 1652 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), 1653 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), 1654 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), 1655 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), 1656 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), 1657 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), 1658 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), 1659 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), 1660 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), 1661 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), 1662 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), 1663 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1664 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1665 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1666 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1667 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1668 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), 1669 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), 1670 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), 1671 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), 1672 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), 1673 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), 1674 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), 1675 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), 1676 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), 1677 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), 1678 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), 1679 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), 1680 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), 1681 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), 1682 }; 1683 1684 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ 1685 (1ULL << EF10_STAT_port_tx_packets) | \ 1686 (1ULL << EF10_STAT_port_tx_pause) | \ 1687 (1ULL << EF10_STAT_port_tx_unicast) | \ 1688 (1ULL << EF10_STAT_port_tx_multicast) | \ 1689 (1ULL << EF10_STAT_port_tx_broadcast) | \ 1690 (1ULL << EF10_STAT_port_rx_bytes) | \ 1691 (1ULL << \ 1692 EF10_STAT_port_rx_bytes_minus_good_bytes) | \ 1693 (1ULL << EF10_STAT_port_rx_good_bytes) | \ 1694 (1ULL << EF10_STAT_port_rx_bad_bytes) | \ 1695 (1ULL << EF10_STAT_port_rx_packets) | \ 1696 (1ULL << EF10_STAT_port_rx_good) | \ 1697 (1ULL << 
EF10_STAT_port_rx_bad) | \ 1698 (1ULL << EF10_STAT_port_rx_pause) | \ 1699 (1ULL << EF10_STAT_port_rx_control) | \ 1700 (1ULL << EF10_STAT_port_rx_unicast) | \ 1701 (1ULL << EF10_STAT_port_rx_multicast) | \ 1702 (1ULL << EF10_STAT_port_rx_broadcast) | \ 1703 (1ULL << EF10_STAT_port_rx_lt64) | \ 1704 (1ULL << EF10_STAT_port_rx_64) | \ 1705 (1ULL << EF10_STAT_port_rx_65_to_127) | \ 1706 (1ULL << EF10_STAT_port_rx_128_to_255) | \ 1707 (1ULL << EF10_STAT_port_rx_256_to_511) | \ 1708 (1ULL << EF10_STAT_port_rx_512_to_1023) |\ 1709 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ 1710 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ 1711 (1ULL << EF10_STAT_port_rx_gtjumbo) | \ 1712 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ 1713 (1ULL << EF10_STAT_port_rx_overflow) | \ 1714 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ 1715 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ 1716 (1ULL << GENERIC_STAT_rx_noskb_drops)) 1717 1718 /* On 7000 series NICs, these statistics are only provided by the 10G MAC. 1719 * For a 10G/40G switchable port we do not expose these because they might 1720 * not include all the packets they should. 1721 * On 8000 series NICs these statistics are always provided. 1722 */ 1723 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ 1724 (1ULL << EF10_STAT_port_tx_lt64) | \ 1725 (1ULL << EF10_STAT_port_tx_64) | \ 1726 (1ULL << EF10_STAT_port_tx_65_to_127) |\ 1727 (1ULL << EF10_STAT_port_tx_128_to_255) |\ 1728 (1ULL << EF10_STAT_port_tx_256_to_511) |\ 1729 (1ULL << EF10_STAT_port_tx_512_to_1023) |\ 1730 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ 1731 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) 1732 1733 /* These statistics are only provided by the 40G MAC. For a 10G/40G 1734 * switchable port we do expose these because the errors will otherwise 1735 * be silent. 1736 */ 1737 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ 1738 (1ULL << EF10_STAT_port_rx_length_error)) 1739 1740 /* These statistics are only provided if the firmware supports the 1741 * capability PM_AND_RXDP_COUNTERS. 1742 */ 1743 #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 1744 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ 1745 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ 1746 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ 1747 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ 1748 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ 1749 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ 1750 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ 1751 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ 1752 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ 1753 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ 1754 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ 1755 (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) 1756 1757 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, 1758 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in 1759 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1760 * These bits are in the second u64 of the raw mask. 
1761 */ 1762 #define EF10_FEC_STAT_MASK ( \ 1763 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ 1764 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ 1765 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ 1766 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ 1767 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ 1768 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) 1769 1770 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, 1771 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in 1772 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1773 * These bits are in the second u64 of the raw mask. 1774 */ 1775 #define EF10_CTPIO_STAT_MASK ( \ 1776 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1777 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1778 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ 1779 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ 1780 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ 1781 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ 1782 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ 1783 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ 1784 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ 1785 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ 1786 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ 1787 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ 1788 (1ULL << (EF10_STAT_ctpio_success - 64)) | \ 1789 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ 1790 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ 1791 (1ULL << (EF10_STAT_ctpio_erase - 64))) 1792 1793 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1794 { 1795 u64 raw_mask = HUNT_COMMON_STAT_MASK; 1796 u32 port_caps = efx_mcdi_phy_get_caps(efx); 1797 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1798 1799 if (!(efx->mcdi->fn_flags & 1800 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 1801 return 0; 1802 1803 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { 1804 raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 1805 /* 8000 series have everything even at 40G */ 1806 if (nic_data->datapath_caps2 & 1807 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) 1808 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1809 } else { 1810 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1811 } 1812 1813 if (nic_data->datapath_caps & 1814 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 1815 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 1816 1817 return raw_mask; 1818 } 1819 1820 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 1821 { 1822 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1823 u64 raw_mask[2]; 1824 1825 raw_mask[0] = efx_ef10_raw_stat_mask(efx); 1826 1827 /* Only show vadaptor stats when EVB capability is present */ 1828 if (nic_data->datapath_caps & 1829 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { 1830 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); 1831 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; 1832 } else { 1833 raw_mask[1] = 0; 1834 } 1835 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ 1836 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) 1837 raw_mask[1] |= EF10_FEC_STAT_MASK; 1838 1839 /* CTPIO stats appear in V3. Only show them on devices that actually 1840 * support CTPIO. Although this driver doesn't use CTPIO others might, 1841 * and we may be reporting the stats for the underlying port. 
1842 */ 1843 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && 1844 (nic_data->datapath_caps2 & 1845 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) 1846 raw_mask[1] |= EF10_CTPIO_STAT_MASK; 1847 1848 #if BITS_PER_LONG == 64 1849 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); 1850 mask[0] = raw_mask[0]; 1851 mask[1] = raw_mask[1]; 1852 #else 1853 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 1854 mask[0] = raw_mask[0] & 0xffffffff; 1855 mask[1] = raw_mask[0] >> 32; 1856 mask[2] = raw_mask[1] & 0xffffffff; 1857 #endif 1858 } 1859 1860 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 1861 { 1862 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1863 1864 efx_ef10_get_stat_mask(efx, mask); 1865 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1866 mask, names); 1867 } 1868 1869 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, 1870 struct rtnl_link_stats64 *core_stats) 1871 { 1872 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1873 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1874 u64 *stats = nic_data->stats; 1875 size_t stats_count = 0, index; 1876 1877 efx_ef10_get_stat_mask(efx, mask); 1878 1879 if (full_stats) { 1880 for_each_set_bit(index, mask, EF10_STAT_COUNT) { 1881 if (efx_ef10_stat_desc[index].name) { 1882 *full_stats++ = stats[index]; 1883 ++stats_count; 1884 } 1885 } 1886 } 1887 1888 if (!core_stats) 1889 return stats_count; 1890 1891 if (nic_data->datapath_caps & 1892 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { 1893 /* Use vadaptor stats. */ 1894 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + 1895 stats[EF10_STAT_rx_multicast] + 1896 stats[EF10_STAT_rx_broadcast]; 1897 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + 1898 stats[EF10_STAT_tx_multicast] + 1899 stats[EF10_STAT_tx_broadcast]; 1900 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + 1901 stats[EF10_STAT_rx_multicast_bytes] + 1902 stats[EF10_STAT_rx_broadcast_bytes]; 1903 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + 1904 stats[EF10_STAT_tx_multicast_bytes] + 1905 stats[EF10_STAT_tx_broadcast_bytes]; 1906 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + 1907 stats[GENERIC_STAT_rx_noskb_drops]; 1908 core_stats->multicast = stats[EF10_STAT_rx_multicast]; 1909 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; 1910 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; 1911 core_stats->rx_errors = core_stats->rx_crc_errors; 1912 core_stats->tx_errors = stats[EF10_STAT_tx_bad]; 1913 } else { 1914 /* Use port stats. 
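 * The EVB capability is absent, so the vadaptor counters used above are
 * not available; report the physical port MAC statistics instead.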
*/ 1915 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; 1916 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; 1917 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; 1918 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; 1919 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + 1920 stats[GENERIC_STAT_rx_nodesc_trunc] + 1921 stats[GENERIC_STAT_rx_noskb_drops]; 1922 core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; 1923 core_stats->rx_length_errors = 1924 stats[EF10_STAT_port_rx_gtjumbo] + 1925 stats[EF10_STAT_port_rx_length_error]; 1926 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; 1927 core_stats->rx_frame_errors = 1928 stats[EF10_STAT_port_rx_align_error]; 1929 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; 1930 core_stats->rx_errors = (core_stats->rx_length_errors + 1931 core_stats->rx_crc_errors + 1932 core_stats->rx_frame_errors); 1933 } 1934 1935 return stats_count; 1936 } 1937 1938 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) 1939 { 1940 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1941 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1942 __le64 generation_start, generation_end; 1943 u64 *stats = nic_data->stats; 1944 __le64 *dma_stats; 1945 1946 efx_ef10_get_stat_mask(efx, mask); 1947 1948 dma_stats = efx->stats_buffer.addr; 1949 1950 generation_end = dma_stats[efx->num_mac_stats - 1]; 1951 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 1952 return 0; 1953 rmb(); 1954 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 1955 stats, efx->stats_buffer.addr, false); 1956 rmb(); 1957 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 1958 if (generation_end != generation_start) 1959 return -EAGAIN; 1960 1961 /* Update derived statistics */ 1962 efx_nic_fix_nodesc_drop_stat(efx, 1963 &stats[EF10_STAT_port_rx_nodesc_drops]); 1964 stats[EF10_STAT_port_rx_good_bytes] = 1965 stats[EF10_STAT_port_rx_bytes] - 1966 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; 1967 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], 1968 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); 1969 efx_update_sw_stats(efx, stats); 1970 return 0; 1971 } 1972 1973 1974 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, 1975 struct rtnl_link_stats64 *core_stats) 1976 { 1977 int retry; 1978 1979 /* If we're unlucky enough to read statistics during the DMA, wait 1980 * up to 10ms for it to finish (typically takes <500us) 1981 */ 1982 for (retry = 0; retry < 100; ++retry) { 1983 if (efx_ef10_try_update_nic_stats_pf(efx) == 0) 1984 break; 1985 udelay(100); 1986 } 1987 1988 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1989 } 1990 1991 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) 1992 { 1993 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 1994 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1995 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1996 __le64 generation_start, generation_end; 1997 u64 *stats = nic_data->stats; 1998 u32 dma_len = efx->num_mac_stats * sizeof(u64); 1999 struct efx_buffer stats_buf; 2000 __le64 *dma_stats; 2001 int rc; 2002 2003 spin_unlock_bh(&efx->stats_lock); 2004 2005 if (in_interrupt()) { 2006 /* If in atomic context, cannot update stats. Just update the 2007 * software stats and return so the caller can continue. 
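 * (The synchronous MC_CMD_MAC_STATS request issued below has to wait
 * for the MC to respond, which cannot be done from atomic context.)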
2008 */ 2009 spin_lock_bh(&efx->stats_lock); 2010 efx_update_sw_stats(efx, stats); 2011 return 0; 2012 } 2013 2014 efx_ef10_get_stat_mask(efx, mask); 2015 2016 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); 2017 if (rc) { 2018 spin_lock_bh(&efx->stats_lock); 2019 return rc; 2020 } 2021 2022 dma_stats = stats_buf.addr; 2023 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; 2024 2025 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); 2026 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, 2027 MAC_STATS_IN_DMA, 1); 2028 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 2029 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); 2030 2031 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 2032 NULL, 0, NULL); 2033 spin_lock_bh(&efx->stats_lock); 2034 if (rc) { 2035 /* Expect ENOENT if DMA queues have not been set up */ 2036 if (rc != -ENOENT || atomic_read(&efx->active_queues)) 2037 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, 2038 sizeof(inbuf), NULL, 0, rc); 2039 goto out; 2040 } 2041 2042 generation_end = dma_stats[efx->num_mac_stats - 1]; 2043 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { 2044 WARN_ON_ONCE(1); 2045 goto out; 2046 } 2047 rmb(); 2048 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 2049 stats, stats_buf.addr, false); 2050 rmb(); 2051 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 2052 if (generation_end != generation_start) { 2053 rc = -EAGAIN; 2054 goto out; 2055 } 2056 2057 efx_update_sw_stats(efx, stats); 2058 out: 2059 efx_nic_free_buffer(efx, &stats_buf); 2060 return rc; 2061 } 2062 2063 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, 2064 struct rtnl_link_stats64 *core_stats) 2065 { 2066 if (efx_ef10_try_update_nic_stats_vf(efx)) 2067 return 0; 2068 2069 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 2070 } 2071 2072 static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 2073 { 2074 struct efx_nic *efx = channel->efx; 2075 unsigned int mode, usecs; 2076 efx_dword_t timer_cmd; 2077 2078 if (channel->irq_moderation_us) { 2079 mode = 3; 2080 usecs = channel->irq_moderation_us; 2081 } else { 2082 mode = 0; 2083 usecs = 0; 2084 } 2085 2086 if (EFX_EF10_WORKAROUND_61265(efx)) { 2087 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); 2088 unsigned int ns = usecs * 1000; 2089 2090 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, 2091 channel->channel); 2092 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); 2093 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); 2094 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); 2095 2096 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, 2097 inbuf, sizeof(inbuf), 0, NULL, 0); 2098 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 2099 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2100 2101 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, 2102 EFE_DD_EVQ_IND_TIMER_FLAGS, 2103 ERF_DD_EVQ_IND_TIMER_MODE, mode, 2104 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 2105 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, 2106 channel->channel); 2107 } else { 2108 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2109 2110 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, 2111 ERF_DZ_TC_TIMER_VAL, ticks, 2112 ERF_FZ_TC_TMR_REL_VAL, ticks); 2113 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, 2114 channel->channel); 2115 } 2116 } 2117 2118 static void efx_ef10_get_wol_vf(struct efx_nic *efx, 2119 struct ethtool_wolinfo *wol) {} 2120 2121 
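/* Wake-on-LAN is not available on VFs: the get handler above is an empty
 * stub and the set handler below rejects every request.
 */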
static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) 2122 { 2123 return -EOPNOTSUPP; 2124 } 2125 2126 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) 2127 { 2128 wol->supported = 0; 2129 wol->wolopts = 0; 2130 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2131 } 2132 2133 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) 2134 { 2135 if (type != 0) 2136 return -EINVAL; 2137 return 0; 2138 } 2139 2140 static void efx_ef10_mcdi_request(struct efx_nic *efx, 2141 const efx_dword_t *hdr, size_t hdr_len, 2142 const efx_dword_t *sdu, size_t sdu_len) 2143 { 2144 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2145 u8 *pdu = nic_data->mcdi_buf.addr; 2146 2147 memcpy(pdu, hdr, hdr_len); 2148 memcpy(pdu + hdr_len, sdu, sdu_len); 2149 wmb(); 2150 2151 /* The hardware provides 'low' and 'high' (doorbell) registers 2152 * for passing the 64-bit address of an MCDI request to 2153 * firmware. However the dwords are swapped by firmware. The 2154 * least significant bits of the doorbell are then 0 for all 2155 * MCDI requests due to alignment. 2156 */ 2157 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), 2158 ER_DZ_MC_DB_LWRD); 2159 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), 2160 ER_DZ_MC_DB_HWRD); 2161 } 2162 2163 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) 2164 { 2165 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2166 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; 2167 2168 rmb(); 2169 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); 2170 } 2171 2172 static void 2173 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, 2174 size_t offset, size_t outlen) 2175 { 2176 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2177 const u8 *pdu = nic_data->mcdi_buf.addr; 2178 2179 memcpy(outbuf, pdu + offset, outlen); 2180 } 2181 2182 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) 2183 { 2184 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2185 2186 /* All our allocations have been reset */ 2187 efx_ef10_reset_mc_allocations(efx); 2188 2189 /* The datapath firmware might have been changed */ 2190 nic_data->must_check_datapath_caps = true; 2191 2192 /* MAC statistics have been cleared on the NIC; clear the local 2193 * statistic that we update with efx_update_diff_stat(). 2194 */ 2195 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; 2196 } 2197 2198 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) 2199 { 2200 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2201 int rc; 2202 2203 rc = efx_ef10_get_warm_boot_count(efx); 2204 if (rc < 0) { 2205 /* The firmware is presumably in the process of 2206 * rebooting. However, we are supposed to report each 2207 * reboot just once, so we must only do that once we 2208 * can read and store the updated warm boot count. 2209 */ 2210 return 0; 2211 } 2212 2213 if (rc == nic_data->warm_boot_count) 2214 return 0; 2215 2216 nic_data->warm_boot_count = rc; 2217 efx_ef10_mcdi_reboot_detected(efx); 2218 2219 return -EIO; 2220 } 2221 2222 /* Handle an MSI interrupt 2223 * 2224 * Handle an MSI hardware interrupt. This routine schedules event 2225 * queue processing. No interrupt acknowledgement cycle is necessary. 2226 * Also, we never need to check that the interrupt is for us, since 2227 * MSI interrupts cannot be shared. 
2228 */ 2229 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) 2230 { 2231 struct efx_msi_context *context = dev_id; 2232 struct efx_nic *efx = context->efx; 2233 2234 netif_vdbg(efx, intr, efx->net_dev, 2235 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); 2236 2237 if (likely(READ_ONCE(efx->irq_soft_enabled))) { 2238 /* Note test interrupts */ 2239 if (context->index == efx->irq_level) 2240 efx->last_irq_cpu = raw_smp_processor_id(); 2241 2242 /* Schedule processing of the channel */ 2243 efx_schedule_channel_irq(efx->channel[context->index]); 2244 } 2245 2246 return IRQ_HANDLED; 2247 } 2248 2249 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) 2250 { 2251 struct efx_nic *efx = dev_id; 2252 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 2253 struct efx_channel *channel; 2254 efx_dword_t reg; 2255 u32 queues; 2256 2257 /* Read the ISR which also ACKs the interrupts */ 2258 efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); 2259 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); 2260 2261 if (queues == 0) 2262 return IRQ_NONE; 2263 2264 if (likely(soft_enabled)) { 2265 /* Note test interrupts */ 2266 if (queues & (1U << efx->irq_level)) 2267 efx->last_irq_cpu = raw_smp_processor_id(); 2268 2269 efx_for_each_channel(channel, efx) { 2270 if (queues & 1) 2271 efx_schedule_channel_irq(channel); 2272 queues >>= 1; 2273 } 2274 } 2275 2276 netif_vdbg(efx, intr, efx->net_dev, 2277 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 2278 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 2279 2280 return IRQ_HANDLED; 2281 } 2282 2283 static int efx_ef10_irq_test_generate(struct efx_nic *efx) 2284 { 2285 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); 2286 2287 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, 2288 NULL) == 0) 2289 return -ENOTSUPP; 2290 2291 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); 2292 2293 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); 2294 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, 2295 inbuf, sizeof(inbuf), NULL, 0, NULL); 2296 } 2297 2298 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) 2299 { 2300 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2301 (tx_queue->ptr_mask + 1) * 2302 sizeof(efx_qword_t), 2303 GFP_KERNEL); 2304 } 2305 2306 /* This writes to the TX_DESC_WPTR and also pushes data */ 2307 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, 2308 const efx_qword_t *txd) 2309 { 2310 unsigned int write_ptr; 2311 efx_oword_t reg; 2312 2313 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2314 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); 2315 reg.qword[0] = *txd; 2316 efx_writeo_page(tx_queue->efx, ®, 2317 ER_DZ_TX_DESC_UPD, tx_queue->queue); 2318 } 2319 2320 /* Add Firmware-Assisted TSO v2 option descriptors to a queue. 2321 */ 2322 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, 2323 struct sk_buff *skb, 2324 bool *data_mapped) 2325 { 2326 struct efx_tx_buffer *buffer; 2327 struct tcphdr *tcp; 2328 struct iphdr *ip; 2329 2330 u16 ipv4_id; 2331 u32 seqnum; 2332 u32 mss; 2333 2334 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); 2335 2336 mss = skb_shinfo(skb)->gso_size; 2337 2338 if (unlikely(mss < 4)) { 2339 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); 2340 return -EINVAL; 2341 } 2342 2343 ip = ip_hdr(skb); 2344 if (ip->version == 4) { 2345 /* Modify IPv4 header if needed. 
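 * tot_len and check are simply cleared; the IP ID and TCP sequence
 * number extracted below are passed to the NIC in the two TSO option
 * descriptors that follow.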
*/ 2346 ip->tot_len = 0; 2347 ip->check = 0; 2348 ipv4_id = ntohs(ip->id); 2349 } else { 2350 /* Modify IPv6 header if needed. */ 2351 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2352 2353 ipv6->payload_len = 0; 2354 ipv4_id = 0; 2355 } 2356 2357 tcp = tcp_hdr(skb); 2358 seqnum = ntohl(tcp->seq); 2359 2360 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2361 2362 buffer->flags = EFX_TX_BUF_OPTION; 2363 buffer->len = 0; 2364 buffer->unmap_len = 0; 2365 EFX_POPULATE_QWORD_5(buffer->option, 2366 ESF_DZ_TX_DESC_IS_OPT, 1, 2367 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2368 ESF_DZ_TX_TSO_OPTION_TYPE, 2369 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, 2370 ESF_DZ_TX_TSO_IP_ID, ipv4_id, 2371 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum 2372 ); 2373 ++tx_queue->insert_count; 2374 2375 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2376 2377 buffer->flags = EFX_TX_BUF_OPTION; 2378 buffer->len = 0; 2379 buffer->unmap_len = 0; 2380 EFX_POPULATE_QWORD_4(buffer->option, 2381 ESF_DZ_TX_DESC_IS_OPT, 1, 2382 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2383 ESF_DZ_TX_TSO_OPTION_TYPE, 2384 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, 2385 ESF_DZ_TX_TSO_TCP_MSS, mss 2386 ); 2387 ++tx_queue->insert_count; 2388 2389 return 0; 2390 } 2391 2392 static u32 efx_ef10_tso_versions(struct efx_nic *efx) 2393 { 2394 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2395 u32 tso_versions = 0; 2396 2397 if (nic_data->datapath_caps & 2398 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) 2399 tso_versions |= BIT(1); 2400 if (nic_data->datapath_caps2 & 2401 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) 2402 tso_versions |= BIT(2); 2403 return tso_versions; 2404 } 2405 2406 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) 2407 { 2408 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 2409 EFX_BUF_SIZE)); 2410 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 2411 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; 2412 struct efx_channel *channel = tx_queue->channel; 2413 struct efx_nic *efx = tx_queue->efx; 2414 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2415 bool tso_v2 = false; 2416 size_t inlen; 2417 dma_addr_t dma_addr; 2418 efx_qword_t *txd; 2419 int rc; 2420 int i; 2421 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); 2422 2423 /* Only attempt to enable TX timestamping if we have the license for it, 2424 * otherwise TXQ init will fail 2425 */ 2426 if (!(nic_data->licensed_features & 2427 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { 2428 tx_queue->timestamping = false; 2429 /* Disable sync events on this channel. */ 2430 if (efx->type->ptp_set_ts_sync_events) 2431 efx->type->ptp_set_ts_sync_events(efx, false, false); 2432 } 2433 2434 /* TSOv2 is a limited resource that can only be configured on a limited 2435 * number of queues. TSO without checksum offload is not really a thing, 2436 * so we only enable it for those queues. 2437 * TSOv2 cannot be used with Hardware timestamping. 
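 * Hence TSOv2 is requested below only when checksum offload is enabled,
 * the firmware advertises TX_TSO_V2 and timestamping is off for this
 * queue.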
2438 */ 2439 if (csum_offload && (nic_data->datapath_caps2 & 2440 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) && 2441 !tx_queue->timestamping) { 2442 tso_v2 = true; 2443 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", 2444 channel->channel); 2445 } 2446 2447 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); 2448 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); 2449 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); 2450 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); 2451 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); 2452 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); 2453 2454 dma_addr = tx_queue->txd.buf.dma_addr; 2455 2456 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", 2457 tx_queue->queue, entries, (u64)dma_addr); 2458 2459 for (i = 0; i < entries; ++i) { 2460 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); 2461 dma_addr += EFX_BUF_SIZE; 2462 } 2463 2464 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); 2465 2466 do { 2467 MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS, 2468 /* This flag was removed from mcdi_pcol.h for 2469 * the non-_EXT version of INIT_TXQ. However, 2470 * firmware still honours it. 2471 */ 2472 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, 2473 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, 2474 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload, 2475 INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, 2476 tx_queue->timestamping); 2477 2478 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, 2479 NULL, 0, NULL); 2480 if (rc == -ENOSPC && tso_v2) { 2481 /* Retry without TSOv2 if we're short on contexts. */ 2482 tso_v2 = false; 2483 netif_warn(efx, probe, efx->net_dev, 2484 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n"); 2485 } else if (rc) { 2486 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, 2487 MC_CMD_INIT_TXQ_EXT_IN_LEN, 2488 NULL, 0, rc); 2489 goto fail; 2490 } 2491 } while (rc); 2492 2493 /* A previous user of this TX queue might have set us up the 2494 * bomb by writing a descriptor to the TX push collector but 2495 * not the doorbell. (Each collector belongs to a port, not a 2496 * queue or function, so cannot easily be reset.) We must 2497 * attempt to push a no-op descriptor in its place. 
2498 */ 2499 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; 2500 tx_queue->insert_count = 1; 2501 txd = efx_tx_desc(tx_queue, 0); 2502 EFX_POPULATE_QWORD_5(*txd, 2503 ESF_DZ_TX_DESC_IS_OPT, true, 2504 ESF_DZ_TX_OPTION_TYPE, 2505 ESE_DZ_TX_OPTION_DESC_CRC_CSUM, 2506 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, 2507 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload, 2508 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); 2509 tx_queue->write_count = 1; 2510 2511 if (tso_v2) { 2512 tx_queue->handle_tso = efx_ef10_tx_tso_desc; 2513 tx_queue->tso_version = 2; 2514 } else if (nic_data->datapath_caps & 2515 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { 2516 tx_queue->tso_version = 1; 2517 } 2518 2519 wmb(); 2520 efx_ef10_push_tx_desc(tx_queue, txd); 2521 2522 return; 2523 2524 fail: 2525 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", 2526 tx_queue->queue); 2527 } 2528 2529 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) 2530 { 2531 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); 2532 MCDI_DECLARE_BUF_ERR(outbuf); 2533 struct efx_nic *efx = tx_queue->efx; 2534 size_t outlen; 2535 int rc; 2536 2537 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, 2538 tx_queue->queue); 2539 2540 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), 2541 outbuf, sizeof(outbuf), &outlen); 2542 2543 if (rc && rc != -EALREADY) 2544 goto fail; 2545 2546 return; 2547 2548 fail: 2549 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, 2550 outbuf, outlen, rc); 2551 } 2552 2553 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) 2554 { 2555 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); 2556 } 2557 2558 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 2559 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) 2560 { 2561 unsigned int write_ptr; 2562 efx_dword_t reg; 2563 2564 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2565 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); 2566 efx_writed_page(tx_queue->efx, ®, 2567 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); 2568 } 2569 2570 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff 2571 2572 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, 2573 dma_addr_t dma_addr, unsigned int len) 2574 { 2575 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { 2576 /* If we need to break across multiple descriptors we should 2577 * stop at a page boundary. This assumes the length limit is 2578 * greater than the page size. 
2579 */ 2580 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; 2581 2582 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); 2583 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; 2584 } 2585 2586 return len; 2587 } 2588 2589 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) 2590 { 2591 unsigned int old_write_count = tx_queue->write_count; 2592 struct efx_tx_buffer *buffer; 2593 unsigned int write_ptr; 2594 efx_qword_t *txd; 2595 2596 tx_queue->xmit_more_available = false; 2597 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) 2598 return; 2599 2600 do { 2601 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2602 buffer = &tx_queue->buffer[write_ptr]; 2603 txd = efx_tx_desc(tx_queue, write_ptr); 2604 ++tx_queue->write_count; 2605 2606 /* Create TX descriptor ring entry */ 2607 if (buffer->flags & EFX_TX_BUF_OPTION) { 2608 *txd = buffer->option; 2609 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) 2610 /* PIO descriptor */ 2611 tx_queue->packet_write_count = tx_queue->write_count; 2612 } else { 2613 tx_queue->packet_write_count = tx_queue->write_count; 2614 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 2615 EFX_POPULATE_QWORD_3( 2616 *txd, 2617 ESF_DZ_TX_KER_CONT, 2618 buffer->flags & EFX_TX_BUF_CONT, 2619 ESF_DZ_TX_KER_BYTE_CNT, buffer->len, 2620 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); 2621 } 2622 } while (tx_queue->write_count != tx_queue->insert_count); 2623 2624 wmb(); /* Ensure descriptors are written before they are fetched */ 2625 2626 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { 2627 txd = efx_tx_desc(tx_queue, 2628 old_write_count & tx_queue->ptr_mask); 2629 efx_ef10_push_tx_desc(tx_queue, txd); 2630 ++tx_queue->pushes; 2631 } else { 2632 efx_ef10_notify_tx_desc(tx_queue); 2633 } 2634 } 2635 2636 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ 2637 1 << RSS_MODE_HASH_DST_ADDR_LBN) 2638 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ 2639 1 << RSS_MODE_HASH_DST_PORT_LBN) 2640 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ 2641 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ 2642 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ 2643 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\ 2644 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ 2645 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ 2646 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ 2647 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ 2648 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ 2649 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) 2650 2651 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) 2652 { 2653 /* Firmware had a bug (sfc bug 61952) where it would not actually 2654 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. 2655 * This meant that it would always contain whatever was previously 2656 * in the MCDI buffer. Fortunately, all firmware versions with 2657 * this bug have the same default flags value for a newly-allocated 2658 * RSS context, and the only time we want to get the flags is just 2659 * after allocating. 
Moreover, the response has a 32-bit hole 2660 * where the context ID would be in the request, so we can use an 2661 * overlength buffer in the request and pre-fill the flags field 2662 * with what we believe the default to be. Thus if the firmware 2663 * has the bug, it will leave our pre-filled value in the flags 2664 * field of the response, and we will get the right answer. 2665 * 2666 * However, this does mean that this function should NOT be used if 2667 * the RSS context flags might not be their defaults - it is ONLY 2668 * reliably correct for a newly-allocated RSS context. 2669 */ 2670 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2671 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2672 size_t outlen; 2673 int rc; 2674 2675 /* Check we have a hole for the context ID */ 2676 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); 2677 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context); 2678 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, 2679 RSS_CONTEXT_FLAGS_DEFAULT); 2680 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, 2681 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 2682 if (rc == 0) { 2683 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) 2684 rc = -EIO; 2685 else 2686 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); 2687 } 2688 return rc; 2689 } 2690 2691 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context. 2692 * If we fail, we just leave the RSS context at its default hash settings, 2693 * which is safe but may slightly reduce performance. 2694 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we 2695 * just need to set the UDP ports flags (for both IP versions). 2696 */ 2697 static void efx_ef10_set_rss_flags(struct efx_nic *efx, 2698 struct efx_rss_context *ctx) 2699 { 2700 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); 2701 u32 flags; 2702 2703 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); 2704 2705 if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0) 2706 return; 2707 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, 2708 ctx->context_id); 2709 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; 2710 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; 2711 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); 2712 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), 2713 NULL, 0, NULL)) 2714 /* Succeeded, so UDP 4-tuple is now enabled */ 2715 ctx->rx_hash_udp_4tuple = true; 2716 } 2717 2718 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive, 2719 struct efx_rss_context *ctx, 2720 unsigned *context_size) 2721 { 2722 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); 2723 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); 2724 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2725 size_t outlen; 2726 int rc; 2727 u32 alloc_type = exclusive ? 2728 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : 2729 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; 2730 unsigned rss_spread = exclusive ? 
2731 efx->rss_spread : 2732 min(rounddown_pow_of_two(efx->rss_spread), 2733 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); 2734 2735 if (!exclusive && rss_spread == 1) { 2736 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID; 2737 if (context_size) 2738 *context_size = 1; 2739 return 0; 2740 } 2741 2742 if (nic_data->datapath_caps & 2743 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) 2744 return -EOPNOTSUPP; 2745 2746 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, 2747 nic_data->vport_id); 2748 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); 2749 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); 2750 2751 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), 2752 outbuf, sizeof(outbuf), &outlen); 2753 if (rc != 0) 2754 return rc; 2755 2756 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) 2757 return -EIO; 2758 2759 ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); 2760 2761 if (context_size) 2762 *context_size = rss_spread; 2763 2764 if (nic_data->datapath_caps & 2765 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) 2766 efx_ef10_set_rss_flags(efx, ctx); 2767 2768 return 0; 2769 } 2770 2771 static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) 2772 { 2773 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); 2774 2775 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, 2776 context); 2777 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), 2778 NULL, 0, NULL); 2779 } 2780 2781 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, 2782 const u32 *rx_indir_table, const u8 *key) 2783 { 2784 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); 2785 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); 2786 int i, rc; 2787 2788 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, 2789 context); 2790 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != 2791 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); 2792 2793 /* This iterates over the length of efx->rss_context.rx_indir_table, but 2794 * copies bytes from rx_indir_table. That's because the latter is a 2795 * pointer rather than an array, but should have the same length. 2796 * The efx->rss_context.rx_hash_key loop below is similar. 
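 * Each entry is truncated to a u8 because the MCDI indirection table
 * field is an array of bytes.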
2797 */ 2798 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i) 2799 MCDI_PTR(tablebuf, 2800 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = 2801 (u8) rx_indir_table[i]; 2802 2803 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, 2804 sizeof(tablebuf), NULL, 0, NULL); 2805 if (rc != 0) 2806 return rc; 2807 2808 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, 2809 context); 2810 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) != 2811 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2812 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i) 2813 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; 2814 2815 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, 2816 sizeof(keybuf), NULL, 0, NULL); 2817 } 2818 2819 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) 2820 { 2821 int rc; 2822 2823 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) { 2824 rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id); 2825 WARN_ON(rc != 0); 2826 } 2827 efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID; 2828 } 2829 2830 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, 2831 unsigned *context_size) 2832 { 2833 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2834 int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context, 2835 context_size); 2836 2837 if (rc != 0) 2838 return rc; 2839 2840 nic_data->rx_rss_context_exclusive = false; 2841 efx_set_default_rx_indir_table(efx, &efx->rss_context); 2842 return 0; 2843 } 2844 2845 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, 2846 const u32 *rx_indir_table, 2847 const u8 *key) 2848 { 2849 u32 old_rx_rss_context = efx->rss_context.context_id; 2850 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2851 int rc; 2852 2853 if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID || 2854 !nic_data->rx_rss_context_exclusive) { 2855 rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context, 2856 NULL); 2857 if (rc == -EOPNOTSUPP) 2858 return rc; 2859 else if (rc != 0) 2860 goto fail1; 2861 } 2862 2863 rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id, 2864 rx_indir_table, key); 2865 if (rc != 0) 2866 goto fail2; 2867 2868 if (efx->rss_context.context_id != old_rx_rss_context && 2869 old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 2870 WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0); 2871 nic_data->rx_rss_context_exclusive = true; 2872 if (rx_indir_table != efx->rss_context.rx_indir_table) 2873 memcpy(efx->rss_context.rx_indir_table, rx_indir_table, 2874 sizeof(efx->rss_context.rx_indir_table)); 2875 if (key != efx->rss_context.rx_hash_key) 2876 memcpy(efx->rss_context.rx_hash_key, key, 2877 efx->type->rx_hash_key_size); 2878 2879 return 0; 2880 2881 fail2: 2882 if (old_rx_rss_context != efx->rss_context.context_id) { 2883 WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0); 2884 efx->rss_context.context_id = old_rx_rss_context; 2885 } 2886 fail1: 2887 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 2888 return rc; 2889 } 2890 2891 static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx, 2892 struct efx_rss_context *ctx, 2893 const u32 *rx_indir_table, 2894 const u8 *key) 2895 { 2896 int rc; 2897 2898 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2899 2900 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { 2901 rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL); 2902 if (rc) 2903 return rc; 2904 
} 2905 2906 if (!rx_indir_table) /* Delete this context */ 2907 return efx_ef10_free_rss_context(efx, ctx->context_id); 2908 2909 rc = efx_ef10_populate_rss_table(efx, ctx->context_id, 2910 rx_indir_table, key); 2911 if (rc) 2912 return rc; 2913 2914 memcpy(ctx->rx_indir_table, rx_indir_table, 2915 sizeof(efx->rss_context.rx_indir_table)); 2916 memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); 2917 2918 return 0; 2919 } 2920 2921 static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx, 2922 struct efx_rss_context *ctx) 2923 { 2924 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); 2925 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); 2926 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 2927 size_t outlen; 2928 int rc, i; 2929 2930 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2931 2932 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != 2933 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); 2934 2935 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) 2936 return -ENOENT; 2937 2938 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, 2939 ctx->context_id); 2940 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != 2941 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); 2942 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), 2943 tablebuf, sizeof(tablebuf), &outlen); 2944 if (rc != 0) 2945 return rc; 2946 2947 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) 2948 return -EIO; 2949 2950 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) 2951 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf, 2952 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; 2953 2954 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, 2955 ctx->context_id); 2956 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != 2957 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2958 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), 2959 keybuf, sizeof(keybuf), &outlen); 2960 if (rc != 0) 2961 return rc; 2962 2963 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) 2964 return -EIO; 2965 2966 for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i) 2967 ctx->rx_hash_key[i] = MCDI_PTR( 2968 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; 2969 2970 return 0; 2971 } 2972 2973 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) 2974 { 2975 int rc; 2976 2977 mutex_lock(&efx->rss_lock); 2978 rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context); 2979 mutex_unlock(&efx->rss_lock); 2980 return rc; 2981 } 2982 2983 static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx) 2984 { 2985 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2986 struct efx_rss_context *ctx; 2987 int rc; 2988 2989 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2990 2991 if (!nic_data->must_restore_rss_contexts) 2992 return; 2993 2994 list_for_each_entry(ctx, &efx->rss_context.list, list) { 2995 /* previous NIC RSS context is gone */ 2996 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID; 2997 /* so try to allocate a new one */ 2998 rc = efx_ef10_rx_push_rss_context_config(efx, ctx, 2999 ctx->rx_indir_table, 3000 ctx->rx_hash_key); 3001 if (rc) 3002 netif_warn(efx, probe, efx->net_dev, 3003 "failed to restore RSS context %u, rc=%d" 3004 "; RSS filters may fail to be applied\n", 3005 ctx->user_id, rc); 3006 } 3007 nic_data->must_restore_rss_contexts = false; 3008 } 3009 3010 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 3011 const u32 *rx_indir_table, 3012 const u8 *key) 3013 { 3014 int rc; 3015 3016 if 
(efx->rss_spread == 1) 3017 return 0; 3018 3019 if (!key) 3020 key = efx->rss_context.rx_hash_key; 3021 3022 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); 3023 3024 if (rc == -ENOBUFS && !user) { 3025 unsigned context_size; 3026 bool mismatch = false; 3027 size_t i; 3028 3029 for (i = 0; 3030 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch; 3031 i++) 3032 mismatch = rx_indir_table[i] != 3033 ethtool_rxfh_indir_default(i, efx->rss_spread); 3034 3035 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); 3036 if (rc == 0) { 3037 if (context_size != efx->rss_spread) 3038 netif_warn(efx, probe, efx->net_dev, 3039 "Could not allocate an exclusive RSS" 3040 " context; allocated a shared one of" 3041 " different size." 3042 " Wanted %u, got %u.\n", 3043 efx->rss_spread, context_size); 3044 else if (mismatch) 3045 netif_warn(efx, probe, efx->net_dev, 3046 "Could not allocate an exclusive RSS" 3047 " context; allocated a shared one but" 3048 " could not apply custom" 3049 " indirection.\n"); 3050 else 3051 netif_info(efx, probe, efx->net_dev, 3052 "Could not allocate an exclusive RSS" 3053 " context; allocated a shared one.\n"); 3054 } 3055 } 3056 return rc; 3057 } 3058 3059 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 3060 const u32 *rx_indir_table 3061 __attribute__ ((unused)), 3062 const u8 *key 3063 __attribute__ ((unused))) 3064 { 3065 if (user) 3066 return -EOPNOTSUPP; 3067 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) 3068 return 0; 3069 return efx_ef10_rx_push_shared_rss_config(efx, NULL); 3070 } 3071 3072 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) 3073 { 3074 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, 3075 (rx_queue->ptr_mask + 1) * 3076 sizeof(efx_qword_t), 3077 GFP_KERNEL); 3078 } 3079 3080 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) 3081 { 3082 MCDI_DECLARE_BUF(inbuf, 3083 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 3084 EFX_BUF_SIZE)); 3085 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 3086 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; 3087 struct efx_nic *efx = rx_queue->efx; 3088 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3089 size_t inlen; 3090 dma_addr_t dma_addr; 3091 int rc; 3092 int i; 3093 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); 3094 3095 rx_queue->scatter_n = 0; 3096 rx_queue->scatter_len = 0; 3097 3098 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); 3099 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); 3100 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); 3101 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, 3102 efx_rx_queue_index(rx_queue)); 3103 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, 3104 INIT_RXQ_IN_FLAG_PREFIX, 1, 3105 INIT_RXQ_IN_FLAG_TIMESTAMP, 1); 3106 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); 3107 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); 3108 3109 dma_addr = rx_queue->rxd.buf.dma_addr; 3110 3111 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", 3112 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); 3113 3114 for (i = 0; i < entries; ++i) { 3115 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); 3116 dma_addr += EFX_BUF_SIZE; 3117 } 3118 3119 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); 3120 3121 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, 3122 NULL, 0, NULL); 3123 if (rc) 3124 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", 3125 efx_rx_queue_index(rx_queue)); 3126 } 3127 3128 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) 3129 { 3130 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); 3131 MCDI_DECLARE_BUF_ERR(outbuf); 3132 struct efx_nic *efx = rx_queue->efx; 3133 size_t outlen; 3134 int rc; 3135 3136 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, 3137 efx_rx_queue_index(rx_queue)); 3138 3139 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), 3140 outbuf, sizeof(outbuf), &outlen); 3141 3142 if (rc && rc != -EALREADY) 3143 goto fail; 3144 3145 return; 3146 3147 fail: 3148 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, 3149 outbuf, outlen, rc); 3150 } 3151 3152 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) 3153 { 3154 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); 3155 } 3156 3157 /* This creates an entry in the RX descriptor queue */ 3158 static inline void 3159 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 3160 { 3161 struct efx_rx_buffer *rx_buf; 3162 efx_qword_t *rxd; 3163 3164 rxd = efx_rx_desc(rx_queue, index); 3165 rx_buf = efx_rx_buffer(rx_queue, index); 3166 EFX_POPULATE_QWORD_2(*rxd, 3167 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 3168 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 3169 } 3170 3171 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 3172 { 3173 struct efx_nic *efx = rx_queue->efx; 3174 unsigned int write_count; 3175 efx_dword_t reg; 3176 3177 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 3178 write_count = rx_queue->added_count & ~7; 3179 if (rx_queue->notified_count == write_count) 3180 return; 3181 3182 do 3183 efx_ef10_build_rx_desc( 3184 rx_queue, 3185 rx_queue->notified_count & rx_queue->ptr_mask); 3186 while (++rx_queue->notified_count != write_count); 3187 3188 wmb(); 3189 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 3190 write_count & rx_queue->ptr_mask); 3191 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 3192 efx_rx_queue_index(rx_queue)); 3193 } 3194 3195 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 3196 3197 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 3198 { 3199 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 3200 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3201 efx_qword_t event; 3202 3203 EFX_POPULATE_QWORD_2(event, 3204 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3205 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 3206 3207 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3208 3209 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3210 * already swapped the data to little-endian order. 
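 * Copy the raw bytes of the event straight into the request instead.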
3211 */ 3212 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3213 sizeof(efx_qword_t)); 3214 3215 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 3216 inbuf, sizeof(inbuf), 0, 3217 efx_ef10_rx_defer_refill_complete, 0); 3218 } 3219 3220 static void 3221 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 3222 int rc, efx_dword_t *outbuf, 3223 size_t outlen_actual) 3224 { 3225 /* nothing to do */ 3226 } 3227 3228 static int efx_ef10_ev_probe(struct efx_channel *channel) 3229 { 3230 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 3231 (channel->eventq_mask + 1) * 3232 sizeof(efx_qword_t), 3233 GFP_KERNEL); 3234 } 3235 3236 static void efx_ef10_ev_fini(struct efx_channel *channel) 3237 { 3238 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); 3239 MCDI_DECLARE_BUF_ERR(outbuf); 3240 struct efx_nic *efx = channel->efx; 3241 size_t outlen; 3242 int rc; 3243 3244 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); 3245 3246 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), 3247 outbuf, sizeof(outbuf), &outlen); 3248 3249 if (rc && rc != -EALREADY) 3250 goto fail; 3251 3252 return; 3253 3254 fail: 3255 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, 3256 outbuf, outlen, rc); 3257 } 3258 3259 static int efx_ef10_ev_init(struct efx_channel *channel) 3260 { 3261 MCDI_DECLARE_BUF(inbuf, 3262 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / 3263 EFX_BUF_SIZE)); 3264 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); 3265 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; 3266 struct efx_nic *efx = channel->efx; 3267 struct efx_ef10_nic_data *nic_data; 3268 size_t inlen, outlen; 3269 unsigned int enabled, implemented; 3270 dma_addr_t dma_addr; 3271 int rc; 3272 int i; 3273 3274 nic_data = efx->nic_data; 3275 3276 /* Fill event queue with all ones (i.e. empty events) */ 3277 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 3278 3279 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); 3280 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); 3281 /* INIT_EVQ expects index in vector table, not absolute */ 3282 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); 3283 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, 3284 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 3285 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); 3286 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); 3287 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, 3288 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 3289 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); 3290 3291 if (nic_data->datapath_caps2 & 3292 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { 3293 /* Use the new generic approach to specifying event queue 3294 * configuration, requesting lower latency or higher throughput. 3295 * The options that actually get used appear in the output. 
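 * TYPE_AUTO below leaves the choice to the firmware; the flags it
 * actually selected are returned in INIT_EVQ_V2_OUT_FLAGS and logged
 * at debug level once the command completes.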
3296 */ 3297 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, 3298 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, 3299 INIT_EVQ_V2_IN_FLAG_TYPE, 3300 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); 3301 } else { 3302 bool cut_thru = !(nic_data->datapath_caps & 3303 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 3304 3305 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, 3306 INIT_EVQ_IN_FLAG_INTERRUPTING, 1, 3307 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 3308 INIT_EVQ_IN_FLAG_TX_MERGE, 1, 3309 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); 3310 } 3311 3312 dma_addr = channel->eventq.buf.dma_addr; 3313 for (i = 0; i < entries; ++i) { 3314 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); 3315 dma_addr += EFX_BUF_SIZE; 3316 } 3317 3318 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); 3319 3320 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, 3321 outbuf, sizeof(outbuf), &outlen); 3322 3323 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) 3324 netif_dbg(efx, drv, efx->net_dev, 3325 "Channel %d using event queue flags %08x\n", 3326 channel->channel, 3327 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); 3328 3329 /* IRQ return is ignored */ 3330 if (channel->channel || rc) 3331 return rc; 3332 3333 /* Successfully created event queue on channel 0 */ 3334 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 3335 if (rc == -ENOSYS) { 3336 /* GET_WORKAROUNDS was implemented before this workaround, 3337 * thus it must be unavailable in this firmware. 3338 */ 3339 nic_data->workaround_26807 = false; 3340 rc = 0; 3341 } else if (rc) { 3342 goto fail; 3343 } else { 3344 nic_data->workaround_26807 = 3345 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); 3346 3347 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && 3348 !nic_data->workaround_26807) { 3349 unsigned int flags; 3350 3351 rc = efx_mcdi_set_workaround(efx, 3352 MC_CMD_WORKAROUND_BUG26807, 3353 true, &flags); 3354 3355 if (!rc) { 3356 if (flags & 3357 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { 3358 netif_info(efx, drv, efx->net_dev, 3359 "other functions on NIC have been reset\n"); 3360 3361 /* With MCFW v4.6.x and earlier, the 3362 * boot count will have incremented, 3363 * so re-read the warm_boot_count 3364 * value now to ensure this function 3365 * doesn't think it has changed next 3366 * time it checks. 
3367 */ 3368 rc = efx_ef10_get_warm_boot_count(efx); 3369 if (rc >= 0) { 3370 nic_data->warm_boot_count = rc; 3371 rc = 0; 3372 } 3373 } 3374 nic_data->workaround_26807 = true; 3375 } else if (rc == -EPERM) { 3376 rc = 0; 3377 } 3378 } 3379 } 3380 3381 if (!rc) 3382 return 0; 3383 3384 fail: 3385 efx_ef10_ev_fini(channel); 3386 return rc; 3387 } 3388 3389 static void efx_ef10_ev_remove(struct efx_channel *channel) 3390 { 3391 efx_nic_free_buffer(channel->efx, &channel->eventq.buf); 3392 } 3393 3394 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 3395 unsigned int rx_queue_label) 3396 { 3397 struct efx_nic *efx = rx_queue->efx; 3398 3399 netif_info(efx, hw, efx->net_dev, 3400 "rx event arrived on queue %d labeled as queue %u\n", 3401 efx_rx_queue_index(rx_queue), rx_queue_label); 3402 3403 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3404 } 3405 3406 static void 3407 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 3408 unsigned int actual, unsigned int expected) 3409 { 3410 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 3411 struct efx_nic *efx = rx_queue->efx; 3412 3413 netif_info(efx, hw, efx->net_dev, 3414 "dropped %d events (index=%d expected=%d)\n", 3415 dropped, actual, expected); 3416 3417 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3418 } 3419 3420 /* partially received RX was aborted. clean up. */ 3421 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 3422 { 3423 unsigned int rx_desc_ptr; 3424 3425 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 3426 "scattered RX aborted (dropping %u buffers)\n", 3427 rx_queue->scatter_n); 3428 3429 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 3430 3431 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 3432 0, EFX_RX_PKT_DISCARD); 3433 3434 rx_queue->removed_count += rx_queue->scatter_n; 3435 rx_queue->scatter_n = 0; 3436 rx_queue->scatter_len = 0; 3437 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 3438 } 3439 3440 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 3441 unsigned int n_packets, 3442 unsigned int rx_encap_hdr, 3443 unsigned int rx_l3_class, 3444 unsigned int rx_l4_class, 3445 const efx_qword_t *event) 3446 { 3447 struct efx_nic *efx = channel->efx; 3448 bool handled = false; 3449 3450 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 3451 if (!(efx->net_dev->features & NETIF_F_RXALL)) { 3452 if (!efx->loopback_selftest) 3453 channel->n_rx_eth_crc_err += n_packets; 3454 return EFX_RX_PKT_DISCARD; 3455 } 3456 handled = true; 3457 } 3458 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 3459 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3460 rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3461 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3462 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3463 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3464 netdev_WARN(efx->net_dev, 3465 "invalid class for RX_IPCKSUM_ERR: event=" 3466 EFX_QWORD_FMT "\n", 3467 EFX_QWORD_VAL(*event)); 3468 if (!efx->loopback_selftest) 3469 *(rx_encap_hdr ? 
3470 &channel->n_rx_outer_ip_hdr_chksum_err : 3471 &channel->n_rx_ip_hdr_chksum_err) += n_packets; 3472 return 0; 3473 } 3474 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 3475 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3476 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3477 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3478 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3479 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) 3480 netdev_WARN(efx->net_dev, 3481 "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 3482 EFX_QWORD_FMT "\n", 3483 EFX_QWORD_VAL(*event)); 3484 if (!efx->loopback_selftest) 3485 *(rx_encap_hdr ? 3486 &channel->n_rx_outer_tcp_udp_chksum_err : 3487 &channel->n_rx_tcp_udp_chksum_err) += n_packets; 3488 return 0; 3489 } 3490 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 3491 if (unlikely(!rx_encap_hdr)) 3492 netdev_WARN(efx->net_dev, 3493 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 3494 EFX_QWORD_FMT "\n", 3495 EFX_QWORD_VAL(*event)); 3496 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3497 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3498 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3499 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3500 netdev_WARN(efx->net_dev, 3501 "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 3502 EFX_QWORD_FMT "\n", 3503 EFX_QWORD_VAL(*event)); 3504 if (!efx->loopback_selftest) 3505 channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 3506 return 0; 3507 } 3508 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 3509 if (unlikely(!rx_encap_hdr)) 3510 netdev_WARN(efx->net_dev, 3511 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3512 EFX_QWORD_FMT "\n", 3513 EFX_QWORD_VAL(*event)); 3514 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3515 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3516 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3517 rx_l4_class != ESE_FZ_L4_CLASS_UDP))) 3518 netdev_WARN(efx->net_dev, 3519 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3520 EFX_QWORD_FMT "\n", 3521 EFX_QWORD_VAL(*event)); 3522 if (!efx->loopback_selftest) 3523 channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 3524 return 0; 3525 } 3526 3527 WARN_ON(!handled); /* No error bits were recognised */ 3528 return 0; 3529 } 3530 3531 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 3532 const efx_qword_t *event) 3533 { 3534 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 3535 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; 3536 unsigned int n_descs, n_packets, i; 3537 struct efx_nic *efx = channel->efx; 3538 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3539 struct efx_rx_queue *rx_queue; 3540 efx_qword_t errors; 3541 bool rx_cont; 3542 u16 flags = 0; 3543 3544 if (unlikely(READ_ONCE(efx->reset_pending))) 3545 return 0; 3546 3547 /* Basic packet information */ 3548 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 3549 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 3550 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 3551 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 3552 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); 3553 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 3554 rx_encap_hdr = 3555 nic_data->datapath_caps & 3556 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
3557 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 3558 ESE_EZ_ENCAP_HDR_NONE; 3559 3560 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 3561 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 3562 EFX_QWORD_FMT "\n", 3563 EFX_QWORD_VAL(*event)); 3564 3565 rx_queue = efx_channel_get_rx_queue(channel); 3566 3567 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 3568 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 3569 3570 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 3571 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3572 3573 if (n_descs != rx_queue->scatter_n + 1) { 3574 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3575 3576 /* detect rx abort */ 3577 if (unlikely(n_descs == rx_queue->scatter_n)) { 3578 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 3579 netdev_WARN(efx->net_dev, 3580 "invalid RX abort: scatter_n=%u event=" 3581 EFX_QWORD_FMT "\n", 3582 rx_queue->scatter_n, 3583 EFX_QWORD_VAL(*event)); 3584 efx_ef10_handle_rx_abort(rx_queue); 3585 return 0; 3586 } 3587 3588 /* Check that RX completion merging is valid, i.e. 3589 * the current firmware supports it and this is a 3590 * non-scattered packet. 3591 */ 3592 if (!(nic_data->datapath_caps & 3593 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 3594 rx_queue->scatter_n != 0 || rx_cont) { 3595 efx_ef10_handle_rx_bad_lbits( 3596 rx_queue, next_ptr_lbits, 3597 (rx_queue->removed_count + 3598 rx_queue->scatter_n + 1) & 3599 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3600 return 0; 3601 } 3602 3603 /* Merged completion for multiple non-scattered packets */ 3604 rx_queue->scatter_n = 1; 3605 rx_queue->scatter_len = 0; 3606 n_packets = n_descs; 3607 ++channel->n_rx_merge_events; 3608 channel->n_rx_merge_packets += n_packets; 3609 flags |= EFX_RX_PKT_PREFIX_LEN; 3610 } else { 3611 ++rx_queue->scatter_n; 3612 rx_queue->scatter_len += rx_bytes; 3613 if (rx_cont) 3614 return 0; 3615 n_packets = 1; 3616 } 3617 3618 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 3619 ESF_DZ_RX_IPCKSUM_ERR, 1, 3620 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 3621 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 3622 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 3623 EFX_AND_QWORD(errors, *event, errors); 3624 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 3625 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, 3626 rx_encap_hdr, 3627 rx_l3_class, rx_l4_class, 3628 event); 3629 } else { 3630 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || 3631 rx_l4_class == ESE_FZ_L4_CLASS_UDP; 3632 3633 switch (rx_encap_hdr) { 3634 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ 3635 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ 3636 if (tcpudp) 3637 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ 3638 break; 3639 case ESE_EZ_ENCAP_HDR_GRE: 3640 case ESE_EZ_ENCAP_HDR_NONE: 3641 if (tcpudp) 3642 flags |= EFX_RX_PKT_CSUMMED; 3643 break; 3644 default: 3645 netdev_WARN(efx->net_dev, 3646 "unknown encapsulation type: event=" 3647 EFX_QWORD_FMT "\n", 3648 EFX_QWORD_VAL(*event)); 3649 } 3650 } 3651 3652 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) 3653 flags |= EFX_RX_PKT_TCP; 3654 3655 channel->irq_mod_score += 2 * n_packets; 3656 3657 /* Handle received packet(s) */ 3658 for (i = 0; i < n_packets; i++) { 3659 efx_rx_packet(rx_queue, 3660 rx_queue->removed_count & rx_queue->ptr_mask, 3661 rx_queue->scatter_n, rx_queue->scatter_len, 3662 flags); 3663 rx_queue->removed_count += rx_queue->scatter_n; 3664 } 3665 3666 rx_queue->scatter_n = 0; 3667 rx_queue->scatter_len = 0; 3668 3669 return n_packets; 3670 } 3671 3672 static u32 
efx_ef10_extract_event_ts(efx_qword_t *event) 3673 { 3674 u32 tstamp; 3675 3676 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); 3677 tstamp <<= 16; 3678 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); 3679 3680 return tstamp; 3681 } 3682 3683 static void 3684 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 3685 { 3686 struct efx_nic *efx = channel->efx; 3687 struct efx_tx_queue *tx_queue; 3688 unsigned int tx_ev_desc_ptr; 3689 unsigned int tx_ev_q_label; 3690 unsigned int tx_ev_type; 3691 u64 ts_part; 3692 3693 if (unlikely(READ_ONCE(efx->reset_pending))) 3694 return; 3695 3696 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 3697 return; 3698 3699 /* Get the transmit queue */ 3700 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 3701 tx_queue = efx_channel_get_tx_queue(channel, 3702 tx_ev_q_label % EFX_TXQ_TYPES); 3703 3704 if (!tx_queue->timestamping) { 3705 /* Transmit completion */ 3706 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 3707 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 3708 return; 3709 } 3710 3711 /* Transmit timestamps are only available for 8XXX series. They result 3712 * in three events per packet. These occur in order, and are: 3713 * - the normal completion event 3714 * - the low part of the timestamp 3715 * - the high part of the timestamp 3716 * 3717 * Each part of the timestamp is itself split across two 16 bit 3718 * fields in the event. 3719 */ 3720 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); 3721 3722 switch (tx_ev_type) { 3723 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: 3724 /* In case of Queue flush or FLR, we might have received 3725 * the previous TX completion event but not the Timestamp 3726 * events. 3727 */ 3728 if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask) 3729 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); 3730 3731 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, 3732 ESF_DZ_TX_DESCR_INDX); 3733 tx_queue->completed_desc_ptr = 3734 tx_ev_desc_ptr & tx_queue->ptr_mask; 3735 break; 3736 3737 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: 3738 ts_part = efx_ef10_extract_event_ts(event); 3739 tx_queue->completed_timestamp_minor = ts_part; 3740 break; 3741 3742 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: 3743 ts_part = efx_ef10_extract_event_ts(event); 3744 tx_queue->completed_timestamp_major = ts_part; 3745 3746 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); 3747 tx_queue->completed_desc_ptr = tx_queue->ptr_mask; 3748 break; 3749 3750 default: 3751 netif_err(efx, hw, efx->net_dev, 3752 "channel %d unknown tx event type %d (data " 3753 EFX_QWORD_FMT ")\n", 3754 channel->channel, tx_ev_type, 3755 EFX_QWORD_VAL(*event)); 3756 break; 3757 } 3758 } 3759 3760 static void 3761 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 3762 { 3763 struct efx_nic *efx = channel->efx; 3764 int subcode; 3765 3766 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 3767 3768 switch (subcode) { 3769 case ESE_DZ_DRV_TIMER_EV: 3770 case ESE_DZ_DRV_WAKE_UP_EV: 3771 break; 3772 case ESE_DZ_DRV_START_UP_EV: 3773 /* event queue init complete. ok. 
*/ 3774 break; 3775 default: 3776 netif_err(efx, hw, efx->net_dev, 3777 "channel %d unknown driver event type %d" 3778 " (data " EFX_QWORD_FMT ")\n", 3779 channel->channel, subcode, 3780 EFX_QWORD_VAL(*event)); 3781 3782 } 3783 } 3784 3785 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 3786 efx_qword_t *event) 3787 { 3788 struct efx_nic *efx = channel->efx; 3789 u32 subcode; 3790 3791 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 3792 3793 switch (subcode) { 3794 case EFX_EF10_TEST: 3795 channel->event_test_cpu = raw_smp_processor_id(); 3796 break; 3797 case EFX_EF10_REFILL: 3798 /* The queue must be empty, so we won't receive any rx 3799 * events, so efx_process_channel() won't refill the 3800 * queue. Refill it here 3801 */ 3802 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 3803 break; 3804 default: 3805 netif_err(efx, hw, efx->net_dev, 3806 "channel %d unknown driver event type %u" 3807 " (data " EFX_QWORD_FMT ")\n", 3808 channel->channel, (unsigned) subcode, 3809 EFX_QWORD_VAL(*event)); 3810 } 3811 } 3812 3813 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 3814 { 3815 struct efx_nic *efx = channel->efx; 3816 efx_qword_t event, *p_event; 3817 unsigned int read_ptr; 3818 int ev_code; 3819 int spent = 0; 3820 3821 if (quota <= 0) 3822 return spent; 3823 3824 read_ptr = channel->eventq_read_ptr; 3825 3826 for (;;) { 3827 p_event = efx_event(channel, read_ptr); 3828 event = *p_event; 3829 3830 if (!efx_event_present(&event)) 3831 break; 3832 3833 EFX_SET_QWORD(*p_event); 3834 3835 ++read_ptr; 3836 3837 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 3838 3839 netif_vdbg(efx, drv, efx->net_dev, 3840 "processing event on %d " EFX_QWORD_FMT "\n", 3841 channel->channel, EFX_QWORD_VAL(event)); 3842 3843 switch (ev_code) { 3844 case ESE_DZ_EV_CODE_MCDI_EV: 3845 efx_mcdi_process_event(channel, &event); 3846 break; 3847 case ESE_DZ_EV_CODE_RX_EV: 3848 spent += efx_ef10_handle_rx_event(channel, &event); 3849 if (spent >= quota) { 3850 /* XXX can we split a merged event to 3851 * avoid going over-quota? 
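 * For now we simply clamp spent to quota and stop; the events
 * still in the queue are handled on the next poll, since
 * eventq_read_ptr is saved below.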
3852 */ 3853 spent = quota; 3854 goto out; 3855 } 3856 break; 3857 case ESE_DZ_EV_CODE_TX_EV: 3858 efx_ef10_handle_tx_event(channel, &event); 3859 break; 3860 case ESE_DZ_EV_CODE_DRIVER_EV: 3861 efx_ef10_handle_driver_event(channel, &event); 3862 if (++spent == quota) 3863 goto out; 3864 break; 3865 case EFX_EF10_DRVGEN_EV: 3866 efx_ef10_handle_driver_generated_event(channel, &event); 3867 break; 3868 default: 3869 netif_err(efx, hw, efx->net_dev, 3870 "channel %d unknown event type %d" 3871 " (data " EFX_QWORD_FMT ")\n", 3872 channel->channel, ev_code, 3873 EFX_QWORD_VAL(event)); 3874 } 3875 } 3876 3877 out: 3878 channel->eventq_read_ptr = read_ptr; 3879 return spent; 3880 } 3881 3882 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 3883 { 3884 struct efx_nic *efx = channel->efx; 3885 efx_dword_t rptr; 3886 3887 if (EFX_EF10_WORKAROUND_35388(efx)) { 3888 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 3889 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 3890 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 3891 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 3892 3893 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3894 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 3895 ERF_DD_EVQ_IND_RPTR, 3896 (channel->eventq_read_ptr & 3897 channel->eventq_mask) >> 3898 ERF_DD_EVQ_IND_RPTR_WIDTH); 3899 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3900 channel->channel); 3901 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3902 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 3903 ERF_DD_EVQ_IND_RPTR, 3904 channel->eventq_read_ptr & 3905 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 3906 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3907 channel->channel); 3908 } else { 3909 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 3910 channel->eventq_read_ptr & 3911 channel->eventq_mask); 3912 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 3913 } 3914 } 3915 3916 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 3917 { 3918 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3919 struct efx_nic *efx = channel->efx; 3920 efx_qword_t event; 3921 int rc; 3922 3923 EFX_POPULATE_QWORD_2(event, 3924 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3925 ESF_DZ_EV_DATA, EFX_EF10_TEST); 3926 3927 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3928 3929 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3930 * already swapped the data to little-endian order. 3931 */ 3932 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3933 sizeof(efx_qword_t)); 3934 3935 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 3936 NULL, 0, NULL); 3937 if (rc != 0) 3938 goto fail; 3939 3940 return; 3941 3942 fail: 3943 WARN_ON(true); 3944 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 3945 } 3946 3947 void efx_ef10_handle_drain_event(struct efx_nic *efx) 3948 { 3949 if (atomic_dec_and_test(&efx->active_queues)) 3950 wake_up(&efx->flush_wq); 3951 3952 WARN_ON(atomic_read(&efx->active_queues) < 0); 3953 } 3954 3955 static int efx_ef10_fini_dmaq(struct efx_nic *efx) 3956 { 3957 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3958 struct efx_channel *channel; 3959 struct efx_tx_queue *tx_queue; 3960 struct efx_rx_queue *rx_queue; 3961 int pending; 3962 3963 /* If the MC has just rebooted, the TX/RX queues will have already been 3964 * torn down, but efx->active_queues needs to be set to zero. 
3965 */ 3966 if (nic_data->must_realloc_vis) { 3967 atomic_set(&efx->active_queues, 0); 3968 return 0; 3969 } 3970 3971 /* Do not attempt to write to the NIC during EEH recovery */ 3972 if (efx->state != STATE_RECOVERY) { 3973 efx_for_each_channel(channel, efx) { 3974 efx_for_each_channel_rx_queue(rx_queue, channel) 3975 efx_ef10_rx_fini(rx_queue); 3976 efx_for_each_channel_tx_queue(tx_queue, channel) 3977 efx_ef10_tx_fini(tx_queue); 3978 } 3979 3980 wait_event_timeout(efx->flush_wq, 3981 atomic_read(&efx->active_queues) == 0, 3982 msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); 3983 pending = atomic_read(&efx->active_queues); 3984 if (pending) { 3985 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", 3986 pending); 3987 return -ETIMEDOUT; 3988 } 3989 } 3990 3991 return 0; 3992 } 3993 3994 static void efx_ef10_prepare_flr(struct efx_nic *efx) 3995 { 3996 atomic_set(&efx->active_queues, 0); 3997 } 3998 3999 /* Decide whether a filter should be exclusive or else should allow 4000 * delivery to additional recipients. Currently we decide that 4001 * filters for specific local unicast MAC and IP addresses are 4002 * exclusive. 4003 */ 4004 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) 4005 { 4006 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 4007 !is_multicast_ether_addr(spec->loc_mac)) 4008 return true; 4009 4010 if ((spec->match_flags & 4011 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 4012 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 4013 if (spec->ether_type == htons(ETH_P_IP) && 4014 !ipv4_is_multicast(spec->loc_host[0])) 4015 return true; 4016 if (spec->ether_type == htons(ETH_P_IPV6) && 4017 ((const u8 *)spec->loc_host)[0] != 0xff) 4018 return true; 4019 } 4020 4021 return false; 4022 } 4023 4024 static struct efx_filter_spec * 4025 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, 4026 unsigned int filter_idx) 4027 { 4028 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & 4029 ~EFX_EF10_FILTER_FLAGS); 4030 } 4031 4032 static unsigned int 4033 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, 4034 unsigned int filter_idx) 4035 { 4036 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; 4037 } 4038 4039 static void 4040 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, 4041 unsigned int filter_idx, 4042 const struct efx_filter_spec *spec, 4043 unsigned int flags) 4044 { 4045 table->entry[filter_idx].spec = (unsigned long)spec | flags; 4046 } 4047 4048 static void 4049 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, 4050 const struct efx_filter_spec *spec, 4051 efx_dword_t *inbuf) 4052 { 4053 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 4054 u32 match_fields = 0, uc_match, mc_match; 4055 4056 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4057 efx_ef10_filter_is_exclusive(spec) ? 4058 MC_CMD_FILTER_OP_IN_OP_INSERT : 4059 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 4060 4061 /* Convert match flags and values. Unlike almost 4062 * everything else in MCDI, these fields are in 4063 * network byte order. 
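 *
 * As an illustrative sketch (not verbatim macro output): for a spec
 * that matches on LOC_PORT, COPY_FIELD(LOC_PORT, loc_port, DST_PORT)
 * below does roughly
 *
 *     match_fields |= 1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
 *     memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
 *            &spec->loc_port, sizeof(spec->loc_port));
 *
 * spec->loc_port is already stored big-endian (__be16), so the memcpy
 * leaves the value in network byte order without a swap.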
4064 */ 4065 #define COPY_VALUE(value, mcdi_field) \ 4066 do { \ 4067 match_fields |= \ 4068 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 4069 mcdi_field ## _LBN; \ 4070 BUILD_BUG_ON( \ 4071 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ 4072 sizeof(value)); \ 4073 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ 4074 &value, sizeof(value)); \ 4075 } while (0) 4076 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ 4077 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ 4078 COPY_VALUE(spec->gen_field, mcdi_field); \ 4079 } 4080 /* Handle encap filters first. They will always be mismatch 4081 * (unknown UC or MC) filters 4082 */ 4083 if (encap_type) { 4084 /* ether_type and outer_ip_proto need to be variables 4085 * because COPY_VALUE wants to memcpy them 4086 */ 4087 __be16 ether_type = 4088 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 4089 ETH_P_IPV6 : ETH_P_IP); 4090 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; 4091 u8 outer_ip_proto; 4092 4093 switch (encap_type & EFX_ENCAP_TYPES_MASK) { 4094 case EFX_ENCAP_TYPE_VXLAN: 4095 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; 4096 /* fallthrough */ 4097 case EFX_ENCAP_TYPE_GENEVE: 4098 COPY_VALUE(ether_type, ETHER_TYPE); 4099 outer_ip_proto = IPPROTO_UDP; 4100 COPY_VALUE(outer_ip_proto, IP_PROTO); 4101 /* We always need to set the type field, even 4102 * though we're not matching on the TNI. 4103 */ 4104 MCDI_POPULATE_DWORD_1(inbuf, 4105 FILTER_OP_EXT_IN_VNI_OR_VSID, 4106 FILTER_OP_EXT_IN_VNI_TYPE, 4107 vni_type); 4108 break; 4109 case EFX_ENCAP_TYPE_NVGRE: 4110 COPY_VALUE(ether_type, ETHER_TYPE); 4111 outer_ip_proto = IPPROTO_GRE; 4112 COPY_VALUE(outer_ip_proto, IP_PROTO); 4113 break; 4114 default: 4115 WARN_ON(1); 4116 } 4117 4118 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 4119 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 4120 } else { 4121 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 4122 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 4123 } 4124 4125 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 4126 match_fields |= 4127 is_multicast_ether_addr(spec->loc_mac) ? 4128 1 << mc_match : 4129 1 << uc_match; 4130 COPY_FIELD(REM_HOST, rem_host, SRC_IP); 4131 COPY_FIELD(LOC_HOST, loc_host, DST_IP); 4132 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); 4133 COPY_FIELD(REM_PORT, rem_port, SRC_PORT); 4134 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); 4135 COPY_FIELD(LOC_PORT, loc_port, DST_PORT); 4136 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); 4137 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); 4138 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); 4139 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); 4140 #undef COPY_FIELD 4141 #undef COPY_VALUE 4142 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, 4143 match_fields); 4144 } 4145 4146 static void efx_ef10_filter_push_prep(struct efx_nic *efx, 4147 const struct efx_filter_spec *spec, 4148 efx_dword_t *inbuf, u64 handle, 4149 struct efx_rss_context *ctx, 4150 bool replacing) 4151 { 4152 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4153 u32 flags = spec->flags; 4154 4155 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); 4156 4157 /* If RSS filter, caller better have given us an RSS context */ 4158 if (flags & EFX_FILTER_FLAG_RX_RSS) { 4159 /* We don't have the ability to return an error, so we'll just 4160 * log a warning and disable RSS for the filter. 
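 * The filter is still pushed, but with RX_MODE_SIMPLE it delivers
 * only to the single queue in spec->dmaq_id instead of spreading
 * across the RSS context.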
4161 */ 4162 if (WARN_ON_ONCE(!ctx)) 4163 flags &= ~EFX_FILTER_FLAG_RX_RSS; 4164 else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)) 4165 flags &= ~EFX_FILTER_FLAG_RX_RSS; 4166 } 4167 4168 if (replacing) { 4169 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4170 MC_CMD_FILTER_OP_IN_OP_REPLACE); 4171 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 4172 } else { 4173 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); 4174 } 4175 4176 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); 4177 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, 4178 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4179 MC_CMD_FILTER_OP_IN_RX_DEST_DROP : 4180 MC_CMD_FILTER_OP_IN_RX_DEST_HOST); 4181 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); 4182 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, 4183 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); 4184 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, 4185 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4186 0 : spec->dmaq_id); 4187 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 4188 (flags & EFX_FILTER_FLAG_RX_RSS) ? 4189 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 4190 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 4191 if (flags & EFX_FILTER_FLAG_RX_RSS) 4192 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id); 4193 } 4194 4195 static int efx_ef10_filter_push(struct efx_nic *efx, 4196 const struct efx_filter_spec *spec, u64 *handle, 4197 struct efx_rss_context *ctx, bool replacing) 4198 { 4199 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4200 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); 4201 int rc; 4202 4203 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); 4204 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 4205 outbuf, sizeof(outbuf), NULL); 4206 if (rc == 0) 4207 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 4208 if (rc == -ENOSPC) 4209 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 4210 return rc; 4211 } 4212 4213 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) 4214 { 4215 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 4216 unsigned int match_flags = spec->match_flags; 4217 unsigned int uc_match, mc_match; 4218 u32 mcdi_flags = 0; 4219 4220 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ 4221 unsigned int old_match_flags = match_flags; \ 4222 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ 4223 if (match_flags != old_match_flags) \ 4224 mcdi_flags |= \ 4225 (1 << ((encap) ? 
\ 4226 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ 4227 mcdi_field ## _LBN : \ 4228 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ 4229 mcdi_field ## _LBN)); \ 4230 } 4231 /* inner or outer based on encap type */ 4232 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); 4233 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); 4234 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); 4235 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); 4236 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); 4237 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); 4238 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); 4239 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); 4240 /* always outer */ 4241 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); 4242 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); 4243 #undef MAP_FILTER_TO_MCDI_FLAG 4244 4245 /* special handling for encap type, and mismatch */ 4246 if (encap_type) { 4247 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; 4248 mcdi_flags |= 4249 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4250 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4251 4252 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 4253 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 4254 } else { 4255 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 4256 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 4257 } 4258 4259 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { 4260 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; 4261 mcdi_flags |= 4262 is_multicast_ether_addr(spec->loc_mac) ? 4263 1 << mc_match : 4264 1 << uc_match; 4265 } 4266 4267 /* Did we map them all? */ 4268 WARN_ON_ONCE(match_flags); 4269 4270 return mcdi_flags; 4271 } 4272 4273 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, 4274 const struct efx_filter_spec *spec) 4275 { 4276 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 4277 unsigned int match_pri; 4278 4279 for (match_pri = 0; 4280 match_pri < table->rx_match_count; 4281 match_pri++) 4282 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) 4283 return match_pri; 4284 4285 return -EPROTONOSUPPORT; 4286 } 4287 4288 static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx, 4289 struct efx_filter_spec *spec, 4290 bool replace_equal) 4291 { 4292 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4293 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4294 struct efx_ef10_filter_table *table; 4295 struct efx_filter_spec *saved_spec; 4296 struct efx_rss_context *ctx = NULL; 4297 unsigned int match_pri, hash; 4298 unsigned int priv_flags; 4299 bool rss_locked = false; 4300 bool replacing = false; 4301 unsigned int depth, i; 4302 int ins_index = -1; 4303 DEFINE_WAIT(wait); 4304 bool is_mc_recip; 4305 s32 rc; 4306 4307 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 4308 table = efx->filter_state; 4309 down_write(&table->lock); 4310 4311 /* For now, only support RX filters */ 4312 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 4313 EFX_FILTER_FLAG_RX) { 4314 rc = -EINVAL; 4315 goto out_unlock; 4316 } 4317 4318 rc = efx_ef10_filter_pri(table, spec); 4319 if (rc < 0) 4320 goto out_unlock; 4321 match_pri = rc; 4322 4323 hash = efx_filter_spec_hash(spec); 4324 is_mc_recip = efx_filter_is_mc_recipient(spec); 4325 if (is_mc_recip) 4326 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4327 4328 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { 4329 mutex_lock(&efx->rss_lock); 4330 rss_locked = 
true; 4331 if (spec->rss_context) 4332 ctx = efx_find_rss_context_entry(efx, spec->rss_context); 4333 else 4334 ctx = &efx->rss_context; 4335 if (!ctx) { 4336 rc = -ENOENT; 4337 goto out_unlock; 4338 } 4339 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { 4340 rc = -EOPNOTSUPP; 4341 goto out_unlock; 4342 } 4343 } 4344 4345 /* Find any existing filters with the same match tuple or 4346 * else a free slot to insert at. 4347 */ 4348 for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4349 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4350 saved_spec = efx_ef10_filter_entry_spec(table, i); 4351 4352 if (!saved_spec) { 4353 if (ins_index < 0) 4354 ins_index = i; 4355 } else if (efx_filter_spec_equal(spec, saved_spec)) { 4356 if (spec->priority < saved_spec->priority && 4357 spec->priority != EFX_FILTER_PRI_AUTO) { 4358 rc = -EPERM; 4359 goto out_unlock; 4360 } 4361 if (!is_mc_recip) { 4362 /* This is the only one */ 4363 if (spec->priority == 4364 saved_spec->priority && 4365 !replace_equal) { 4366 rc = -EEXIST; 4367 goto out_unlock; 4368 } 4369 ins_index = i; 4370 break; 4371 } else if (spec->priority > 4372 saved_spec->priority || 4373 (spec->priority == 4374 saved_spec->priority && 4375 replace_equal)) { 4376 if (ins_index < 0) 4377 ins_index = i; 4378 else 4379 __set_bit(depth, mc_rem_map); 4380 } 4381 } 4382 } 4383 4384 /* Once we reach the maximum search depth, use the first suitable 4385 * slot, or return -EBUSY if there was none 4386 */ 4387 if (ins_index < 0) { 4388 rc = -EBUSY; 4389 goto out_unlock; 4390 } 4391 4392 /* Create a software table entry if necessary. */ 4393 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 4394 if (saved_spec) { 4395 if (spec->priority == EFX_FILTER_PRI_AUTO && 4396 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 4397 /* Just make sure it won't be removed */ 4398 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 4399 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 4400 table->entry[ins_index].spec &= 4401 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4402 rc = ins_index; 4403 goto out_unlock; 4404 } 4405 replacing = true; 4406 priv_flags = efx_ef10_filter_entry_flags(table, ins_index); 4407 } else { 4408 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 4409 if (!saved_spec) { 4410 rc = -ENOMEM; 4411 goto out_unlock; 4412 } 4413 *saved_spec = *spec; 4414 priv_flags = 0; 4415 } 4416 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); 4417 4418 /* Actually insert the filter on the HW */ 4419 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, 4420 ctx, replacing); 4421 4422 if (rc == -EINVAL && nic_data->must_realloc_vis) 4423 /* The MC rebooted under us, causing it to reject our filter 4424 * insertion as pointing to an invalid VI (spec->dmaq_id). 4425 */ 4426 rc = -EAGAIN; 4427 4428 /* Finalise the software table entry */ 4429 if (rc == 0) { 4430 if (replacing) { 4431 /* Update the fields that may differ */ 4432 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 4433 saved_spec->flags |= 4434 EFX_FILTER_FLAG_RX_OVER_AUTO; 4435 saved_spec->priority = spec->priority; 4436 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 4437 saved_spec->flags |= spec->flags; 4438 saved_spec->rss_context = spec->rss_context; 4439 saved_spec->dmaq_id = spec->dmaq_id; 4440 } 4441 } else if (!replacing) { 4442 kfree(saved_spec); 4443 saved_spec = NULL; 4444 } else { 4445 /* We failed to replace, so the old filter is still present. 4446 * Roll back the software table to reflect this. 
In fact the 4447 * efx_ef10_filter_set_entry() call below will do the right 4448 * thing, so nothing extra is needed here. 4449 */ 4450 } 4451 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); 4452 4453 /* Remove and finalise entries for lower-priority multicast 4454 * recipients 4455 */ 4456 if (is_mc_recip) { 4457 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4458 unsigned int depth, i; 4459 4460 memset(inbuf, 0, sizeof(inbuf)); 4461 4462 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4463 if (!test_bit(depth, mc_rem_map)) 4464 continue; 4465 4466 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4467 saved_spec = efx_ef10_filter_entry_spec(table, i); 4468 priv_flags = efx_ef10_filter_entry_flags(table, i); 4469 4470 if (rc == 0) { 4471 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4472 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4473 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4474 table->entry[i].handle); 4475 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 4476 inbuf, sizeof(inbuf), 4477 NULL, 0, NULL); 4478 } 4479 4480 if (rc == 0) { 4481 kfree(saved_spec); 4482 saved_spec = NULL; 4483 priv_flags = 0; 4484 } 4485 efx_ef10_filter_set_entry(table, i, saved_spec, 4486 priv_flags); 4487 } 4488 } 4489 4490 /* If successful, return the inserted filter ID */ 4491 if (rc == 0) 4492 rc = efx_ef10_make_filter_id(match_pri, ins_index); 4493 4494 out_unlock: 4495 if (rss_locked) 4496 mutex_unlock(&efx->rss_lock); 4497 up_write(&table->lock); 4498 return rc; 4499 } 4500 4501 static s32 efx_ef10_filter_insert(struct efx_nic *efx, 4502 struct efx_filter_spec *spec, 4503 bool replace_equal) 4504 { 4505 s32 ret; 4506 4507 down_read(&efx->filter_sem); 4508 ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal); 4509 up_read(&efx->filter_sem); 4510 4511 return ret; 4512 } 4513 4514 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 4515 { 4516 /* no need to do anything here on EF10 */ 4517 } 4518 4519 /* Remove a filter. 4520 * If !by_index, remove by ID 4521 * If by_index, remove by index 4522 * Filter ID may come from userland and must be range-checked. 4523 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock 4524 * for write. 4525 */ 4526 static int efx_ef10_filter_remove_internal(struct efx_nic *efx, 4527 unsigned int priority_mask, 4528 u32 filter_id, bool by_index) 4529 { 4530 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4531 struct efx_ef10_filter_table *table = efx->filter_state; 4532 MCDI_DECLARE_BUF(inbuf, 4533 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 4534 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 4535 struct efx_filter_spec *spec; 4536 DEFINE_WAIT(wait); 4537 int rc; 4538 4539 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4540 if (!spec || 4541 (!by_index && 4542 efx_ef10_filter_pri(table, spec) != 4543 efx_ef10_filter_get_unsafe_pri(filter_id))) 4544 return -ENOENT; 4545 4546 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 4547 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 4548 /* Just remove flags */ 4549 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 4550 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4551 return 0; 4552 } 4553 4554 if (!(priority_mask & (1U << spec->priority))) 4555 return -ENOENT; 4556 4557 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 4558 /* Reset to an automatic filter */ 4559 4560 struct efx_filter_spec new_spec = *spec; 4561 4562 new_spec.priority = EFX_FILTER_PRI_AUTO; 4563 new_spec.flags = (EFX_FILTER_FLAG_RX | 4564 (efx_rss_active(&efx->rss_context) ? 
4565 EFX_FILTER_FLAG_RX_RSS : 0)); 4566 new_spec.dmaq_id = 0; 4567 new_spec.rss_context = 0; 4568 rc = efx_ef10_filter_push(efx, &new_spec, 4569 &table->entry[filter_idx].handle, 4570 &efx->rss_context, 4571 true); 4572 4573 if (rc == 0) 4574 *spec = new_spec; 4575 } else { 4576 /* Really remove the filter */ 4577 4578 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4579 efx_ef10_filter_is_exclusive(spec) ? 4580 MC_CMD_FILTER_OP_IN_OP_REMOVE : 4581 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4582 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4583 table->entry[filter_idx].handle); 4584 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, 4585 inbuf, sizeof(inbuf), NULL, 0, NULL); 4586 4587 if ((rc == 0) || (rc == -ENOENT)) { 4588 /* Filter removed OK or didn't actually exist */ 4589 kfree(spec); 4590 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4591 } else { 4592 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4593 MC_CMD_FILTER_OP_EXT_IN_LEN, 4594 NULL, 0, rc); 4595 } 4596 } 4597 4598 return rc; 4599 } 4600 4601 static int efx_ef10_filter_remove_safe(struct efx_nic *efx, 4602 enum efx_filter_priority priority, 4603 u32 filter_id) 4604 { 4605 struct efx_ef10_filter_table *table; 4606 int rc; 4607 4608 down_read(&efx->filter_sem); 4609 table = efx->filter_state; 4610 down_write(&table->lock); 4611 rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, 4612 false); 4613 up_write(&table->lock); 4614 up_read(&efx->filter_sem); 4615 return rc; 4616 } 4617 4618 /* Caller must hold efx->filter_sem for read */ 4619 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, 4620 enum efx_filter_priority priority, 4621 u32 filter_id) 4622 { 4623 struct efx_ef10_filter_table *table = efx->filter_state; 4624 4625 if (filter_id == EFX_EF10_FILTER_ID_INVALID) 4626 return; 4627 4628 down_write(&table->lock); 4629 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, 4630 true); 4631 up_write(&table->lock); 4632 } 4633 4634 static int efx_ef10_filter_get_safe(struct efx_nic *efx, 4635 enum efx_filter_priority priority, 4636 u32 filter_id, struct efx_filter_spec *spec) 4637 { 4638 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4639 const struct efx_filter_spec *saved_spec; 4640 struct efx_ef10_filter_table *table; 4641 int rc; 4642 4643 down_read(&efx->filter_sem); 4644 table = efx->filter_state; 4645 down_read(&table->lock); 4646 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); 4647 if (saved_spec && saved_spec->priority == priority && 4648 efx_ef10_filter_pri(table, saved_spec) == 4649 efx_ef10_filter_get_unsafe_pri(filter_id)) { 4650 *spec = *saved_spec; 4651 rc = 0; 4652 } else { 4653 rc = -ENOENT; 4654 } 4655 up_read(&table->lock); 4656 up_read(&efx->filter_sem); 4657 return rc; 4658 } 4659 4660 static int efx_ef10_filter_clear_rx(struct efx_nic *efx, 4661 enum efx_filter_priority priority) 4662 { 4663 struct efx_ef10_filter_table *table; 4664 unsigned int priority_mask; 4665 unsigned int i; 4666 int rc; 4667 4668 priority_mask = (((1U << (priority + 1)) - 1) & 4669 ~(1U << EFX_FILTER_PRI_AUTO)); 4670 4671 down_read(&efx->filter_sem); 4672 table = efx->filter_state; 4673 down_write(&table->lock); 4674 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 4675 rc = efx_ef10_filter_remove_internal(efx, priority_mask, 4676 i, true); 4677 if (rc && rc != -ENOENT) 4678 break; 4679 rc = 0; 4680 } 4681 4682 up_write(&table->lock); 4683 up_read(&efx->filter_sem); 4684 return rc; 4685 } 4686 4687 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, 4688 enum 
efx_filter_priority priority) 4689 { 4690 struct efx_ef10_filter_table *table; 4691 unsigned int filter_idx; 4692 s32 count = 0; 4693 4694 down_read(&efx->filter_sem); 4695 table = efx->filter_state; 4696 down_read(&table->lock); 4697 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4698 if (table->entry[filter_idx].spec && 4699 efx_ef10_filter_entry_spec(table, filter_idx)->priority == 4700 priority) 4701 ++count; 4702 } 4703 up_read(&table->lock); 4704 up_read(&efx->filter_sem); 4705 return count; 4706 } 4707 4708 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) 4709 { 4710 struct efx_ef10_filter_table *table = efx->filter_state; 4711 4712 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; 4713 } 4714 4715 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, 4716 enum efx_filter_priority priority, 4717 u32 *buf, u32 size) 4718 { 4719 struct efx_ef10_filter_table *table; 4720 struct efx_filter_spec *spec; 4721 unsigned int filter_idx; 4722 s32 count = 0; 4723 4724 down_read(&efx->filter_sem); 4725 table = efx->filter_state; 4726 down_read(&table->lock); 4727 4728 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4729 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4730 if (spec && spec->priority == priority) { 4731 if (count == size) { 4732 count = -EMSGSIZE; 4733 break; 4734 } 4735 buf[count++] = 4736 efx_ef10_make_filter_id( 4737 efx_ef10_filter_pri(table, spec), 4738 filter_idx); 4739 } 4740 } 4741 up_read(&table->lock); 4742 up_read(&efx->filter_sem); 4743 return count; 4744 } 4745 4746 #ifdef CONFIG_RFS_ACCEL 4747 4748 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4749 unsigned int filter_idx) 4750 { 4751 struct efx_filter_spec *spec, saved_spec; 4752 struct efx_ef10_filter_table *table; 4753 struct efx_arfs_rule *rule = NULL; 4754 bool ret = true, force = false; 4755 u16 arfs_id; 4756 4757 down_read(&efx->filter_sem); 4758 table = efx->filter_state; 4759 down_write(&table->lock); 4760 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4761 4762 if (!spec || spec->priority != EFX_FILTER_PRI_HINT) 4763 goto out_unlock; 4764 4765 spin_lock_bh(&efx->rps_hash_lock); 4766 if (!efx->rps_hash_table) { 4767 /* In the absence of the table, we always return 0 to ARFS. */ 4768 arfs_id = 0; 4769 } else { 4770 rule = efx_rps_hash_find(efx, spec); 4771 if (!rule) 4772 /* ARFS table doesn't know of this filter, so remove it */ 4773 goto expire; 4774 arfs_id = rule->arfs_id; 4775 ret = efx_rps_check_rule(rule, filter_idx, &force); 4776 if (force) 4777 goto expire; 4778 if (!ret) { 4779 spin_unlock_bh(&efx->rps_hash_lock); 4780 goto out_unlock; 4781 } 4782 } 4783 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) 4784 ret = false; 4785 else if (rule) 4786 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; 4787 expire: 4788 saved_spec = *spec; /* remove operation will kfree spec */ 4789 spin_unlock_bh(&efx->rps_hash_lock); 4790 /* At this point (since we dropped the lock), another thread might queue 4791 * up a fresh insertion request (but the actual insertion will be held 4792 * up by our possession of the filter table lock). In that case, it 4793 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that 4794 * the rule is not removed by efx_rps_hash_del() below. 
4795 */ 4796 if (ret) 4797 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4798 filter_idx, true) == 0; 4799 /* While we can't safely dereference rule (we dropped the lock), we can 4800 * still test it for NULL. 4801 */ 4802 if (ret && rule) { 4803 /* Expiring, so remove entry from ARFS table */ 4804 spin_lock_bh(&efx->rps_hash_lock); 4805 efx_rps_hash_del(efx, &saved_spec); 4806 spin_unlock_bh(&efx->rps_hash_lock); 4807 } 4808 out_unlock: 4809 up_write(&table->lock); 4810 up_read(&efx->filter_sem); 4811 return ret; 4812 } 4813 4814 #endif /* CONFIG_RFS_ACCEL */ 4815 4816 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) 4817 { 4818 int match_flags = 0; 4819 4820 #define MAP_FLAG(gen_flag, mcdi_field) do { \ 4821 u32 old_mcdi_flags = mcdi_flags; \ 4822 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ 4823 mcdi_field ## _LBN); \ 4824 if (mcdi_flags != old_mcdi_flags) \ 4825 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ 4826 } while (0) 4827 4828 if (encap) { 4829 /* encap filters must specify encap type */ 4830 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 4831 /* and imply ethertype and ip proto */ 4832 mcdi_flags &= 4833 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4834 mcdi_flags &= 4835 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4836 /* VLAN tags refer to the outer packet */ 4837 MAP_FLAG(INNER_VID, INNER_VLAN); 4838 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4839 /* everything else refers to the inner packet */ 4840 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); 4841 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); 4842 MAP_FLAG(REM_HOST, IFRM_SRC_IP); 4843 MAP_FLAG(LOC_HOST, IFRM_DST_IP); 4844 MAP_FLAG(REM_MAC, IFRM_SRC_MAC); 4845 MAP_FLAG(REM_PORT, IFRM_SRC_PORT); 4846 MAP_FLAG(LOC_MAC, IFRM_DST_MAC); 4847 MAP_FLAG(LOC_PORT, IFRM_DST_PORT); 4848 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); 4849 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); 4850 } else { 4851 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); 4852 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); 4853 MAP_FLAG(REM_HOST, SRC_IP); 4854 MAP_FLAG(LOC_HOST, DST_IP); 4855 MAP_FLAG(REM_MAC, SRC_MAC); 4856 MAP_FLAG(REM_PORT, SRC_PORT); 4857 MAP_FLAG(LOC_MAC, DST_MAC); 4858 MAP_FLAG(LOC_PORT, DST_PORT); 4859 MAP_FLAG(ETHER_TYPE, ETHER_TYPE); 4860 MAP_FLAG(INNER_VID, INNER_VLAN); 4861 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4862 MAP_FLAG(IP_PROTO, IP_PROTO); 4863 } 4864 #undef MAP_FLAG 4865 4866 /* Did we map them all? 
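 * Any bits left over here describe a firmware match type that this
 * driver has no equivalent flag for; callers treat such a type as
 * unsupported.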
*/ 4867 if (mcdi_flags) 4868 return -EINVAL; 4869 4870 return match_flags; 4871 } 4872 4873 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) 4874 { 4875 struct efx_ef10_filter_table *table = efx->filter_state; 4876 struct efx_ef10_filter_vlan *vlan, *next_vlan; 4877 4878 /* See comment in efx_ef10_filter_table_remove() */ 4879 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 4880 return; 4881 4882 if (!table) 4883 return; 4884 4885 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) 4886 efx_ef10_filter_del_vlan_internal(efx, vlan); 4887 } 4888 4889 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, 4890 bool encap, 4891 enum efx_filter_match_flags match_flags) 4892 { 4893 unsigned int match_pri; 4894 int mf; 4895 4896 for (match_pri = 0; 4897 match_pri < table->rx_match_count; 4898 match_pri++) { 4899 mf = efx_ef10_filter_match_flags_from_mcdi(encap, 4900 table->rx_match_mcdi_flags[match_pri]); 4901 if (mf == match_flags) 4902 return true; 4903 } 4904 4905 return false; 4906 } 4907 4908 static int 4909 efx_ef10_filter_table_probe_matches(struct efx_nic *efx, 4910 struct efx_ef10_filter_table *table, 4911 bool encap) 4912 { 4913 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); 4914 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); 4915 unsigned int pd_match_pri, pd_match_count; 4916 size_t outlen; 4917 int rc; 4918 4919 /* Find out which RX filter types are supported, and their priorities */ 4920 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, 4921 encap ? 4922 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : 4923 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); 4924 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, 4925 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), 4926 &outlen); 4927 if (rc) 4928 return rc; 4929 4930 pd_match_count = MCDI_VAR_ARRAY_LEN( 4931 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); 4932 4933 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { 4934 u32 mcdi_flags = 4935 MCDI_ARRAY_DWORD( 4936 outbuf, 4937 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, 4938 pd_match_pri); 4939 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); 4940 if (rc < 0) { 4941 netif_dbg(efx, probe, efx->net_dev, 4942 "%s: fw flags %#x pri %u not supported in driver\n", 4943 __func__, mcdi_flags, pd_match_pri); 4944 } else { 4945 netif_dbg(efx, probe, efx->net_dev, 4946 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 4947 __func__, mcdi_flags, pd_match_pri, 4948 rc, table->rx_match_count); 4949 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; 4950 table->rx_match_count++; 4951 } 4952 } 4953 4954 return 0; 4955 } 4956 4957 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 4958 { 4959 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4960 struct net_device *net_dev = efx->net_dev; 4961 struct efx_ef10_filter_table *table; 4962 struct efx_ef10_vlan *vlan; 4963 int rc; 4964 4965 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 4966 return -EINVAL; 4967 4968 if (efx->filter_state) /* already probed */ 4969 return 0; 4970 4971 table = kzalloc(sizeof(*table), GFP_KERNEL); 4972 if (!table) 4973 return -ENOMEM; 4974 4975 table->rx_match_count = 0; 4976 rc = efx_ef10_filter_table_probe_matches(efx, table, false); 4977 if (rc) 4978 goto fail; 4979 if (nic_data->datapath_caps & 4980 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 4981 rc = efx_ef10_filter_table_probe_matches(efx, table, true); 
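/* Encapsulated (inner-frame) match types, where supported, are appended
 * to rx_match_mcdi_flags after the outer ones, so the firmware's
 * priority ordering is preserved within each group.
 */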
4982 if (rc) 4983 goto fail; 4984 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && 4985 !(efx_ef10_filter_match_supported(table, false, 4986 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && 4987 efx_ef10_filter_match_supported(table, false, 4988 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { 4989 netif_info(efx, probe, net_dev, 4990 "VLAN filters are not supported in this firmware variant\n"); 4991 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4992 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4993 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4994 } 4995 4996 table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS, 4997 sizeof(*table->entry))); 4998 if (!table->entry) { 4999 rc = -ENOMEM; 5000 goto fail; 5001 } 5002 5003 table->mc_promisc_last = false; 5004 table->vlan_filter = 5005 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 5006 INIT_LIST_HEAD(&table->vlan_list); 5007 init_rwsem(&table->lock); 5008 5009 efx->filter_state = table; 5010 5011 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 5012 rc = efx_ef10_filter_add_vlan(efx, vlan->vid); 5013 if (rc) 5014 goto fail_add_vlan; 5015 } 5016 5017 return 0; 5018 5019 fail_add_vlan: 5020 efx_ef10_filter_cleanup_vlans(efx); 5021 efx->filter_state = NULL; 5022 fail: 5023 kfree(table); 5024 return rc; 5025 } 5026 5027 /* Caller must hold efx->filter_sem for read if race against 5028 * efx_ef10_filter_table_remove() is possible 5029 */ 5030 static void efx_ef10_filter_table_restore(struct efx_nic *efx) 5031 { 5032 struct efx_ef10_filter_table *table = efx->filter_state; 5033 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5034 unsigned int invalid_filters = 0, failed = 0; 5035 struct efx_ef10_filter_vlan *vlan; 5036 struct efx_filter_spec *spec; 5037 struct efx_rss_context *ctx; 5038 unsigned int filter_idx; 5039 u32 mcdi_flags; 5040 int match_pri; 5041 int rc, i; 5042 5043 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5044 5045 if (!nic_data->must_restore_filters) 5046 return; 5047 5048 if (!table) 5049 return; 5050 5051 down_write(&table->lock); 5052 mutex_lock(&efx->rss_lock); 5053 5054 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5055 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5056 if (!spec) 5057 continue; 5058 5059 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 5060 match_pri = 0; 5061 while (match_pri < table->rx_match_count && 5062 table->rx_match_mcdi_flags[match_pri] != mcdi_flags) 5063 ++match_pri; 5064 if (match_pri >= table->rx_match_count) { 5065 invalid_filters++; 5066 goto not_restored; 5067 } 5068 if (spec->rss_context) 5069 ctx = efx_find_rss_context_entry(efx, spec->rss_context); 5070 else 5071 ctx = &efx->rss_context; 5072 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { 5073 if (!ctx) { 5074 netif_warn(efx, drv, efx->net_dev, 5075 "Warning: unable to restore a filter with nonexistent RSS context %u.\n", 5076 spec->rss_context); 5077 invalid_filters++; 5078 goto not_restored; 5079 } 5080 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { 5081 netif_warn(efx, drv, efx->net_dev, 5082 "Warning: unable to restore a filter with RSS context %u as it was not created.\n", 5083 spec->rss_context); 5084 invalid_filters++; 5085 goto not_restored; 5086 } 5087 } 5088 5089 rc = efx_ef10_filter_push(efx, spec, 5090 &table->entry[filter_idx].handle, 5091 ctx, false); 5092 if (rc) 5093 failed++; 5094 5095 if (rc) { 5096 not_restored: 5097 list_for_each_entry(vlan, &table->vlan_list, list) 5098 for (i = 0; i < 
EFX_EF10_NUM_DEFAULT_FILTERS; ++i) 5099 if (vlan->default_filters[i] == filter_idx) 5100 vlan->default_filters[i] = 5101 EFX_EF10_FILTER_ID_INVALID; 5102 5103 kfree(spec); 5104 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 5105 } 5106 } 5107 5108 mutex_unlock(&efx->rss_lock); 5109 up_write(&table->lock); 5110 5111 /* This can happen validly if the MC's capabilities have changed, so 5112 * is not an error. 5113 */ 5114 if (invalid_filters) 5115 netif_dbg(efx, drv, efx->net_dev, 5116 "Did not restore %u filters that are now unsupported.\n", 5117 invalid_filters); 5118 5119 if (failed) 5120 netif_err(efx, hw, efx->net_dev, 5121 "unable to restore %u filters\n", failed); 5122 else 5123 nic_data->must_restore_filters = false; 5124 } 5125 5126 static void efx_ef10_filter_table_remove(struct efx_nic *efx) 5127 { 5128 struct efx_ef10_filter_table *table = efx->filter_state; 5129 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 5130 struct efx_filter_spec *spec; 5131 unsigned int filter_idx; 5132 int rc; 5133 5134 efx_ef10_filter_cleanup_vlans(efx); 5135 efx->filter_state = NULL; 5136 /* If we were called without locking, then it's not safe to free 5137 * the table as others might be using it. So we just WARN, leak 5138 * the memory, and potentially get an inconsistent filter table 5139 * state. 5140 * This should never actually happen. 5141 */ 5142 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5143 return; 5144 5145 if (!table) 5146 return; 5147 5148 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5149 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5150 if (!spec) 5151 continue; 5152 5153 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 5154 efx_ef10_filter_is_exclusive(spec) ? 5155 MC_CMD_FILTER_OP_IN_OP_REMOVE : 5156 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 5157 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 5158 table->entry[filter_idx].handle); 5159 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, 5160 sizeof(inbuf), NULL, 0, NULL); 5161 if (rc) 5162 netif_info(efx, drv, efx->net_dev, 5163 "%s: filter %04x remove failed\n", 5164 __func__, filter_idx); 5165 kfree(spec); 5166 } 5167 5168 vfree(table->entry); 5169 kfree(table); 5170 } 5171 5172 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) 5173 { 5174 struct efx_ef10_filter_table *table = efx->filter_state; 5175 unsigned int filter_idx; 5176 5177 efx_rwsem_assert_write_locked(&table->lock); 5178 5179 if (*id != EFX_EF10_FILTER_ID_INVALID) { 5180 filter_idx = efx_ef10_filter_get_unsafe_id(*id); 5181 if (!table->entry[filter_idx].spec) 5182 netif_dbg(efx, drv, efx->net_dev, 5183 "marked null spec old %04x:%04x\n", *id, 5184 filter_idx); 5185 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; 5186 *id = EFX_EF10_FILTER_ID_INVALID; 5187 } 5188 } 5189 5190 /* Mark old per-VLAN filters that may need to be removed */ 5191 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, 5192 struct efx_ef10_filter_vlan *vlan) 5193 { 5194 struct efx_ef10_filter_table *table = efx->filter_state; 5195 unsigned int i; 5196 5197 for (i = 0; i < table->dev_uc_count; i++) 5198 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); 5199 for (i = 0; i < table->dev_mc_count; i++) 5200 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); 5201 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5202 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); 5203 } 5204 5205 /* Mark old filters that may need to be removed. 
5206 * Caller must hold efx->filter_sem for read if race against 5207 * efx_ef10_filter_table_remove() is possible 5208 */ 5209 static void efx_ef10_filter_mark_old(struct efx_nic *efx) 5210 { 5211 struct efx_ef10_filter_table *table = efx->filter_state; 5212 struct efx_ef10_filter_vlan *vlan; 5213 5214 down_write(&table->lock); 5215 list_for_each_entry(vlan, &table->vlan_list, list) 5216 _efx_ef10_filter_vlan_mark_old(efx, vlan); 5217 up_write(&table->lock); 5218 } 5219 5220 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) 5221 { 5222 struct efx_ef10_filter_table *table = efx->filter_state; 5223 struct net_device *net_dev = efx->net_dev; 5224 struct netdev_hw_addr *uc; 5225 unsigned int i; 5226 5227 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); 5228 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 5229 i = 1; 5230 netdev_for_each_uc_addr(uc, net_dev) { 5231 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { 5232 table->uc_promisc = true; 5233 break; 5234 } 5235 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 5236 i++; 5237 } 5238 5239 table->dev_uc_count = i; 5240 } 5241 5242 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) 5243 { 5244 struct efx_ef10_filter_table *table = efx->filter_state; 5245 struct net_device *net_dev = efx->net_dev; 5246 struct netdev_hw_addr *mc; 5247 unsigned int i; 5248 5249 table->mc_overflow = false; 5250 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); 5251 5252 i = 0; 5253 netdev_for_each_mc_addr(mc, net_dev) { 5254 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { 5255 table->mc_promisc = true; 5256 table->mc_overflow = true; 5257 break; 5258 } 5259 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); 5260 i++; 5261 } 5262 5263 table->dev_mc_count = i; 5264 } 5265 5266 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, 5267 struct efx_ef10_filter_vlan *vlan, 5268 bool multicast, bool rollback) 5269 { 5270 struct efx_ef10_filter_table *table = efx->filter_state; 5271 struct efx_ef10_dev_addr *addr_list; 5272 enum efx_filter_flags filter_flags; 5273 struct efx_filter_spec spec; 5274 u8 baddr[ETH_ALEN]; 5275 unsigned int i, j; 5276 int addr_count; 5277 u16 *ids; 5278 int rc; 5279 5280 if (multicast) { 5281 addr_list = table->dev_mc_list; 5282 addr_count = table->dev_mc_count; 5283 ids = vlan->mc; 5284 } else { 5285 addr_list = table->dev_uc_list; 5286 addr_count = table->dev_uc_count; 5287 ids = vlan->uc; 5288 } 5289 5290 filter_flags = efx_rss_active(&efx->rss_context) ? 
EFX_FILTER_FLAG_RX_RSS : 0; 5291 5292 /* Insert/renew filters */ 5293 for (i = 0; i < addr_count; i++) { 5294 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5295 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5296 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5297 rc = efx_ef10_filter_insert_locked(efx, &spec, true); 5298 if (rc < 0) { 5299 if (rollback) { 5300 netif_info(efx, drv, efx->net_dev, 5301 "efx_ef10_filter_insert failed rc=%d\n", 5302 rc); 5303 /* Fall back to promiscuous */ 5304 for (j = 0; j < i; j++) { 5305 efx_ef10_filter_remove_unsafe( 5306 efx, EFX_FILTER_PRI_AUTO, 5307 ids[j]); 5308 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5309 } 5310 return rc; 5311 } else { 5312 /* keep invalid ID, and carry on */ 5313 } 5314 } else { 5315 ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5316 } 5317 } 5318 5319 if (multicast && rollback) { 5320 /* Also need an Ethernet broadcast filter */ 5321 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != 5322 EFX_EF10_FILTER_ID_INVALID); 5323 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5324 eth_broadcast_addr(baddr); 5325 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5326 rc = efx_ef10_filter_insert_locked(efx, &spec, true); 5327 if (rc < 0) { 5328 netif_warn(efx, drv, efx->net_dev, 5329 "Broadcast filter insert failed rc=%d\n", rc); 5330 /* Fall back to promiscuous */ 5331 for (j = 0; j < i; j++) { 5332 efx_ef10_filter_remove_unsafe( 5333 efx, EFX_FILTER_PRI_AUTO, 5334 ids[j]); 5335 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5336 } 5337 return rc; 5338 } else { 5339 vlan->default_filters[EFX_EF10_BCAST] = 5340 efx_ef10_filter_get_unsafe_id(rc); 5341 } 5342 } 5343 5344 return 0; 5345 } 5346 5347 static int efx_ef10_filter_insert_def(struct efx_nic *efx, 5348 struct efx_ef10_filter_vlan *vlan, 5349 enum efx_encap_type encap_type, 5350 bool multicast, bool rollback) 5351 { 5352 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5353 enum efx_filter_flags filter_flags; 5354 struct efx_filter_spec spec; 5355 u8 baddr[ETH_ALEN]; 5356 int rc; 5357 u16 *id; 5358 5359 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; 5360 5361 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5362 5363 if (multicast) 5364 efx_filter_set_mc_def(&spec); 5365 else 5366 efx_filter_set_uc_def(&spec); 5367 5368 if (encap_type) { 5369 if (nic_data->datapath_caps & 5370 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 5371 efx_filter_set_encap_type(&spec, encap_type); 5372 else 5373 /* don't insert encap filters on non-supporting 5374 * platforms. ID will be left as INVALID. 5375 */ 5376 return 0; 5377 } 5378 5379 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 5380 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 5381 5382 rc = efx_ef10_filter_insert_locked(efx, &spec, true); 5383 if (rc < 0) { 5384 const char *um = multicast ? 
"Multicast" : "Unicast"; 5385 const char *encap_name = ""; 5386 const char *encap_ipv = ""; 5387 5388 if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5389 EFX_ENCAP_TYPE_VXLAN) 5390 encap_name = "VXLAN "; 5391 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5392 EFX_ENCAP_TYPE_NVGRE) 5393 encap_name = "NVGRE "; 5394 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5395 EFX_ENCAP_TYPE_GENEVE) 5396 encap_name = "GENEVE "; 5397 if (encap_type & EFX_ENCAP_FLAG_IPV6) 5398 encap_ipv = "IPv6 "; 5399 else if (encap_type) 5400 encap_ipv = "IPv4 "; 5401 5402 /* unprivileged functions can't insert mismatch filters 5403 * for encapsulated or unicast traffic, so downgrade 5404 * those warnings to debug. 5405 */ 5406 netif_cond_dbg(efx, drv, efx->net_dev, 5407 rc == -EPERM && (encap_type || !multicast), warn, 5408 "%s%s%s mismatch filter insert failed rc=%d\n", 5409 encap_name, encap_ipv, um, rc); 5410 } else if (multicast) { 5411 /* mapping from encap types to default filter IDs (multicast) */ 5412 static enum efx_ef10_default_filters map[] = { 5413 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, 5414 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, 5415 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, 5416 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, 5417 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5418 EFX_EF10_VXLAN6_MCDEF, 5419 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5420 EFX_EF10_NVGRE6_MCDEF, 5421 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5422 EFX_EF10_GENEVE6_MCDEF, 5423 }; 5424 5425 /* quick bounds check (BCAST result impossible) */ 5426 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5427 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5428 WARN_ON(1); 5429 return -EINVAL; 5430 } 5431 /* then follow map */ 5432 id = &vlan->default_filters[map[encap_type]]; 5433 5434 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5435 *id = efx_ef10_filter_get_unsafe_id(rc); 5436 if (!nic_data->workaround_26807 && !encap_type) { 5437 /* Also need an Ethernet broadcast filter */ 5438 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 5439 filter_flags, 0); 5440 eth_broadcast_addr(baddr); 5441 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5442 rc = efx_ef10_filter_insert_locked(efx, &spec, true); 5443 if (rc < 0) { 5444 netif_warn(efx, drv, efx->net_dev, 5445 "Broadcast filter insert failed rc=%d\n", 5446 rc); 5447 if (rollback) { 5448 /* Roll back the mc_def filter */ 5449 efx_ef10_filter_remove_unsafe( 5450 efx, EFX_FILTER_PRI_AUTO, 5451 *id); 5452 *id = EFX_EF10_FILTER_ID_INVALID; 5453 return rc; 5454 } 5455 } else { 5456 EFX_WARN_ON_PARANOID( 5457 vlan->default_filters[EFX_EF10_BCAST] != 5458 EFX_EF10_FILTER_ID_INVALID); 5459 vlan->default_filters[EFX_EF10_BCAST] = 5460 efx_ef10_filter_get_unsafe_id(rc); 5461 } 5462 } 5463 rc = 0; 5464 } else { 5465 /* mapping from encap types to default filter IDs (unicast) */ 5466 static enum efx_ef10_default_filters map[] = { 5467 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, 5468 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, 5469 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, 5470 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, 5471 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5472 EFX_EF10_VXLAN6_UCDEF, 5473 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5474 EFX_EF10_NVGRE6_UCDEF, 5475 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5476 EFX_EF10_GENEVE6_UCDEF, 5477 }; 5478 5479 /* quick bounds check (BCAST result impossible) */ 5480 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5481 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5482 WARN_ON(1); 
5483 return -EINVAL; 5484 } 5485 /* then follow map */ 5486 id = &vlan->default_filters[map[encap_type]]; 5487 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5488 *id = rc; 5489 rc = 0; 5490 } 5491 return rc; 5492 } 5493 5494 /* Remove filters that weren't renewed. */ 5495 static void efx_ef10_filter_remove_old(struct efx_nic *efx) 5496 { 5497 struct efx_ef10_filter_table *table = efx->filter_state; 5498 int remove_failed = 0; 5499 int remove_noent = 0; 5500 int rc; 5501 int i; 5502 5503 down_write(&table->lock); 5504 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 5505 if (READ_ONCE(table->entry[i].spec) & 5506 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 5507 rc = efx_ef10_filter_remove_internal(efx, 5508 1U << EFX_FILTER_PRI_AUTO, i, true); 5509 if (rc == -ENOENT) 5510 remove_noent++; 5511 else if (rc) 5512 remove_failed++; 5513 } 5514 } 5515 up_write(&table->lock); 5516 5517 if (remove_failed) 5518 netif_info(efx, drv, efx->net_dev, 5519 "%s: failed to remove %d filters\n", 5520 __func__, remove_failed); 5521 if (remove_noent) 5522 netif_info(efx, drv, efx->net_dev, 5523 "%s: failed to remove %d non-existent filters\n", 5524 __func__, remove_noent); 5525 } 5526 5527 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 5528 { 5529 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5530 u8 mac_old[ETH_ALEN]; 5531 int rc, rc2; 5532 5533 /* Only reconfigure a PF-created vport */ 5534 if (is_zero_ether_addr(nic_data->vport_mac)) 5535 return 0; 5536 5537 efx_device_detach_sync(efx); 5538 efx_net_stop(efx->net_dev); 5539 down_write(&efx->filter_sem); 5540 efx_ef10_filter_table_remove(efx); 5541 up_write(&efx->filter_sem); 5542 5543 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); 5544 if (rc) 5545 goto restore_filters; 5546 5547 ether_addr_copy(mac_old, nic_data->vport_mac); 5548 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, 5549 nic_data->vport_mac); 5550 if (rc) 5551 goto restore_vadaptor; 5552 5553 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, 5554 efx->net_dev->dev_addr); 5555 if (!rc) { 5556 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 5557 } else { 5558 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); 5559 if (rc2) { 5560 /* Failed to add original MAC, so clear vport_mac */ 5561 eth_zero_addr(nic_data->vport_mac); 5562 goto reset_nic; 5563 } 5564 } 5565 5566 restore_vadaptor: 5567 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 5568 if (rc2) 5569 goto reset_nic; 5570 restore_filters: 5571 down_write(&efx->filter_sem); 5572 rc2 = efx_ef10_filter_table_probe(efx); 5573 up_write(&efx->filter_sem); 5574 if (rc2) 5575 goto reset_nic; 5576 5577 rc2 = efx_net_open(efx->net_dev); 5578 if (rc2) 5579 goto reset_nic; 5580 5581 efx_device_attach_if_not_resetting(efx); 5582 5583 return rc; 5584 5585 reset_nic: 5586 netif_err(efx, drv, efx->net_dev, 5587 "Failed to restore when changing MAC address - scheduling reset\n"); 5588 efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 5589 5590 return rc ? rc : rc2; 5591 } 5592 5593 /* Caller must hold efx->filter_sem for read if race against 5594 * efx_ef10_filter_table_remove() is possible 5595 */ 5596 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, 5597 struct efx_ef10_filter_vlan *vlan) 5598 { 5599 struct efx_ef10_filter_table *table = efx->filter_state; 5600 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5601 5602 /* Do not install unspecified VID if VLAN filtering is enabled. 5603 * Do not install all specified VIDs if VLAN filtering is disabled. 
5604 */ 5605 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) 5606 return; 5607 5608 /* Insert/renew unicast filters */ 5609 if (table->uc_promisc) { 5610 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, 5611 false, false); 5612 efx_ef10_filter_insert_addr_list(efx, vlan, false, false); 5613 } else { 5614 /* If any of the filters failed to insert, fall back to 5615 * promiscuous mode - add in the uc_def filter. But keep 5616 * our individual unicast filters. 5617 */ 5618 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) 5619 efx_ef10_filter_insert_def(efx, vlan, 5620 EFX_ENCAP_TYPE_NONE, 5621 false, false); 5622 } 5623 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5624 false, false); 5625 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5626 EFX_ENCAP_FLAG_IPV6, 5627 false, false); 5628 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5629 false, false); 5630 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5631 EFX_ENCAP_FLAG_IPV6, 5632 false, false); 5633 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5634 false, false); 5635 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5636 EFX_ENCAP_FLAG_IPV6, 5637 false, false); 5638 5639 /* Insert/renew multicast filters */ 5640 /* If changing promiscuous state with cascaded multicast filters, remove 5641 * old filters first, so that packets are dropped rather than duplicated 5642 */ 5643 if (nic_data->workaround_26807 && 5644 table->mc_promisc_last != table->mc_promisc) 5645 efx_ef10_filter_remove_old(efx); 5646 if (table->mc_promisc) { 5647 if (nic_data->workaround_26807) { 5648 /* If we failed to insert promiscuous filters, rollback 5649 * and fall back to individual multicast filters 5650 */ 5651 if (efx_ef10_filter_insert_def(efx, vlan, 5652 EFX_ENCAP_TYPE_NONE, 5653 true, true)) { 5654 /* Changing promisc state, so remove old filters */ 5655 efx_ef10_filter_remove_old(efx); 5656 efx_ef10_filter_insert_addr_list(efx, vlan, 5657 true, false); 5658 } 5659 } else { 5660 /* If we failed to insert promiscuous filters, don't 5661 * rollback. Regardless, also insert the mc_list, 5662 * unless it's incomplete due to overflow 5663 */ 5664 efx_ef10_filter_insert_def(efx, vlan, 5665 EFX_ENCAP_TYPE_NONE, 5666 true, false); 5667 if (!table->mc_overflow) 5668 efx_ef10_filter_insert_addr_list(efx, vlan, 5669 true, false); 5670 } 5671 } else { 5672 /* If any filters failed to insert, rollback and fall back to 5673 * promiscuous mode - mc_def filter and maybe broadcast. If 5674 * that fails, roll back again and insert as many of our 5675 * individual multicast filters as we can. 
5676 */ 5677 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { 5678 /* Changing promisc state, so remove old filters */ 5679 if (nic_data->workaround_26807) 5680 efx_ef10_filter_remove_old(efx); 5681 if (efx_ef10_filter_insert_def(efx, vlan, 5682 EFX_ENCAP_TYPE_NONE, 5683 true, true)) 5684 efx_ef10_filter_insert_addr_list(efx, vlan, 5685 true, false); 5686 } 5687 } 5688 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5689 true, false); 5690 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5691 EFX_ENCAP_FLAG_IPV6, 5692 true, false); 5693 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5694 true, false); 5695 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5696 EFX_ENCAP_FLAG_IPV6, 5697 true, false); 5698 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5699 true, false); 5700 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5701 EFX_ENCAP_FLAG_IPV6, 5702 true, false); 5703 } 5704 5705 /* Caller must hold efx->filter_sem for read if race against 5706 * efx_ef10_filter_table_remove() is possible 5707 */ 5708 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) 5709 { 5710 struct efx_ef10_filter_table *table = efx->filter_state; 5711 struct net_device *net_dev = efx->net_dev; 5712 struct efx_ef10_filter_vlan *vlan; 5713 bool vlan_filter; 5714 5715 if (!efx_dev_registered(efx)) 5716 return; 5717 5718 if (!table) 5719 return; 5720 5721 efx_ef10_filter_mark_old(efx); 5722 5723 /* Copy/convert the address lists; add the primary station 5724 * address and broadcast address 5725 */ 5726 netif_addr_lock_bh(net_dev); 5727 efx_ef10_filter_uc_addr_list(efx); 5728 efx_ef10_filter_mc_addr_list(efx); 5729 netif_addr_unlock_bh(net_dev); 5730 5731 /* If VLAN filtering changes, all old filters are finally removed. 5732 * Do it in advance to avoid conflicts for unicast untagged and 5733 * VLAN 0 tagged filters. 
5734 */ 5735 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 5736 if (table->vlan_filter != vlan_filter) { 5737 table->vlan_filter = vlan_filter; 5738 efx_ef10_filter_remove_old(efx); 5739 } 5740 5741 list_for_each_entry(vlan, &table->vlan_list, list) 5742 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5743 5744 efx_ef10_filter_remove_old(efx); 5745 table->mc_promisc_last = table->mc_promisc; 5746 } 5747 5748 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) 5749 { 5750 struct efx_ef10_filter_table *table = efx->filter_state; 5751 struct efx_ef10_filter_vlan *vlan; 5752 5753 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5754 5755 list_for_each_entry(vlan, &table->vlan_list, list) { 5756 if (vlan->vid == vid) 5757 return vlan; 5758 } 5759 5760 return NULL; 5761 } 5762 5763 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) 5764 { 5765 struct efx_ef10_filter_table *table = efx->filter_state; 5766 struct efx_ef10_filter_vlan *vlan; 5767 unsigned int i; 5768 5769 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5770 return -EINVAL; 5771 5772 vlan = efx_ef10_filter_find_vlan(efx, vid); 5773 if (WARN_ON(vlan)) { 5774 netif_err(efx, drv, efx->net_dev, 5775 "VLAN %u already added\n", vid); 5776 return -EALREADY; 5777 } 5778 5779 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 5780 if (!vlan) 5781 return -ENOMEM; 5782 5783 vlan->vid = vid; 5784 5785 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5786 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; 5787 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5788 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; 5789 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5790 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; 5791 5792 list_add_tail(&vlan->list, &table->vlan_list); 5793 5794 if (efx_dev_registered(efx)) 5795 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5796 5797 return 0; 5798 } 5799 5800 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, 5801 struct efx_ef10_filter_vlan *vlan) 5802 { 5803 unsigned int i; 5804 5805 /* See comment in efx_ef10_filter_table_remove() */ 5806 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5807 return; 5808 5809 list_del(&vlan->list); 5810 5811 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5812 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5813 vlan->uc[i]); 5814 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5815 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5816 vlan->mc[i]); 5817 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5818 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) 5819 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5820 vlan->default_filters[i]); 5821 5822 kfree(vlan); 5823 } 5824 5825 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) 5826 { 5827 struct efx_ef10_filter_vlan *vlan; 5828 5829 /* See comment in efx_ef10_filter_table_remove() */ 5830 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5831 return; 5832 5833 vlan = efx_ef10_filter_find_vlan(efx, vid); 5834 if (!vlan) { 5835 netif_err(efx, drv, efx->net_dev, 5836 "VLAN %u not found in filter state\n", vid); 5837 return; 5838 } 5839 5840 efx_ef10_filter_del_vlan_internal(efx, vlan); 5841 } 5842 5843 static int efx_ef10_set_mac_address(struct efx_nic *efx) 5844 { 5845 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 5846 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5847 bool was_enabled = efx->port_enabled; 5848 int rc; 5849 5850 efx_device_detach_sync(efx); 5851 efx_net_stop(efx->net_dev); 
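	/* Descriptive note (not in the original source): the filter table holds
	 * entries for the old station address, so it is removed here (under
	 * mac_lock and filter_sem) before the vadaptor MAC is changed via MCDI,
	 * then re-probed and the port restarted once the command completes.
	 */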
5852 5853 mutex_lock(&efx->mac_lock); 5854 down_write(&efx->filter_sem); 5855 efx_ef10_filter_table_remove(efx); 5856 5857 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 5858 efx->net_dev->dev_addr); 5859 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 5860 nic_data->vport_id); 5861 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 5862 sizeof(inbuf), NULL, 0, NULL); 5863 5864 efx_ef10_filter_table_probe(efx); 5865 up_write(&efx->filter_sem); 5866 mutex_unlock(&efx->mac_lock); 5867 5868 if (was_enabled) 5869 efx_net_open(efx->net_dev); 5870 efx_device_attach_if_not_resetting(efx); 5871 5872 #ifdef CONFIG_SFC_SRIOV 5873 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 5874 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 5875 5876 if (rc == -EPERM) { 5877 struct efx_nic *efx_pf; 5878 5879 /* Switch to PF and change MAC address on vport */ 5880 efx_pf = pci_get_drvdata(pci_dev_pf); 5881 5882 rc = efx_ef10_sriov_set_vf_mac(efx_pf, 5883 nic_data->vf_index, 5884 efx->net_dev->dev_addr); 5885 } else if (!rc) { 5886 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 5887 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 5888 unsigned int i; 5889 5890 /* MAC address successfully changed by VF (with MAC 5891 * spoofing) so update the parent PF if possible. 5892 */ 5893 for (i = 0; i < efx_pf->vf_count; ++i) { 5894 struct ef10_vf *vf = nic_data->vf + i; 5895 5896 if (vf->efx == efx) { 5897 ether_addr_copy(vf->mac, 5898 efx->net_dev->dev_addr); 5899 return 0; 5900 } 5901 } 5902 } 5903 } else 5904 #endif 5905 if (rc == -EPERM) { 5906 netif_err(efx, drv, efx->net_dev, 5907 "Cannot change MAC address; use sfboot to enable" 5908 " mac-spoofing on this interface\n"); 5909 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 5910 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 5911 * fall-back to the method of changing the MAC address on the 5912 * vport. This only applies to PFs because such versions of 5913 * MCFW do not support VFs. 5914 */ 5915 rc = efx_ef10_vport_set_mac_address(efx); 5916 } else if (rc) { 5917 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 5918 sizeof(inbuf), NULL, 0, rc); 5919 } 5920 5921 return rc; 5922 } 5923 5924 static int efx_ef10_mac_reconfigure(struct efx_nic *efx) 5925 { 5926 efx_ef10_filter_sync_rx_mode(efx); 5927 5928 return efx_mcdi_set_mac(efx); 5929 } 5930 5931 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) 5932 { 5933 efx_ef10_filter_sync_rx_mode(efx); 5934 5935 return 0; 5936 } 5937 5938 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 5939 { 5940 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 5941 5942 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 5943 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 5944 NULL, 0, NULL); 5945 } 5946 5947 /* MC BISTs follow a different poll mechanism to phy BISTs. 5948 * The BIST is done in the poll handler on the MC, and the MCDI command 5949 * will block until the BIST is done. 
5950 */ 5951 static int efx_ef10_poll_bist(struct efx_nic *efx) 5952 { 5953 int rc; 5954 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 5955 size_t outlen; 5956 u32 result; 5957 5958 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 5959 outbuf, sizeof(outbuf), &outlen); 5960 if (rc != 0) 5961 return rc; 5962 5963 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 5964 return -EIO; 5965 5966 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 5967 switch (result) { 5968 case MC_CMD_POLL_BIST_PASSED: 5969 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 5970 return 0; 5971 case MC_CMD_POLL_BIST_TIMEOUT: 5972 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 5973 return -EIO; 5974 case MC_CMD_POLL_BIST_FAILED: 5975 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 5976 return -EIO; 5977 default: 5978 netif_err(efx, hw, efx->net_dev, 5979 "BIST returned unknown result %u", result); 5980 return -EIO; 5981 } 5982 } 5983 5984 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 5985 { 5986 int rc; 5987 5988 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 5989 5990 rc = efx_ef10_start_bist(efx, bist_type); 5991 if (rc != 0) 5992 return rc; 5993 5994 return efx_ef10_poll_bist(efx); 5995 } 5996 5997 static int 5998 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 5999 { 6000 int rc, rc2; 6001 6002 efx_reset_down(efx, RESET_TYPE_WORLD); 6003 6004 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 6005 NULL, 0, NULL, 0, NULL); 6006 if (rc != 0) 6007 goto out; 6008 6009 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 6010 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 6011 6012 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 6013 6014 out: 6015 if (rc == -EPERM) 6016 rc = 0; 6017 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 6018 return rc ? 
rc : rc2; 6019 } 6020 6021 #ifdef CONFIG_SFC_MTD 6022 6023 struct efx_ef10_nvram_type_info { 6024 u16 type, type_mask; 6025 u8 port; 6026 const char *name; 6027 }; 6028 6029 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 6030 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 6031 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 6032 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 6033 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 6034 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 6035 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 6036 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 6037 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 6038 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 6039 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 6040 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 6041 { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, 6042 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, 6043 { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, 6044 { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, 6045 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, 6046 { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, 6047 { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, 6048 }; 6049 #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) 6050 6051 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 6052 struct efx_mcdi_mtd_partition *part, 6053 unsigned int type, 6054 unsigned long *found) 6055 { 6056 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 6057 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 6058 const struct efx_ef10_nvram_type_info *info; 6059 size_t size, erase_size, outlen; 6060 int type_idx = 0; 6061 bool protected; 6062 int rc; 6063 6064 for (type_idx = 0; ; type_idx++) { 6065 if (type_idx == EF10_NVRAM_PARTITION_COUNT) 6066 return -ENODEV; 6067 info = efx_ef10_nvram_types + type_idx; 6068 if ((type & ~info->type_mask) == info->type) 6069 break; 6070 } 6071 if (info->port != efx_port_num(efx)) 6072 return -ENODEV; 6073 6074 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 6075 if (rc) 6076 return rc; 6077 if (protected && 6078 (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && 6079 type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) 6080 /* Hide protected partitions that don't provide defaults. */ 6081 return -ENODEV; 6082 6083 if (protected) 6084 /* Protected partitions are read only. */ 6085 erase_size = 0; 6086 6087 /* If we've already exposed a partition of this type, hide this 6088 * duplicate. All operations on MTDs are keyed by the type anyway, 6089 * so we can't act on the duplicate. 
6090 */ 6091 if (__test_and_set_bit(type_idx, found)) 6092 return -EEXIST; 6093 6094 part->nvram_type = type; 6095 6096 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 6097 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 6098 outbuf, sizeof(outbuf), &outlen); 6099 if (rc) 6100 return rc; 6101 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 6102 return -EIO; 6103 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 6104 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 6105 part->fw_subtype = MCDI_DWORD(outbuf, 6106 NVRAM_METADATA_OUT_SUBTYPE); 6107 6108 part->common.dev_type_name = "EF10 NVRAM manager"; 6109 part->common.type_name = info->name; 6110 6111 part->common.mtd.type = MTD_NORFLASH; 6112 part->common.mtd.flags = MTD_CAP_NORFLASH; 6113 part->common.mtd.size = size; 6114 part->common.mtd.erasesize = erase_size; 6115 /* sfc_status is read-only */ 6116 if (!erase_size) 6117 part->common.mtd.flags |= MTD_NO_ERASE; 6118 6119 return 0; 6120 } 6121 6122 static int efx_ef10_mtd_probe(struct efx_nic *efx) 6123 { 6124 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6125 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; 6126 struct efx_mcdi_mtd_partition *parts; 6127 size_t outlen, n_parts_total, i, n_parts; 6128 unsigned int type; 6129 int rc; 6130 6131 ASSERT_RTNL(); 6132 6133 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 6134 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 6135 outbuf, sizeof(outbuf), &outlen); 6136 if (rc) 6137 return rc; 6138 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 6139 return -EIO; 6140 6141 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 6142 if (n_parts_total > 6143 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 6144 return -EIO; 6145 6146 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 6147 if (!parts) 6148 return -ENOMEM; 6149 6150 n_parts = 0; 6151 for (i = 0; i < n_parts_total; i++) { 6152 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 6153 i); 6154 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, 6155 found); 6156 if (rc == -EEXIST || rc == -ENODEV) 6157 continue; 6158 if (rc) 6159 goto fail; 6160 n_parts++; 6161 } 6162 6163 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 6164 fail: 6165 if (rc) 6166 kfree(parts); 6167 return rc; 6168 } 6169 6170 #endif /* CONFIG_SFC_MTD */ 6171 6172 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 6173 { 6174 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 6175 } 6176 6177 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 6178 u32 host_time) {} 6179 6180 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 6181 bool temp) 6182 { 6183 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 6184 int rc; 6185 6186 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 6187 channel->sync_events_state == SYNC_EVENTS_VALID || 6188 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 6189 return 0; 6190 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 6191 6192 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 6193 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6194 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 6195 channel->channel); 6196 6197 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6198 inbuf, sizeof(inbuf), NULL, 0, NULL); 6199 6200 if (rc != 0) 6201 channel->sync_events_state = temp ? 
SYNC_EVENTS_QUIESCENT : 6202 SYNC_EVENTS_DISABLED; 6203 6204 return rc; 6205 } 6206 6207 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 6208 bool temp) 6209 { 6210 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 6211 int rc; 6212 6213 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 6214 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 6215 return 0; 6216 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 6217 channel->sync_events_state = SYNC_EVENTS_DISABLED; 6218 return 0; 6219 } 6220 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 6221 SYNC_EVENTS_DISABLED; 6222 6223 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 6224 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6225 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 6226 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 6227 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 6228 channel->channel); 6229 6230 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6231 inbuf, sizeof(inbuf), NULL, 0, NULL); 6232 6233 return rc; 6234 } 6235 6236 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 6237 bool temp) 6238 { 6239 int (*set)(struct efx_channel *channel, bool temp); 6240 struct efx_channel *channel; 6241 6242 set = en ? 6243 efx_ef10_rx_enable_timestamping : 6244 efx_ef10_rx_disable_timestamping; 6245 6246 channel = efx_ptp_channel(efx); 6247 if (channel) { 6248 int rc = set(channel, temp); 6249 if (en && rc != 0) { 6250 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 6251 return rc; 6252 } 6253 } 6254 6255 return 0; 6256 } 6257 6258 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 6259 struct hwtstamp_config *init) 6260 { 6261 return -EOPNOTSUPP; 6262 } 6263 6264 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 6265 struct hwtstamp_config *init) 6266 { 6267 int rc; 6268 6269 switch (init->rx_filter) { 6270 case HWTSTAMP_FILTER_NONE: 6271 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 6272 /* if TX timestamping is still requested then leave PTP on */ 6273 return efx_ptp_change_mode(efx, 6274 init->tx_type != HWTSTAMP_TX_OFF, 0); 6275 case HWTSTAMP_FILTER_ALL: 6276 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 6277 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 6278 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 6279 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 6280 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6281 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 6282 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 6283 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6284 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 6285 case HWTSTAMP_FILTER_PTP_V2_EVENT: 6286 case HWTSTAMP_FILTER_PTP_V2_SYNC: 6287 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 6288 case HWTSTAMP_FILTER_NTP_ALL: 6289 init->rx_filter = HWTSTAMP_FILTER_ALL; 6290 rc = efx_ptp_change_mode(efx, true, 0); 6291 if (!rc) 6292 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 6293 if (rc) 6294 efx_ptp_change_mode(efx, false, 0); 6295 return rc; 6296 default: 6297 return -ERANGE; 6298 } 6299 } 6300 6301 static int efx_ef10_get_phys_port_id(struct efx_nic *efx, 6302 struct netdev_phys_item_id *ppid) 6303 { 6304 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6305 6306 if (!is_valid_ether_addr(nic_data->port_id)) 6307 return -EOPNOTSUPP; 6308 6309 ppid->id_len = ETH_ALEN; 6310 memcpy(ppid->id, nic_data->port_id, ppid->id_len); 6311 6312 return 0; 6313 } 6314 6315 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6316 { 6317 if (proto != 
htons(ETH_P_8021Q)) 6318 return -EINVAL; 6319 6320 return efx_ef10_add_vlan(efx, vid); 6321 } 6322 6323 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6324 { 6325 if (proto != htons(ETH_P_8021Q)) 6326 return -EINVAL; 6327 6328 return efx_ef10_del_vlan(efx, vid); 6329 } 6330 6331 /* We rely on the MCDI wiping out our TX rings if it made any changes to the 6332 * ports table, ensuring that any TSO descriptors that were made on a now- 6333 * removed tunnel port will be blown away and won't break things when we try 6334 * to transmit them using the new ports table. 6335 */ 6336 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) 6337 { 6338 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6339 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); 6340 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); 6341 bool will_reset = false; 6342 size_t num_entries = 0; 6343 size_t inlen, outlen; 6344 size_t i; 6345 int rc; 6346 efx_dword_t flags_and_num_entries; 6347 6348 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); 6349 6350 nic_data->udp_tunnels_dirty = false; 6351 6352 if (!(nic_data->datapath_caps & 6353 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { 6354 efx_device_attach_if_not_resetting(efx); 6355 return 0; 6356 } 6357 6358 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > 6359 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 6360 6361 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6362 if (nic_data->udp_tunnels[i].count && 6363 nic_data->udp_tunnels[i].port) { 6364 efx_dword_t entry; 6365 6366 EFX_POPULATE_DWORD_2(entry, 6367 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, 6368 ntohs(nic_data->udp_tunnels[i].port), 6369 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, 6370 nic_data->udp_tunnels[i].type); 6371 *_MCDI_ARRAY_DWORD(inbuf, 6372 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, 6373 num_entries++) = entry; 6374 } 6375 } 6376 6377 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - 6378 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != 6379 EFX_WORD_1_LBN); 6380 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != 6381 EFX_WORD_1_WIDTH); 6382 EFX_POPULATE_DWORD_2(flags_and_num_entries, 6383 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 6384 !!unloading, 6385 EFX_WORD_1, num_entries); 6386 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = 6387 flags_and_num_entries; 6388 6389 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); 6390 6391 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, 6392 inbuf, inlen, outbuf, sizeof(outbuf), &outlen); 6393 if (rc == -EIO) { 6394 /* Most likely the MC rebooted due to another function also 6395 * setting its tunnel port list. Mark the tunnel port list as 6396 * dirty, so it will be pushed upon coming up from the reboot. 6397 */ 6398 nic_data->udp_tunnels_dirty = true; 6399 return 0; 6400 } 6401 6402 if (rc) { 6403 /* expected not available on unprivileged functions */ 6404 if (rc != -EPERM) 6405 netif_warn(efx, drv, efx->net_dev, 6406 "Unable to set UDP tunnel ports; rc=%d.\n", rc); 6407 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & 6408 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { 6409 netif_info(efx, drv, efx->net_dev, 6410 "Rebooting MC due to UDP tunnel port list change\n"); 6411 will_reset = true; 6412 if (unloading) 6413 /* Delay for the MC reset to complete. This will make 6414 * unloading other functions a bit smoother. 
This is a 6415 * race, but the other unload will work whichever way 6416 * it goes, this just avoids an unnecessary error 6417 * message. 6418 */ 6419 msleep(100); 6420 } 6421 if (!will_reset && !unloading) { 6422 /* The caller will have detached, relying on the MC reset to 6423 * trigger a re-attach. Since there won't be an MC reset, we 6424 * have to do the attach ourselves. 6425 */ 6426 efx_device_attach_if_not_resetting(efx); 6427 } 6428 6429 return rc; 6430 } 6431 6432 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) 6433 { 6434 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6435 int rc = 0; 6436 6437 mutex_lock(&nic_data->udp_tunnels_lock); 6438 if (nic_data->udp_tunnels_dirty) { 6439 /* Make sure all TX are stopped while we modify the table, else 6440 * we might race against an efx_features_check(). 6441 */ 6442 efx_device_detach_sync(efx); 6443 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6444 } 6445 mutex_unlock(&nic_data->udp_tunnels_lock); 6446 return rc; 6447 } 6448 6449 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, 6450 __be16 port) 6451 { 6452 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6453 size_t i; 6454 6455 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6456 if (!nic_data->udp_tunnels[i].count) 6457 continue; 6458 if (nic_data->udp_tunnels[i].port == port) 6459 return &nic_data->udp_tunnels[i]; 6460 } 6461 return NULL; 6462 } 6463 6464 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, 6465 struct efx_udp_tunnel tnl) 6466 { 6467 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6468 struct efx_udp_tunnel *match; 6469 char typebuf[8]; 6470 size_t i; 6471 int rc; 6472 6473 if (!(nic_data->datapath_caps & 6474 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6475 return 0; 6476 6477 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6478 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", 6479 typebuf, ntohs(tnl.port)); 6480 6481 mutex_lock(&nic_data->udp_tunnels_lock); 6482 /* Make sure all TX are stopped while we add to the table, else we 6483 * might race against an efx_features_check(). 6484 */ 6485 efx_device_detach_sync(efx); 6486 6487 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6488 if (match != NULL) { 6489 if (match->type == tnl.type) { 6490 netif_dbg(efx, drv, efx->net_dev, 6491 "Referencing existing tunnel entry\n"); 6492 match->count++; 6493 /* No need to cause an MCDI update */ 6494 rc = 0; 6495 goto unlock_out; 6496 } 6497 efx_get_udp_tunnel_type_name(match->type, 6498 typebuf, sizeof(typebuf)); 6499 netif_dbg(efx, drv, efx->net_dev, 6500 "UDP port %d is already in use by %s\n", 6501 ntohs(tnl.port), typebuf); 6502 rc = -EEXIST; 6503 goto unlock_out; 6504 } 6505 6506 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) 6507 if (!nic_data->udp_tunnels[i].count) { 6508 nic_data->udp_tunnels[i] = tnl; 6509 nic_data->udp_tunnels[i].count = 1; 6510 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6511 goto unlock_out; 6512 } 6513 6514 netif_dbg(efx, drv, efx->net_dev, 6515 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", 6516 typebuf, ntohs(tnl.port)); 6517 6518 rc = -ENOMEM; 6519 6520 unlock_out: 6521 mutex_unlock(&nic_data->udp_tunnels_lock); 6522 return rc; 6523 } 6524 6525 /* Called under the TX lock with the TX queue running, hence no-one can be 6526 * in the middle of updating the UDP tunnels table. 
However, they could 6527 * have tried and failed the MCDI, in which case they'll have set the dirty 6528 * flag before dropping their locks. 6529 */ 6530 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) 6531 { 6532 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6533 6534 if (!(nic_data->datapath_caps & 6535 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6536 return false; 6537 6538 if (nic_data->udp_tunnels_dirty) 6539 /* SW table may not match HW state, so just assume we can't 6540 * use any UDP tunnel offloads. 6541 */ 6542 return false; 6543 6544 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; 6545 } 6546 6547 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, 6548 struct efx_udp_tunnel tnl) 6549 { 6550 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6551 struct efx_udp_tunnel *match; 6552 char typebuf[8]; 6553 int rc; 6554 6555 if (!(nic_data->datapath_caps & 6556 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6557 return 0; 6558 6559 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6560 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", 6561 typebuf, ntohs(tnl.port)); 6562 6563 mutex_lock(&nic_data->udp_tunnels_lock); 6564 /* Make sure all TX are stopped while we remove from the table, else we 6565 * might race against an efx_features_check(). 6566 */ 6567 efx_device_detach_sync(efx); 6568 6569 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6570 if (match != NULL) { 6571 if (match->type == tnl.type) { 6572 if (--match->count) { 6573 /* Port is still in use, so nothing to do */ 6574 netif_dbg(efx, drv, efx->net_dev, 6575 "UDP tunnel port %d remains active\n", 6576 ntohs(tnl.port)); 6577 rc = 0; 6578 goto out_unlock; 6579 } 6580 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6581 goto out_unlock; 6582 } 6583 efx_get_udp_tunnel_type_name(match->type, 6584 typebuf, sizeof(typebuf)); 6585 netif_warn(efx, drv, efx->net_dev, 6586 "UDP port %d is actually in use by %s, not removing\n", 6587 ntohs(tnl.port), typebuf); 6588 } 6589 rc = -ENOENT; 6590 6591 out_unlock: 6592 mutex_unlock(&nic_data->udp_tunnels_lock); 6593 return rc; 6594 } 6595 6596 #define EF10_OFFLOAD_FEATURES \ 6597 (NETIF_F_IP_CSUM | \ 6598 NETIF_F_HW_VLAN_CTAG_FILTER | \ 6599 NETIF_F_IPV6_CSUM | \ 6600 NETIF_F_RXHASH | \ 6601 NETIF_F_NTUPLE) 6602 6603 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 6604 .is_vf = true, 6605 .mem_bar = efx_ef10_vf_mem_bar, 6606 .mem_map_size = efx_ef10_mem_map_size, 6607 .probe = efx_ef10_probe_vf, 6608 .remove = efx_ef10_remove, 6609 .dimension_resources = efx_ef10_dimension_resources, 6610 .init = efx_ef10_init_nic, 6611 .fini = efx_port_dummy_op_void, 6612 .map_reset_reason = efx_ef10_map_reset_reason, 6613 .map_reset_flags = efx_ef10_map_reset_flags, 6614 .reset = efx_ef10_reset, 6615 .probe_port = efx_mcdi_port_probe, 6616 .remove_port = efx_mcdi_port_remove, 6617 .fini_dmaq = efx_ef10_fini_dmaq, 6618 .prepare_flr = efx_ef10_prepare_flr, 6619 .finish_flr = efx_port_dummy_op_void, 6620 .describe_stats = efx_ef10_describe_stats, 6621 .update_stats = efx_ef10_update_stats_vf, 6622 .start_stats = efx_port_dummy_op_void, 6623 .pull_stats = efx_port_dummy_op_void, 6624 .stop_stats = efx_port_dummy_op_void, 6625 .set_id_led = efx_mcdi_set_id_led, 6626 .push_irq_moderation = efx_ef10_push_irq_moderation, 6627 .reconfigure_mac = efx_ef10_mac_reconfigure_vf, 6628 .check_mac_fault = efx_mcdi_mac_check_fault, 6629 .reconfigure_port = efx_mcdi_port_reconfigure, 6630 .get_wol = 
efx_ef10_get_wol_vf, 6631 .set_wol = efx_ef10_set_wol_vf, 6632 .resume_wol = efx_port_dummy_op_void, 6633 .mcdi_request = efx_ef10_mcdi_request, 6634 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 6635 .mcdi_read_response = efx_ef10_mcdi_read_response, 6636 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6637 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6638 .irq_enable_master = efx_port_dummy_op_void, 6639 .irq_test_generate = efx_ef10_irq_test_generate, 6640 .irq_disable_non_ev = efx_port_dummy_op_void, 6641 .irq_handle_msi = efx_ef10_msi_interrupt, 6642 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6643 .tx_probe = efx_ef10_tx_probe, 6644 .tx_init = efx_ef10_tx_init, 6645 .tx_remove = efx_ef10_tx_remove, 6646 .tx_write = efx_ef10_tx_write, 6647 .tx_limit_len = efx_ef10_tx_limit_len, 6648 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, 6649 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6650 .rx_probe = efx_ef10_rx_probe, 6651 .rx_init = efx_ef10_rx_init, 6652 .rx_remove = efx_ef10_rx_remove, 6653 .rx_write = efx_ef10_rx_write, 6654 .rx_defer_refill = efx_ef10_rx_defer_refill, 6655 .ev_probe = efx_ef10_ev_probe, 6656 .ev_init = efx_ef10_ev_init, 6657 .ev_fini = efx_ef10_ev_fini, 6658 .ev_remove = efx_ef10_ev_remove, 6659 .ev_process = efx_ef10_ev_process, 6660 .ev_read_ack = efx_ef10_ev_read_ack, 6661 .ev_test_generate = efx_ef10_ev_test_generate, 6662 .filter_table_probe = efx_ef10_filter_table_probe, 6663 .filter_table_restore = efx_ef10_filter_table_restore, 6664 .filter_table_remove = efx_ef10_filter_table_remove, 6665 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6666 .filter_insert = efx_ef10_filter_insert, 6667 .filter_remove_safe = efx_ef10_filter_remove_safe, 6668 .filter_get_safe = efx_ef10_filter_get_safe, 6669 .filter_clear_rx = efx_ef10_filter_clear_rx, 6670 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6671 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6672 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6673 #ifdef CONFIG_RFS_ACCEL 6674 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6675 #endif 6676 #ifdef CONFIG_SFC_MTD 6677 .mtd_probe = efx_port_dummy_op_int, 6678 #endif 6679 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 6680 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 6681 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6682 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6683 #ifdef CONFIG_SFC_SRIOV 6684 .vswitching_probe = efx_ef10_vswitching_probe_vf, 6685 .vswitching_restore = efx_ef10_vswitching_restore_vf, 6686 .vswitching_remove = efx_ef10_vswitching_remove_vf, 6687 #endif 6688 .get_mac_address = efx_ef10_get_mac_address_vf, 6689 .set_mac_address = efx_ef10_set_mac_address, 6690 6691 .get_phys_port_id = efx_ef10_get_phys_port_id, 6692 .revision = EFX_REV_HUNT_A0, 6693 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6694 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6695 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6696 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6697 .can_rx_scatter = true, 6698 .always_rx_scatter = true, 6699 .min_interrupt_mode = EFX_INT_MODE_MSIX, 6700 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6701 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6702 .offload_features = EF10_OFFLOAD_FEATURES, 6703 .mcdi_max_ver = 2, 6704 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6705 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6706 1 << HWTSTAMP_FILTER_ALL, 6707 .rx_hash_key_size = 40, 6708 }; 6709 6710 const struct efx_nic_type efx_hunt_a0_nic_type = { 
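	/* Descriptive note (not in the original source): method table for
	 * Huntington (EF10) physical functions. Compared with the VF variant
	 * above, PFs additionally provide MCDI MAC statistics, chip and NVRAM
	 * self-tests, full MTD access, RSS context management, UDP tunnel
	 * offload control and the SR-IOV/vswitching operations.
	 */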
6711 .is_vf = false, 6712 .mem_bar = efx_ef10_pf_mem_bar, 6713 .mem_map_size = efx_ef10_mem_map_size, 6714 .probe = efx_ef10_probe_pf, 6715 .remove = efx_ef10_remove, 6716 .dimension_resources = efx_ef10_dimension_resources, 6717 .init = efx_ef10_init_nic, 6718 .fini = efx_port_dummy_op_void, 6719 .map_reset_reason = efx_ef10_map_reset_reason, 6720 .map_reset_flags = efx_ef10_map_reset_flags, 6721 .reset = efx_ef10_reset, 6722 .probe_port = efx_mcdi_port_probe, 6723 .remove_port = efx_mcdi_port_remove, 6724 .fini_dmaq = efx_ef10_fini_dmaq, 6725 .prepare_flr = efx_ef10_prepare_flr, 6726 .finish_flr = efx_port_dummy_op_void, 6727 .describe_stats = efx_ef10_describe_stats, 6728 .update_stats = efx_ef10_update_stats_pf, 6729 .start_stats = efx_mcdi_mac_start_stats, 6730 .pull_stats = efx_mcdi_mac_pull_stats, 6731 .stop_stats = efx_mcdi_mac_stop_stats, 6732 .set_id_led = efx_mcdi_set_id_led, 6733 .push_irq_moderation = efx_ef10_push_irq_moderation, 6734 .reconfigure_mac = efx_ef10_mac_reconfigure, 6735 .check_mac_fault = efx_mcdi_mac_check_fault, 6736 .reconfigure_port = efx_mcdi_port_reconfigure, 6737 .get_wol = efx_ef10_get_wol, 6738 .set_wol = efx_ef10_set_wol, 6739 .resume_wol = efx_port_dummy_op_void, 6740 .test_chip = efx_ef10_test_chip, 6741 .test_nvram = efx_mcdi_nvram_test_all, 6742 .mcdi_request = efx_ef10_mcdi_request, 6743 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 6744 .mcdi_read_response = efx_ef10_mcdi_read_response, 6745 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6746 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6747 .irq_enable_master = efx_port_dummy_op_void, 6748 .irq_test_generate = efx_ef10_irq_test_generate, 6749 .irq_disable_non_ev = efx_port_dummy_op_void, 6750 .irq_handle_msi = efx_ef10_msi_interrupt, 6751 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6752 .tx_probe = efx_ef10_tx_probe, 6753 .tx_init = efx_ef10_tx_init, 6754 .tx_remove = efx_ef10_tx_remove, 6755 .tx_write = efx_ef10_tx_write, 6756 .tx_limit_len = efx_ef10_tx_limit_len, 6757 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, 6758 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6759 .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config, 6760 .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config, 6761 .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts, 6762 .rx_probe = efx_ef10_rx_probe, 6763 .rx_init = efx_ef10_rx_init, 6764 .rx_remove = efx_ef10_rx_remove, 6765 .rx_write = efx_ef10_rx_write, 6766 .rx_defer_refill = efx_ef10_rx_defer_refill, 6767 .ev_probe = efx_ef10_ev_probe, 6768 .ev_init = efx_ef10_ev_init, 6769 .ev_fini = efx_ef10_ev_fini, 6770 .ev_remove = efx_ef10_ev_remove, 6771 .ev_process = efx_ef10_ev_process, 6772 .ev_read_ack = efx_ef10_ev_read_ack, 6773 .ev_test_generate = efx_ef10_ev_test_generate, 6774 .filter_table_probe = efx_ef10_filter_table_probe, 6775 .filter_table_restore = efx_ef10_filter_table_restore, 6776 .filter_table_remove = efx_ef10_filter_table_remove, 6777 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6778 .filter_insert = efx_ef10_filter_insert, 6779 .filter_remove_safe = efx_ef10_filter_remove_safe, 6780 .filter_get_safe = efx_ef10_filter_get_safe, 6781 .filter_clear_rx = efx_ef10_filter_clear_rx, 6782 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6783 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6784 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6785 #ifdef CONFIG_RFS_ACCEL 6786 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6787 #endif 6788 #ifdef 
CONFIG_SFC_MTD 6789 .mtd_probe = efx_ef10_mtd_probe, 6790 .mtd_rename = efx_mcdi_mtd_rename, 6791 .mtd_read = efx_mcdi_mtd_read, 6792 .mtd_erase = efx_mcdi_mtd_erase, 6793 .mtd_write = efx_mcdi_mtd_write, 6794 .mtd_sync = efx_mcdi_mtd_sync, 6795 #endif 6796 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 6797 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 6798 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 6799 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6800 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6801 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, 6802 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, 6803 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, 6804 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, 6805 #ifdef CONFIG_SFC_SRIOV 6806 .sriov_configure = efx_ef10_sriov_configure, 6807 .sriov_init = efx_ef10_sriov_init, 6808 .sriov_fini = efx_ef10_sriov_fini, 6809 .sriov_wanted = efx_ef10_sriov_wanted, 6810 .sriov_reset = efx_ef10_sriov_reset, 6811 .sriov_flr = efx_ef10_sriov_flr, 6812 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 6813 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 6814 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 6815 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 6816 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 6817 .vswitching_probe = efx_ef10_vswitching_probe_pf, 6818 .vswitching_restore = efx_ef10_vswitching_restore_pf, 6819 .vswitching_remove = efx_ef10_vswitching_remove_pf, 6820 #endif 6821 .get_mac_address = efx_ef10_get_mac_address_pf, 6822 .set_mac_address = efx_ef10_set_mac_address, 6823 .tso_versions = efx_ef10_tso_versions, 6824 6825 .get_phys_port_id = efx_ef10_get_phys_port_id, 6826 .revision = EFX_REV_HUNT_A0, 6827 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6828 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6829 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6830 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6831 .can_rx_scatter = true, 6832 .always_rx_scatter = true, 6833 .option_descriptors = true, 6834 .min_interrupt_mode = EFX_INT_MODE_LEGACY, 6835 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6836 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6837 .offload_features = EF10_OFFLOAD_FEATURES, 6838 .mcdi_max_ver = 2, 6839 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6840 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6841 1 << HWTSTAMP_FILTER_ALL, 6842 .rx_hash_key_size = 40, 6843 }; 6844