1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2012-2013 Solarflare Communications Inc. 5 */ 6 7 #include "net_driver.h" 8 #include "rx_common.h" 9 #include "ef10_regs.h" 10 #include "io.h" 11 #include "mcdi.h" 12 #include "mcdi_pcol.h" 13 #include "mcdi_port.h" 14 #include "mcdi_port_common.h" 15 #include "mcdi_functions.h" 16 #include "nic.h" 17 #include "mcdi_filters.h" 18 #include "workarounds.h" 19 #include "selftest.h" 20 #include "ef10_sriov.h" 21 #include <linux/in.h> 22 #include <linux/jhash.h> 23 #include <linux/wait.h> 24 #include <linux/workqueue.h> 25 26 /* Hardware control for EF10 architecture including 'Huntington'. */ 27 28 #define EFX_EF10_DRVGEN_EV 7 29 enum { 30 EFX_EF10_TEST = 1, 31 EFX_EF10_REFILL, 32 }; 33 34 /* VLAN list entry */ 35 struct efx_ef10_vlan { 36 struct list_head list; 37 u16 vid; 38 }; 39 40 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); 41 42 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) 43 { 44 efx_dword_t reg; 45 46 efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); 47 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? 48 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; 49 } 50 51 /* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for 52 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O 53 * bar; PFs use BAR 0/1 for memory. 54 */ 55 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) 56 { 57 switch (efx->pci_dev->device) { 58 case 0x0b03: /* SFC9250 PF */ 59 return 0; 60 default: 61 return 2; 62 } 63 } 64 65 /* All VFs use BAR 0/1 for memory */ 66 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) 67 { 68 return 0; 69 } 70 71 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) 72 { 73 int bar; 74 75 bar = efx->type->mem_bar(efx); 76 return resource_size(&efx->pci_dev->resource[bar]); 77 } 78 79 static bool efx_ef10_is_vf(struct efx_nic *efx) 80 { 81 return efx->type->is_vf; 82 } 83 84 #ifdef CONFIG_SFC_SRIOV 85 static int efx_ef10_get_vf_index(struct efx_nic *efx) 86 { 87 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 88 struct efx_ef10_nic_data *nic_data = efx->nic_data; 89 size_t outlen; 90 int rc; 91 92 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 93 sizeof(outbuf), &outlen); 94 if (rc) 95 return rc; 96 if (outlen < sizeof(outbuf)) 97 return -EIO; 98 99 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); 100 return 0; 101 } 102 #endif 103 104 static int efx_ef10_init_datapath_caps(struct efx_nic *efx) 105 { 106 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); 107 struct efx_ef10_nic_data *nic_data = efx->nic_data; 108 size_t outlen; 109 int rc; 110 111 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 112 113 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 114 outbuf, sizeof(outbuf), &outlen); 115 if (rc) 116 return rc; 117 if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 118 netif_err(efx, drv, efx->net_dev, 119 "unable to read datapath firmware capabilities\n"); 120 return -EIO; 121 } 122 123 nic_data->datapath_caps = 124 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 125 126 if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { 127 nic_data->datapath_caps2 = MCDI_DWORD(outbuf, 128 GET_CAPABILITIES_V2_OUT_FLAGS2); 129 nic_data->piobuf_size = MCDI_WORD(outbuf, 130 GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); 131 } else { 132 
nic_data->datapath_caps2 = 0; 133 nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; 134 } 135 136 /* record the DPCPU firmware IDs to determine VEB vswitching support. 137 */ 138 nic_data->rx_dpcpu_fw_id = 139 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 140 nic_data->tx_dpcpu_fw_id = 141 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 142 143 if (!(nic_data->datapath_caps & 144 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 145 netif_err(efx, probe, efx->net_dev, 146 "current firmware does not support an RX prefix\n"); 147 return -ENODEV; 148 } 149 150 if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { 151 u8 vi_window_mode = MCDI_BYTE(outbuf, 152 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); 153 154 rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); 155 if (rc) 156 return rc; 157 } else { 158 /* keep default VI stride */ 159 netif_dbg(efx, probe, efx->net_dev, 160 "firmware did not report VI window mode, assuming vi_stride = %u\n", 161 efx->vi_stride); 162 } 163 164 if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { 165 efx->num_mac_stats = MCDI_WORD(outbuf, 166 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); 167 netif_dbg(efx, probe, efx->net_dev, 168 "firmware reports num_mac_stats = %u\n", 169 efx->num_mac_stats); 170 } else { 171 /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ 172 netif_dbg(efx, probe, efx->net_dev, 173 "firmware did not report num_mac_stats, assuming %u\n", 174 efx->num_mac_stats); 175 } 176 177 return 0; 178 } 179 180 static void efx_ef10_read_licensed_features(struct efx_nic *efx) 181 { 182 MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN); 183 MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN); 184 struct efx_ef10_nic_data *nic_data = efx->nic_data; 185 size_t outlen; 186 int rc; 187 188 MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP, 189 MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); 190 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf), 191 outbuf, sizeof(outbuf), &outlen); 192 if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN)) 193 return; 194 195 nic_data->licensed_features = MCDI_QWORD(outbuf, 196 LICENSING_V3_OUT_LICENSED_FEATURES); 197 } 198 199 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) 200 { 201 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); 202 int rc; 203 204 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, 205 outbuf, sizeof(outbuf), NULL); 206 if (rc) 207 return rc; 208 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); 209 return rc > 0 ? rc : -ERANGE; 210 } 211 212 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) 213 { 214 struct efx_ef10_nic_data *nic_data = efx->nic_data; 215 unsigned int implemented; 216 unsigned int enabled; 217 int rc; 218 219 nic_data->workaround_35388 = false; 220 nic_data->workaround_61265 = false; 221 222 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 223 224 if (rc == -ENOSYS) { 225 /* Firmware without GET_WORKAROUNDS - not a problem. */ 226 rc = 0; 227 } else if (rc == 0) { 228 /* Bug61265 workaround is always enabled if implemented. */ 229 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) 230 nic_data->workaround_61265 = true; 231 232 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 233 nic_data->workaround_35388 = true; 234 } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 235 /* Workaround is implemented but not enabled. 236 * Try to enable it. 
237 */ 238 rc = efx_mcdi_set_workaround(efx, 239 MC_CMD_WORKAROUND_BUG35388, 240 true, NULL); 241 if (rc == 0) 242 nic_data->workaround_35388 = true; 243 /* If we failed to set the workaround just carry on. */ 244 rc = 0; 245 } 246 } 247 248 netif_dbg(efx, probe, efx->net_dev, 249 "workaround for bug 35388 is %sabled\n", 250 nic_data->workaround_35388 ? "en" : "dis"); 251 netif_dbg(efx, probe, efx->net_dev, 252 "workaround for bug 61265 is %sabled\n", 253 nic_data->workaround_61265 ? "en" : "dis"); 254 255 return rc; 256 } 257 258 static void efx_ef10_process_timer_config(struct efx_nic *efx, 259 const efx_dword_t *data) 260 { 261 unsigned int max_count; 262 263 if (EFX_EF10_WORKAROUND_61265(efx)) { 264 efx->timer_quantum_ns = MCDI_DWORD(data, 265 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); 266 efx->timer_max_ns = MCDI_DWORD(data, 267 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); 268 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 269 efx->timer_quantum_ns = MCDI_DWORD(data, 270 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); 271 max_count = MCDI_DWORD(data, 272 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); 273 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 274 } else { 275 efx->timer_quantum_ns = MCDI_DWORD(data, 276 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); 277 max_count = MCDI_DWORD(data, 278 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); 279 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 280 } 281 282 netif_dbg(efx, probe, efx->net_dev, 283 "got timer properties from MC: quantum %u ns; max %u ns\n", 284 efx->timer_quantum_ns, efx->timer_max_ns); 285 } 286 287 static int efx_ef10_get_timer_config(struct efx_nic *efx) 288 { 289 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); 290 int rc; 291 292 rc = efx_ef10_get_timer_workarounds(efx); 293 if (rc) 294 return rc; 295 296 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, 297 outbuf, sizeof(outbuf), NULL); 298 299 if (rc == 0) { 300 efx_ef10_process_timer_config(efx, outbuf); 301 } else if (rc == -ENOSYS || rc == -EPERM) { 302 /* Not available - fall back to Huntington defaults. 
*/ 303 unsigned int quantum; 304 305 rc = efx_ef10_get_sysclk_freq(efx); 306 if (rc < 0) 307 return rc; 308 309 quantum = 1536000 / rc; /* 1536 cycles */ 310 efx->timer_quantum_ns = quantum; 311 efx->timer_max_ns = efx->type->timer_period_max * quantum; 312 rc = 0; 313 } else { 314 efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, 315 MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, 316 NULL, 0, rc); 317 } 318 319 return rc; 320 } 321 322 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) 323 { 324 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); 325 size_t outlen; 326 int rc; 327 328 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); 329 330 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, 331 outbuf, sizeof(outbuf), &outlen); 332 if (rc) 333 return rc; 334 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 335 return -EIO; 336 337 ether_addr_copy(mac_address, 338 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); 339 return 0; 340 } 341 342 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) 343 { 344 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); 345 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); 346 size_t outlen; 347 int num_addrs, rc; 348 349 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, 350 EVB_PORT_ID_ASSIGNED); 351 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, 352 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 353 354 if (rc) 355 return rc; 356 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) 357 return -EIO; 358 359 num_addrs = MCDI_DWORD(outbuf, 360 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); 361 362 WARN_ON(num_addrs != 1); 363 364 ether_addr_copy(mac_address, 365 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); 366 367 return 0; 368 } 369 370 static ssize_t efx_ef10_show_link_control_flag(struct device *dev, 371 struct device_attribute *attr, 372 char *buf) 373 { 374 struct efx_nic *efx = dev_get_drvdata(dev); 375 376 return sprintf(buf, "%d\n", 377 ((efx->mcdi->fn_flags) & 378 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 379 ? 1 : 0); 380 } 381 382 static ssize_t efx_ef10_show_primary_flag(struct device *dev, 383 struct device_attribute *attr, 384 char *buf) 385 { 386 struct efx_nic *efx = dev_get_drvdata(dev); 387 388 return sprintf(buf, "%d\n", 389 ((efx->mcdi->fn_flags) & 390 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) 391 ? 1 : 0); 392 } 393 394 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid) 395 { 396 struct efx_ef10_nic_data *nic_data = efx->nic_data; 397 struct efx_ef10_vlan *vlan; 398 399 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 400 401 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 402 if (vlan->vid == vid) 403 return vlan; 404 } 405 406 return NULL; 407 } 408 409 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid) 410 { 411 struct efx_ef10_nic_data *nic_data = efx->nic_data; 412 struct efx_ef10_vlan *vlan; 413 int rc; 414 415 mutex_lock(&nic_data->vlan_lock); 416 417 vlan = efx_ef10_find_vlan(efx, vid); 418 if (vlan) { 419 /* We add VID 0 on init. 8021q adds it on module init 420 * for all interfaces with VLAN filtring feature. 
421 */ 422 if (vid == 0) 423 goto done_unlock; 424 netif_warn(efx, drv, efx->net_dev, 425 "VLAN %u already added\n", vid); 426 rc = -EALREADY; 427 goto fail_exist; 428 } 429 430 rc = -ENOMEM; 431 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 432 if (!vlan) 433 goto fail_alloc; 434 435 vlan->vid = vid; 436 437 list_add_tail(&vlan->list, &nic_data->vlan_list); 438 439 if (efx->filter_state) { 440 mutex_lock(&efx->mac_lock); 441 down_write(&efx->filter_sem); 442 rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); 443 up_write(&efx->filter_sem); 444 mutex_unlock(&efx->mac_lock); 445 if (rc) 446 goto fail_filter_add_vlan; 447 } 448 449 done_unlock: 450 mutex_unlock(&nic_data->vlan_lock); 451 return 0; 452 453 fail_filter_add_vlan: 454 list_del(&vlan->list); 455 kfree(vlan); 456 fail_alloc: 457 fail_exist: 458 mutex_unlock(&nic_data->vlan_lock); 459 return rc; 460 } 461 462 static void efx_ef10_del_vlan_internal(struct efx_nic *efx, 463 struct efx_ef10_vlan *vlan) 464 { 465 struct efx_ef10_nic_data *nic_data = efx->nic_data; 466 467 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 468 469 if (efx->filter_state) { 470 down_write(&efx->filter_sem); 471 efx_mcdi_filter_del_vlan(efx, vlan->vid); 472 up_write(&efx->filter_sem); 473 } 474 475 list_del(&vlan->list); 476 kfree(vlan); 477 } 478 479 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid) 480 { 481 struct efx_ef10_nic_data *nic_data = efx->nic_data; 482 struct efx_ef10_vlan *vlan; 483 int rc = 0; 484 485 /* 8021q removes VID 0 on module unload for all interfaces 486 * with VLAN filtering feature. We need to keep it to receive 487 * untagged traffic. 488 */ 489 if (vid == 0) 490 return 0; 491 492 mutex_lock(&nic_data->vlan_lock); 493 494 vlan = efx_ef10_find_vlan(efx, vid); 495 if (!vlan) { 496 netif_err(efx, drv, efx->net_dev, 497 "VLAN %u to be deleted not found\n", vid); 498 rc = -ENOENT; 499 } else { 500 efx_ef10_del_vlan_internal(efx, vlan); 501 } 502 503 mutex_unlock(&nic_data->vlan_lock); 504 505 return rc; 506 } 507 508 static void efx_ef10_cleanup_vlans(struct efx_nic *efx) 509 { 510 struct efx_ef10_nic_data *nic_data = efx->nic_data; 511 struct efx_ef10_vlan *vlan, *next_vlan; 512 513 mutex_lock(&nic_data->vlan_lock); 514 list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list) 515 efx_ef10_del_vlan_internal(efx, vlan); 516 mutex_unlock(&nic_data->vlan_lock); 517 } 518 519 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, 520 NULL); 521 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); 522 523 static int efx_ef10_probe(struct efx_nic *efx) 524 { 525 struct efx_ef10_nic_data *nic_data; 526 int i, rc; 527 528 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 529 if (!nic_data) 530 return -ENOMEM; 531 efx->nic_data = nic_data; 532 533 /* we assume later that we can copy from this buffer in dwords */ 534 BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); 535 536 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 537 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); 538 if (rc) 539 goto fail1; 540 541 /* Get the MC's warm boot count. In case it's rebooting right 542 * now, be prepared to retry. 543 */ 544 i = 0; 545 for (;;) { 546 rc = efx_ef10_get_warm_boot_count(efx); 547 if (rc >= 0) 548 break; 549 if (++i == 5) 550 goto fail2; 551 ssleep(1); 552 } 553 nic_data->warm_boot_count = rc; 554 555 /* In case we're recovering from a crash (kexec), we want to 556 * cancel any outstanding request by the previous user of this 557 * function. 
We send a special message using the least 558 * significant bits of the 'high' (doorbell) register. 559 */ 560 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); 561 562 rc = efx_mcdi_init(efx); 563 if (rc) 564 goto fail2; 565 566 mutex_init(&nic_data->udp_tunnels_lock); 567 568 /* Reset (most) configuration for this function */ 569 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); 570 if (rc) 571 goto fail3; 572 573 /* Enable event logging */ 574 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 575 if (rc) 576 goto fail3; 577 578 rc = device_create_file(&efx->pci_dev->dev, 579 &dev_attr_link_control_flag); 580 if (rc) 581 goto fail3; 582 583 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 584 if (rc) 585 goto fail4; 586 587 rc = efx_get_pf_index(efx, &nic_data->pf_index); 588 if (rc) 589 goto fail5; 590 591 rc = efx_ef10_init_datapath_caps(efx); 592 if (rc < 0) 593 goto fail5; 594 595 efx_ef10_read_licensed_features(efx); 596 597 /* We can have one VI for each vi_stride-byte region. 598 * However, until we use TX option descriptors we need two TX queues 599 * per channel. 600 */ 601 efx->tx_queues_per_channel = 2; 602 efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride; 603 if (!efx->max_vis) { 604 netif_err(efx, drv, efx->net_dev, "error determining max VIs\n"); 605 rc = -EIO; 606 goto fail5; 607 } 608 efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS, 609 efx->max_vis / efx->tx_queues_per_channel); 610 efx->max_tx_channels = efx->max_channels; 611 if (WARN_ON(efx->max_channels == 0)) { 612 rc = -EIO; 613 goto fail5; 614 } 615 616 efx->rx_packet_len_offset = 617 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 618 619 if (nic_data->datapath_caps & 620 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN)) 621 efx->net_dev->hw_features |= NETIF_F_RXFCS; 622 623 rc = efx_mcdi_port_get_number(efx); 624 if (rc < 0) 625 goto fail5; 626 efx->port_num = rc; 627 628 rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); 629 if (rc) 630 goto fail5; 631 632 rc = efx_ef10_get_timer_config(efx); 633 if (rc < 0) 634 goto fail5; 635 636 rc = efx_mcdi_mon_probe(efx); 637 if (rc && rc != -EPERM) 638 goto fail5; 639 640 efx_ptp_defer_probe_with_channel(efx); 641 642 #ifdef CONFIG_SFC_SRIOV 643 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { 644 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 645 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 646 647 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); 648 } else 649 #endif 650 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); 651 652 INIT_LIST_HEAD(&nic_data->vlan_list); 653 mutex_init(&nic_data->vlan_lock); 654 655 /* Add unspecified VID to support VLAN filtering being disabled */ 656 rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); 657 if (rc) 658 goto fail_add_vid_unspec; 659 660 /* If VLAN filtering is enabled, we need VID 0 to get untagged 661 * traffic. It is added automatically if 8021q module is loaded, 662 * but we can't rely on it since module may be not loaded. 
663 */ 664 rc = efx_ef10_add_vlan(efx, 0); 665 if (rc) 666 goto fail_add_vid_0; 667 668 return 0; 669 670 fail_add_vid_0: 671 efx_ef10_cleanup_vlans(efx); 672 fail_add_vid_unspec: 673 mutex_destroy(&nic_data->vlan_lock); 674 efx_ptp_remove(efx); 675 efx_mcdi_mon_remove(efx); 676 fail5: 677 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 678 fail4: 679 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 680 fail3: 681 efx_mcdi_detach(efx); 682 683 mutex_lock(&nic_data->udp_tunnels_lock); 684 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 685 (void)efx_ef10_set_udp_tnl_ports(efx, true); 686 mutex_unlock(&nic_data->udp_tunnels_lock); 687 mutex_destroy(&nic_data->udp_tunnels_lock); 688 689 efx_mcdi_fini(efx); 690 fail2: 691 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 692 fail1: 693 kfree(nic_data); 694 efx->nic_data = NULL; 695 return rc; 696 } 697 698 #ifdef EFX_USE_PIO 699 700 static void efx_ef10_free_piobufs(struct efx_nic *efx) 701 { 702 struct efx_ef10_nic_data *nic_data = efx->nic_data; 703 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); 704 unsigned int i; 705 int rc; 706 707 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); 708 709 for (i = 0; i < nic_data->n_piobufs; i++) { 710 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, 711 nic_data->piobuf_handle[i]); 712 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), 713 NULL, 0, NULL); 714 WARN_ON(rc); 715 } 716 717 nic_data->n_piobufs = 0; 718 } 719 720 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 721 { 722 struct efx_ef10_nic_data *nic_data = efx->nic_data; 723 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); 724 unsigned int i; 725 size_t outlen; 726 int rc = 0; 727 728 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); 729 730 for (i = 0; i < n; i++) { 731 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, 732 outbuf, sizeof(outbuf), &outlen); 733 if (rc) { 734 /* Don't display the MC error if we didn't have space 735 * for a VF. 
736 */ 737 if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) 738 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, 739 0, outbuf, outlen, rc); 740 break; 741 } 742 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 743 rc = -EIO; 744 break; 745 } 746 nic_data->piobuf_handle[i] = 747 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 748 netif_dbg(efx, probe, efx->net_dev, 749 "allocated PIO buffer %u handle %x\n", i, 750 nic_data->piobuf_handle[i]); 751 } 752 753 nic_data->n_piobufs = i; 754 if (rc) 755 efx_ef10_free_piobufs(efx); 756 return rc; 757 } 758 759 static int efx_ef10_link_piobufs(struct efx_nic *efx) 760 { 761 struct efx_ef10_nic_data *nic_data = efx->nic_data; 762 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); 763 struct efx_channel *channel; 764 struct efx_tx_queue *tx_queue; 765 unsigned int offset, index; 766 int rc; 767 768 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 769 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 770 771 /* Link a buffer to each VI in the write-combining mapping */ 772 for (index = 0; index < nic_data->n_piobufs; ++index) { 773 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 774 nic_data->piobuf_handle[index]); 775 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, 776 nic_data->pio_write_vi_base + index); 777 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 778 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 779 NULL, 0, NULL); 780 if (rc) { 781 netif_err(efx, drv, efx->net_dev, 782 "failed to link VI %u to PIO buffer %u (%d)\n", 783 nic_data->pio_write_vi_base + index, index, 784 rc); 785 goto fail; 786 } 787 netif_dbg(efx, probe, efx->net_dev, 788 "linked VI %u to PIO buffer %u\n", 789 nic_data->pio_write_vi_base + index, index); 790 } 791 792 /* Link a buffer to each TX queue */ 793 efx_for_each_channel(channel, efx) { 794 /* Extra channels, even those with TXQs (PTP), do not require 795 * PIO resources. 796 */ 797 if (!channel->type->want_pio || 798 channel->channel >= efx->xdp_channel_offset) 799 continue; 800 801 efx_for_each_channel_tx_queue(tx_queue, channel) { 802 /* We assign the PIO buffers to queues in 803 * reverse order to allow for the following 804 * special case. 805 */ 806 offset = ((efx->tx_channel_offset + efx->n_tx_channels - 807 tx_queue->channel->channel - 1) * 808 efx_piobuf_size); 809 index = offset / nic_data->piobuf_size; 810 offset = offset % nic_data->piobuf_size; 811 812 /* When the host page size is 4K, the first 813 * host page in the WC mapping may be within 814 * the same VI page as the last TX queue. We 815 * can only link one buffer to each VI. 
816 */ 817 if (tx_queue->queue == nic_data->pio_write_vi_base) { 818 BUG_ON(index != 0); 819 rc = 0; 820 } else { 821 MCDI_SET_DWORD(inbuf, 822 LINK_PIOBUF_IN_PIOBUF_HANDLE, 823 nic_data->piobuf_handle[index]); 824 MCDI_SET_DWORD(inbuf, 825 LINK_PIOBUF_IN_TXQ_INSTANCE, 826 tx_queue->queue); 827 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 828 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 829 NULL, 0, NULL); 830 } 831 832 if (rc) { 833 /* This is non-fatal; the TX path just 834 * won't use PIO for this queue 835 */ 836 netif_err(efx, drv, efx->net_dev, 837 "failed to link VI %u to PIO buffer %u (%d)\n", 838 tx_queue->queue, index, rc); 839 tx_queue->piobuf = NULL; 840 } else { 841 tx_queue->piobuf = 842 nic_data->pio_write_base + 843 index * efx->vi_stride + offset; 844 tx_queue->piobuf_offset = offset; 845 netif_dbg(efx, probe, efx->net_dev, 846 "linked VI %u to PIO buffer %u offset %x addr %p\n", 847 tx_queue->queue, index, 848 tx_queue->piobuf_offset, 849 tx_queue->piobuf); 850 } 851 } 852 } 853 854 return 0; 855 856 fail: 857 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same 858 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 859 */ 860 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); 861 while (index--) { 862 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 863 nic_data->pio_write_vi_base + index); 864 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, 865 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, 866 NULL, 0, NULL); 867 } 868 return rc; 869 } 870 871 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 872 { 873 struct efx_channel *channel; 874 struct efx_tx_queue *tx_queue; 875 876 /* All our existing PIO buffers went away */ 877 efx_for_each_channel(channel, efx) 878 efx_for_each_channel_tx_queue(tx_queue, channel) 879 tx_queue->piobuf = NULL; 880 } 881 882 #else /* !EFX_USE_PIO */ 883 884 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 885 { 886 return n == 0 ? 
0 : -ENOBUFS; 887 } 888 889 static int efx_ef10_link_piobufs(struct efx_nic *efx) 890 { 891 return 0; 892 } 893 894 static void efx_ef10_free_piobufs(struct efx_nic *efx) 895 { 896 } 897 898 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 899 { 900 } 901 902 #endif /* EFX_USE_PIO */ 903 904 static void efx_ef10_remove(struct efx_nic *efx) 905 { 906 struct efx_ef10_nic_data *nic_data = efx->nic_data; 907 int rc; 908 909 #ifdef CONFIG_SFC_SRIOV 910 struct efx_ef10_nic_data *nic_data_pf; 911 struct pci_dev *pci_dev_pf; 912 struct efx_nic *efx_pf; 913 struct ef10_vf *vf; 914 915 if (efx->pci_dev->is_virtfn) { 916 pci_dev_pf = efx->pci_dev->physfn; 917 if (pci_dev_pf) { 918 efx_pf = pci_get_drvdata(pci_dev_pf); 919 nic_data_pf = efx_pf->nic_data; 920 vf = nic_data_pf->vf + nic_data->vf_index; 921 vf->efx = NULL; 922 } else 923 netif_info(efx, drv, efx->net_dev, 924 "Could not get the PF id from VF\n"); 925 } 926 #endif 927 928 efx_ef10_cleanup_vlans(efx); 929 mutex_destroy(&nic_data->vlan_lock); 930 931 efx_ptp_remove(efx); 932 933 efx_mcdi_mon_remove(efx); 934 935 efx_mcdi_rx_free_indir_table(efx); 936 937 if (nic_data->wc_membase) 938 iounmap(nic_data->wc_membase); 939 940 rc = efx_mcdi_free_vis(efx); 941 WARN_ON(rc != 0); 942 943 if (!nic_data->must_restore_piobufs) 944 efx_ef10_free_piobufs(efx); 945 946 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 947 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 948 949 efx_mcdi_detach(efx); 950 951 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 952 mutex_lock(&nic_data->udp_tunnels_lock); 953 (void)efx_ef10_set_udp_tnl_ports(efx, true); 954 mutex_unlock(&nic_data->udp_tunnels_lock); 955 956 mutex_destroy(&nic_data->udp_tunnels_lock); 957 958 efx_mcdi_fini(efx); 959 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 960 kfree(nic_data); 961 } 962 963 static int efx_ef10_probe_pf(struct efx_nic *efx) 964 { 965 return efx_ef10_probe(efx); 966 } 967 968 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, 969 u32 *port_flags, u32 *vadaptor_flags, 970 unsigned int *vlan_tags) 971 { 972 struct efx_ef10_nic_data *nic_data = efx->nic_data; 973 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); 974 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); 975 size_t outlen; 976 int rc; 977 978 if (nic_data->datapath_caps & 979 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { 980 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, 981 port_id); 982 983 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), 984 outbuf, sizeof(outbuf), &outlen); 985 if (rc) 986 return rc; 987 988 if (outlen < sizeof(outbuf)) { 989 rc = -EIO; 990 return rc; 991 } 992 } 993 994 if (port_flags) 995 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); 996 if (vadaptor_flags) 997 *vadaptor_flags = 998 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); 999 if (vlan_tags) 1000 *vlan_tags = 1001 MCDI_DWORD(outbuf, 1002 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); 1003 1004 return 0; 1005 } 1006 1007 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 1008 { 1009 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 1010 1011 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 1012 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 1013 NULL, 0, NULL); 1014 } 1015 1016 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 1017 { 1018 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 1019 
1020 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 1021 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 1022 NULL, 0, NULL); 1023 } 1024 1025 int efx_ef10_vport_add_mac(struct efx_nic *efx, 1026 unsigned int port_id, u8 *mac) 1027 { 1028 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 1029 1030 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 1031 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 1032 1033 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 1034 sizeof(inbuf), NULL, 0, NULL); 1035 } 1036 1037 int efx_ef10_vport_del_mac(struct efx_nic *efx, 1038 unsigned int port_id, u8 *mac) 1039 { 1040 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 1041 1042 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 1043 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 1044 1045 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 1046 sizeof(inbuf), NULL, 0, NULL); 1047 } 1048 1049 #ifdef CONFIG_SFC_SRIOV 1050 static int efx_ef10_probe_vf(struct efx_nic *efx) 1051 { 1052 int rc; 1053 struct pci_dev *pci_dev_pf; 1054 1055 /* If the parent PF has no VF data structure, it doesn't know about this 1056 * VF so fail probe. The VF needs to be re-created. This can happen 1057 * if the PF driver is unloaded while the VF is assigned to a guest. 1058 */ 1059 pci_dev_pf = efx->pci_dev->physfn; 1060 if (pci_dev_pf) { 1061 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 1062 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; 1063 1064 if (!nic_data_pf->vf) { 1065 netif_info(efx, drv, efx->net_dev, 1066 "The VF cannot link to its parent PF; " 1067 "please destroy and re-create the VF\n"); 1068 return -EBUSY; 1069 } 1070 } 1071 1072 rc = efx_ef10_probe(efx); 1073 if (rc) 1074 return rc; 1075 1076 rc = efx_ef10_get_vf_index(efx); 1077 if (rc) 1078 goto fail; 1079 1080 if (efx->pci_dev->is_virtfn) { 1081 if (efx->pci_dev->physfn) { 1082 struct efx_nic *efx_pf = 1083 pci_get_drvdata(efx->pci_dev->physfn); 1084 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; 1085 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1086 1087 nic_data_p->vf[nic_data->vf_index].efx = efx; 1088 nic_data_p->vf[nic_data->vf_index].pci_dev = 1089 efx->pci_dev; 1090 } else 1091 netif_info(efx, drv, efx->net_dev, 1092 "Could not get the PF id from VF\n"); 1093 } 1094 1095 return 0; 1096 1097 fail: 1098 efx_ef10_remove(efx); 1099 return rc; 1100 } 1101 #else 1102 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) 1103 { 1104 return 0; 1105 } 1106 #endif 1107 1108 static int efx_ef10_alloc_vis(struct efx_nic *efx, 1109 unsigned int min_vis, unsigned int max_vis) 1110 { 1111 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1112 1113 return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base, 1114 &nic_data->n_allocated_vis); 1115 } 1116 1117 /* Note that the failure path of this function does not free 1118 * resources, as this will be done by efx_ef10_remove(). 1119 */ 1120 static int efx_ef10_dimension_resources(struct efx_nic *efx) 1121 { 1122 unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel, 1123 efx_separate_tx_channels ? 
2 : 1); 1124 unsigned int channel_vis, pio_write_vi_base, max_vis; 1125 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1126 unsigned int uc_mem_map_size, wc_mem_map_size; 1127 void __iomem *membase; 1128 int rc; 1129 1130 channel_vis = max(efx->n_channels, 1131 ((efx->n_tx_channels + efx->n_extra_tx_channels) * 1132 efx->tx_queues_per_channel) + 1133 efx->n_xdp_channels * efx->xdp_tx_per_channel); 1134 if (efx->max_vis && efx->max_vis < channel_vis) { 1135 netif_dbg(efx, drv, efx->net_dev, 1136 "Reducing channel VIs from %u to %u\n", 1137 channel_vis, efx->max_vis); 1138 channel_vis = efx->max_vis; 1139 } 1140 1141 #ifdef EFX_USE_PIO 1142 /* Try to allocate PIO buffers if wanted and if the full 1143 * number of PIO buffers would be sufficient to allocate one 1144 * copy-buffer per TX channel. Failure is non-fatal, as there 1145 * are only a small number of PIO buffers shared between all 1146 * functions of the controller. 1147 */ 1148 if (efx_piobuf_size != 0 && 1149 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= 1150 efx->n_tx_channels) { 1151 unsigned int n_piobufs = 1152 DIV_ROUND_UP(efx->n_tx_channels, 1153 nic_data->piobuf_size / efx_piobuf_size); 1154 1155 rc = efx_ef10_alloc_piobufs(efx, n_piobufs); 1156 if (rc == -ENOSPC) 1157 netif_dbg(efx, probe, efx->net_dev, 1158 "out of PIO buffers; cannot allocate more\n"); 1159 else if (rc == -EPERM) 1160 netif_dbg(efx, probe, efx->net_dev, 1161 "not permitted to allocate PIO buffers\n"); 1162 else if (rc) 1163 netif_err(efx, probe, efx->net_dev, 1164 "failed to allocate PIO buffers (%d)\n", rc); 1165 else 1166 netif_dbg(efx, probe, efx->net_dev, 1167 "allocated %u PIO buffers\n", n_piobufs); 1168 } 1169 #else 1170 nic_data->n_piobufs = 0; 1171 #endif 1172 1173 /* PIO buffers should be mapped with write-combining enabled, 1174 * and we want to make single UC and WC mappings rather than 1175 * several of each (in fact that's the only option if host 1176 * page size is >4K). So we may allocate some extra VIs just 1177 * for writing PIO buffers through. 1178 * 1179 * The UC mapping contains (channel_vis - 1) complete VIs and the 1180 * first 4K of the next VI. Then the WC mapping begins with 1181 * the remainder of this last VI. 1182 */ 1183 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + 1184 ER_DZ_TX_PIOBUF); 1185 if (nic_data->n_piobufs) { 1186 /* pio_write_vi_base rounds down to give the number of complete 1187 * VIs inside the UC mapping. 1188 */ 1189 pio_write_vi_base = uc_mem_map_size / efx->vi_stride; 1190 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 1191 nic_data->n_piobufs) * 1192 efx->vi_stride) - 1193 uc_mem_map_size); 1194 max_vis = pio_write_vi_base + nic_data->n_piobufs; 1195 } else { 1196 pio_write_vi_base = 0; 1197 wc_mem_map_size = 0; 1198 max_vis = channel_vis; 1199 } 1200 1201 /* In case the last attached driver failed to free VIs, do it now */ 1202 rc = efx_mcdi_free_vis(efx); 1203 if (rc != 0) 1204 return rc; 1205 1206 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); 1207 if (rc != 0) 1208 return rc; 1209 1210 if (nic_data->n_allocated_vis < channel_vis) { 1211 netif_info(efx, drv, efx->net_dev, 1212 "Could not allocate enough VIs to satisfy RSS" 1213 " requirements. Performance may not be optimal.\n"); 1214 /* We didn't get the VIs to populate our channels. 1215 * We could keep what we got but then we'd have more 1216 * interrupts than we need. 
1217 * Instead calculate new max_channels and restart 1218 */ 1219 efx->max_channels = nic_data->n_allocated_vis; 1220 efx->max_tx_channels = 1221 nic_data->n_allocated_vis / efx->tx_queues_per_channel; 1222 1223 efx_mcdi_free_vis(efx); 1224 return -EAGAIN; 1225 } 1226 1227 /* If we didn't get enough VIs to map all the PIO buffers, free the 1228 * PIO buffers 1229 */ 1230 if (nic_data->n_piobufs && 1231 nic_data->n_allocated_vis < 1232 pio_write_vi_base + nic_data->n_piobufs) { 1233 netif_dbg(efx, probe, efx->net_dev, 1234 "%u VIs are not sufficient to map %u PIO buffers\n", 1235 nic_data->n_allocated_vis, nic_data->n_piobufs); 1236 efx_ef10_free_piobufs(efx); 1237 } 1238 1239 /* Shrink the original UC mapping of the memory BAR */ 1240 membase = ioremap(efx->membase_phys, uc_mem_map_size); 1241 if (!membase) { 1242 netif_err(efx, probe, efx->net_dev, 1243 "could not shrink memory BAR to %x\n", 1244 uc_mem_map_size); 1245 return -ENOMEM; 1246 } 1247 iounmap(efx->membase); 1248 efx->membase = membase; 1249 1250 /* Set up the WC mapping if needed */ 1251 if (wc_mem_map_size) { 1252 nic_data->wc_membase = ioremap_wc(efx->membase_phys + 1253 uc_mem_map_size, 1254 wc_mem_map_size); 1255 if (!nic_data->wc_membase) { 1256 netif_err(efx, probe, efx->net_dev, 1257 "could not allocate WC mapping of size %x\n", 1258 wc_mem_map_size); 1259 return -ENOMEM; 1260 } 1261 nic_data->pio_write_vi_base = pio_write_vi_base; 1262 nic_data->pio_write_base = 1263 nic_data->wc_membase + 1264 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - 1265 uc_mem_map_size); 1266 1267 rc = efx_ef10_link_piobufs(efx); 1268 if (rc) 1269 efx_ef10_free_piobufs(efx); 1270 } 1271 1272 netif_dbg(efx, probe, efx->net_dev, 1273 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", 1274 &efx->membase_phys, efx->membase, uc_mem_map_size, 1275 nic_data->wc_membase, wc_mem_map_size); 1276 1277 return 0; 1278 } 1279 1280 static void efx_ef10_fini_nic(struct efx_nic *efx) 1281 { 1282 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1283 1284 kfree(nic_data->mc_stats); 1285 nic_data->mc_stats = NULL; 1286 } 1287 1288 static int efx_ef10_init_nic(struct efx_nic *efx) 1289 { 1290 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1291 int rc; 1292 1293 if (nic_data->must_check_datapath_caps) { 1294 rc = efx_ef10_init_datapath_caps(efx); 1295 if (rc) 1296 return rc; 1297 nic_data->must_check_datapath_caps = false; 1298 } 1299 1300 if (efx->must_realloc_vis) { 1301 /* We cannot let the number of VIs change now */ 1302 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 1303 nic_data->n_allocated_vis); 1304 if (rc) 1305 return rc; 1306 efx->must_realloc_vis = false; 1307 } 1308 1309 nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64), 1310 GFP_KERNEL); 1311 if (!nic_data->mc_stats) 1312 return -ENOMEM; 1313 1314 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { 1315 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); 1316 if (rc == 0) { 1317 rc = efx_ef10_link_piobufs(efx); 1318 if (rc) 1319 efx_ef10_free_piobufs(efx); 1320 } 1321 1322 /* Log an error on failure, but this is non-fatal. 1323 * Permission errors are less important - we've presumably 1324 * had the PIO buffer licence removed. 
1325 */ 1326 if (rc == -EPERM) 1327 netif_dbg(efx, drv, efx->net_dev, 1328 "not permitted to restore PIO buffers\n"); 1329 else if (rc) 1330 netif_err(efx, drv, efx->net_dev, 1331 "failed to restore PIO buffers (%d)\n", rc); 1332 nic_data->must_restore_piobufs = false; 1333 } 1334 1335 /* don't fail init if RSS setup doesn't work */ 1336 rc = efx->type->rx_push_rss_config(efx, false, 1337 efx->rss_context.rx_indir_table, NULL); 1338 1339 return 0; 1340 } 1341 1342 static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx) 1343 { 1344 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1345 #ifdef CONFIG_SFC_SRIOV 1346 unsigned int i; 1347 #endif 1348 1349 /* All our allocations have been reset */ 1350 efx->must_realloc_vis = true; 1351 efx_mcdi_filter_table_reset_mc_allocations(efx); 1352 nic_data->must_restore_piobufs = true; 1353 efx_ef10_forget_old_piobufs(efx); 1354 efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 1355 1356 /* Driver-created vswitches and vports must be re-created */ 1357 nic_data->must_probe_vswitching = true; 1358 efx->vport_id = EVB_PORT_ID_ASSIGNED; 1359 #ifdef CONFIG_SFC_SRIOV 1360 if (nic_data->vf) 1361 for (i = 0; i < efx->vf_count; i++) 1362 nic_data->vf[i].vport_id = 0; 1363 #endif 1364 } 1365 1366 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) 1367 { 1368 if (reason == RESET_TYPE_MC_FAILURE) 1369 return RESET_TYPE_DATAPATH; 1370 1371 return efx_mcdi_map_reset_reason(reason); 1372 } 1373 1374 static int efx_ef10_map_reset_flags(u32 *flags) 1375 { 1376 enum { 1377 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << 1378 ETH_RESET_SHARED_SHIFT), 1379 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 1380 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 1381 ETH_RESET_PHY | ETH_RESET_MGMT) << 1382 ETH_RESET_SHARED_SHIFT) 1383 }; 1384 1385 /* We assume for now that our PCI function is permitted to 1386 * reset everything. 1387 */ 1388 1389 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { 1390 *flags &= ~EF10_RESET_MC; 1391 return RESET_TYPE_WORLD; 1392 } 1393 1394 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { 1395 *flags &= ~EF10_RESET_PORT; 1396 return RESET_TYPE_ALL; 1397 } 1398 1399 /* no invisible reset implemented */ 1400 1401 return -EINVAL; 1402 } 1403 1404 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) 1405 { 1406 int rc = efx_mcdi_reset(efx, reset_type); 1407 1408 /* Unprivileged functions return -EPERM, but need to return success 1409 * here so that the datapath is brought back up. 1410 */ 1411 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) 1412 rc = 0; 1413 1414 /* If it was a port reset, trigger reallocation of MC resources. 1415 * Note that on an MC reset nothing needs to be done now because we'll 1416 * detect the MC reset later and handle it then. 1417 * For an FLR, we never get an MC reset event, but the MC has reset all 1418 * resources assigned to us, so we have to trigger reallocation now. 
1419 */ 1420 if ((reset_type == RESET_TYPE_ALL || 1421 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) 1422 efx_ef10_table_reset_mc_allocations(efx); 1423 return rc; 1424 } 1425 1426 #define EF10_DMA_STAT(ext_name, mcdi_name) \ 1427 [EF10_STAT_ ## ext_name] = \ 1428 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1429 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ 1430 [EF10_STAT_ ## int_name] = \ 1431 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1432 #define EF10_OTHER_STAT(ext_name) \ 1433 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1434 1435 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { 1436 EF10_DMA_STAT(port_tx_bytes, TX_BYTES), 1437 EF10_DMA_STAT(port_tx_packets, TX_PKTS), 1438 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), 1439 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), 1440 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), 1441 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), 1442 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), 1443 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), 1444 EF10_DMA_STAT(port_tx_64, TX_64_PKTS), 1445 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), 1446 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), 1447 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), 1448 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), 1449 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), 1450 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), 1451 EF10_DMA_STAT(port_rx_bytes, RX_BYTES), 1452 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), 1453 EF10_OTHER_STAT(port_rx_good_bytes), 1454 EF10_OTHER_STAT(port_rx_bad_bytes), 1455 EF10_DMA_STAT(port_rx_packets, RX_PKTS), 1456 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), 1457 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), 1458 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), 1459 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), 1460 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), 1461 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), 1462 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), 1463 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), 1464 EF10_DMA_STAT(port_rx_64, RX_64_PKTS), 1465 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), 1466 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), 1467 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), 1468 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), 1469 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), 1470 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), 1471 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), 1472 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), 1473 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), 1474 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), 1475 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), 1476 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), 1477 EFX_GENERIC_SW_STAT(rx_nodesc_trunc), 1478 EFX_GENERIC_SW_STAT(rx_noskb_drops), 1479 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 1480 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 1481 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 1482 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 1483 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), 1484 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), 1485 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), 1486 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 1487 
EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 1488 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 1489 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), 1490 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), 1491 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), 1492 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), 1493 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), 1494 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), 1495 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), 1496 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), 1497 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), 1498 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), 1499 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), 1500 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), 1501 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), 1502 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), 1503 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), 1504 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), 1505 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), 1506 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), 1507 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), 1508 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), 1509 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), 1510 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), 1511 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), 1512 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1513 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1514 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1515 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1516 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1517 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), 1518 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), 1519 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), 1520 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), 1521 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), 1522 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), 1523 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), 1524 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), 1525 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), 1526 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), 1527 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), 1528 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), 1529 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), 1530 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), 1531 }; 1532 1533 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ 1534 (1ULL << EF10_STAT_port_tx_packets) | \ 1535 (1ULL << EF10_STAT_port_tx_pause) | \ 1536 (1ULL << EF10_STAT_port_tx_unicast) | \ 1537 (1ULL << EF10_STAT_port_tx_multicast) | \ 1538 (1ULL << EF10_STAT_port_tx_broadcast) | \ 1539 (1ULL << EF10_STAT_port_rx_bytes) | \ 1540 (1ULL << \ 1541 EF10_STAT_port_rx_bytes_minus_good_bytes) | \ 1542 (1ULL << EF10_STAT_port_rx_good_bytes) | \ 1543 (1ULL << EF10_STAT_port_rx_bad_bytes) | \ 1544 (1ULL << EF10_STAT_port_rx_packets) | \ 1545 (1ULL << EF10_STAT_port_rx_good) | \ 1546 (1ULL << EF10_STAT_port_rx_bad) | \ 1547 (1ULL << EF10_STAT_port_rx_pause) | \ 1548 (1ULL << 
EF10_STAT_port_rx_control) | \ 1549 (1ULL << EF10_STAT_port_rx_unicast) | \ 1550 (1ULL << EF10_STAT_port_rx_multicast) | \ 1551 (1ULL << EF10_STAT_port_rx_broadcast) | \ 1552 (1ULL << EF10_STAT_port_rx_lt64) | \ 1553 (1ULL << EF10_STAT_port_rx_64) | \ 1554 (1ULL << EF10_STAT_port_rx_65_to_127) | \ 1555 (1ULL << EF10_STAT_port_rx_128_to_255) | \ 1556 (1ULL << EF10_STAT_port_rx_256_to_511) | \ 1557 (1ULL << EF10_STAT_port_rx_512_to_1023) |\ 1558 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ 1559 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ 1560 (1ULL << EF10_STAT_port_rx_gtjumbo) | \ 1561 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ 1562 (1ULL << EF10_STAT_port_rx_overflow) | \ 1563 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ 1564 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ 1565 (1ULL << GENERIC_STAT_rx_noskb_drops)) 1566 1567 /* On 7000 series NICs, these statistics are only provided by the 10G MAC. 1568 * For a 10G/40G switchable port we do not expose these because they might 1569 * not include all the packets they should. 1570 * On 8000 series NICs these statistics are always provided. 1571 */ 1572 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ 1573 (1ULL << EF10_STAT_port_tx_lt64) | \ 1574 (1ULL << EF10_STAT_port_tx_64) | \ 1575 (1ULL << EF10_STAT_port_tx_65_to_127) |\ 1576 (1ULL << EF10_STAT_port_tx_128_to_255) |\ 1577 (1ULL << EF10_STAT_port_tx_256_to_511) |\ 1578 (1ULL << EF10_STAT_port_tx_512_to_1023) |\ 1579 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ 1580 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) 1581 1582 /* These statistics are only provided by the 40G MAC. For a 10G/40G 1583 * switchable port we do expose these because the errors will otherwise 1584 * be silent. 1585 */ 1586 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ 1587 (1ULL << EF10_STAT_port_rx_length_error)) 1588 1589 /* These statistics are only provided if the firmware supports the 1590 * capability PM_AND_RXDP_COUNTERS. 1591 */ 1592 #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 1593 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ 1594 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ 1595 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ 1596 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ 1597 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ 1598 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ 1599 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ 1600 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ 1601 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ 1602 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ 1603 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ 1604 (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) 1605 1606 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, 1607 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in 1608 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1609 * These bits are in the second u64 of the raw mask. 
1610 */ 1611 #define EF10_FEC_STAT_MASK ( \ 1612 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ 1613 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ 1614 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ 1615 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ 1616 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ 1617 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) 1618 1619 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, 1620 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in 1621 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1622 * These bits are in the second u64 of the raw mask. 1623 */ 1624 #define EF10_CTPIO_STAT_MASK ( \ 1625 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1626 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1627 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ 1628 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ 1629 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ 1630 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ 1631 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ 1632 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ 1633 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ 1634 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ 1635 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ 1636 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ 1637 (1ULL << (EF10_STAT_ctpio_success - 64)) | \ 1638 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ 1639 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ 1640 (1ULL << (EF10_STAT_ctpio_erase - 64))) 1641 1642 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1643 { 1644 u64 raw_mask = HUNT_COMMON_STAT_MASK; 1645 u32 port_caps = efx_mcdi_phy_get_caps(efx); 1646 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1647 1648 if (!(efx->mcdi->fn_flags & 1649 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 1650 return 0; 1651 1652 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { 1653 raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 1654 /* 8000 series have everything even at 40G */ 1655 if (nic_data->datapath_caps2 & 1656 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) 1657 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1658 } else { 1659 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1660 } 1661 1662 if (nic_data->datapath_caps & 1663 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 1664 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 1665 1666 return raw_mask; 1667 } 1668 1669 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 1670 { 1671 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1672 u64 raw_mask[2]; 1673 1674 raw_mask[0] = efx_ef10_raw_stat_mask(efx); 1675 1676 /* Only show vadaptor stats when EVB capability is present */ 1677 if (nic_data->datapath_caps & 1678 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { 1679 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); 1680 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; 1681 } else { 1682 raw_mask[1] = 0; 1683 } 1684 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ 1685 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) 1686 raw_mask[1] |= EF10_FEC_STAT_MASK; 1687 1688 /* CTPIO stats appear in V3. Only show them on devices that actually 1689 * support CTPIO. Although this driver doesn't use CTPIO others might, 1690 * and we may be reporting the stats for the underlying port. 
1691 */ 1692 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && 1693 (nic_data->datapath_caps2 & 1694 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) 1695 raw_mask[1] |= EF10_CTPIO_STAT_MASK; 1696 1697 #if BITS_PER_LONG == 64 1698 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); 1699 mask[0] = raw_mask[0]; 1700 mask[1] = raw_mask[1]; 1701 #else 1702 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 1703 mask[0] = raw_mask[0] & 0xffffffff; 1704 mask[1] = raw_mask[0] >> 32; 1705 mask[2] = raw_mask[1] & 0xffffffff; 1706 #endif 1707 } 1708 1709 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 1710 { 1711 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1712 1713 efx_ef10_get_stat_mask(efx, mask); 1714 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1715 mask, names); 1716 } 1717 1718 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, 1719 struct rtnl_link_stats64 *core_stats) 1720 { 1721 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1722 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1723 u64 *stats = nic_data->stats; 1724 size_t stats_count = 0, index; 1725 1726 efx_ef10_get_stat_mask(efx, mask); 1727 1728 if (full_stats) { 1729 for_each_set_bit(index, mask, EF10_STAT_COUNT) { 1730 if (efx_ef10_stat_desc[index].name) { 1731 *full_stats++ = stats[index]; 1732 ++stats_count; 1733 } 1734 } 1735 } 1736 1737 if (!core_stats) 1738 return stats_count; 1739 1740 if (nic_data->datapath_caps & 1741 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { 1742 /* Use vadaptor stats. */ 1743 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + 1744 stats[EF10_STAT_rx_multicast] + 1745 stats[EF10_STAT_rx_broadcast]; 1746 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + 1747 stats[EF10_STAT_tx_multicast] + 1748 stats[EF10_STAT_tx_broadcast]; 1749 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + 1750 stats[EF10_STAT_rx_multicast_bytes] + 1751 stats[EF10_STAT_rx_broadcast_bytes]; 1752 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + 1753 stats[EF10_STAT_tx_multicast_bytes] + 1754 stats[EF10_STAT_tx_broadcast_bytes]; 1755 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + 1756 stats[GENERIC_STAT_rx_noskb_drops]; 1757 core_stats->multicast = stats[EF10_STAT_rx_multicast]; 1758 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; 1759 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; 1760 core_stats->rx_errors = core_stats->rx_crc_errors; 1761 core_stats->tx_errors = stats[EF10_STAT_tx_bad]; 1762 } else { 1763 /* Use port stats. 
*/ 1764 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; 1765 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; 1766 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; 1767 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; 1768 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + 1769 stats[GENERIC_STAT_rx_nodesc_trunc] + 1770 stats[GENERIC_STAT_rx_noskb_drops]; 1771 core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; 1772 core_stats->rx_length_errors = 1773 stats[EF10_STAT_port_rx_gtjumbo] + 1774 stats[EF10_STAT_port_rx_length_error]; 1775 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; 1776 core_stats->rx_frame_errors = 1777 stats[EF10_STAT_port_rx_align_error]; 1778 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; 1779 core_stats->rx_errors = (core_stats->rx_length_errors + 1780 core_stats->rx_crc_errors + 1781 core_stats->rx_frame_errors); 1782 } 1783 1784 return stats_count; 1785 } 1786 1787 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, 1788 struct rtnl_link_stats64 *core_stats) 1789 { 1790 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1791 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1792 u64 *stats = nic_data->stats; 1793 1794 efx_ef10_get_stat_mask(efx, mask); 1795 1796 efx_nic_copy_stats(efx, nic_data->mc_stats); 1797 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1798 mask, stats, nic_data->mc_stats, false); 1799 1800 /* Update derived statistics */ 1801 efx_nic_fix_nodesc_drop_stat(efx, 1802 &stats[EF10_STAT_port_rx_nodesc_drops]); 1803 /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. 1804 * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. 1805 * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. 1806 * Here we calculate port_rx_good_bytes. 1807 */ 1808 stats[EF10_STAT_port_rx_good_bytes] = 1809 stats[EF10_STAT_port_rx_bytes] - 1810 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; 1811 1812 /* The asynchronous reads used to calculate RX_BAD_BYTES in 1813 * MC Firmware are done such that we should not see an increase in 1814 * RX_BAD_BYTES when a good packet has arrived. Unfortunately this 1815 * does mean that the stat can decrease at times. Here we do not 1816 * update the stat unless it has increased or has gone to zero 1817 * (In the case of the NIC rebooting). 1818 * Please see Bug 33781 for a discussion of why things work this way. 1819 */ 1820 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], 1821 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); 1822 efx_update_sw_stats(efx, stats); 1823 1824 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1825 } 1826 1827 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) 1828 __must_hold(&efx->stats_lock) 1829 { 1830 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 1831 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1832 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1833 __le64 generation_start, generation_end; 1834 u64 *stats = nic_data->stats; 1835 u32 dma_len = efx->num_mac_stats * sizeof(u64); 1836 struct efx_buffer stats_buf; 1837 __le64 *dma_stats; 1838 int rc; 1839 1840 spin_unlock_bh(&efx->stats_lock); 1841 1842 if (in_interrupt()) { 1843 /* If in atomic context, cannot update stats. Just update the 1844 * software stats and return so the caller can continue. 
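 * (Fetching fresh MAC stats requires a blocking MCDI request, which
 * cannot be issued from atomic context.)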
1845 */ 1846 spin_lock_bh(&efx->stats_lock); 1847 efx_update_sw_stats(efx, stats); 1848 return 0; 1849 } 1850 1851 efx_ef10_get_stat_mask(efx, mask); 1852 1853 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); 1854 if (rc) { 1855 spin_lock_bh(&efx->stats_lock); 1856 return rc; 1857 } 1858 1859 dma_stats = stats_buf.addr; 1860 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; 1861 1862 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); 1863 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, 1864 MAC_STATS_IN_DMA, 1); 1865 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 1866 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); 1867 1868 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 1869 NULL, 0, NULL); 1870 spin_lock_bh(&efx->stats_lock); 1871 if (rc) { 1872 /* Expect ENOENT if DMA queues have not been set up */ 1873 if (rc != -ENOENT || atomic_read(&efx->active_queues)) 1874 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, 1875 sizeof(inbuf), NULL, 0, rc); 1876 goto out; 1877 } 1878 1879 generation_end = dma_stats[efx->num_mac_stats - 1]; 1880 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { 1881 WARN_ON_ONCE(1); 1882 goto out; 1883 } 1884 rmb(); 1885 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 1886 stats, stats_buf.addr, false); 1887 rmb(); 1888 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 1889 if (generation_end != generation_start) { 1890 rc = -EAGAIN; 1891 goto out; 1892 } 1893 1894 efx_update_sw_stats(efx, stats); 1895 out: 1896 efx_nic_free_buffer(efx, &stats_buf); 1897 return rc; 1898 } 1899 1900 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, 1901 struct rtnl_link_stats64 *core_stats) 1902 { 1903 if (efx_ef10_try_update_nic_stats_vf(efx)) 1904 return 0; 1905 1906 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1907 } 1908 1909 static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 1910 { 1911 struct efx_nic *efx = channel->efx; 1912 unsigned int mode, usecs; 1913 efx_dword_t timer_cmd; 1914 1915 if (channel->irq_moderation_us) { 1916 mode = 3; 1917 usecs = channel->irq_moderation_us; 1918 } else { 1919 mode = 0; 1920 usecs = 0; 1921 } 1922 1923 if (EFX_EF10_WORKAROUND_61265(efx)) { 1924 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); 1925 unsigned int ns = usecs * 1000; 1926 1927 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, 1928 channel->channel); 1929 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); 1930 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); 1931 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); 1932 1933 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, 1934 inbuf, sizeof(inbuf), 0, NULL, 0); 1935 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 1936 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 1937 1938 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, 1939 EFE_DD_EVQ_IND_TIMER_FLAGS, 1940 ERF_DD_EVQ_IND_TIMER_MODE, mode, 1941 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 1942 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, 1943 channel->channel); 1944 } else { 1945 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 1946 1947 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, 1948 ERF_DZ_TC_TIMER_VAL, ticks, 1949 ERF_FZ_TC_TMR_REL_VAL, ticks); 1950 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, 1951 channel->channel); 1952 } 1953 } 1954 1955 static void efx_ef10_get_wol_vf(struct efx_nic *efx, 1956 struct ethtool_wolinfo *wol) {} 1957 1958 
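/* Illustrative sketch (not driver code): efx_ef10_try_update_nic_stats_vf()
 * above guards the MC-written DMA buffer with a seqlock-style generation
 * check. With copy_stats() standing in for efx_nic_update_stats(), the
 * shape of that check is roughly:
 *
 *	__le64 end = dma_stats[efx->num_mac_stats - 1];
 *
 *	if (end == EFX_MC_STATS_GENERATION_INVALID)
 *		return 0;			// MC has not written stats yet
 *	rmb();					// read the stats only after 'end'
 *	copy_stats(stats, dma_stats);		// hypothetical helper
 *	rmb();					// read 'start' only after the stats
 *	if (dma_stats[MC_CMD_MAC_GENERATION_START] != end)
 *		return -EAGAIN;			// MC updated mid-copy; caller retries
 */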
static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) 1959 { 1960 return -EOPNOTSUPP; 1961 } 1962 1963 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) 1964 { 1965 wol->supported = 0; 1966 wol->wolopts = 0; 1967 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1968 } 1969 1970 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) 1971 { 1972 if (type != 0) 1973 return -EINVAL; 1974 return 0; 1975 } 1976 1977 static void efx_ef10_mcdi_request(struct efx_nic *efx, 1978 const efx_dword_t *hdr, size_t hdr_len, 1979 const efx_dword_t *sdu, size_t sdu_len) 1980 { 1981 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1982 u8 *pdu = nic_data->mcdi_buf.addr; 1983 1984 memcpy(pdu, hdr, hdr_len); 1985 memcpy(pdu + hdr_len, sdu, sdu_len); 1986 wmb(); 1987 1988 /* The hardware provides 'low' and 'high' (doorbell) registers 1989 * for passing the 64-bit address of an MCDI request to 1990 * firmware. However the dwords are swapped by firmware. The 1991 * least significant bits of the doorbell are then 0 for all 1992 * MCDI requests due to alignment. 1993 */ 1994 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), 1995 ER_DZ_MC_DB_LWRD); 1996 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), 1997 ER_DZ_MC_DB_HWRD); 1998 } 1999 2000 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) 2001 { 2002 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2003 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; 2004 2005 rmb(); 2006 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); 2007 } 2008 2009 static void 2010 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, 2011 size_t offset, size_t outlen) 2012 { 2013 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2014 const u8 *pdu = nic_data->mcdi_buf.addr; 2015 2016 memcpy(outbuf, pdu + offset, outlen); 2017 } 2018 2019 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) 2020 { 2021 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2022 2023 /* All our allocations have been reset */ 2024 efx_ef10_table_reset_mc_allocations(efx); 2025 2026 /* The datapath firmware might have been changed */ 2027 nic_data->must_check_datapath_caps = true; 2028 2029 /* MAC statistics have been cleared on the NIC; clear the local 2030 * statistic that we update with efx_update_diff_stat(). 2031 */ 2032 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; 2033 } 2034 2035 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) 2036 { 2037 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2038 int rc; 2039 2040 rc = efx_ef10_get_warm_boot_count(efx); 2041 if (rc < 0) { 2042 /* The firmware is presumably in the process of 2043 * rebooting. However, we are supposed to report each 2044 * reboot just once, so we must only do that once we 2045 * can read and store the updated warm boot count. 2046 */ 2047 return 0; 2048 } 2049 2050 if (rc == nic_data->warm_boot_count) 2051 return 0; 2052 2053 nic_data->warm_boot_count = rc; 2054 efx_ef10_mcdi_reboot_detected(efx); 2055 2056 return -EIO; 2057 } 2058 2059 /* Handle an MSI interrupt 2060 * 2061 * Handle an MSI hardware interrupt. This routine schedules event 2062 * queue processing. No interrupt acknowledgement cycle is necessary. 2063 * Also, we never need to check that the interrupt is for us, since 2064 * MSI interrupts cannot be shared. 
2065 */ 2066 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) 2067 { 2068 struct efx_msi_context *context = dev_id; 2069 struct efx_nic *efx = context->efx; 2070 2071 netif_vdbg(efx, intr, efx->net_dev, 2072 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); 2073 2074 if (likely(READ_ONCE(efx->irq_soft_enabled))) { 2075 /* Note test interrupts */ 2076 if (context->index == efx->irq_level) 2077 efx->last_irq_cpu = raw_smp_processor_id(); 2078 2079 /* Schedule processing of the channel */ 2080 efx_schedule_channel_irq(efx->channel[context->index]); 2081 } 2082 2083 return IRQ_HANDLED; 2084 } 2085 2086 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) 2087 { 2088 struct efx_nic *efx = dev_id; 2089 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 2090 struct efx_channel *channel; 2091 efx_dword_t reg; 2092 u32 queues; 2093 2094 /* Read the ISR which also ACKs the interrupts */ 2095 efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); 2096 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); 2097 2098 if (queues == 0) 2099 return IRQ_NONE; 2100 2101 if (likely(soft_enabled)) { 2102 /* Note test interrupts */ 2103 if (queues & (1U << efx->irq_level)) 2104 efx->last_irq_cpu = raw_smp_processor_id(); 2105 2106 efx_for_each_channel(channel, efx) { 2107 if (queues & 1) 2108 efx_schedule_channel_irq(channel); 2109 queues >>= 1; 2110 } 2111 } 2112 2113 netif_vdbg(efx, intr, efx->net_dev, 2114 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 2115 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 2116 2117 return IRQ_HANDLED; 2118 } 2119 2120 static int efx_ef10_irq_test_generate(struct efx_nic *efx) 2121 { 2122 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); 2123 2124 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, 2125 NULL) == 0) 2126 return -ENOTSUPP; 2127 2128 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); 2129 2130 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); 2131 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, 2132 inbuf, sizeof(inbuf), NULL, 0, NULL); 2133 } 2134 2135 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) 2136 { 2137 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2138 (tx_queue->ptr_mask + 1) * 2139 sizeof(efx_qword_t), 2140 GFP_KERNEL); 2141 } 2142 2143 /* This writes to the TX_DESC_WPTR and also pushes data */ 2144 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, 2145 const efx_qword_t *txd) 2146 { 2147 unsigned int write_ptr; 2148 efx_oword_t reg; 2149 2150 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2151 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); 2152 reg.qword[0] = *txd; 2153 efx_writeo_page(tx_queue->efx, ®, 2154 ER_DZ_TX_DESC_UPD, tx_queue->queue); 2155 } 2156 2157 /* Add Firmware-Assisted TSO v2 option descriptors to a queue. 2158 */ 2159 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, 2160 struct sk_buff *skb, 2161 bool *data_mapped) 2162 { 2163 struct efx_tx_buffer *buffer; 2164 struct tcphdr *tcp; 2165 struct iphdr *ip; 2166 2167 u16 ipv4_id; 2168 u32 seqnum; 2169 u32 mss; 2170 2171 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); 2172 2173 mss = skb_shinfo(skb)->gso_size; 2174 2175 if (unlikely(mss < 4)) { 2176 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); 2177 return -EINVAL; 2178 } 2179 2180 ip = ip_hdr(skb); 2181 if (ip->version == 4) { 2182 /* Modify IPv4 header if needed. 
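 * The length and checksum fields are zeroed because the TSO engine
 * rewrites them for each emitted segment; the IP ID and TCP sequence
 * number are instead passed to the hardware in the option descriptors
 * built below.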
*/ 2183 ip->tot_len = 0; 2184 ip->check = 0; 2185 ipv4_id = ntohs(ip->id); 2186 } else { 2187 /* Modify IPv6 header if needed. */ 2188 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2189 2190 ipv6->payload_len = 0; 2191 ipv4_id = 0; 2192 } 2193 2194 tcp = tcp_hdr(skb); 2195 seqnum = ntohl(tcp->seq); 2196 2197 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2198 2199 buffer->flags = EFX_TX_BUF_OPTION; 2200 buffer->len = 0; 2201 buffer->unmap_len = 0; 2202 EFX_POPULATE_QWORD_5(buffer->option, 2203 ESF_DZ_TX_DESC_IS_OPT, 1, 2204 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2205 ESF_DZ_TX_TSO_OPTION_TYPE, 2206 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, 2207 ESF_DZ_TX_TSO_IP_ID, ipv4_id, 2208 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum 2209 ); 2210 ++tx_queue->insert_count; 2211 2212 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2213 2214 buffer->flags = EFX_TX_BUF_OPTION; 2215 buffer->len = 0; 2216 buffer->unmap_len = 0; 2217 EFX_POPULATE_QWORD_4(buffer->option, 2218 ESF_DZ_TX_DESC_IS_OPT, 1, 2219 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2220 ESF_DZ_TX_TSO_OPTION_TYPE, 2221 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, 2222 ESF_DZ_TX_TSO_TCP_MSS, mss 2223 ); 2224 ++tx_queue->insert_count; 2225 2226 return 0; 2227 } 2228 2229 static u32 efx_ef10_tso_versions(struct efx_nic *efx) 2230 { 2231 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2232 u32 tso_versions = 0; 2233 2234 if (nic_data->datapath_caps & 2235 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) 2236 tso_versions |= BIT(1); 2237 if (nic_data->datapath_caps2 & 2238 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) 2239 tso_versions |= BIT(2); 2240 return tso_versions; 2241 } 2242 2243 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) 2244 { 2245 bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD; 2246 struct efx_channel *channel = tx_queue->channel; 2247 struct efx_nic *efx = tx_queue->efx; 2248 struct efx_ef10_nic_data *nic_data; 2249 bool tso_v2 = false; 2250 efx_qword_t *txd; 2251 int rc; 2252 2253 nic_data = efx->nic_data; 2254 2255 /* Only attempt to enable TX timestamping if we have the license for it, 2256 * otherwise TXQ init will fail 2257 */ 2258 if (!(nic_data->licensed_features & 2259 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { 2260 tx_queue->timestamping = false; 2261 /* Disable sync events on this channel. */ 2262 if (efx->type->ptp_set_ts_sync_events) 2263 efx->type->ptp_set_ts_sync_events(efx, false, false); 2264 } 2265 2266 /* TSOv2 is a limited resource that can only be configured on a limited 2267 * number of queues. TSO without checksum offload is not really a thing, 2268 * so we only enable it for those queues. 2269 * TSOv2 cannot be used with Hardware timestamping, and is never needed 2270 * for XDP tx. 2271 */ 2272 if (csum_offload && (nic_data->datapath_caps2 & 2273 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) && 2274 !tx_queue->timestamping && !tx_queue->xdp_tx) { 2275 tso_v2 = true; 2276 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", 2277 channel->channel); 2278 } 2279 2280 rc = efx_mcdi_tx_init(tx_queue, tso_v2); 2281 if (rc) 2282 goto fail; 2283 2284 /* A previous user of this TX queue might have set us up the 2285 * bomb by writing a descriptor to the TX push collector but 2286 * not the doorbell. (Each collector belongs to a port, not a 2287 * queue or function, so cannot easily be reset.) We must 2288 * attempt to push a no-op descriptor in its place. 
2289 */ 2290 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; 2291 tx_queue->insert_count = 1; 2292 txd = efx_tx_desc(tx_queue, 0); 2293 EFX_POPULATE_QWORD_5(*txd, 2294 ESF_DZ_TX_DESC_IS_OPT, true, 2295 ESF_DZ_TX_OPTION_TYPE, 2296 ESE_DZ_TX_OPTION_DESC_CRC_CSUM, 2297 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, 2298 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload, 2299 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); 2300 tx_queue->write_count = 1; 2301 2302 if (tso_v2) { 2303 tx_queue->handle_tso = efx_ef10_tx_tso_desc; 2304 tx_queue->tso_version = 2; 2305 } else if (nic_data->datapath_caps & 2306 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { 2307 tx_queue->tso_version = 1; 2308 } 2309 2310 wmb(); 2311 efx_ef10_push_tx_desc(tx_queue, txd); 2312 2313 return; 2314 2315 fail: 2316 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", 2317 tx_queue->queue); 2318 } 2319 2320 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 2321 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) 2322 { 2323 unsigned int write_ptr; 2324 efx_dword_t reg; 2325 2326 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2327 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); 2328 efx_writed_page(tx_queue->efx, ®, 2329 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); 2330 } 2331 2332 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff 2333 2334 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, 2335 dma_addr_t dma_addr, unsigned int len) 2336 { 2337 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { 2338 /* If we need to break across multiple descriptors we should 2339 * stop at a page boundary. This assumes the length limit is 2340 * greater than the page size. 2341 */ 2342 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; 2343 2344 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); 2345 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; 2346 } 2347 2348 return len; 2349 } 2350 2351 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) 2352 { 2353 unsigned int old_write_count = tx_queue->write_count; 2354 struct efx_tx_buffer *buffer; 2355 unsigned int write_ptr; 2356 efx_qword_t *txd; 2357 2358 tx_queue->xmit_more_available = false; 2359 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) 2360 return; 2361 2362 do { 2363 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2364 buffer = &tx_queue->buffer[write_ptr]; 2365 txd = efx_tx_desc(tx_queue, write_ptr); 2366 ++tx_queue->write_count; 2367 2368 /* Create TX descriptor ring entry */ 2369 if (buffer->flags & EFX_TX_BUF_OPTION) { 2370 *txd = buffer->option; 2371 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) 2372 /* PIO descriptor */ 2373 tx_queue->packet_write_count = tx_queue->write_count; 2374 } else { 2375 tx_queue->packet_write_count = tx_queue->write_count; 2376 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 2377 EFX_POPULATE_QWORD_3( 2378 *txd, 2379 ESF_DZ_TX_KER_CONT, 2380 buffer->flags & EFX_TX_BUF_CONT, 2381 ESF_DZ_TX_KER_BYTE_CNT, buffer->len, 2382 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); 2383 } 2384 } while (tx_queue->write_count != tx_queue->insert_count); 2385 2386 wmb(); /* Ensure descriptors are written before they are fetched */ 2387 2388 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { 2389 txd = efx_tx_desc(tx_queue, 2390 old_write_count & tx_queue->ptr_mask); 2391 efx_ef10_push_tx_desc(tx_queue, txd); 2392 ++tx_queue->pushes; 2393 } else { 2394 efx_ef10_notify_tx_desc(tx_queue); 2395 } 2396 } 2397 2398 static int 
efx_ef10_probe_multicast_chaining(struct efx_nic *efx) 2399 { 2400 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2401 unsigned int enabled, implemented; 2402 bool want_workaround_26807; 2403 int rc; 2404 2405 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 2406 if (rc == -ENOSYS) { 2407 /* GET_WORKAROUNDS was implemented before this workaround, 2408 * thus it must be unavailable in this firmware. 2409 */ 2410 nic_data->workaround_26807 = false; 2411 return 0; 2412 } 2413 if (rc) 2414 return rc; 2415 want_workaround_26807 = 2416 implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807; 2417 nic_data->workaround_26807 = 2418 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); 2419 2420 if (want_workaround_26807 && !nic_data->workaround_26807) { 2421 unsigned int flags; 2422 2423 rc = efx_mcdi_set_workaround(efx, 2424 MC_CMD_WORKAROUND_BUG26807, 2425 true, &flags); 2426 if (!rc) { 2427 if (flags & 2428 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { 2429 netif_info(efx, drv, efx->net_dev, 2430 "other functions on NIC have been reset\n"); 2431 2432 /* With MCFW v4.6.x and earlier, the 2433 * boot count will have incremented, 2434 * so re-read the warm_boot_count 2435 * value now to ensure this function 2436 * doesn't think it has changed next 2437 * time it checks. 2438 */ 2439 rc = efx_ef10_get_warm_boot_count(efx); 2440 if (rc >= 0) { 2441 nic_data->warm_boot_count = rc; 2442 rc = 0; 2443 } 2444 } 2445 nic_data->workaround_26807 = true; 2446 } else if (rc == -EPERM) { 2447 rc = 0; 2448 } 2449 } 2450 return rc; 2451 } 2452 2453 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 2454 { 2455 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2456 int rc = efx_ef10_probe_multicast_chaining(efx); 2457 struct efx_mcdi_filter_vlan *vlan; 2458 2459 if (rc) 2460 return rc; 2461 rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807); 2462 2463 if (rc) 2464 return rc; 2465 2466 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 2467 rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); 2468 if (rc) 2469 goto fail_add_vlan; 2470 } 2471 return 0; 2472 2473 fail_add_vlan: 2474 efx_mcdi_filter_table_remove(efx); 2475 return rc; 2476 } 2477 2478 /* This creates an entry in the RX descriptor queue */ 2479 static inline void 2480 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 2481 { 2482 struct efx_rx_buffer *rx_buf; 2483 efx_qword_t *rxd; 2484 2485 rxd = efx_rx_desc(rx_queue, index); 2486 rx_buf = efx_rx_buffer(rx_queue, index); 2487 EFX_POPULATE_QWORD_2(*rxd, 2488 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 2489 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 2490 } 2491 2492 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 2493 { 2494 struct efx_nic *efx = rx_queue->efx; 2495 unsigned int write_count; 2496 efx_dword_t reg; 2497 2498 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 2499 write_count = rx_queue->added_count & ~7; 2500 if (rx_queue->notified_count == write_count) 2501 return; 2502 2503 do 2504 efx_ef10_build_rx_desc( 2505 rx_queue, 2506 rx_queue->notified_count & rx_queue->ptr_mask); 2507 while (++rx_queue->notified_count != write_count); 2508 2509 wmb(); 2510 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 2511 write_count & rx_queue->ptr_mask); 2512 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 2513 efx_rx_queue_index(rx_queue)); 2514 } 2515 2516 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 2517 2518 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 2519 { 2520 struct efx_channel *channel = 
efx_rx_queue_channel(rx_queue); 2521 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 2522 efx_qword_t event; 2523 2524 EFX_POPULATE_QWORD_2(event, 2525 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 2526 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 2527 2528 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 2529 2530 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 2531 * already swapped the data to little-endian order. 2532 */ 2533 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 2534 sizeof(efx_qword_t)); 2535 2536 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 2537 inbuf, sizeof(inbuf), 0, 2538 efx_ef10_rx_defer_refill_complete, 0); 2539 } 2540 2541 static void 2542 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 2543 int rc, efx_dword_t *outbuf, 2544 size_t outlen_actual) 2545 { 2546 /* nothing to do */ 2547 } 2548 2549 static int efx_ef10_ev_init(struct efx_channel *channel) 2550 { 2551 struct efx_nic *efx = channel->efx; 2552 struct efx_ef10_nic_data *nic_data; 2553 bool use_v2, cut_thru; 2554 2555 nic_data = efx->nic_data; 2556 use_v2 = nic_data->datapath_caps2 & 2557 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN; 2558 cut_thru = !(nic_data->datapath_caps & 2559 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 2560 return efx_mcdi_ev_init(channel, cut_thru, use_v2); 2561 } 2562 2563 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 2564 unsigned int rx_queue_label) 2565 { 2566 struct efx_nic *efx = rx_queue->efx; 2567 2568 netif_info(efx, hw, efx->net_dev, 2569 "rx event arrived on queue %d labeled as queue %u\n", 2570 efx_rx_queue_index(rx_queue), rx_queue_label); 2571 2572 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2573 } 2574 2575 static void 2576 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 2577 unsigned int actual, unsigned int expected) 2578 { 2579 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 2580 struct efx_nic *efx = rx_queue->efx; 2581 2582 netif_info(efx, hw, efx->net_dev, 2583 "dropped %d events (index=%d expected=%d)\n", 2584 dropped, actual, expected); 2585 2586 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2587 } 2588 2589 /* partially received RX was aborted. clean up. 
*/ 2590 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 2591 { 2592 unsigned int rx_desc_ptr; 2593 2594 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 2595 "scattered RX aborted (dropping %u buffers)\n", 2596 rx_queue->scatter_n); 2597 2598 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 2599 2600 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 2601 0, EFX_RX_PKT_DISCARD); 2602 2603 rx_queue->removed_count += rx_queue->scatter_n; 2604 rx_queue->scatter_n = 0; 2605 rx_queue->scatter_len = 0; 2606 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 2607 } 2608 2609 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 2610 unsigned int n_packets, 2611 unsigned int rx_encap_hdr, 2612 unsigned int rx_l3_class, 2613 unsigned int rx_l4_class, 2614 const efx_qword_t *event) 2615 { 2616 struct efx_nic *efx = channel->efx; 2617 bool handled = false; 2618 2619 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 2620 if (!(efx->net_dev->features & NETIF_F_RXALL)) { 2621 if (!efx->loopback_selftest) 2622 channel->n_rx_eth_crc_err += n_packets; 2623 return EFX_RX_PKT_DISCARD; 2624 } 2625 handled = true; 2626 } 2627 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 2628 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 2629 rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2630 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 2631 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 2632 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 2633 netdev_WARN(efx->net_dev, 2634 "invalid class for RX_IPCKSUM_ERR: event=" 2635 EFX_QWORD_FMT "\n", 2636 EFX_QWORD_VAL(*event)); 2637 if (!efx->loopback_selftest) 2638 *(rx_encap_hdr ? 2639 &channel->n_rx_outer_ip_hdr_chksum_err : 2640 &channel->n_rx_ip_hdr_chksum_err) += n_packets; 2641 return 0; 2642 } 2643 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 2644 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 2645 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2646 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 2647 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 2648 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) 2649 netdev_WARN(efx->net_dev, 2650 "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 2651 EFX_QWORD_FMT "\n", 2652 EFX_QWORD_VAL(*event)); 2653 if (!efx->loopback_selftest) 2654 *(rx_encap_hdr ? 
2655 &channel->n_rx_outer_tcp_udp_chksum_err : 2656 &channel->n_rx_tcp_udp_chksum_err) += n_packets; 2657 return 0; 2658 } 2659 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 2660 if (unlikely(!rx_encap_hdr)) 2661 netdev_WARN(efx->net_dev, 2662 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 2663 EFX_QWORD_FMT "\n", 2664 EFX_QWORD_VAL(*event)); 2665 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2666 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 2667 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 2668 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 2669 netdev_WARN(efx->net_dev, 2670 "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 2671 EFX_QWORD_FMT "\n", 2672 EFX_QWORD_VAL(*event)); 2673 if (!efx->loopback_selftest) 2674 channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 2675 return 0; 2676 } 2677 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 2678 if (unlikely(!rx_encap_hdr)) 2679 netdev_WARN(efx->net_dev, 2680 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 2681 EFX_QWORD_FMT "\n", 2682 EFX_QWORD_VAL(*event)); 2683 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2684 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 2685 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 2686 rx_l4_class != ESE_FZ_L4_CLASS_UDP))) 2687 netdev_WARN(efx->net_dev, 2688 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 2689 EFX_QWORD_FMT "\n", 2690 EFX_QWORD_VAL(*event)); 2691 if (!efx->loopback_selftest) 2692 channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 2693 return 0; 2694 } 2695 2696 WARN_ON(!handled); /* No error bits were recognised */ 2697 return 0; 2698 } 2699 2700 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 2701 const efx_qword_t *event) 2702 { 2703 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 2704 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; 2705 unsigned int n_descs, n_packets, i; 2706 struct efx_nic *efx = channel->efx; 2707 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2708 struct efx_rx_queue *rx_queue; 2709 efx_qword_t errors; 2710 bool rx_cont; 2711 u16 flags = 0; 2712 2713 if (unlikely(READ_ONCE(efx->reset_pending))) 2714 return 0; 2715 2716 /* Basic packet information */ 2717 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 2718 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 2719 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 2720 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 2721 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); 2722 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 2723 rx_encap_hdr = 2724 nic_data->datapath_caps & 2725 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
2726 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 2727 ESE_EZ_ENCAP_HDR_NONE; 2728 2729 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 2730 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 2731 EFX_QWORD_FMT "\n", 2732 EFX_QWORD_VAL(*event)); 2733 2734 rx_queue = efx_channel_get_rx_queue(channel); 2735 2736 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 2737 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 2738 2739 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 2740 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2741 2742 if (n_descs != rx_queue->scatter_n + 1) { 2743 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2744 2745 /* detect rx abort */ 2746 if (unlikely(n_descs == rx_queue->scatter_n)) { 2747 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 2748 netdev_WARN(efx->net_dev, 2749 "invalid RX abort: scatter_n=%u event=" 2750 EFX_QWORD_FMT "\n", 2751 rx_queue->scatter_n, 2752 EFX_QWORD_VAL(*event)); 2753 efx_ef10_handle_rx_abort(rx_queue); 2754 return 0; 2755 } 2756 2757 /* Check that RX completion merging is valid, i.e. 2758 * the current firmware supports it and this is a 2759 * non-scattered packet. 2760 */ 2761 if (!(nic_data->datapath_caps & 2762 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 2763 rx_queue->scatter_n != 0 || rx_cont) { 2764 efx_ef10_handle_rx_bad_lbits( 2765 rx_queue, next_ptr_lbits, 2766 (rx_queue->removed_count + 2767 rx_queue->scatter_n + 1) & 2768 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2769 return 0; 2770 } 2771 2772 /* Merged completion for multiple non-scattered packets */ 2773 rx_queue->scatter_n = 1; 2774 rx_queue->scatter_len = 0; 2775 n_packets = n_descs; 2776 ++channel->n_rx_merge_events; 2777 channel->n_rx_merge_packets += n_packets; 2778 flags |= EFX_RX_PKT_PREFIX_LEN; 2779 } else { 2780 ++rx_queue->scatter_n; 2781 rx_queue->scatter_len += rx_bytes; 2782 if (rx_cont) 2783 return 0; 2784 n_packets = 1; 2785 } 2786 2787 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 2788 ESF_DZ_RX_IPCKSUM_ERR, 1, 2789 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 2790 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 2791 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 2792 EFX_AND_QWORD(errors, *event, errors); 2793 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 2794 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, 2795 rx_encap_hdr, 2796 rx_l3_class, rx_l4_class, 2797 event); 2798 } else { 2799 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || 2800 rx_l4_class == ESE_FZ_L4_CLASS_UDP; 2801 2802 switch (rx_encap_hdr) { 2803 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ 2804 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ 2805 if (tcpudp) 2806 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ 2807 break; 2808 case ESE_EZ_ENCAP_HDR_GRE: 2809 case ESE_EZ_ENCAP_HDR_NONE: 2810 if (tcpudp) 2811 flags |= EFX_RX_PKT_CSUMMED; 2812 break; 2813 default: 2814 netdev_WARN(efx->net_dev, 2815 "unknown encapsulation type: event=" 2816 EFX_QWORD_FMT "\n", 2817 EFX_QWORD_VAL(*event)); 2818 } 2819 } 2820 2821 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) 2822 flags |= EFX_RX_PKT_TCP; 2823 2824 channel->irq_mod_score += 2 * n_packets; 2825 2826 /* Handle received packet(s) */ 2827 for (i = 0; i < n_packets; i++) { 2828 efx_rx_packet(rx_queue, 2829 rx_queue->removed_count & rx_queue->ptr_mask, 2830 rx_queue->scatter_n, rx_queue->scatter_len, 2831 flags); 2832 rx_queue->removed_count += rx_queue->scatter_n; 2833 } 2834 2835 rx_queue->scatter_n = 0; 2836 rx_queue->scatter_len = 0; 2837 2838 return n_packets; 2839 } 2840 2841 static u32 
efx_ef10_extract_event_ts(efx_qword_t *event) 2842 { 2843 u32 tstamp; 2844 2845 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); 2846 tstamp <<= 16; 2847 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); 2848 2849 return tstamp; 2850 } 2851 2852 static void 2853 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 2854 { 2855 struct efx_nic *efx = channel->efx; 2856 struct efx_tx_queue *tx_queue; 2857 unsigned int tx_ev_desc_ptr; 2858 unsigned int tx_ev_q_label; 2859 unsigned int tx_ev_type; 2860 u64 ts_part; 2861 2862 if (unlikely(READ_ONCE(efx->reset_pending))) 2863 return; 2864 2865 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 2866 return; 2867 2868 /* Get the transmit queue */ 2869 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 2870 tx_queue = efx_channel_get_tx_queue(channel, 2871 tx_ev_q_label % EFX_TXQ_TYPES); 2872 2873 if (!tx_queue->timestamping) { 2874 /* Transmit completion */ 2875 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 2876 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 2877 return; 2878 } 2879 2880 /* Transmit timestamps are only available for 8XXX series. They result 2881 * in up to three events per packet. These occur in order, and are: 2882 * - the normal completion event (may be omitted) 2883 * - the low part of the timestamp 2884 * - the high part of the timestamp 2885 * 2886 * It's possible for multiple completion events to appear before the 2887 * corresponding timestamps. So we can for example get: 2888 * COMP N 2889 * COMP N+1 2890 * TS_LO N 2891 * TS_HI N 2892 * TS_LO N+1 2893 * TS_HI N+1 2894 * 2895 * In addition it's also possible for the adjacent completions to be 2896 * merged, so we may not see COMP N above. As such, the completion 2897 * events are not very useful here. 2898 * 2899 * Each part of the timestamp is itself split across two 16 bit 2900 * fields in the event. 2901 */ 2902 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); 2903 2904 switch (tx_ev_type) { 2905 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: 2906 /* Ignore this event - see above. */ 2907 break; 2908 2909 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: 2910 ts_part = efx_ef10_extract_event_ts(event); 2911 tx_queue->completed_timestamp_minor = ts_part; 2912 break; 2913 2914 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: 2915 ts_part = efx_ef10_extract_event_ts(event); 2916 tx_queue->completed_timestamp_major = ts_part; 2917 2918 efx_xmit_done_single(tx_queue); 2919 break; 2920 2921 default: 2922 netif_err(efx, hw, efx->net_dev, 2923 "channel %d unknown tx event type %d (data " 2924 EFX_QWORD_FMT ")\n", 2925 channel->channel, tx_ev_type, 2926 EFX_QWORD_VAL(*event)); 2927 break; 2928 } 2929 } 2930 2931 static void 2932 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 2933 { 2934 struct efx_nic *efx = channel->efx; 2935 int subcode; 2936 2937 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 2938 2939 switch (subcode) { 2940 case ESE_DZ_DRV_TIMER_EV: 2941 case ESE_DZ_DRV_WAKE_UP_EV: 2942 break; 2943 case ESE_DZ_DRV_START_UP_EV: 2944 /* event queue init complete. ok. 
*/ 2945 break; 2946 default: 2947 netif_err(efx, hw, efx->net_dev, 2948 "channel %d unknown driver event type %d" 2949 " (data " EFX_QWORD_FMT ")\n", 2950 channel->channel, subcode, 2951 EFX_QWORD_VAL(*event)); 2952 2953 } 2954 } 2955 2956 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 2957 efx_qword_t *event) 2958 { 2959 struct efx_nic *efx = channel->efx; 2960 u32 subcode; 2961 2962 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 2963 2964 switch (subcode) { 2965 case EFX_EF10_TEST: 2966 channel->event_test_cpu = raw_smp_processor_id(); 2967 break; 2968 case EFX_EF10_REFILL: 2969 /* The queue must be empty, so we won't receive any rx 2970 * events, so efx_process_channel() won't refill the 2971 * queue. Refill it here 2972 */ 2973 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 2974 break; 2975 default: 2976 netif_err(efx, hw, efx->net_dev, 2977 "channel %d unknown driver event type %u" 2978 " (data " EFX_QWORD_FMT ")\n", 2979 channel->channel, (unsigned) subcode, 2980 EFX_QWORD_VAL(*event)); 2981 } 2982 } 2983 2984 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 2985 { 2986 struct efx_nic *efx = channel->efx; 2987 efx_qword_t event, *p_event; 2988 unsigned int read_ptr; 2989 int ev_code; 2990 int spent = 0; 2991 2992 if (quota <= 0) 2993 return spent; 2994 2995 read_ptr = channel->eventq_read_ptr; 2996 2997 for (;;) { 2998 p_event = efx_event(channel, read_ptr); 2999 event = *p_event; 3000 3001 if (!efx_event_present(&event)) 3002 break; 3003 3004 EFX_SET_QWORD(*p_event); 3005 3006 ++read_ptr; 3007 3008 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 3009 3010 netif_vdbg(efx, drv, efx->net_dev, 3011 "processing event on %d " EFX_QWORD_FMT "\n", 3012 channel->channel, EFX_QWORD_VAL(event)); 3013 3014 switch (ev_code) { 3015 case ESE_DZ_EV_CODE_MCDI_EV: 3016 efx_mcdi_process_event(channel, &event); 3017 break; 3018 case ESE_DZ_EV_CODE_RX_EV: 3019 spent += efx_ef10_handle_rx_event(channel, &event); 3020 if (spent >= quota) { 3021 /* XXX can we split a merged event to 3022 * avoid going over-quota? 
3023 */ 3024 spent = quota; 3025 goto out; 3026 } 3027 break; 3028 case ESE_DZ_EV_CODE_TX_EV: 3029 efx_ef10_handle_tx_event(channel, &event); 3030 break; 3031 case ESE_DZ_EV_CODE_DRIVER_EV: 3032 efx_ef10_handle_driver_event(channel, &event); 3033 if (++spent == quota) 3034 goto out; 3035 break; 3036 case EFX_EF10_DRVGEN_EV: 3037 efx_ef10_handle_driver_generated_event(channel, &event); 3038 break; 3039 default: 3040 netif_err(efx, hw, efx->net_dev, 3041 "channel %d unknown event type %d" 3042 " (data " EFX_QWORD_FMT ")\n", 3043 channel->channel, ev_code, 3044 EFX_QWORD_VAL(event)); 3045 } 3046 } 3047 3048 out: 3049 channel->eventq_read_ptr = read_ptr; 3050 return spent; 3051 } 3052 3053 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 3054 { 3055 struct efx_nic *efx = channel->efx; 3056 efx_dword_t rptr; 3057 3058 if (EFX_EF10_WORKAROUND_35388(efx)) { 3059 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 3060 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 3061 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 3062 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 3063 3064 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3065 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 3066 ERF_DD_EVQ_IND_RPTR, 3067 (channel->eventq_read_ptr & 3068 channel->eventq_mask) >> 3069 ERF_DD_EVQ_IND_RPTR_WIDTH); 3070 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3071 channel->channel); 3072 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3073 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 3074 ERF_DD_EVQ_IND_RPTR, 3075 channel->eventq_read_ptr & 3076 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 3077 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3078 channel->channel); 3079 } else { 3080 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 3081 channel->eventq_read_ptr & 3082 channel->eventq_mask); 3083 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 3084 } 3085 } 3086 3087 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 3088 { 3089 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3090 struct efx_nic *efx = channel->efx; 3091 efx_qword_t event; 3092 int rc; 3093 3094 EFX_POPULATE_QWORD_2(event, 3095 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3096 ESF_DZ_EV_DATA, EFX_EF10_TEST); 3097 3098 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3099 3100 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3101 * already swapped the data to little-endian order. 
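 * (Using MCDI_SET_QWORD would byte-swap the value a second time on
 * big-endian hosts; copying the raw bytes preserves the layout that
 * EFX_POPULATE_QWORD_2 already produced.)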
3102 */ 3103 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3104 sizeof(efx_qword_t)); 3105 3106 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 3107 NULL, 0, NULL); 3108 if (rc != 0) 3109 goto fail; 3110 3111 return; 3112 3113 fail: 3114 WARN_ON(true); 3115 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 3116 } 3117 3118 static void efx_ef10_prepare_flr(struct efx_nic *efx) 3119 { 3120 atomic_set(&efx->active_queues, 0); 3121 } 3122 3123 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 3124 { 3125 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3126 u8 mac_old[ETH_ALEN]; 3127 int rc, rc2; 3128 3129 /* Only reconfigure a PF-created vport */ 3130 if (is_zero_ether_addr(nic_data->vport_mac)) 3131 return 0; 3132 3133 efx_device_detach_sync(efx); 3134 efx_net_stop(efx->net_dev); 3135 down_write(&efx->filter_sem); 3136 efx_mcdi_filter_table_remove(efx); 3137 up_write(&efx->filter_sem); 3138 3139 rc = efx_ef10_vadaptor_free(efx, efx->vport_id); 3140 if (rc) 3141 goto restore_filters; 3142 3143 ether_addr_copy(mac_old, nic_data->vport_mac); 3144 rc = efx_ef10_vport_del_mac(efx, efx->vport_id, 3145 nic_data->vport_mac); 3146 if (rc) 3147 goto restore_vadaptor; 3148 3149 rc = efx_ef10_vport_add_mac(efx, efx->vport_id, 3150 efx->net_dev->dev_addr); 3151 if (!rc) { 3152 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 3153 } else { 3154 rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old); 3155 if (rc2) { 3156 /* Failed to add original MAC, so clear vport_mac */ 3157 eth_zero_addr(nic_data->vport_mac); 3158 goto reset_nic; 3159 } 3160 } 3161 3162 restore_vadaptor: 3163 rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id); 3164 if (rc2) 3165 goto reset_nic; 3166 restore_filters: 3167 down_write(&efx->filter_sem); 3168 rc2 = efx_ef10_filter_table_probe(efx); 3169 up_write(&efx->filter_sem); 3170 if (rc2) 3171 goto reset_nic; 3172 3173 rc2 = efx_net_open(efx->net_dev); 3174 if (rc2) 3175 goto reset_nic; 3176 3177 efx_device_attach_if_not_resetting(efx); 3178 3179 return rc; 3180 3181 reset_nic: 3182 netif_err(efx, drv, efx->net_dev, 3183 "Failed to restore when changing MAC address - scheduling reset\n"); 3184 efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 3185 3186 return rc ? 
rc : rc2; 3187 } 3188 3189 static int efx_ef10_set_mac_address(struct efx_nic *efx) 3190 { 3191 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 3192 bool was_enabled = efx->port_enabled; 3193 int rc; 3194 3195 efx_device_detach_sync(efx); 3196 efx_net_stop(efx->net_dev); 3197 3198 mutex_lock(&efx->mac_lock); 3199 down_write(&efx->filter_sem); 3200 efx_mcdi_filter_table_remove(efx); 3201 3202 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 3203 efx->net_dev->dev_addr); 3204 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3205 efx->vport_id); 3206 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3207 sizeof(inbuf), NULL, 0, NULL); 3208 3209 efx_ef10_filter_table_probe(efx); 3210 up_write(&efx->filter_sem); 3211 mutex_unlock(&efx->mac_lock); 3212 3213 if (was_enabled) 3214 efx_net_open(efx->net_dev); 3215 efx_device_attach_if_not_resetting(efx); 3216 3217 #ifdef CONFIG_SFC_SRIOV 3218 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 3219 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3220 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3221 3222 if (rc == -EPERM) { 3223 struct efx_nic *efx_pf; 3224 3225 /* Switch to PF and change MAC address on vport */ 3226 efx_pf = pci_get_drvdata(pci_dev_pf); 3227 3228 rc = efx_ef10_sriov_set_vf_mac(efx_pf, 3229 nic_data->vf_index, 3230 efx->net_dev->dev_addr); 3231 } else if (!rc) { 3232 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3233 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 3234 unsigned int i; 3235 3236 /* MAC address successfully changed by VF (with MAC 3237 * spoofing) so update the parent PF if possible. 3238 */ 3239 for (i = 0; i < efx_pf->vf_count; ++i) { 3240 struct ef10_vf *vf = nic_data->vf + i; 3241 3242 if (vf->efx == efx) { 3243 ether_addr_copy(vf->mac, 3244 efx->net_dev->dev_addr); 3245 return 0; 3246 } 3247 } 3248 } 3249 } else 3250 #endif 3251 if (rc == -EPERM) { 3252 netif_err(efx, drv, efx->net_dev, 3253 "Cannot change MAC address; use sfboot to enable" 3254 " mac-spoofing on this interface\n"); 3255 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 3256 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 3257 * fall-back to the method of changing the MAC address on the 3258 * vport. This only applies to PFs because such versions of 3259 * MCFW do not support VFs. 3260 */ 3261 rc = efx_ef10_vport_set_mac_address(efx); 3262 } else if (rc) { 3263 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 3264 sizeof(inbuf), NULL, 0, rc); 3265 } 3266 3267 return rc; 3268 } 3269 3270 static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only) 3271 { 3272 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 3273 3274 efx_mcdi_filter_sync_rx_mode(efx); 3275 3276 if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED)) 3277 return efx_mcdi_set_mtu(efx); 3278 return efx_mcdi_set_mac(efx); 3279 } 3280 3281 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 3282 { 3283 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 3284 3285 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 3286 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 3287 NULL, 0, NULL); 3288 } 3289 3290 /* MC BISTs follow a different poll mechanism to phy BISTs. 3291 * The BIST is done in the poll handler on the MC, and the MCDI command 3292 * will block until the BIST is done. 
3293 */ 3294 static int efx_ef10_poll_bist(struct efx_nic *efx) 3295 { 3296 int rc; 3297 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 3298 size_t outlen; 3299 u32 result; 3300 3301 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 3302 outbuf, sizeof(outbuf), &outlen); 3303 if (rc != 0) 3304 return rc; 3305 3306 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 3307 return -EIO; 3308 3309 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 3310 switch (result) { 3311 case MC_CMD_POLL_BIST_PASSED: 3312 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 3313 return 0; 3314 case MC_CMD_POLL_BIST_TIMEOUT: 3315 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 3316 return -EIO; 3317 case MC_CMD_POLL_BIST_FAILED: 3318 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 3319 return -EIO; 3320 default: 3321 netif_err(efx, hw, efx->net_dev, 3322 "BIST returned unknown result %u", result); 3323 return -EIO; 3324 } 3325 } 3326 3327 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 3328 { 3329 int rc; 3330 3331 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 3332 3333 rc = efx_ef10_start_bist(efx, bist_type); 3334 if (rc != 0) 3335 return rc; 3336 3337 return efx_ef10_poll_bist(efx); 3338 } 3339 3340 static int 3341 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 3342 { 3343 int rc, rc2; 3344 3345 efx_reset_down(efx, RESET_TYPE_WORLD); 3346 3347 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 3348 NULL, 0, NULL, 0, NULL); 3349 if (rc != 0) 3350 goto out; 3351 3352 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 3353 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 3354 3355 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 3356 3357 out: 3358 if (rc == -EPERM) 3359 rc = 0; 3360 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 3361 return rc ? 
rc : rc2; 3362 } 3363 3364 #ifdef CONFIG_SFC_MTD 3365 3366 struct efx_ef10_nvram_type_info { 3367 u16 type, type_mask; 3368 u8 port; 3369 const char *name; 3370 }; 3371 3372 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 3373 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 3374 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 3375 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 3376 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 3377 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 3378 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 3379 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 3380 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 3381 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 3382 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 3383 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 3384 { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, 3385 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, 3386 { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, 3387 { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, 3388 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, 3389 { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, 3390 { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, 3391 }; 3392 #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) 3393 3394 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 3395 struct efx_mcdi_mtd_partition *part, 3396 unsigned int type, 3397 unsigned long *found) 3398 { 3399 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 3400 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 3401 const struct efx_ef10_nvram_type_info *info; 3402 size_t size, erase_size, outlen; 3403 int type_idx = 0; 3404 bool protected; 3405 int rc; 3406 3407 for (type_idx = 0; ; type_idx++) { 3408 if (type_idx == EF10_NVRAM_PARTITION_COUNT) 3409 return -ENODEV; 3410 info = efx_ef10_nvram_types + type_idx; 3411 if ((type & ~info->type_mask) == info->type) 3412 break; 3413 } 3414 if (info->port != efx_port_num(efx)) 3415 return -ENODEV; 3416 3417 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 3418 if (rc) 3419 return rc; 3420 if (protected && 3421 (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && 3422 type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) 3423 /* Hide protected partitions that don't provide defaults. */ 3424 return -ENODEV; 3425 3426 if (protected) 3427 /* Protected partitions are read only. */ 3428 erase_size = 0; 3429 3430 /* If we've already exposed a partition of this type, hide this 3431 * duplicate. All operations on MTDs are keyed by the type anyway, 3432 * so we can't act on the duplicate. 
3433 */ 3434 if (__test_and_set_bit(type_idx, found)) 3435 return -EEXIST; 3436 3437 part->nvram_type = type; 3438 3439 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 3440 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 3441 outbuf, sizeof(outbuf), &outlen); 3442 if (rc) 3443 return rc; 3444 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 3445 return -EIO; 3446 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 3447 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 3448 part->fw_subtype = MCDI_DWORD(outbuf, 3449 NVRAM_METADATA_OUT_SUBTYPE); 3450 3451 part->common.dev_type_name = "EF10 NVRAM manager"; 3452 part->common.type_name = info->name; 3453 3454 part->common.mtd.type = MTD_NORFLASH; 3455 part->common.mtd.flags = MTD_CAP_NORFLASH; 3456 part->common.mtd.size = size; 3457 part->common.mtd.erasesize = erase_size; 3458 /* sfc_status is read-only */ 3459 if (!erase_size) 3460 part->common.mtd.flags |= MTD_NO_ERASE; 3461 3462 return 0; 3463 } 3464 3465 static int efx_ef10_mtd_probe(struct efx_nic *efx) 3466 { 3467 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 3468 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; 3469 struct efx_mcdi_mtd_partition *parts; 3470 size_t outlen, n_parts_total, i, n_parts; 3471 unsigned int type; 3472 int rc; 3473 3474 ASSERT_RTNL(); 3475 3476 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 3477 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 3478 outbuf, sizeof(outbuf), &outlen); 3479 if (rc) 3480 return rc; 3481 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 3482 return -EIO; 3483 3484 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 3485 if (n_parts_total > 3486 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 3487 return -EIO; 3488 3489 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 3490 if (!parts) 3491 return -ENOMEM; 3492 3493 n_parts = 0; 3494 for (i = 0; i < n_parts_total; i++) { 3495 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 3496 i); 3497 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, 3498 found); 3499 if (rc == -EEXIST || rc == -ENODEV) 3500 continue; 3501 if (rc) 3502 goto fail; 3503 n_parts++; 3504 } 3505 3506 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 3507 fail: 3508 if (rc) 3509 kfree(parts); 3510 return rc; 3511 } 3512 3513 #endif /* CONFIG_SFC_MTD */ 3514 3515 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 3516 { 3517 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 3518 } 3519 3520 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 3521 u32 host_time) {} 3522 3523 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 3524 bool temp) 3525 { 3526 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 3527 int rc; 3528 3529 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 3530 channel->sync_events_state == SYNC_EVENTS_VALID || 3531 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 3532 return 0; 3533 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 3534 3535 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 3536 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 3537 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 3538 channel->channel); 3539 3540 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 3541 inbuf, sizeof(inbuf), NULL, 0, NULL); 3542 3543 if (rc != 0) 3544 channel->sync_events_state = temp ? 
SYNC_EVENTS_QUIESCENT : 3545 SYNC_EVENTS_DISABLED; 3546 3547 return rc; 3548 } 3549 3550 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 3551 bool temp) 3552 { 3553 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 3554 int rc; 3555 3556 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 3557 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 3558 return 0; 3559 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 3560 channel->sync_events_state = SYNC_EVENTS_DISABLED; 3561 return 0; 3562 } 3563 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 3564 SYNC_EVENTS_DISABLED; 3565 3566 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 3567 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 3568 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 3569 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 3570 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 3571 channel->channel); 3572 3573 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 3574 inbuf, sizeof(inbuf), NULL, 0, NULL); 3575 3576 return rc; 3577 } 3578 3579 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 3580 bool temp) 3581 { 3582 int (*set)(struct efx_channel *channel, bool temp); 3583 struct efx_channel *channel; 3584 3585 set = en ? 3586 efx_ef10_rx_enable_timestamping : 3587 efx_ef10_rx_disable_timestamping; 3588 3589 channel = efx_ptp_channel(efx); 3590 if (channel) { 3591 int rc = set(channel, temp); 3592 if (en && rc != 0) { 3593 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 3594 return rc; 3595 } 3596 } 3597 3598 return 0; 3599 } 3600 3601 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 3602 struct hwtstamp_config *init) 3603 { 3604 return -EOPNOTSUPP; 3605 } 3606 3607 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 3608 struct hwtstamp_config *init) 3609 { 3610 int rc; 3611 3612 switch (init->rx_filter) { 3613 case HWTSTAMP_FILTER_NONE: 3614 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 3615 /* if TX timestamping is still requested then leave PTP on */ 3616 return efx_ptp_change_mode(efx, 3617 init->tx_type != HWTSTAMP_TX_OFF, 0); 3618 case HWTSTAMP_FILTER_ALL: 3619 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 3620 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 3621 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 3622 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 3623 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 3624 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 3625 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 3626 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 3627 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 3628 case HWTSTAMP_FILTER_PTP_V2_EVENT: 3629 case HWTSTAMP_FILTER_PTP_V2_SYNC: 3630 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 3631 case HWTSTAMP_FILTER_NTP_ALL: 3632 init->rx_filter = HWTSTAMP_FILTER_ALL; 3633 rc = efx_ptp_change_mode(efx, true, 0); 3634 if (!rc) 3635 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 3636 if (rc) 3637 efx_ptp_change_mode(efx, false, 0); 3638 return rc; 3639 default: 3640 return -ERANGE; 3641 } 3642 } 3643 3644 static int efx_ef10_get_phys_port_id(struct efx_nic *efx, 3645 struct netdev_phys_item_id *ppid) 3646 { 3647 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3648 3649 if (!is_valid_ether_addr(nic_data->port_id)) 3650 return -EOPNOTSUPP; 3651 3652 ppid->id_len = ETH_ALEN; 3653 memcpy(ppid->id, nic_data->port_id, ppid->id_len); 3654 3655 return 0; 3656 } 3657 3658 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) 3659 { 3660 if (proto != 
htons(ETH_P_8021Q)) 3661 return -EINVAL; 3662 3663 return efx_ef10_add_vlan(efx, vid); 3664 } 3665 3666 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) 3667 { 3668 if (proto != htons(ETH_P_8021Q)) 3669 return -EINVAL; 3670 3671 return efx_ef10_del_vlan(efx, vid); 3672 } 3673 3674 /* We rely on the MCDI wiping out our TX rings if it made any changes to the 3675 * ports table, ensuring that any TSO descriptors that were made on a now- 3676 * removed tunnel port will be blown away and won't break things when we try 3677 * to transmit them using the new ports table. 3678 */ 3679 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) 3680 { 3681 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3682 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); 3683 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); 3684 bool will_reset = false; 3685 size_t num_entries = 0; 3686 size_t inlen, outlen; 3687 size_t i; 3688 int rc; 3689 efx_dword_t flags_and_num_entries; 3690 3691 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); 3692 3693 nic_data->udp_tunnels_dirty = false; 3694 3695 if (!(nic_data->datapath_caps & 3696 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { 3697 efx_device_attach_if_not_resetting(efx); 3698 return 0; 3699 } 3700 3701 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > 3702 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 3703 3704 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 3705 if (nic_data->udp_tunnels[i].count && 3706 nic_data->udp_tunnels[i].port) { 3707 efx_dword_t entry; 3708 3709 EFX_POPULATE_DWORD_2(entry, 3710 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, 3711 ntohs(nic_data->udp_tunnels[i].port), 3712 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, 3713 nic_data->udp_tunnels[i].type); 3714 *_MCDI_ARRAY_DWORD(inbuf, 3715 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, 3716 num_entries++) = entry; 3717 } 3718 } 3719 3720 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - 3721 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != 3722 EFX_WORD_1_LBN); 3723 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != 3724 EFX_WORD_1_WIDTH); 3725 EFX_POPULATE_DWORD_2(flags_and_num_entries, 3726 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 3727 !!unloading, 3728 EFX_WORD_1, num_entries); 3729 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = 3730 flags_and_num_entries; 3731 3732 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); 3733 3734 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, 3735 inbuf, inlen, outbuf, sizeof(outbuf), &outlen); 3736 if (rc == -EIO) { 3737 /* Most likely the MC rebooted due to another function also 3738 * setting its tunnel port list. Mark the tunnel port list as 3739 * dirty, so it will be pushed upon coming up from the reboot. 3740 */ 3741 nic_data->udp_tunnels_dirty = true; 3742 return 0; 3743 } 3744 3745 if (rc) { 3746 /* expected not available on unprivileged functions */ 3747 if (rc != -EPERM) 3748 netif_warn(efx, drv, efx->net_dev, 3749 "Unable to set UDP tunnel ports; rc=%d.\n", rc); 3750 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & 3751 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { 3752 netif_info(efx, drv, efx->net_dev, 3753 "Rebooting MC due to UDP tunnel port list change\n"); 3754 will_reset = true; 3755 if (unloading) 3756 /* Delay for the MC reset to complete. This will make 3757 * unloading other functions a bit smoother. 
This is a 3758 * race, but the other unload will work whichever way 3759 * it goes, this just avoids an unnecessary error 3760 * message. 3761 */ 3762 msleep(100); 3763 } 3764 if (!will_reset && !unloading) { 3765 /* The caller will have detached, relying on the MC reset to 3766 * trigger a re-attach. Since there won't be an MC reset, we 3767 * have to do the attach ourselves. 3768 */ 3769 efx_device_attach_if_not_resetting(efx); 3770 } 3771 3772 return rc; 3773 } 3774 3775 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) 3776 { 3777 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3778 int rc = 0; 3779 3780 mutex_lock(&nic_data->udp_tunnels_lock); 3781 if (nic_data->udp_tunnels_dirty) { 3782 /* Make sure all TX are stopped while we modify the table, else 3783 * we might race against an efx_features_check(). 3784 */ 3785 efx_device_detach_sync(efx); 3786 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3787 } 3788 mutex_unlock(&nic_data->udp_tunnels_lock); 3789 return rc; 3790 } 3791 3792 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, 3793 __be16 port) 3794 { 3795 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3796 size_t i; 3797 3798 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 3799 if (!nic_data->udp_tunnels[i].count) 3800 continue; 3801 if (nic_data->udp_tunnels[i].port == port) 3802 return &nic_data->udp_tunnels[i]; 3803 } 3804 return NULL; 3805 } 3806 3807 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, 3808 struct efx_udp_tunnel tnl) 3809 { 3810 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3811 struct efx_udp_tunnel *match; 3812 char typebuf[8]; 3813 size_t i; 3814 int rc; 3815 3816 if (!(nic_data->datapath_caps & 3817 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3818 return 0; 3819 3820 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 3821 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", 3822 typebuf, ntohs(tnl.port)); 3823 3824 mutex_lock(&nic_data->udp_tunnels_lock); 3825 /* Make sure all TX are stopped while we add to the table, else we 3826 * might race against an efx_features_check(). 3827 */ 3828 efx_device_detach_sync(efx); 3829 3830 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 3831 if (match != NULL) { 3832 if (match->type == tnl.type) { 3833 netif_dbg(efx, drv, efx->net_dev, 3834 "Referencing existing tunnel entry\n"); 3835 match->count++; 3836 /* No need to cause an MCDI update */ 3837 rc = 0; 3838 goto unlock_out; 3839 } 3840 efx_get_udp_tunnel_type_name(match->type, 3841 typebuf, sizeof(typebuf)); 3842 netif_dbg(efx, drv, efx->net_dev, 3843 "UDP port %d is already in use by %s\n", 3844 ntohs(tnl.port), typebuf); 3845 rc = -EEXIST; 3846 goto unlock_out; 3847 } 3848 3849 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) 3850 if (!nic_data->udp_tunnels[i].count) { 3851 nic_data->udp_tunnels[i] = tnl; 3852 nic_data->udp_tunnels[i].count = 1; 3853 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3854 goto unlock_out; 3855 } 3856 3857 netif_dbg(efx, drv, efx->net_dev, 3858 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", 3859 typebuf, ntohs(tnl.port)); 3860 3861 rc = -ENOMEM; 3862 3863 unlock_out: 3864 mutex_unlock(&nic_data->udp_tunnels_lock); 3865 return rc; 3866 } 3867 3868 /* Called under the TX lock with the TX queue running, hence no-one can be 3869 * in the middle of updating the UDP tunnels table. 
However, they could 3870 * have tried and failed the MCDI, in which case they'll have set the dirty 3871 * flag before dropping their locks. 3872 */ 3873 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) 3874 { 3875 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3876 3877 if (!(nic_data->datapath_caps & 3878 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3879 return false; 3880 3881 if (nic_data->udp_tunnels_dirty) 3882 /* SW table may not match HW state, so just assume we can't 3883 * use any UDP tunnel offloads. 3884 */ 3885 return false; 3886 3887 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; 3888 } 3889 3890 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, 3891 struct efx_udp_tunnel tnl) 3892 { 3893 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3894 struct efx_udp_tunnel *match; 3895 char typebuf[8]; 3896 int rc; 3897 3898 if (!(nic_data->datapath_caps & 3899 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3900 return 0; 3901 3902 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 3903 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", 3904 typebuf, ntohs(tnl.port)); 3905 3906 mutex_lock(&nic_data->udp_tunnels_lock); 3907 /* Make sure all TX are stopped while we remove from the table, else we 3908 * might race against an efx_features_check(). 3909 */ 3910 efx_device_detach_sync(efx); 3911 3912 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 3913 if (match != NULL) { 3914 if (match->type == tnl.type) { 3915 if (--match->count) { 3916 /* Port is still in use, so nothing to do */ 3917 netif_dbg(efx, drv, efx->net_dev, 3918 "UDP tunnel port %d remains active\n", 3919 ntohs(tnl.port)); 3920 rc = 0; 3921 goto out_unlock; 3922 } 3923 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3924 goto out_unlock; 3925 } 3926 efx_get_udp_tunnel_type_name(match->type, 3927 typebuf, sizeof(typebuf)); 3928 netif_warn(efx, drv, efx->net_dev, 3929 "UDP port %d is actually in use by %s, not removing\n", 3930 ntohs(tnl.port), typebuf); 3931 } 3932 rc = -ENOENT; 3933 3934 out_unlock: 3935 mutex_unlock(&nic_data->udp_tunnels_lock); 3936 return rc; 3937 } 3938 3939 /* EF10 may have multiple datapath firmware variants within a 3940 * single version. Report which variants are running. 
3941 */ 3942 static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf, 3943 size_t len) 3944 { 3945 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3946 3947 return scnprintf(buf, len, " rx%x tx%x", 3948 nic_data->rx_dpcpu_fw_id, 3949 nic_data->tx_dpcpu_fw_id); 3950 } 3951 3952 static unsigned int ef10_check_caps(const struct efx_nic *efx, 3953 u8 flag, 3954 u32 offset) 3955 { 3956 const struct efx_ef10_nic_data *nic_data = efx->nic_data; 3957 3958 switch (offset) { 3959 case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST): 3960 return nic_data->datapath_caps & BIT_ULL(flag); 3961 case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST): 3962 return nic_data->datapath_caps2 & BIT_ULL(flag); 3963 default: 3964 return 0; 3965 } 3966 } 3967 3968 #define EF10_OFFLOAD_FEATURES \ 3969 (NETIF_F_IP_CSUM | \ 3970 NETIF_F_HW_VLAN_CTAG_FILTER | \ 3971 NETIF_F_IPV6_CSUM | \ 3972 NETIF_F_RXHASH | \ 3973 NETIF_F_NTUPLE) 3974 3975 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 3976 .is_vf = true, 3977 .mem_bar = efx_ef10_vf_mem_bar, 3978 .mem_map_size = efx_ef10_mem_map_size, 3979 .probe = efx_ef10_probe_vf, 3980 .remove = efx_ef10_remove, 3981 .dimension_resources = efx_ef10_dimension_resources, 3982 .init = efx_ef10_init_nic, 3983 .fini = efx_ef10_fini_nic, 3984 .map_reset_reason = efx_ef10_map_reset_reason, 3985 .map_reset_flags = efx_ef10_map_reset_flags, 3986 .reset = efx_ef10_reset, 3987 .probe_port = efx_mcdi_port_probe, 3988 .remove_port = efx_mcdi_port_remove, 3989 .fini_dmaq = efx_fini_dmaq, 3990 .prepare_flr = efx_ef10_prepare_flr, 3991 .finish_flr = efx_port_dummy_op_void, 3992 .describe_stats = efx_ef10_describe_stats, 3993 .update_stats = efx_ef10_update_stats_vf, 3994 .start_stats = efx_port_dummy_op_void, 3995 .pull_stats = efx_port_dummy_op_void, 3996 .stop_stats = efx_port_dummy_op_void, 3997 .set_id_led = efx_mcdi_set_id_led, 3998 .push_irq_moderation = efx_ef10_push_irq_moderation, 3999 .reconfigure_mac = efx_ef10_mac_reconfigure, 4000 .check_mac_fault = efx_mcdi_mac_check_fault, 4001 .reconfigure_port = efx_mcdi_port_reconfigure, 4002 .get_wol = efx_ef10_get_wol_vf, 4003 .set_wol = efx_ef10_set_wol_vf, 4004 .resume_wol = efx_port_dummy_op_void, 4005 .mcdi_request = efx_ef10_mcdi_request, 4006 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 4007 .mcdi_read_response = efx_ef10_mcdi_read_response, 4008 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4009 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 4010 .irq_enable_master = efx_port_dummy_op_void, 4011 .irq_test_generate = efx_ef10_irq_test_generate, 4012 .irq_disable_non_ev = efx_port_dummy_op_void, 4013 .irq_handle_msi = efx_ef10_msi_interrupt, 4014 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4015 .tx_probe = efx_ef10_tx_probe, 4016 .tx_init = efx_ef10_tx_init, 4017 .tx_remove = efx_mcdi_tx_remove, 4018 .tx_write = efx_ef10_tx_write, 4019 .tx_limit_len = efx_ef10_tx_limit_len, 4020 .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config, 4021 .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, 4022 .rx_probe = efx_mcdi_rx_probe, 4023 .rx_init = efx_mcdi_rx_init, 4024 .rx_remove = efx_mcdi_rx_remove, 4025 .rx_write = efx_ef10_rx_write, 4026 .rx_defer_refill = efx_ef10_rx_defer_refill, 4027 .ev_probe = efx_mcdi_ev_probe, 4028 .ev_init = efx_ef10_ev_init, 4029 .ev_fini = efx_mcdi_ev_fini, 4030 .ev_remove = efx_mcdi_ev_remove, 4031 .ev_process = efx_ef10_ev_process, 4032 .ev_read_ack = efx_ef10_ev_read_ack, 4033 .ev_test_generate = efx_ef10_ev_test_generate, 4034 .filter_table_probe = 
efx_ef10_filter_table_probe, 4035 .filter_table_restore = efx_mcdi_filter_table_restore, 4036 .filter_table_remove = efx_mcdi_filter_table_remove, 4037 .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, 4038 .filter_insert = efx_mcdi_filter_insert, 4039 .filter_remove_safe = efx_mcdi_filter_remove_safe, 4040 .filter_get_safe = efx_mcdi_filter_get_safe, 4041 .filter_clear_rx = efx_mcdi_filter_clear_rx, 4042 .filter_count_rx_used = efx_mcdi_filter_count_rx_used, 4043 .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, 4044 .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, 4045 #ifdef CONFIG_RFS_ACCEL 4046 .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, 4047 #endif 4048 #ifdef CONFIG_SFC_MTD 4049 .mtd_probe = efx_port_dummy_op_int, 4050 #endif 4051 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 4052 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 4053 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 4054 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 4055 #ifdef CONFIG_SFC_SRIOV 4056 .vswitching_probe = efx_ef10_vswitching_probe_vf, 4057 .vswitching_restore = efx_ef10_vswitching_restore_vf, 4058 .vswitching_remove = efx_ef10_vswitching_remove_vf, 4059 #endif 4060 .get_mac_address = efx_ef10_get_mac_address_vf, 4061 .set_mac_address = efx_ef10_set_mac_address, 4062 4063 .get_phys_port_id = efx_ef10_get_phys_port_id, 4064 .revision = EFX_REV_HUNT_A0, 4065 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4066 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4067 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4068 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4069 .can_rx_scatter = true, 4070 .always_rx_scatter = true, 4071 .min_interrupt_mode = EFX_INT_MODE_MSIX, 4072 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4073 .offload_features = EF10_OFFLOAD_FEATURES, 4074 .mcdi_max_ver = 2, 4075 .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, 4076 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4077 1 << HWTSTAMP_FILTER_ALL, 4078 .rx_hash_key_size = 40, 4079 .check_caps = ef10_check_caps, 4080 .print_additional_fwver = efx_ef10_print_additional_fwver, 4081 }; 4082 4083 const struct efx_nic_type efx_hunt_a0_nic_type = { 4084 .is_vf = false, 4085 .mem_bar = efx_ef10_pf_mem_bar, 4086 .mem_map_size = efx_ef10_mem_map_size, 4087 .probe = efx_ef10_probe_pf, 4088 .remove = efx_ef10_remove, 4089 .dimension_resources = efx_ef10_dimension_resources, 4090 .init = efx_ef10_init_nic, 4091 .fini = efx_ef10_fini_nic, 4092 .map_reset_reason = efx_ef10_map_reset_reason, 4093 .map_reset_flags = efx_ef10_map_reset_flags, 4094 .reset = efx_ef10_reset, 4095 .probe_port = efx_mcdi_port_probe, 4096 .remove_port = efx_mcdi_port_remove, 4097 .fini_dmaq = efx_fini_dmaq, 4098 .prepare_flr = efx_ef10_prepare_flr, 4099 .finish_flr = efx_port_dummy_op_void, 4100 .describe_stats = efx_ef10_describe_stats, 4101 .update_stats = efx_ef10_update_stats_pf, 4102 .start_stats = efx_mcdi_mac_start_stats, 4103 .pull_stats = efx_mcdi_mac_pull_stats, 4104 .stop_stats = efx_mcdi_mac_stop_stats, 4105 .set_id_led = efx_mcdi_set_id_led, 4106 .push_irq_moderation = efx_ef10_push_irq_moderation, 4107 .reconfigure_mac = efx_ef10_mac_reconfigure, 4108 .check_mac_fault = efx_mcdi_mac_check_fault, 4109 .reconfigure_port = efx_mcdi_port_reconfigure, 4110 .get_wol = efx_ef10_get_wol, 4111 .set_wol = efx_ef10_set_wol, 4112 .resume_wol = efx_port_dummy_op_void, 4113 .test_chip = efx_ef10_test_chip, 4114 .test_nvram = efx_mcdi_nvram_test_all, 4115 .mcdi_request = efx_ef10_mcdi_request, 4116 .mcdi_poll_response = 
efx_ef10_mcdi_poll_response, 4117 .mcdi_read_response = efx_ef10_mcdi_read_response, 4118 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4119 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 4120 .irq_enable_master = efx_port_dummy_op_void, 4121 .irq_test_generate = efx_ef10_irq_test_generate, 4122 .irq_disable_non_ev = efx_port_dummy_op_void, 4123 .irq_handle_msi = efx_ef10_msi_interrupt, 4124 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4125 .tx_probe = efx_ef10_tx_probe, 4126 .tx_init = efx_ef10_tx_init, 4127 .tx_remove = efx_mcdi_tx_remove, 4128 .tx_write = efx_ef10_tx_write, 4129 .tx_limit_len = efx_ef10_tx_limit_len, 4130 .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, 4131 .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, 4132 .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, 4133 .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, 4134 .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, 4135 .rx_probe = efx_mcdi_rx_probe, 4136 .rx_init = efx_mcdi_rx_init, 4137 .rx_remove = efx_mcdi_rx_remove, 4138 .rx_write = efx_ef10_rx_write, 4139 .rx_defer_refill = efx_ef10_rx_defer_refill, 4140 .ev_probe = efx_mcdi_ev_probe, 4141 .ev_init = efx_ef10_ev_init, 4142 .ev_fini = efx_mcdi_ev_fini, 4143 .ev_remove = efx_mcdi_ev_remove, 4144 .ev_process = efx_ef10_ev_process, 4145 .ev_read_ack = efx_ef10_ev_read_ack, 4146 .ev_test_generate = efx_ef10_ev_test_generate, 4147 .filter_table_probe = efx_ef10_filter_table_probe, 4148 .filter_table_restore = efx_mcdi_filter_table_restore, 4149 .filter_table_remove = efx_mcdi_filter_table_remove, 4150 .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, 4151 .filter_insert = efx_mcdi_filter_insert, 4152 .filter_remove_safe = efx_mcdi_filter_remove_safe, 4153 .filter_get_safe = efx_mcdi_filter_get_safe, 4154 .filter_clear_rx = efx_mcdi_filter_clear_rx, 4155 .filter_count_rx_used = efx_mcdi_filter_count_rx_used, 4156 .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, 4157 .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, 4158 #ifdef CONFIG_RFS_ACCEL 4159 .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, 4160 #endif 4161 #ifdef CONFIG_SFC_MTD 4162 .mtd_probe = efx_ef10_mtd_probe, 4163 .mtd_rename = efx_mcdi_mtd_rename, 4164 .mtd_read = efx_mcdi_mtd_read, 4165 .mtd_erase = efx_mcdi_mtd_erase, 4166 .mtd_write = efx_mcdi_mtd_write, 4167 .mtd_sync = efx_mcdi_mtd_sync, 4168 #endif 4169 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 4170 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 4171 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 4172 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 4173 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 4174 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, 4175 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, 4176 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, 4177 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, 4178 #ifdef CONFIG_SFC_SRIOV 4179 .sriov_configure = efx_ef10_sriov_configure, 4180 .sriov_init = efx_ef10_sriov_init, 4181 .sriov_fini = efx_ef10_sriov_fini, 4182 .sriov_wanted = efx_ef10_sriov_wanted, 4183 .sriov_reset = efx_ef10_sriov_reset, 4184 .sriov_flr = efx_ef10_sriov_flr, 4185 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 4186 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 4187 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 4188 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 4189 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 4190 .vswitching_probe = efx_ef10_vswitching_probe_pf, 4191 
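	/* vswitching_restore re-creates the PF's vswitch/vport configuration after an MC reboot or reset */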
.vswitching_restore = efx_ef10_vswitching_restore_pf, 4192 .vswitching_remove = efx_ef10_vswitching_remove_pf, 4193 #endif 4194 .get_mac_address = efx_ef10_get_mac_address_pf, 4195 .set_mac_address = efx_ef10_set_mac_address, 4196 .tso_versions = efx_ef10_tso_versions, 4197 4198 .get_phys_port_id = efx_ef10_get_phys_port_id, 4199 .revision = EFX_REV_HUNT_A0, 4200 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4201 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4202 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4203 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4204 .can_rx_scatter = true, 4205 .always_rx_scatter = true, 4206 .option_descriptors = true, 4207 .min_interrupt_mode = EFX_INT_MODE_LEGACY, 4208 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4209 .offload_features = EF10_OFFLOAD_FEATURES, 4210 .mcdi_max_ver = 2, 4211 .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, 4212 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4213 1 << HWTSTAMP_FILTER_ALL, 4214 .rx_hash_key_size = 40, 4215 .check_caps = ef10_check_caps, 4216 .print_additional_fwver = efx_ef10_print_additional_fwver, 4217 }; 4218
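
/*
 * Editorial sketch, not part of the driver: roughly how a features_check-style
 * TX hook can consult efx_ef10_udp_tnl_has_port() above so that checksum/TSO
 * offloads are only claimed for encapsulated skbs whose outer UDP destination
 * port is actually programmed into the NIC (and the port table is not dirty).
 * The function name is hypothetical and it assumes the outer transport header
 * is UDP (VXLAN/GENEVE); the real efx_features_check() referenced in the
 * comments above performs additional protocol and header-length checks.
 */
static netdev_features_t example_ef10_tunnel_features_check(struct sk_buff *skb,
							     struct net_device *dev,
							     netdev_features_t features)
{
	struct efx_nic *efx = netdev_priv(dev);

	if (skb->encapsulation &&
	    !efx_ef10_udp_tnl_has_port(efx, udp_hdr(skb)->dest))
		/* Unknown tunnel port: fall back to software GSO/checksum */
		features &= ~(NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK);

	return features;
}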
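
/*
 * Editorial sketch, not part of the driver: how a caller can use the
 * check_caps hook (wired to ef10_check_caps() above) to test one capability
 * bit from FLAGS1 of MC_CMD_GET_CAPABILITIES.  The helper name is
 * hypothetical; flag_lbn is the _LBN (bit number) of the capability of
 * interest, e.g. MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN.
 */
static inline bool example_ef10_has_flags1_cap(struct efx_nic *efx, u8 flag_lbn)
{
	return efx->type->check_caps(efx, flag_lbn,
				     MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST) != 0;
}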