1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2012-2013 Solarflare Communications Inc. 5 */ 6 7 #include "net_driver.h" 8 #include "rx_common.h" 9 #include "ef10_regs.h" 10 #include "io.h" 11 #include "mcdi.h" 12 #include "mcdi_pcol.h" 13 #include "mcdi_port.h" 14 #include "mcdi_port_common.h" 15 #include "mcdi_functions.h" 16 #include "nic.h" 17 #include "mcdi_filters.h" 18 #include "workarounds.h" 19 #include "selftest.h" 20 #include "ef10_sriov.h" 21 #include <linux/in.h> 22 #include <linux/jhash.h> 23 #include <linux/wait.h> 24 #include <linux/workqueue.h> 25 26 /* Hardware control for EF10 architecture including 'Huntington'. */ 27 28 #define EFX_EF10_DRVGEN_EV 7 29 enum { 30 EFX_EF10_TEST = 1, 31 EFX_EF10_REFILL, 32 }; 33 34 /* VLAN list entry */ 35 struct efx_ef10_vlan { 36 struct list_head list; 37 u16 vid; 38 }; 39 40 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); 41 42 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) 43 { 44 efx_dword_t reg; 45 46 efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); 47 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? 48 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; 49 } 50 51 /* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for 52 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O 53 * bar; PFs use BAR 0/1 for memory. 54 */ 55 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) 56 { 57 switch (efx->pci_dev->device) { 58 case 0x0b03: /* SFC9250 PF */ 59 return 0; 60 default: 61 return 2; 62 } 63 } 64 65 /* All VFs use BAR 0/1 for memory */ 66 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) 67 { 68 return 0; 69 } 70 71 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) 72 { 73 int bar; 74 75 bar = efx->type->mem_bar(efx); 76 return resource_size(&efx->pci_dev->resource[bar]); 77 } 78 79 static bool efx_ef10_is_vf(struct efx_nic *efx) 80 { 81 return efx->type->is_vf; 82 } 83 84 #ifdef CONFIG_SFC_SRIOV 85 static int efx_ef10_get_vf_index(struct efx_nic *efx) 86 { 87 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 88 struct efx_ef10_nic_data *nic_data = efx->nic_data; 89 size_t outlen; 90 int rc; 91 92 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 93 sizeof(outbuf), &outlen); 94 if (rc) 95 return rc; 96 if (outlen < sizeof(outbuf)) 97 return -EIO; 98 99 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); 100 return 0; 101 } 102 #endif 103 104 static int efx_ef10_init_datapath_caps(struct efx_nic *efx) 105 { 106 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); 107 struct efx_ef10_nic_data *nic_data = efx->nic_data; 108 size_t outlen; 109 int rc; 110 111 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 112 113 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 114 outbuf, sizeof(outbuf), &outlen); 115 if (rc) 116 return rc; 117 if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 118 netif_err(efx, drv, efx->net_dev, 119 "unable to read datapath firmware capabilities\n"); 120 return -EIO; 121 } 122 123 nic_data->datapath_caps = 124 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 125 126 if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { 127 nic_data->datapath_caps2 = MCDI_DWORD(outbuf, 128 GET_CAPABILITIES_V2_OUT_FLAGS2); 129 nic_data->piobuf_size = MCDI_WORD(outbuf, 130 GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); 131 } else { 132 nic_data->datapath_caps2 = 0; 133 nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; 134 } 135 136 /* record the DPCPU firmware IDs to determine VEB vswitching support. 137 */ 138 nic_data->rx_dpcpu_fw_id = 139 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 140 nic_data->tx_dpcpu_fw_id = 141 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 142 143 if (!(nic_data->datapath_caps & 144 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 145 netif_err(efx, probe, efx->net_dev, 146 "current firmware does not support an RX prefix\n"); 147 return -ENODEV; 148 } 149 150 if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { 151 u8 vi_window_mode = MCDI_BYTE(outbuf, 152 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); 153 154 rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); 155 if (rc) 156 return rc; 157 } else { 158 /* keep default VI stride */ 159 netif_dbg(efx, probe, efx->net_dev, 160 "firmware did not report VI window mode, assuming vi_stride = %u\n", 161 efx->vi_stride); 162 } 163 164 if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { 165 efx->num_mac_stats = MCDI_WORD(outbuf, 166 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); 167 netif_dbg(efx, probe, efx->net_dev, 168 "firmware reports num_mac_stats = %u\n", 169 efx->num_mac_stats); 170 } else { 171 /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ 172 netif_dbg(efx, probe, efx->net_dev, 173 "firmware did not report num_mac_stats, assuming %u\n", 174 efx->num_mac_stats); 175 } 176 177 return 0; 178 } 179 180 static void efx_ef10_read_licensed_features(struct efx_nic *efx) 181 { 182 MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN); 183 MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN); 184 struct efx_ef10_nic_data *nic_data = efx->nic_data; 185 size_t outlen; 186 int rc; 187 188 MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP, 189 MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); 190 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf), 191 outbuf, sizeof(outbuf), &outlen); 192 if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN)) 193 return; 194 195 nic_data->licensed_features = MCDI_QWORD(outbuf, 196 LICENSING_V3_OUT_LICENSED_FEATURES); 197 } 198 199 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) 200 { 201 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); 202 int rc; 203 204 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, 205 outbuf, sizeof(outbuf), NULL); 206 if (rc) 207 return rc; 208 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); 209 return rc > 0 ? rc : -ERANGE; 210 } 211 212 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) 213 { 214 struct efx_ef10_nic_data *nic_data = efx->nic_data; 215 unsigned int implemented; 216 unsigned int enabled; 217 int rc; 218 219 nic_data->workaround_35388 = false; 220 nic_data->workaround_61265 = false; 221 222 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 223 224 if (rc == -ENOSYS) { 225 /* Firmware without GET_WORKAROUNDS - not a problem. */ 226 rc = 0; 227 } else if (rc == 0) { 228 /* Bug61265 workaround is always enabled if implemented. */ 229 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) 230 nic_data->workaround_61265 = true; 231 232 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 233 nic_data->workaround_35388 = true; 234 } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 235 /* Workaround is implemented but not enabled. 236 * Try to enable it. 237 */ 238 rc = efx_mcdi_set_workaround(efx, 239 MC_CMD_WORKAROUND_BUG35388, 240 true, NULL); 241 if (rc == 0) 242 nic_data->workaround_35388 = true; 243 /* If we failed to set the workaround just carry on. */ 244 rc = 0; 245 } 246 } 247 248 netif_dbg(efx, probe, efx->net_dev, 249 "workaround for bug 35388 is %sabled\n", 250 nic_data->workaround_35388 ? "en" : "dis"); 251 netif_dbg(efx, probe, efx->net_dev, 252 "workaround for bug 61265 is %sabled\n", 253 nic_data->workaround_61265 ? "en" : "dis"); 254 255 return rc; 256 } 257 258 static void efx_ef10_process_timer_config(struct efx_nic *efx, 259 const efx_dword_t *data) 260 { 261 unsigned int max_count; 262 263 if (EFX_EF10_WORKAROUND_61265(efx)) { 264 efx->timer_quantum_ns = MCDI_DWORD(data, 265 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); 266 efx->timer_max_ns = MCDI_DWORD(data, 267 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); 268 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 269 efx->timer_quantum_ns = MCDI_DWORD(data, 270 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); 271 max_count = MCDI_DWORD(data, 272 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); 273 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 274 } else { 275 efx->timer_quantum_ns = MCDI_DWORD(data, 276 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); 277 max_count = MCDI_DWORD(data, 278 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); 279 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 280 } 281 282 netif_dbg(efx, probe, efx->net_dev, 283 "got timer properties from MC: quantum %u ns; max %u ns\n", 284 efx->timer_quantum_ns, efx->timer_max_ns); 285 } 286 287 static int efx_ef10_get_timer_config(struct efx_nic *efx) 288 { 289 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); 290 int rc; 291 292 rc = efx_ef10_get_timer_workarounds(efx); 293 if (rc) 294 return rc; 295 296 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, 297 outbuf, sizeof(outbuf), NULL); 298 299 if (rc == 0) { 300 efx_ef10_process_timer_config(efx, outbuf); 301 } else if (rc == -ENOSYS || rc == -EPERM) { 302 /* Not available - fall back to Huntington defaults. */ 303 unsigned int quantum; 304 305 rc = efx_ef10_get_sysclk_freq(efx); 306 if (rc < 0) 307 return rc; 308 309 quantum = 1536000 / rc; /* 1536 cycles */ 310 efx->timer_quantum_ns = quantum; 311 efx->timer_max_ns = efx->type->timer_period_max * quantum; 312 rc = 0; 313 } else { 314 efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, 315 MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, 316 NULL, 0, rc); 317 } 318 319 return rc; 320 } 321 322 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) 323 { 324 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); 325 size_t outlen; 326 int rc; 327 328 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); 329 330 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, 331 outbuf, sizeof(outbuf), &outlen); 332 if (rc) 333 return rc; 334 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 335 return -EIO; 336 337 ether_addr_copy(mac_address, 338 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); 339 return 0; 340 } 341 342 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) 343 { 344 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); 345 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); 346 size_t outlen; 347 int num_addrs, rc; 348 349 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, 350 EVB_PORT_ID_ASSIGNED); 351 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, 352 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 353 354 if (rc) 355 return rc; 356 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) 357 return -EIO; 358 359 num_addrs = MCDI_DWORD(outbuf, 360 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); 361 362 WARN_ON(num_addrs != 1); 363 364 ether_addr_copy(mac_address, 365 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); 366 367 return 0; 368 } 369 370 static ssize_t efx_ef10_show_link_control_flag(struct device *dev, 371 struct device_attribute *attr, 372 char *buf) 373 { 374 struct efx_nic *efx = dev_get_drvdata(dev); 375 376 return sprintf(buf, "%d\n", 377 ((efx->mcdi->fn_flags) & 378 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 379 ? 1 : 0); 380 } 381 382 static ssize_t efx_ef10_show_primary_flag(struct device *dev, 383 struct device_attribute *attr, 384 char *buf) 385 { 386 struct efx_nic *efx = dev_get_drvdata(dev); 387 388 return sprintf(buf, "%d\n", 389 ((efx->mcdi->fn_flags) & 390 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) 391 ? 1 : 0); 392 } 393 394 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid) 395 { 396 struct efx_ef10_nic_data *nic_data = efx->nic_data; 397 struct efx_ef10_vlan *vlan; 398 399 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 400 401 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 402 if (vlan->vid == vid) 403 return vlan; 404 } 405 406 return NULL; 407 } 408 409 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid) 410 { 411 struct efx_ef10_nic_data *nic_data = efx->nic_data; 412 struct efx_ef10_vlan *vlan; 413 int rc; 414 415 mutex_lock(&nic_data->vlan_lock); 416 417 vlan = efx_ef10_find_vlan(efx, vid); 418 if (vlan) { 419 /* We add VID 0 on init. 8021q adds it on module init 420 * for all interfaces with VLAN filtring feature. 421 */ 422 if (vid == 0) 423 goto done_unlock; 424 netif_warn(efx, drv, efx->net_dev, 425 "VLAN %u already added\n", vid); 426 rc = -EALREADY; 427 goto fail_exist; 428 } 429 430 rc = -ENOMEM; 431 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 432 if (!vlan) 433 goto fail_alloc; 434 435 vlan->vid = vid; 436 437 list_add_tail(&vlan->list, &nic_data->vlan_list); 438 439 if (efx->filter_state) { 440 mutex_lock(&efx->mac_lock); 441 down_write(&efx->filter_sem); 442 rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); 443 up_write(&efx->filter_sem); 444 mutex_unlock(&efx->mac_lock); 445 if (rc) 446 goto fail_filter_add_vlan; 447 } 448 449 done_unlock: 450 mutex_unlock(&nic_data->vlan_lock); 451 return 0; 452 453 fail_filter_add_vlan: 454 list_del(&vlan->list); 455 kfree(vlan); 456 fail_alloc: 457 fail_exist: 458 mutex_unlock(&nic_data->vlan_lock); 459 return rc; 460 } 461 462 static void efx_ef10_del_vlan_internal(struct efx_nic *efx, 463 struct efx_ef10_vlan *vlan) 464 { 465 struct efx_ef10_nic_data *nic_data = efx->nic_data; 466 467 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 468 469 if (efx->filter_state) { 470 down_write(&efx->filter_sem); 471 efx_mcdi_filter_del_vlan(efx, vlan->vid); 472 up_write(&efx->filter_sem); 473 } 474 475 list_del(&vlan->list); 476 kfree(vlan); 477 } 478 479 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid) 480 { 481 struct efx_ef10_nic_data *nic_data = efx->nic_data; 482 struct efx_ef10_vlan *vlan; 483 int rc = 0; 484 485 /* 8021q removes VID 0 on module unload for all interfaces 486 * with VLAN filtering feature. We need to keep it to receive 487 * untagged traffic. 488 */ 489 if (vid == 0) 490 return 0; 491 492 mutex_lock(&nic_data->vlan_lock); 493 494 vlan = efx_ef10_find_vlan(efx, vid); 495 if (!vlan) { 496 netif_err(efx, drv, efx->net_dev, 497 "VLAN %u to be deleted not found\n", vid); 498 rc = -ENOENT; 499 } else { 500 efx_ef10_del_vlan_internal(efx, vlan); 501 } 502 503 mutex_unlock(&nic_data->vlan_lock); 504 505 return rc; 506 } 507 508 static void efx_ef10_cleanup_vlans(struct efx_nic *efx) 509 { 510 struct efx_ef10_nic_data *nic_data = efx->nic_data; 511 struct efx_ef10_vlan *vlan, *next_vlan; 512 513 mutex_lock(&nic_data->vlan_lock); 514 list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list) 515 efx_ef10_del_vlan_internal(efx, vlan); 516 mutex_unlock(&nic_data->vlan_lock); 517 } 518 519 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, 520 NULL); 521 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); 522 523 static int efx_ef10_probe(struct efx_nic *efx) 524 { 525 struct efx_ef10_nic_data *nic_data; 526 int i, rc; 527 528 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 529 if (!nic_data) 530 return -ENOMEM; 531 efx->nic_data = nic_data; 532 533 /* we assume later that we can copy from this buffer in dwords */ 534 BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); 535 536 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 537 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); 538 if (rc) 539 goto fail1; 540 541 /* Get the MC's warm boot count. In case it's rebooting right 542 * now, be prepared to retry. 543 */ 544 i = 0; 545 for (;;) { 546 rc = efx_ef10_get_warm_boot_count(efx); 547 if (rc >= 0) 548 break; 549 if (++i == 5) 550 goto fail2; 551 ssleep(1); 552 } 553 nic_data->warm_boot_count = rc; 554 555 efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 556 557 /* In case we're recovering from a crash (kexec), we want to 558 * cancel any outstanding request by the previous user of this 559 * function. We send a special message using the least 560 * significant bits of the 'high' (doorbell) register. 561 */ 562 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); 563 564 rc = efx_mcdi_init(efx); 565 if (rc) 566 goto fail2; 567 568 mutex_init(&nic_data->udp_tunnels_lock); 569 570 /* Reset (most) configuration for this function */ 571 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); 572 if (rc) 573 goto fail3; 574 575 /* Enable event logging */ 576 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 577 if (rc) 578 goto fail3; 579 580 rc = device_create_file(&efx->pci_dev->dev, 581 &dev_attr_link_control_flag); 582 if (rc) 583 goto fail3; 584 585 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 586 if (rc) 587 goto fail4; 588 589 rc = efx_get_pf_index(efx, &nic_data->pf_index); 590 if (rc) 591 goto fail5; 592 593 rc = efx_ef10_init_datapath_caps(efx); 594 if (rc < 0) 595 goto fail5; 596 597 efx_ef10_read_licensed_features(efx); 598 599 /* We can have one VI for each vi_stride-byte region. 600 * However, until we use TX option descriptors we need two TX queues 601 * per channel. 602 */ 603 efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride; 604 if (!efx->max_vis) { 605 netif_err(efx, drv, efx->net_dev, "error determining max VIs\n"); 606 rc = -EIO; 607 goto fail5; 608 } 609 efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS, 610 efx->max_vis / EFX_TXQ_TYPES); 611 efx->max_tx_channels = efx->max_channels; 612 if (WARN_ON(efx->max_channels == 0)) { 613 rc = -EIO; 614 goto fail5; 615 } 616 617 efx->rx_packet_len_offset = 618 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 619 620 if (nic_data->datapath_caps & 621 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN)) 622 efx->net_dev->hw_features |= NETIF_F_RXFCS; 623 624 rc = efx_mcdi_port_get_number(efx); 625 if (rc < 0) 626 goto fail5; 627 efx->port_num = rc; 628 629 rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); 630 if (rc) 631 goto fail5; 632 633 rc = efx_ef10_get_timer_config(efx); 634 if (rc < 0) 635 goto fail5; 636 637 rc = efx_mcdi_mon_probe(efx); 638 if (rc && rc != -EPERM) 639 goto fail5; 640 641 efx_ptp_defer_probe_with_channel(efx); 642 643 #ifdef CONFIG_SFC_SRIOV 644 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { 645 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 646 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 647 648 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); 649 } else 650 #endif 651 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); 652 653 INIT_LIST_HEAD(&nic_data->vlan_list); 654 mutex_init(&nic_data->vlan_lock); 655 656 /* Add unspecified VID to support VLAN filtering being disabled */ 657 rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); 658 if (rc) 659 goto fail_add_vid_unspec; 660 661 /* If VLAN filtering is enabled, we need VID 0 to get untagged 662 * traffic. It is added automatically if 8021q module is loaded, 663 * but we can't rely on it since module may be not loaded. 664 */ 665 rc = efx_ef10_add_vlan(efx, 0); 666 if (rc) 667 goto fail_add_vid_0; 668 669 return 0; 670 671 fail_add_vid_0: 672 efx_ef10_cleanup_vlans(efx); 673 fail_add_vid_unspec: 674 mutex_destroy(&nic_data->vlan_lock); 675 efx_ptp_remove(efx); 676 efx_mcdi_mon_remove(efx); 677 fail5: 678 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 679 fail4: 680 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 681 fail3: 682 efx_mcdi_detach(efx); 683 684 mutex_lock(&nic_data->udp_tunnels_lock); 685 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 686 (void)efx_ef10_set_udp_tnl_ports(efx, true); 687 mutex_unlock(&nic_data->udp_tunnels_lock); 688 mutex_destroy(&nic_data->udp_tunnels_lock); 689 690 efx_mcdi_fini(efx); 691 fail2: 692 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 693 fail1: 694 kfree(nic_data); 695 efx->nic_data = NULL; 696 return rc; 697 } 698 699 #ifdef EFX_USE_PIO 700 701 static void efx_ef10_free_piobufs(struct efx_nic *efx) 702 { 703 struct efx_ef10_nic_data *nic_data = efx->nic_data; 704 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); 705 unsigned int i; 706 int rc; 707 708 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); 709 710 for (i = 0; i < nic_data->n_piobufs; i++) { 711 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, 712 nic_data->piobuf_handle[i]); 713 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), 714 NULL, 0, NULL); 715 WARN_ON(rc); 716 } 717 718 nic_data->n_piobufs = 0; 719 } 720 721 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 722 { 723 struct efx_ef10_nic_data *nic_data = efx->nic_data; 724 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); 725 unsigned int i; 726 size_t outlen; 727 int rc = 0; 728 729 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); 730 731 for (i = 0; i < n; i++) { 732 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, 733 outbuf, sizeof(outbuf), &outlen); 734 if (rc) { 735 /* Don't display the MC error if we didn't have space 736 * for a VF. 737 */ 738 if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) 739 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, 740 0, outbuf, outlen, rc); 741 break; 742 } 743 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 744 rc = -EIO; 745 break; 746 } 747 nic_data->piobuf_handle[i] = 748 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 749 netif_dbg(efx, probe, efx->net_dev, 750 "allocated PIO buffer %u handle %x\n", i, 751 nic_data->piobuf_handle[i]); 752 } 753 754 nic_data->n_piobufs = i; 755 if (rc) 756 efx_ef10_free_piobufs(efx); 757 return rc; 758 } 759 760 static int efx_ef10_link_piobufs(struct efx_nic *efx) 761 { 762 struct efx_ef10_nic_data *nic_data = efx->nic_data; 763 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); 764 struct efx_channel *channel; 765 struct efx_tx_queue *tx_queue; 766 unsigned int offset, index; 767 int rc; 768 769 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 770 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 771 772 /* Link a buffer to each VI in the write-combining mapping */ 773 for (index = 0; index < nic_data->n_piobufs; ++index) { 774 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 775 nic_data->piobuf_handle[index]); 776 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, 777 nic_data->pio_write_vi_base + index); 778 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 779 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 780 NULL, 0, NULL); 781 if (rc) { 782 netif_err(efx, drv, efx->net_dev, 783 "failed to link VI %u to PIO buffer %u (%d)\n", 784 nic_data->pio_write_vi_base + index, index, 785 rc); 786 goto fail; 787 } 788 netif_dbg(efx, probe, efx->net_dev, 789 "linked VI %u to PIO buffer %u\n", 790 nic_data->pio_write_vi_base + index, index); 791 } 792 793 /* Link a buffer to each TX queue */ 794 efx_for_each_channel(channel, efx) { 795 /* Extra channels, even those with TXQs (PTP), do not require 796 * PIO resources. 797 */ 798 if (!channel->type->want_pio || 799 channel->channel >= efx->xdp_channel_offset) 800 continue; 801 802 efx_for_each_channel_tx_queue(tx_queue, channel) { 803 /* We assign the PIO buffers to queues in 804 * reverse order to allow for the following 805 * special case. 806 */ 807 offset = ((efx->tx_channel_offset + efx->n_tx_channels - 808 tx_queue->channel->channel - 1) * 809 efx_piobuf_size); 810 index = offset / nic_data->piobuf_size; 811 offset = offset % nic_data->piobuf_size; 812 813 /* When the host page size is 4K, the first 814 * host page in the WC mapping may be within 815 * the same VI page as the last TX queue. We 816 * can only link one buffer to each VI. 817 */ 818 if (tx_queue->queue == nic_data->pio_write_vi_base) { 819 BUG_ON(index != 0); 820 rc = 0; 821 } else { 822 MCDI_SET_DWORD(inbuf, 823 LINK_PIOBUF_IN_PIOBUF_HANDLE, 824 nic_data->piobuf_handle[index]); 825 MCDI_SET_DWORD(inbuf, 826 LINK_PIOBUF_IN_TXQ_INSTANCE, 827 tx_queue->queue); 828 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 829 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 830 NULL, 0, NULL); 831 } 832 833 if (rc) { 834 /* This is non-fatal; the TX path just 835 * won't use PIO for this queue 836 */ 837 netif_err(efx, drv, efx->net_dev, 838 "failed to link VI %u to PIO buffer %u (%d)\n", 839 tx_queue->queue, index, rc); 840 tx_queue->piobuf = NULL; 841 } else { 842 tx_queue->piobuf = 843 nic_data->pio_write_base + 844 index * efx->vi_stride + offset; 845 tx_queue->piobuf_offset = offset; 846 netif_dbg(efx, probe, efx->net_dev, 847 "linked VI %u to PIO buffer %u offset %x addr %p\n", 848 tx_queue->queue, index, 849 tx_queue->piobuf_offset, 850 tx_queue->piobuf); 851 } 852 } 853 } 854 855 return 0; 856 857 fail: 858 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same 859 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 860 */ 861 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); 862 while (index--) { 863 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 864 nic_data->pio_write_vi_base + index); 865 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, 866 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, 867 NULL, 0, NULL); 868 } 869 return rc; 870 } 871 872 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 873 { 874 struct efx_channel *channel; 875 struct efx_tx_queue *tx_queue; 876 877 /* All our existing PIO buffers went away */ 878 efx_for_each_channel(channel, efx) 879 efx_for_each_channel_tx_queue(tx_queue, channel) 880 tx_queue->piobuf = NULL; 881 } 882 883 #else /* !EFX_USE_PIO */ 884 885 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 886 { 887 return n == 0 ? 0 : -ENOBUFS; 888 } 889 890 static int efx_ef10_link_piobufs(struct efx_nic *efx) 891 { 892 return 0; 893 } 894 895 static void efx_ef10_free_piobufs(struct efx_nic *efx) 896 { 897 } 898 899 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 900 { 901 } 902 903 #endif /* EFX_USE_PIO */ 904 905 static void efx_ef10_remove(struct efx_nic *efx) 906 { 907 struct efx_ef10_nic_data *nic_data = efx->nic_data; 908 int rc; 909 910 #ifdef CONFIG_SFC_SRIOV 911 struct efx_ef10_nic_data *nic_data_pf; 912 struct pci_dev *pci_dev_pf; 913 struct efx_nic *efx_pf; 914 struct ef10_vf *vf; 915 916 if (efx->pci_dev->is_virtfn) { 917 pci_dev_pf = efx->pci_dev->physfn; 918 if (pci_dev_pf) { 919 efx_pf = pci_get_drvdata(pci_dev_pf); 920 nic_data_pf = efx_pf->nic_data; 921 vf = nic_data_pf->vf + nic_data->vf_index; 922 vf->efx = NULL; 923 } else 924 netif_info(efx, drv, efx->net_dev, 925 "Could not get the PF id from VF\n"); 926 } 927 #endif 928 929 efx_ef10_cleanup_vlans(efx); 930 mutex_destroy(&nic_data->vlan_lock); 931 932 efx_ptp_remove(efx); 933 934 efx_mcdi_mon_remove(efx); 935 936 efx_mcdi_rx_free_indir_table(efx); 937 938 if (nic_data->wc_membase) 939 iounmap(nic_data->wc_membase); 940 941 rc = efx_mcdi_free_vis(efx); 942 WARN_ON(rc != 0); 943 944 if (!nic_data->must_restore_piobufs) 945 efx_ef10_free_piobufs(efx); 946 947 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 948 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 949 950 efx_mcdi_detach(efx); 951 952 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 953 mutex_lock(&nic_data->udp_tunnels_lock); 954 (void)efx_ef10_set_udp_tnl_ports(efx, true); 955 mutex_unlock(&nic_data->udp_tunnels_lock); 956 957 mutex_destroy(&nic_data->udp_tunnels_lock); 958 959 efx_mcdi_fini(efx); 960 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 961 kfree(nic_data); 962 } 963 964 static int efx_ef10_probe_pf(struct efx_nic *efx) 965 { 966 return efx_ef10_probe(efx); 967 } 968 969 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, 970 u32 *port_flags, u32 *vadaptor_flags, 971 unsigned int *vlan_tags) 972 { 973 struct efx_ef10_nic_data *nic_data = efx->nic_data; 974 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); 975 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); 976 size_t outlen; 977 int rc; 978 979 if (nic_data->datapath_caps & 980 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { 981 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, 982 port_id); 983 984 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), 985 outbuf, sizeof(outbuf), &outlen); 986 if (rc) 987 return rc; 988 989 if (outlen < sizeof(outbuf)) { 990 rc = -EIO; 991 return rc; 992 } 993 } 994 995 if (port_flags) 996 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); 997 if (vadaptor_flags) 998 *vadaptor_flags = 999 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); 1000 if (vlan_tags) 1001 *vlan_tags = 1002 MCDI_DWORD(outbuf, 1003 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); 1004 1005 return 0; 1006 } 1007 1008 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 1009 { 1010 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 1011 1012 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 1013 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 1014 NULL, 0, NULL); 1015 } 1016 1017 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 1018 { 1019 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 1020 1021 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 1022 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 1023 NULL, 0, NULL); 1024 } 1025 1026 int efx_ef10_vport_add_mac(struct efx_nic *efx, 1027 unsigned int port_id, u8 *mac) 1028 { 1029 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 1030 1031 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 1032 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 1033 1034 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 1035 sizeof(inbuf), NULL, 0, NULL); 1036 } 1037 1038 int efx_ef10_vport_del_mac(struct efx_nic *efx, 1039 unsigned int port_id, u8 *mac) 1040 { 1041 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 1042 1043 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 1044 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 1045 1046 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 1047 sizeof(inbuf), NULL, 0, NULL); 1048 } 1049 1050 #ifdef CONFIG_SFC_SRIOV 1051 static int efx_ef10_probe_vf(struct efx_nic *efx) 1052 { 1053 int rc; 1054 struct pci_dev *pci_dev_pf; 1055 1056 /* If the parent PF has no VF data structure, it doesn't know about this 1057 * VF so fail probe. The VF needs to be re-created. This can happen 1058 * if the PF driver is unloaded while the VF is assigned to a guest. 1059 */ 1060 pci_dev_pf = efx->pci_dev->physfn; 1061 if (pci_dev_pf) { 1062 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 1063 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; 1064 1065 if (!nic_data_pf->vf) { 1066 netif_info(efx, drv, efx->net_dev, 1067 "The VF cannot link to its parent PF; " 1068 "please destroy and re-create the VF\n"); 1069 return -EBUSY; 1070 } 1071 } 1072 1073 rc = efx_ef10_probe(efx); 1074 if (rc) 1075 return rc; 1076 1077 rc = efx_ef10_get_vf_index(efx); 1078 if (rc) 1079 goto fail; 1080 1081 if (efx->pci_dev->is_virtfn) { 1082 if (efx->pci_dev->physfn) { 1083 struct efx_nic *efx_pf = 1084 pci_get_drvdata(efx->pci_dev->physfn); 1085 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; 1086 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1087 1088 nic_data_p->vf[nic_data->vf_index].efx = efx; 1089 nic_data_p->vf[nic_data->vf_index].pci_dev = 1090 efx->pci_dev; 1091 } else 1092 netif_info(efx, drv, efx->net_dev, 1093 "Could not get the PF id from VF\n"); 1094 } 1095 1096 return 0; 1097 1098 fail: 1099 efx_ef10_remove(efx); 1100 return rc; 1101 } 1102 #else 1103 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) 1104 { 1105 return 0; 1106 } 1107 #endif 1108 1109 static int efx_ef10_alloc_vis(struct efx_nic *efx, 1110 unsigned int min_vis, unsigned int max_vis) 1111 { 1112 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1113 1114 return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base, 1115 &nic_data->n_allocated_vis); 1116 } 1117 1118 /* Note that the failure path of this function does not free 1119 * resources, as this will be done by efx_ef10_remove(). 1120 */ 1121 static int efx_ef10_dimension_resources(struct efx_nic *efx) 1122 { 1123 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1124 unsigned int uc_mem_map_size, wc_mem_map_size; 1125 unsigned int min_vis = max(EFX_TXQ_TYPES, 1126 efx_separate_tx_channels ? 2 : 1); 1127 unsigned int channel_vis, pio_write_vi_base, max_vis; 1128 void __iomem *membase; 1129 int rc; 1130 1131 channel_vis = max(efx->n_channels, 1132 ((efx->n_tx_channels + efx->n_extra_tx_channels) * 1133 EFX_TXQ_TYPES) + 1134 efx->n_xdp_channels * efx->xdp_tx_per_channel); 1135 if (efx->max_vis && efx->max_vis < channel_vis) { 1136 netif_dbg(efx, drv, efx->net_dev, 1137 "Reducing channel VIs from %u to %u\n", 1138 channel_vis, efx->max_vis); 1139 channel_vis = efx->max_vis; 1140 } 1141 1142 #ifdef EFX_USE_PIO 1143 /* Try to allocate PIO buffers if wanted and if the full 1144 * number of PIO buffers would be sufficient to allocate one 1145 * copy-buffer per TX channel. Failure is non-fatal, as there 1146 * are only a small number of PIO buffers shared between all 1147 * functions of the controller. 1148 */ 1149 if (efx_piobuf_size != 0 && 1150 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= 1151 efx->n_tx_channels) { 1152 unsigned int n_piobufs = 1153 DIV_ROUND_UP(efx->n_tx_channels, 1154 nic_data->piobuf_size / efx_piobuf_size); 1155 1156 rc = efx_ef10_alloc_piobufs(efx, n_piobufs); 1157 if (rc == -ENOSPC) 1158 netif_dbg(efx, probe, efx->net_dev, 1159 "out of PIO buffers; cannot allocate more\n"); 1160 else if (rc == -EPERM) 1161 netif_dbg(efx, probe, efx->net_dev, 1162 "not permitted to allocate PIO buffers\n"); 1163 else if (rc) 1164 netif_err(efx, probe, efx->net_dev, 1165 "failed to allocate PIO buffers (%d)\n", rc); 1166 else 1167 netif_dbg(efx, probe, efx->net_dev, 1168 "allocated %u PIO buffers\n", n_piobufs); 1169 } 1170 #else 1171 nic_data->n_piobufs = 0; 1172 #endif 1173 1174 /* PIO buffers should be mapped with write-combining enabled, 1175 * and we want to make single UC and WC mappings rather than 1176 * several of each (in fact that's the only option if host 1177 * page size is >4K). So we may allocate some extra VIs just 1178 * for writing PIO buffers through. 1179 * 1180 * The UC mapping contains (channel_vis - 1) complete VIs and the 1181 * first 4K of the next VI. Then the WC mapping begins with 1182 * the remainder of this last VI. 1183 */ 1184 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + 1185 ER_DZ_TX_PIOBUF); 1186 if (nic_data->n_piobufs) { 1187 /* pio_write_vi_base rounds down to give the number of complete 1188 * VIs inside the UC mapping. 1189 */ 1190 pio_write_vi_base = uc_mem_map_size / efx->vi_stride; 1191 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 1192 nic_data->n_piobufs) * 1193 efx->vi_stride) - 1194 uc_mem_map_size); 1195 max_vis = pio_write_vi_base + nic_data->n_piobufs; 1196 } else { 1197 pio_write_vi_base = 0; 1198 wc_mem_map_size = 0; 1199 max_vis = channel_vis; 1200 } 1201 1202 /* In case the last attached driver failed to free VIs, do it now */ 1203 rc = efx_mcdi_free_vis(efx); 1204 if (rc != 0) 1205 return rc; 1206 1207 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); 1208 if (rc != 0) 1209 return rc; 1210 1211 if (nic_data->n_allocated_vis < channel_vis) { 1212 netif_info(efx, drv, efx->net_dev, 1213 "Could not allocate enough VIs to satisfy RSS" 1214 " requirements. Performance may not be optimal.\n"); 1215 /* We didn't get the VIs to populate our channels. 1216 * We could keep what we got but then we'd have more 1217 * interrupts than we need. 1218 * Instead calculate new max_channels and restart 1219 */ 1220 efx->max_channels = nic_data->n_allocated_vis; 1221 efx->max_tx_channels = 1222 nic_data->n_allocated_vis / EFX_TXQ_TYPES; 1223 1224 efx_mcdi_free_vis(efx); 1225 return -EAGAIN; 1226 } 1227 1228 /* If we didn't get enough VIs to map all the PIO buffers, free the 1229 * PIO buffers 1230 */ 1231 if (nic_data->n_piobufs && 1232 nic_data->n_allocated_vis < 1233 pio_write_vi_base + nic_data->n_piobufs) { 1234 netif_dbg(efx, probe, efx->net_dev, 1235 "%u VIs are not sufficient to map %u PIO buffers\n", 1236 nic_data->n_allocated_vis, nic_data->n_piobufs); 1237 efx_ef10_free_piobufs(efx); 1238 } 1239 1240 /* Shrink the original UC mapping of the memory BAR */ 1241 membase = ioremap(efx->membase_phys, uc_mem_map_size); 1242 if (!membase) { 1243 netif_err(efx, probe, efx->net_dev, 1244 "could not shrink memory BAR to %x\n", 1245 uc_mem_map_size); 1246 return -ENOMEM; 1247 } 1248 iounmap(efx->membase); 1249 efx->membase = membase; 1250 1251 /* Set up the WC mapping if needed */ 1252 if (wc_mem_map_size) { 1253 nic_data->wc_membase = ioremap_wc(efx->membase_phys + 1254 uc_mem_map_size, 1255 wc_mem_map_size); 1256 if (!nic_data->wc_membase) { 1257 netif_err(efx, probe, efx->net_dev, 1258 "could not allocate WC mapping of size %x\n", 1259 wc_mem_map_size); 1260 return -ENOMEM; 1261 } 1262 nic_data->pio_write_vi_base = pio_write_vi_base; 1263 nic_data->pio_write_base = 1264 nic_data->wc_membase + 1265 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - 1266 uc_mem_map_size); 1267 1268 rc = efx_ef10_link_piobufs(efx); 1269 if (rc) 1270 efx_ef10_free_piobufs(efx); 1271 } 1272 1273 netif_dbg(efx, probe, efx->net_dev, 1274 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", 1275 &efx->membase_phys, efx->membase, uc_mem_map_size, 1276 nic_data->wc_membase, wc_mem_map_size); 1277 1278 return 0; 1279 } 1280 1281 static void efx_ef10_fini_nic(struct efx_nic *efx) 1282 { 1283 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1284 1285 kfree(nic_data->mc_stats); 1286 nic_data->mc_stats = NULL; 1287 } 1288 1289 static int efx_ef10_init_nic(struct efx_nic *efx) 1290 { 1291 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1292 int rc; 1293 1294 if (nic_data->must_check_datapath_caps) { 1295 rc = efx_ef10_init_datapath_caps(efx); 1296 if (rc) 1297 return rc; 1298 nic_data->must_check_datapath_caps = false; 1299 } 1300 1301 if (efx->must_realloc_vis) { 1302 /* We cannot let the number of VIs change now */ 1303 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 1304 nic_data->n_allocated_vis); 1305 if (rc) 1306 return rc; 1307 efx->must_realloc_vis = false; 1308 } 1309 1310 nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64), 1311 GFP_KERNEL); 1312 if (!nic_data->mc_stats) 1313 return -ENOMEM; 1314 1315 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { 1316 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); 1317 if (rc == 0) { 1318 rc = efx_ef10_link_piobufs(efx); 1319 if (rc) 1320 efx_ef10_free_piobufs(efx); 1321 } 1322 1323 /* Log an error on failure, but this is non-fatal. 1324 * Permission errors are less important - we've presumably 1325 * had the PIO buffer licence removed. 1326 */ 1327 if (rc == -EPERM) 1328 netif_dbg(efx, drv, efx->net_dev, 1329 "not permitted to restore PIO buffers\n"); 1330 else if (rc) 1331 netif_err(efx, drv, efx->net_dev, 1332 "failed to restore PIO buffers (%d)\n", rc); 1333 nic_data->must_restore_piobufs = false; 1334 } 1335 1336 /* don't fail init if RSS setup doesn't work */ 1337 rc = efx->type->rx_push_rss_config(efx, false, 1338 efx->rss_context.rx_indir_table, NULL); 1339 1340 return 0; 1341 } 1342 1343 static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx) 1344 { 1345 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1346 #ifdef CONFIG_SFC_SRIOV 1347 unsigned int i; 1348 #endif 1349 1350 /* All our allocations have been reset */ 1351 efx->must_realloc_vis = true; 1352 efx_mcdi_filter_table_reset_mc_allocations(efx); 1353 nic_data->must_restore_piobufs = true; 1354 efx_ef10_forget_old_piobufs(efx); 1355 efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 1356 1357 /* Driver-created vswitches and vports must be re-created */ 1358 nic_data->must_probe_vswitching = true; 1359 efx->vport_id = EVB_PORT_ID_ASSIGNED; 1360 #ifdef CONFIG_SFC_SRIOV 1361 if (nic_data->vf) 1362 for (i = 0; i < efx->vf_count; i++) 1363 nic_data->vf[i].vport_id = 0; 1364 #endif 1365 } 1366 1367 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) 1368 { 1369 if (reason == RESET_TYPE_MC_FAILURE) 1370 return RESET_TYPE_DATAPATH; 1371 1372 return efx_mcdi_map_reset_reason(reason); 1373 } 1374 1375 static int efx_ef10_map_reset_flags(u32 *flags) 1376 { 1377 enum { 1378 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << 1379 ETH_RESET_SHARED_SHIFT), 1380 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 1381 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 1382 ETH_RESET_PHY | ETH_RESET_MGMT) << 1383 ETH_RESET_SHARED_SHIFT) 1384 }; 1385 1386 /* We assume for now that our PCI function is permitted to 1387 * reset everything. 1388 */ 1389 1390 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { 1391 *flags &= ~EF10_RESET_MC; 1392 return RESET_TYPE_WORLD; 1393 } 1394 1395 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { 1396 *flags &= ~EF10_RESET_PORT; 1397 return RESET_TYPE_ALL; 1398 } 1399 1400 /* no invisible reset implemented */ 1401 1402 return -EINVAL; 1403 } 1404 1405 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) 1406 { 1407 int rc = efx_mcdi_reset(efx, reset_type); 1408 1409 /* Unprivileged functions return -EPERM, but need to return success 1410 * here so that the datapath is brought back up. 1411 */ 1412 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) 1413 rc = 0; 1414 1415 /* If it was a port reset, trigger reallocation of MC resources. 1416 * Note that on an MC reset nothing needs to be done now because we'll 1417 * detect the MC reset later and handle it then. 1418 * For an FLR, we never get an MC reset event, but the MC has reset all 1419 * resources assigned to us, so we have to trigger reallocation now. 1420 */ 1421 if ((reset_type == RESET_TYPE_ALL || 1422 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) 1423 efx_ef10_table_reset_mc_allocations(efx); 1424 return rc; 1425 } 1426 1427 #define EF10_DMA_STAT(ext_name, mcdi_name) \ 1428 [EF10_STAT_ ## ext_name] = \ 1429 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1430 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ 1431 [EF10_STAT_ ## int_name] = \ 1432 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1433 #define EF10_OTHER_STAT(ext_name) \ 1434 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1435 1436 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { 1437 EF10_DMA_STAT(port_tx_bytes, TX_BYTES), 1438 EF10_DMA_STAT(port_tx_packets, TX_PKTS), 1439 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), 1440 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), 1441 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), 1442 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), 1443 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), 1444 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), 1445 EF10_DMA_STAT(port_tx_64, TX_64_PKTS), 1446 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), 1447 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), 1448 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), 1449 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), 1450 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), 1451 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), 1452 EF10_DMA_STAT(port_rx_bytes, RX_BYTES), 1453 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), 1454 EF10_OTHER_STAT(port_rx_good_bytes), 1455 EF10_OTHER_STAT(port_rx_bad_bytes), 1456 EF10_DMA_STAT(port_rx_packets, RX_PKTS), 1457 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), 1458 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), 1459 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), 1460 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), 1461 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), 1462 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), 1463 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), 1464 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), 1465 EF10_DMA_STAT(port_rx_64, RX_64_PKTS), 1466 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), 1467 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), 1468 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), 1469 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), 1470 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), 1471 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), 1472 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), 1473 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), 1474 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), 1475 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), 1476 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), 1477 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), 1478 EFX_GENERIC_SW_STAT(rx_nodesc_trunc), 1479 EFX_GENERIC_SW_STAT(rx_noskb_drops), 1480 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 1481 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 1482 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 1483 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 1484 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), 1485 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), 1486 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), 1487 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 1488 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 1489 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 1490 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), 1491 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), 1492 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), 1493 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), 1494 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), 1495 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), 1496 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), 1497 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), 1498 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), 1499 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), 1500 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), 1501 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), 1502 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), 1503 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), 1504 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), 1505 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), 1506 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), 1507 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), 1508 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), 1509 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), 1510 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), 1511 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), 1512 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), 1513 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1514 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1515 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1516 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1517 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1518 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), 1519 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), 1520 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), 1521 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), 1522 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), 1523 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), 1524 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), 1525 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), 1526 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), 1527 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), 1528 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), 1529 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), 1530 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), 1531 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), 1532 }; 1533 1534 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ 1535 (1ULL << EF10_STAT_port_tx_packets) | \ 1536 (1ULL << EF10_STAT_port_tx_pause) | \ 1537 (1ULL << EF10_STAT_port_tx_unicast) | \ 1538 (1ULL << EF10_STAT_port_tx_multicast) | \ 1539 (1ULL << EF10_STAT_port_tx_broadcast) | \ 1540 (1ULL << EF10_STAT_port_rx_bytes) | \ 1541 (1ULL << \ 1542 EF10_STAT_port_rx_bytes_minus_good_bytes) | \ 1543 (1ULL << EF10_STAT_port_rx_good_bytes) | \ 1544 (1ULL << EF10_STAT_port_rx_bad_bytes) | \ 1545 (1ULL << EF10_STAT_port_rx_packets) | \ 1546 (1ULL << EF10_STAT_port_rx_good) | \ 1547 (1ULL << EF10_STAT_port_rx_bad) | \ 1548 (1ULL << EF10_STAT_port_rx_pause) | \ 1549 (1ULL << EF10_STAT_port_rx_control) | \ 1550 (1ULL << EF10_STAT_port_rx_unicast) | \ 1551 (1ULL << EF10_STAT_port_rx_multicast) | \ 1552 (1ULL << EF10_STAT_port_rx_broadcast) | \ 1553 (1ULL << EF10_STAT_port_rx_lt64) | \ 1554 (1ULL << EF10_STAT_port_rx_64) | \ 1555 (1ULL << EF10_STAT_port_rx_65_to_127) | \ 1556 (1ULL << EF10_STAT_port_rx_128_to_255) | \ 1557 (1ULL << EF10_STAT_port_rx_256_to_511) | \ 1558 (1ULL << EF10_STAT_port_rx_512_to_1023) |\ 1559 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ 1560 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ 1561 (1ULL << EF10_STAT_port_rx_gtjumbo) | \ 1562 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ 1563 (1ULL << EF10_STAT_port_rx_overflow) | \ 1564 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ 1565 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ 1566 (1ULL << GENERIC_STAT_rx_noskb_drops)) 1567 1568 /* On 7000 series NICs, these statistics are only provided by the 10G MAC. 1569 * For a 10G/40G switchable port we do not expose these because they might 1570 * not include all the packets they should. 1571 * On 8000 series NICs these statistics are always provided. 1572 */ 1573 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ 1574 (1ULL << EF10_STAT_port_tx_lt64) | \ 1575 (1ULL << EF10_STAT_port_tx_64) | \ 1576 (1ULL << EF10_STAT_port_tx_65_to_127) |\ 1577 (1ULL << EF10_STAT_port_tx_128_to_255) |\ 1578 (1ULL << EF10_STAT_port_tx_256_to_511) |\ 1579 (1ULL << EF10_STAT_port_tx_512_to_1023) |\ 1580 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ 1581 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) 1582 1583 /* These statistics are only provided by the 40G MAC. For a 10G/40G 1584 * switchable port we do expose these because the errors will otherwise 1585 * be silent. 1586 */ 1587 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ 1588 (1ULL << EF10_STAT_port_rx_length_error)) 1589 1590 /* These statistics are only provided if the firmware supports the 1591 * capability PM_AND_RXDP_COUNTERS. 1592 */ 1593 #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 1594 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ 1595 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ 1596 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ 1597 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ 1598 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ 1599 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ 1600 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ 1601 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ 1602 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ 1603 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ 1604 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ 1605 (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) 1606 1607 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, 1608 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in 1609 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1610 * These bits are in the second u64 of the raw mask. 1611 */ 1612 #define EF10_FEC_STAT_MASK ( \ 1613 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ 1614 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ 1615 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ 1616 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ 1617 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ 1618 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) 1619 1620 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, 1621 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in 1622 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1623 * These bits are in the second u64 of the raw mask. 1624 */ 1625 #define EF10_CTPIO_STAT_MASK ( \ 1626 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1627 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1628 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ 1629 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ 1630 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ 1631 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ 1632 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ 1633 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ 1634 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ 1635 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ 1636 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ 1637 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ 1638 (1ULL << (EF10_STAT_ctpio_success - 64)) | \ 1639 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ 1640 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ 1641 (1ULL << (EF10_STAT_ctpio_erase - 64))) 1642 1643 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1644 { 1645 u64 raw_mask = HUNT_COMMON_STAT_MASK; 1646 u32 port_caps = efx_mcdi_phy_get_caps(efx); 1647 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1648 1649 if (!(efx->mcdi->fn_flags & 1650 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 1651 return 0; 1652 1653 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { 1654 raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 1655 /* 8000 series have everything even at 40G */ 1656 if (nic_data->datapath_caps2 & 1657 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) 1658 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1659 } else { 1660 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1661 } 1662 1663 if (nic_data->datapath_caps & 1664 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 1665 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 1666 1667 return raw_mask; 1668 } 1669 1670 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 1671 { 1672 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1673 u64 raw_mask[2]; 1674 1675 raw_mask[0] = efx_ef10_raw_stat_mask(efx); 1676 1677 /* Only show vadaptor stats when EVB capability is present */ 1678 if (nic_data->datapath_caps & 1679 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { 1680 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); 1681 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; 1682 } else { 1683 raw_mask[1] = 0; 1684 } 1685 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ 1686 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) 1687 raw_mask[1] |= EF10_FEC_STAT_MASK; 1688 1689 /* CTPIO stats appear in V3. Only show them on devices that actually 1690 * support CTPIO. Although this driver doesn't use CTPIO others might, 1691 * and we may be reporting the stats for the underlying port. 1692 */ 1693 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && 1694 (nic_data->datapath_caps2 & 1695 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) 1696 raw_mask[1] |= EF10_CTPIO_STAT_MASK; 1697 1698 #if BITS_PER_LONG == 64 1699 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); 1700 mask[0] = raw_mask[0]; 1701 mask[1] = raw_mask[1]; 1702 #else 1703 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 1704 mask[0] = raw_mask[0] & 0xffffffff; 1705 mask[1] = raw_mask[0] >> 32; 1706 mask[2] = raw_mask[1] & 0xffffffff; 1707 #endif 1708 } 1709 1710 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 1711 { 1712 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1713 1714 efx_ef10_get_stat_mask(efx, mask); 1715 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1716 mask, names); 1717 } 1718 1719 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, 1720 struct rtnl_link_stats64 *core_stats) 1721 { 1722 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1723 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1724 u64 *stats = nic_data->stats; 1725 size_t stats_count = 0, index; 1726 1727 efx_ef10_get_stat_mask(efx, mask); 1728 1729 if (full_stats) { 1730 for_each_set_bit(index, mask, EF10_STAT_COUNT) { 1731 if (efx_ef10_stat_desc[index].name) { 1732 *full_stats++ = stats[index]; 1733 ++stats_count; 1734 } 1735 } 1736 } 1737 1738 if (!core_stats) 1739 return stats_count; 1740 1741 if (nic_data->datapath_caps & 1742 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { 1743 /* Use vadaptor stats. */ 1744 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + 1745 stats[EF10_STAT_rx_multicast] + 1746 stats[EF10_STAT_rx_broadcast]; 1747 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + 1748 stats[EF10_STAT_tx_multicast] + 1749 stats[EF10_STAT_tx_broadcast]; 1750 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + 1751 stats[EF10_STAT_rx_multicast_bytes] + 1752 stats[EF10_STAT_rx_broadcast_bytes]; 1753 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + 1754 stats[EF10_STAT_tx_multicast_bytes] + 1755 stats[EF10_STAT_tx_broadcast_bytes]; 1756 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + 1757 stats[GENERIC_STAT_rx_noskb_drops]; 1758 core_stats->multicast = stats[EF10_STAT_rx_multicast]; 1759 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; 1760 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; 1761 core_stats->rx_errors = core_stats->rx_crc_errors; 1762 core_stats->tx_errors = stats[EF10_STAT_tx_bad]; 1763 } else { 1764 /* Use port stats. */ 1765 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; 1766 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; 1767 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; 1768 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; 1769 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + 1770 stats[GENERIC_STAT_rx_nodesc_trunc] + 1771 stats[GENERIC_STAT_rx_noskb_drops]; 1772 core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; 1773 core_stats->rx_length_errors = 1774 stats[EF10_STAT_port_rx_gtjumbo] + 1775 stats[EF10_STAT_port_rx_length_error]; 1776 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; 1777 core_stats->rx_frame_errors = 1778 stats[EF10_STAT_port_rx_align_error]; 1779 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; 1780 core_stats->rx_errors = (core_stats->rx_length_errors + 1781 core_stats->rx_crc_errors + 1782 core_stats->rx_frame_errors); 1783 } 1784 1785 return stats_count; 1786 } 1787 1788 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, 1789 struct rtnl_link_stats64 *core_stats) 1790 { 1791 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1792 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1793 u64 *stats = nic_data->stats; 1794 1795 efx_ef10_get_stat_mask(efx, mask); 1796 1797 efx_nic_copy_stats(efx, nic_data->mc_stats); 1798 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1799 mask, stats, nic_data->mc_stats, false); 1800 1801 /* Update derived statistics */ 1802 efx_nic_fix_nodesc_drop_stat(efx, 1803 &stats[EF10_STAT_port_rx_nodesc_drops]); 1804 /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. 1805 * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. 1806 * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. 1807 * Here we calculate port_rx_good_bytes. 1808 */ 1809 stats[EF10_STAT_port_rx_good_bytes] = 1810 stats[EF10_STAT_port_rx_bytes] - 1811 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; 1812 1813 /* The asynchronous reads used to calculate RX_BAD_BYTES in 1814 * MC Firmware are done such that we should not see an increase in 1815 * RX_BAD_BYTES when a good packet has arrived. Unfortunately this 1816 * does mean that the stat can decrease at times. Here we do not 1817 * update the stat unless it has increased or has gone to zero 1818 * (In the case of the NIC rebooting). 1819 * Please see Bug 33781 for a discussion of why things work this way. 1820 */ 1821 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], 1822 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); 1823 efx_update_sw_stats(efx, stats); 1824 1825 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1826 } 1827 1828 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) 1829 __must_hold(&efx->stats_lock) 1830 { 1831 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 1832 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1833 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1834 __le64 generation_start, generation_end; 1835 u64 *stats = nic_data->stats; 1836 u32 dma_len = efx->num_mac_stats * sizeof(u64); 1837 struct efx_buffer stats_buf; 1838 __le64 *dma_stats; 1839 int rc; 1840 1841 spin_unlock_bh(&efx->stats_lock); 1842 1843 if (in_interrupt()) { 1844 /* If in atomic context, cannot update stats. Just update the 1845 * software stats and return so the caller can continue. 1846 */ 1847 spin_lock_bh(&efx->stats_lock); 1848 efx_update_sw_stats(efx, stats); 1849 return 0; 1850 } 1851 1852 efx_ef10_get_stat_mask(efx, mask); 1853 1854 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); 1855 if (rc) { 1856 spin_lock_bh(&efx->stats_lock); 1857 return rc; 1858 } 1859 1860 dma_stats = stats_buf.addr; 1861 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; 1862 1863 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); 1864 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, 1865 MAC_STATS_IN_DMA, 1); 1866 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 1867 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); 1868 1869 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 1870 NULL, 0, NULL); 1871 spin_lock_bh(&efx->stats_lock); 1872 if (rc) { 1873 /* Expect ENOENT if DMA queues have not been set up */ 1874 if (rc != -ENOENT || atomic_read(&efx->active_queues)) 1875 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, 1876 sizeof(inbuf), NULL, 0, rc); 1877 goto out; 1878 } 1879 1880 generation_end = dma_stats[efx->num_mac_stats - 1]; 1881 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { 1882 WARN_ON_ONCE(1); 1883 goto out; 1884 } 1885 rmb(); 1886 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 1887 stats, stats_buf.addr, false); 1888 rmb(); 1889 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 1890 if (generation_end != generation_start) { 1891 rc = -EAGAIN; 1892 goto out; 1893 } 1894 1895 efx_update_sw_stats(efx, stats); 1896 out: 1897 efx_nic_free_buffer(efx, &stats_buf); 1898 return rc; 1899 } 1900 1901 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, 1902 struct rtnl_link_stats64 *core_stats) 1903 { 1904 if (efx_ef10_try_update_nic_stats_vf(efx)) 1905 return 0; 1906 1907 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1908 } 1909 1910 static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 1911 { 1912 struct efx_nic *efx = channel->efx; 1913 unsigned int mode, usecs; 1914 efx_dword_t timer_cmd; 1915 1916 if (channel->irq_moderation_us) { 1917 mode = 3; 1918 usecs = channel->irq_moderation_us; 1919 } else { 1920 mode = 0; 1921 usecs = 0; 1922 } 1923 1924 if (EFX_EF10_WORKAROUND_61265(efx)) { 1925 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); 1926 unsigned int ns = usecs * 1000; 1927 1928 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, 1929 channel->channel); 1930 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); 1931 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); 1932 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); 1933 1934 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, 1935 inbuf, sizeof(inbuf), 0, NULL, 0); 1936 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 1937 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 1938 1939 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, 1940 EFE_DD_EVQ_IND_TIMER_FLAGS, 1941 ERF_DD_EVQ_IND_TIMER_MODE, mode, 1942 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 1943 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, 1944 channel->channel); 1945 } else { 1946 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 1947 1948 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, 1949 ERF_DZ_TC_TIMER_VAL, ticks, 1950 ERF_FZ_TC_TMR_REL_VAL, ticks); 1951 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, 1952 channel->channel); 1953 } 1954 } 1955 1956 static void efx_ef10_get_wol_vf(struct efx_nic *efx, 1957 struct ethtool_wolinfo *wol) {} 1958 1959 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) 1960 { 1961 return -EOPNOTSUPP; 1962 } 1963 1964 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) 1965 { 1966 wol->supported = 0; 1967 wol->wolopts = 0; 1968 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1969 } 1970 1971 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) 1972 { 1973 if (type != 0) 1974 return -EINVAL; 1975 return 0; 1976 } 1977 1978 static void efx_ef10_mcdi_request(struct efx_nic *efx, 1979 const efx_dword_t *hdr, size_t hdr_len, 1980 const efx_dword_t *sdu, size_t sdu_len) 1981 { 1982 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1983 u8 *pdu = nic_data->mcdi_buf.addr; 1984 1985 memcpy(pdu, hdr, hdr_len); 1986 memcpy(pdu + hdr_len, sdu, sdu_len); 1987 wmb(); 1988 1989 /* The hardware provides 'low' and 'high' (doorbell) registers 1990 * for passing the 64-bit address of an MCDI request to 1991 * firmware. However the dwords are swapped by firmware. The 1992 * least significant bits of the doorbell are then 0 for all 1993 * MCDI requests due to alignment. 1994 */ 1995 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), 1996 ER_DZ_MC_DB_LWRD); 1997 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), 1998 ER_DZ_MC_DB_HWRD); 1999 } 2000 2001 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) 2002 { 2003 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2004 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; 2005 2006 rmb(); 2007 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); 2008 } 2009 2010 static void 2011 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, 2012 size_t offset, size_t outlen) 2013 { 2014 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2015 const u8 *pdu = nic_data->mcdi_buf.addr; 2016 2017 memcpy(outbuf, pdu + offset, outlen); 2018 } 2019 2020 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) 2021 { 2022 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2023 2024 /* All our allocations have been reset */ 2025 efx_ef10_table_reset_mc_allocations(efx); 2026 2027 /* The datapath firmware might have been changed */ 2028 nic_data->must_check_datapath_caps = true; 2029 2030 /* MAC statistics have been cleared on the NIC; clear the local 2031 * statistic that we update with efx_update_diff_stat(). 2032 */ 2033 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; 2034 } 2035 2036 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) 2037 { 2038 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2039 int rc; 2040 2041 rc = efx_ef10_get_warm_boot_count(efx); 2042 if (rc < 0) { 2043 /* The firmware is presumably in the process of 2044 * rebooting. However, we are supposed to report each 2045 * reboot just once, so we must only do that once we 2046 * can read and store the updated warm boot count. 2047 */ 2048 return 0; 2049 } 2050 2051 if (rc == nic_data->warm_boot_count) 2052 return 0; 2053 2054 nic_data->warm_boot_count = rc; 2055 efx_ef10_mcdi_reboot_detected(efx); 2056 2057 return -EIO; 2058 } 2059 2060 /* Handle an MSI interrupt 2061 * 2062 * Handle an MSI hardware interrupt. This routine schedules event 2063 * queue processing. No interrupt acknowledgement cycle is necessary. 2064 * Also, we never need to check that the interrupt is for us, since 2065 * MSI interrupts cannot be shared. 2066 */ 2067 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) 2068 { 2069 struct efx_msi_context *context = dev_id; 2070 struct efx_nic *efx = context->efx; 2071 2072 netif_vdbg(efx, intr, efx->net_dev, 2073 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); 2074 2075 if (likely(READ_ONCE(efx->irq_soft_enabled))) { 2076 /* Note test interrupts */ 2077 if (context->index == efx->irq_level) 2078 efx->last_irq_cpu = raw_smp_processor_id(); 2079 2080 /* Schedule processing of the channel */ 2081 efx_schedule_channel_irq(efx->channel[context->index]); 2082 } 2083 2084 return IRQ_HANDLED; 2085 } 2086 2087 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) 2088 { 2089 struct efx_nic *efx = dev_id; 2090 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 2091 struct efx_channel *channel; 2092 efx_dword_t reg; 2093 u32 queues; 2094 2095 /* Read the ISR which also ACKs the interrupts */ 2096 efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); 2097 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); 2098 2099 if (queues == 0) 2100 return IRQ_NONE; 2101 2102 if (likely(soft_enabled)) { 2103 /* Note test interrupts */ 2104 if (queues & (1U << efx->irq_level)) 2105 efx->last_irq_cpu = raw_smp_processor_id(); 2106 2107 efx_for_each_channel(channel, efx) { 2108 if (queues & 1) 2109 efx_schedule_channel_irq(channel); 2110 queues >>= 1; 2111 } 2112 } 2113 2114 netif_vdbg(efx, intr, efx->net_dev, 2115 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 2116 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 2117 2118 return IRQ_HANDLED; 2119 } 2120 2121 static int efx_ef10_irq_test_generate(struct efx_nic *efx) 2122 { 2123 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); 2124 2125 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, 2126 NULL) == 0) 2127 return -ENOTSUPP; 2128 2129 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); 2130 2131 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); 2132 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, 2133 inbuf, sizeof(inbuf), NULL, 0, NULL); 2134 } 2135 2136 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) 2137 { 2138 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2139 (tx_queue->ptr_mask + 1) * 2140 sizeof(efx_qword_t), 2141 GFP_KERNEL); 2142 } 2143 2144 /* This writes to the TX_DESC_WPTR and also pushes data */ 2145 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, 2146 const efx_qword_t *txd) 2147 { 2148 unsigned int write_ptr; 2149 efx_oword_t reg; 2150 2151 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2152 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); 2153 reg.qword[0] = *txd; 2154 efx_writeo_page(tx_queue->efx, ®, 2155 ER_DZ_TX_DESC_UPD, tx_queue->queue); 2156 } 2157 2158 /* Add Firmware-Assisted TSO v2 option descriptors to a queue. 2159 */ 2160 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, 2161 struct sk_buff *skb, 2162 bool *data_mapped) 2163 { 2164 struct efx_tx_buffer *buffer; 2165 struct tcphdr *tcp; 2166 struct iphdr *ip; 2167 2168 u16 ipv4_id; 2169 u32 seqnum; 2170 u32 mss; 2171 2172 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); 2173 2174 mss = skb_shinfo(skb)->gso_size; 2175 2176 if (unlikely(mss < 4)) { 2177 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); 2178 return -EINVAL; 2179 } 2180 2181 ip = ip_hdr(skb); 2182 if (ip->version == 4) { 2183 /* Modify IPv4 header if needed. */ 2184 ip->tot_len = 0; 2185 ip->check = 0; 2186 ipv4_id = ntohs(ip->id); 2187 } else { 2188 /* Modify IPv6 header if needed. */ 2189 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2190 2191 ipv6->payload_len = 0; 2192 ipv4_id = 0; 2193 } 2194 2195 tcp = tcp_hdr(skb); 2196 seqnum = ntohl(tcp->seq); 2197 2198 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2199 2200 buffer->flags = EFX_TX_BUF_OPTION; 2201 buffer->len = 0; 2202 buffer->unmap_len = 0; 2203 EFX_POPULATE_QWORD_5(buffer->option, 2204 ESF_DZ_TX_DESC_IS_OPT, 1, 2205 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2206 ESF_DZ_TX_TSO_OPTION_TYPE, 2207 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, 2208 ESF_DZ_TX_TSO_IP_ID, ipv4_id, 2209 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum 2210 ); 2211 ++tx_queue->insert_count; 2212 2213 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2214 2215 buffer->flags = EFX_TX_BUF_OPTION; 2216 buffer->len = 0; 2217 buffer->unmap_len = 0; 2218 EFX_POPULATE_QWORD_4(buffer->option, 2219 ESF_DZ_TX_DESC_IS_OPT, 1, 2220 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2221 ESF_DZ_TX_TSO_OPTION_TYPE, 2222 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, 2223 ESF_DZ_TX_TSO_TCP_MSS, mss 2224 ); 2225 ++tx_queue->insert_count; 2226 2227 return 0; 2228 } 2229 2230 static u32 efx_ef10_tso_versions(struct efx_nic *efx) 2231 { 2232 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2233 u32 tso_versions = 0; 2234 2235 if (nic_data->datapath_caps & 2236 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) 2237 tso_versions |= BIT(1); 2238 if (nic_data->datapath_caps2 & 2239 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) 2240 tso_versions |= BIT(2); 2241 return tso_versions; 2242 } 2243 2244 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) 2245 { 2246 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 2247 struct efx_channel *channel = tx_queue->channel; 2248 struct efx_nic *efx = tx_queue->efx; 2249 struct efx_ef10_nic_data *nic_data; 2250 bool tso_v2 = false; 2251 efx_qword_t *txd; 2252 int rc; 2253 2254 nic_data = efx->nic_data; 2255 2256 /* Only attempt to enable TX timestamping if we have the license for it, 2257 * otherwise TXQ init will fail 2258 */ 2259 if (!(nic_data->licensed_features & 2260 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { 2261 tx_queue->timestamping = false; 2262 /* Disable sync events on this channel. */ 2263 if (efx->type->ptp_set_ts_sync_events) 2264 efx->type->ptp_set_ts_sync_events(efx, false, false); 2265 } 2266 2267 /* TSOv2 is a limited resource that can only be configured on a limited 2268 * number of queues. TSO without checksum offload is not really a thing, 2269 * so we only enable it for those queues. 2270 * TSOv2 cannot be used with Hardware timestamping, and is never needed 2271 * for XDP tx. 2272 */ 2273 if (csum_offload && (nic_data->datapath_caps2 & 2274 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) && 2275 !tx_queue->timestamping && !tx_queue->xdp_tx) { 2276 tso_v2 = true; 2277 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", 2278 channel->channel); 2279 } 2280 2281 rc = efx_mcdi_tx_init(tx_queue, tso_v2); 2282 if (rc) 2283 goto fail; 2284 2285 /* A previous user of this TX queue might have set us up the 2286 * bomb by writing a descriptor to the TX push collector but 2287 * not the doorbell. (Each collector belongs to a port, not a 2288 * queue or function, so cannot easily be reset.) We must 2289 * attempt to push a no-op descriptor in its place. 2290 */ 2291 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; 2292 tx_queue->insert_count = 1; 2293 txd = efx_tx_desc(tx_queue, 0); 2294 EFX_POPULATE_QWORD_5(*txd, 2295 ESF_DZ_TX_DESC_IS_OPT, true, 2296 ESF_DZ_TX_OPTION_TYPE, 2297 ESE_DZ_TX_OPTION_DESC_CRC_CSUM, 2298 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, 2299 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload, 2300 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); 2301 tx_queue->write_count = 1; 2302 2303 if (tso_v2) { 2304 tx_queue->handle_tso = efx_ef10_tx_tso_desc; 2305 tx_queue->tso_version = 2; 2306 } else if (nic_data->datapath_caps & 2307 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { 2308 tx_queue->tso_version = 1; 2309 } 2310 2311 wmb(); 2312 efx_ef10_push_tx_desc(tx_queue, txd); 2313 2314 return; 2315 2316 fail: 2317 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", 2318 tx_queue->queue); 2319 } 2320 2321 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 2322 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) 2323 { 2324 unsigned int write_ptr; 2325 efx_dword_t reg; 2326 2327 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2328 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); 2329 efx_writed_page(tx_queue->efx, ®, 2330 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); 2331 } 2332 2333 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff 2334 2335 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, 2336 dma_addr_t dma_addr, unsigned int len) 2337 { 2338 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { 2339 /* If we need to break across multiple descriptors we should 2340 * stop at a page boundary. This assumes the length limit is 2341 * greater than the page size. 2342 */ 2343 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; 2344 2345 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); 2346 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; 2347 } 2348 2349 return len; 2350 } 2351 2352 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) 2353 { 2354 unsigned int old_write_count = tx_queue->write_count; 2355 struct efx_tx_buffer *buffer; 2356 unsigned int write_ptr; 2357 efx_qword_t *txd; 2358 2359 tx_queue->xmit_more_available = false; 2360 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) 2361 return; 2362 2363 do { 2364 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2365 buffer = &tx_queue->buffer[write_ptr]; 2366 txd = efx_tx_desc(tx_queue, write_ptr); 2367 ++tx_queue->write_count; 2368 2369 /* Create TX descriptor ring entry */ 2370 if (buffer->flags & EFX_TX_BUF_OPTION) { 2371 *txd = buffer->option; 2372 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) 2373 /* PIO descriptor */ 2374 tx_queue->packet_write_count = tx_queue->write_count; 2375 } else { 2376 tx_queue->packet_write_count = tx_queue->write_count; 2377 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 2378 EFX_POPULATE_QWORD_3( 2379 *txd, 2380 ESF_DZ_TX_KER_CONT, 2381 buffer->flags & EFX_TX_BUF_CONT, 2382 ESF_DZ_TX_KER_BYTE_CNT, buffer->len, 2383 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); 2384 } 2385 } while (tx_queue->write_count != tx_queue->insert_count); 2386 2387 wmb(); /* Ensure descriptors are written before they are fetched */ 2388 2389 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { 2390 txd = efx_tx_desc(tx_queue, 2391 old_write_count & tx_queue->ptr_mask); 2392 efx_ef10_push_tx_desc(tx_queue, txd); 2393 ++tx_queue->pushes; 2394 } else { 2395 efx_ef10_notify_tx_desc(tx_queue); 2396 } 2397 } 2398 2399 static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx) 2400 { 2401 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2402 unsigned int enabled, implemented; 2403 bool want_workaround_26807; 2404 int rc; 2405 2406 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 2407 if (rc == -ENOSYS) { 2408 /* GET_WORKAROUNDS was implemented before this workaround, 2409 * thus it must be unavailable in this firmware. 2410 */ 2411 nic_data->workaround_26807 = false; 2412 return 0; 2413 } 2414 if (rc) 2415 return rc; 2416 want_workaround_26807 = 2417 implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807; 2418 nic_data->workaround_26807 = 2419 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); 2420 2421 if (want_workaround_26807 && !nic_data->workaround_26807) { 2422 unsigned int flags; 2423 2424 rc = efx_mcdi_set_workaround(efx, 2425 MC_CMD_WORKAROUND_BUG26807, 2426 true, &flags); 2427 if (!rc) { 2428 if (flags & 2429 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { 2430 netif_info(efx, drv, efx->net_dev, 2431 "other functions on NIC have been reset\n"); 2432 2433 /* With MCFW v4.6.x and earlier, the 2434 * boot count will have incremented, 2435 * so re-read the warm_boot_count 2436 * value now to ensure this function 2437 * doesn't think it has changed next 2438 * time it checks. 2439 */ 2440 rc = efx_ef10_get_warm_boot_count(efx); 2441 if (rc >= 0) { 2442 nic_data->warm_boot_count = rc; 2443 rc = 0; 2444 } 2445 } 2446 nic_data->workaround_26807 = true; 2447 } else if (rc == -EPERM) { 2448 rc = 0; 2449 } 2450 } 2451 return rc; 2452 } 2453 2454 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 2455 { 2456 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2457 int rc = efx_ef10_probe_multicast_chaining(efx); 2458 struct efx_mcdi_filter_vlan *vlan; 2459 2460 if (rc) 2461 return rc; 2462 rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807); 2463 2464 if (rc) 2465 return rc; 2466 2467 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 2468 rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); 2469 if (rc) 2470 goto fail_add_vlan; 2471 } 2472 return 0; 2473 2474 fail_add_vlan: 2475 efx_mcdi_filter_table_remove(efx); 2476 return rc; 2477 } 2478 2479 /* This creates an entry in the RX descriptor queue */ 2480 static inline void 2481 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 2482 { 2483 struct efx_rx_buffer *rx_buf; 2484 efx_qword_t *rxd; 2485 2486 rxd = efx_rx_desc(rx_queue, index); 2487 rx_buf = efx_rx_buffer(rx_queue, index); 2488 EFX_POPULATE_QWORD_2(*rxd, 2489 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 2490 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 2491 } 2492 2493 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 2494 { 2495 struct efx_nic *efx = rx_queue->efx; 2496 unsigned int write_count; 2497 efx_dword_t reg; 2498 2499 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 2500 write_count = rx_queue->added_count & ~7; 2501 if (rx_queue->notified_count == write_count) 2502 return; 2503 2504 do 2505 efx_ef10_build_rx_desc( 2506 rx_queue, 2507 rx_queue->notified_count & rx_queue->ptr_mask); 2508 while (++rx_queue->notified_count != write_count); 2509 2510 wmb(); 2511 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 2512 write_count & rx_queue->ptr_mask); 2513 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 2514 efx_rx_queue_index(rx_queue)); 2515 } 2516 2517 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 2518 2519 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 2520 { 2521 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 2522 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 2523 efx_qword_t event; 2524 2525 EFX_POPULATE_QWORD_2(event, 2526 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 2527 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 2528 2529 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 2530 2531 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 2532 * already swapped the data to little-endian order. 2533 */ 2534 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 2535 sizeof(efx_qword_t)); 2536 2537 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 2538 inbuf, sizeof(inbuf), 0, 2539 efx_ef10_rx_defer_refill_complete, 0); 2540 } 2541 2542 static void 2543 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 2544 int rc, efx_dword_t *outbuf, 2545 size_t outlen_actual) 2546 { 2547 /* nothing to do */ 2548 } 2549 2550 static int efx_ef10_ev_init(struct efx_channel *channel) 2551 { 2552 struct efx_nic *efx = channel->efx; 2553 struct efx_ef10_nic_data *nic_data; 2554 bool use_v2, cut_thru; 2555 2556 nic_data = efx->nic_data; 2557 use_v2 = nic_data->datapath_caps2 & 2558 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN; 2559 cut_thru = !(nic_data->datapath_caps & 2560 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 2561 return efx_mcdi_ev_init(channel, cut_thru, use_v2); 2562 } 2563 2564 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 2565 unsigned int rx_queue_label) 2566 { 2567 struct efx_nic *efx = rx_queue->efx; 2568 2569 netif_info(efx, hw, efx->net_dev, 2570 "rx event arrived on queue %d labeled as queue %u\n", 2571 efx_rx_queue_index(rx_queue), rx_queue_label); 2572 2573 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2574 } 2575 2576 static void 2577 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 2578 unsigned int actual, unsigned int expected) 2579 { 2580 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 2581 struct efx_nic *efx = rx_queue->efx; 2582 2583 netif_info(efx, hw, efx->net_dev, 2584 "dropped %d events (index=%d expected=%d)\n", 2585 dropped, actual, expected); 2586 2587 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2588 } 2589 2590 /* partially received RX was aborted. clean up. */ 2591 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 2592 { 2593 unsigned int rx_desc_ptr; 2594 2595 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 2596 "scattered RX aborted (dropping %u buffers)\n", 2597 rx_queue->scatter_n); 2598 2599 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 2600 2601 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 2602 0, EFX_RX_PKT_DISCARD); 2603 2604 rx_queue->removed_count += rx_queue->scatter_n; 2605 rx_queue->scatter_n = 0; 2606 rx_queue->scatter_len = 0; 2607 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 2608 } 2609 2610 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 2611 unsigned int n_packets, 2612 unsigned int rx_encap_hdr, 2613 unsigned int rx_l3_class, 2614 unsigned int rx_l4_class, 2615 const efx_qword_t *event) 2616 { 2617 struct efx_nic *efx = channel->efx; 2618 bool handled = false; 2619 2620 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 2621 if (!(efx->net_dev->features & NETIF_F_RXALL)) { 2622 if (!efx->loopback_selftest) 2623 channel->n_rx_eth_crc_err += n_packets; 2624 return EFX_RX_PKT_DISCARD; 2625 } 2626 handled = true; 2627 } 2628 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 2629 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 2630 rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2631 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 2632 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 2633 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 2634 netdev_WARN(efx->net_dev, 2635 "invalid class for RX_IPCKSUM_ERR: event=" 2636 EFX_QWORD_FMT "\n", 2637 EFX_QWORD_VAL(*event)); 2638 if (!efx->loopback_selftest) 2639 *(rx_encap_hdr ? 2640 &channel->n_rx_outer_ip_hdr_chksum_err : 2641 &channel->n_rx_ip_hdr_chksum_err) += n_packets; 2642 return 0; 2643 } 2644 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 2645 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 2646 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2647 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 2648 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 2649 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) 2650 netdev_WARN(efx->net_dev, 2651 "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 2652 EFX_QWORD_FMT "\n", 2653 EFX_QWORD_VAL(*event)); 2654 if (!efx->loopback_selftest) 2655 *(rx_encap_hdr ? 2656 &channel->n_rx_outer_tcp_udp_chksum_err : 2657 &channel->n_rx_tcp_udp_chksum_err) += n_packets; 2658 return 0; 2659 } 2660 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 2661 if (unlikely(!rx_encap_hdr)) 2662 netdev_WARN(efx->net_dev, 2663 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 2664 EFX_QWORD_FMT "\n", 2665 EFX_QWORD_VAL(*event)); 2666 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2667 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 2668 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 2669 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 2670 netdev_WARN(efx->net_dev, 2671 "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 2672 EFX_QWORD_FMT "\n", 2673 EFX_QWORD_VAL(*event)); 2674 if (!efx->loopback_selftest) 2675 channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 2676 return 0; 2677 } 2678 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 2679 if (unlikely(!rx_encap_hdr)) 2680 netdev_WARN(efx->net_dev, 2681 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 2682 EFX_QWORD_FMT "\n", 2683 EFX_QWORD_VAL(*event)); 2684 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 2685 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 2686 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 2687 rx_l4_class != ESE_FZ_L4_CLASS_UDP))) 2688 netdev_WARN(efx->net_dev, 2689 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 2690 EFX_QWORD_FMT "\n", 2691 EFX_QWORD_VAL(*event)); 2692 if (!efx->loopback_selftest) 2693 channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 2694 return 0; 2695 } 2696 2697 WARN_ON(!handled); /* No error bits were recognised */ 2698 return 0; 2699 } 2700 2701 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 2702 const efx_qword_t *event) 2703 { 2704 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 2705 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; 2706 unsigned int n_descs, n_packets, i; 2707 struct efx_nic *efx = channel->efx; 2708 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2709 struct efx_rx_queue *rx_queue; 2710 efx_qword_t errors; 2711 bool rx_cont; 2712 u16 flags = 0; 2713 2714 if (unlikely(READ_ONCE(efx->reset_pending))) 2715 return 0; 2716 2717 /* Basic packet information */ 2718 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 2719 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 2720 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 2721 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 2722 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); 2723 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 2724 rx_encap_hdr = 2725 nic_data->datapath_caps & 2726 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 2727 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 2728 ESE_EZ_ENCAP_HDR_NONE; 2729 2730 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 2731 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 2732 EFX_QWORD_FMT "\n", 2733 EFX_QWORD_VAL(*event)); 2734 2735 rx_queue = efx_channel_get_rx_queue(channel); 2736 2737 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 2738 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 2739 2740 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 2741 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2742 2743 if (n_descs != rx_queue->scatter_n + 1) { 2744 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2745 2746 /* detect rx abort */ 2747 if (unlikely(n_descs == rx_queue->scatter_n)) { 2748 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 2749 netdev_WARN(efx->net_dev, 2750 "invalid RX abort: scatter_n=%u event=" 2751 EFX_QWORD_FMT "\n", 2752 rx_queue->scatter_n, 2753 EFX_QWORD_VAL(*event)); 2754 efx_ef10_handle_rx_abort(rx_queue); 2755 return 0; 2756 } 2757 2758 /* Check that RX completion merging is valid, i.e. 2759 * the current firmware supports it and this is a 2760 * non-scattered packet. 2761 */ 2762 if (!(nic_data->datapath_caps & 2763 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 2764 rx_queue->scatter_n != 0 || rx_cont) { 2765 efx_ef10_handle_rx_bad_lbits( 2766 rx_queue, next_ptr_lbits, 2767 (rx_queue->removed_count + 2768 rx_queue->scatter_n + 1) & 2769 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2770 return 0; 2771 } 2772 2773 /* Merged completion for multiple non-scattered packets */ 2774 rx_queue->scatter_n = 1; 2775 rx_queue->scatter_len = 0; 2776 n_packets = n_descs; 2777 ++channel->n_rx_merge_events; 2778 channel->n_rx_merge_packets += n_packets; 2779 flags |= EFX_RX_PKT_PREFIX_LEN; 2780 } else { 2781 ++rx_queue->scatter_n; 2782 rx_queue->scatter_len += rx_bytes; 2783 if (rx_cont) 2784 return 0; 2785 n_packets = 1; 2786 } 2787 2788 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 2789 ESF_DZ_RX_IPCKSUM_ERR, 1, 2790 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 2791 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 2792 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 2793 EFX_AND_QWORD(errors, *event, errors); 2794 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 2795 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, 2796 rx_encap_hdr, 2797 rx_l3_class, rx_l4_class, 2798 event); 2799 } else { 2800 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || 2801 rx_l4_class == ESE_FZ_L4_CLASS_UDP; 2802 2803 switch (rx_encap_hdr) { 2804 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ 2805 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ 2806 if (tcpudp) 2807 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ 2808 break; 2809 case ESE_EZ_ENCAP_HDR_GRE: 2810 case ESE_EZ_ENCAP_HDR_NONE: 2811 if (tcpudp) 2812 flags |= EFX_RX_PKT_CSUMMED; 2813 break; 2814 default: 2815 netdev_WARN(efx->net_dev, 2816 "unknown encapsulation type: event=" 2817 EFX_QWORD_FMT "\n", 2818 EFX_QWORD_VAL(*event)); 2819 } 2820 } 2821 2822 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) 2823 flags |= EFX_RX_PKT_TCP; 2824 2825 channel->irq_mod_score += 2 * n_packets; 2826 2827 /* Handle received packet(s) */ 2828 for (i = 0; i < n_packets; i++) { 2829 efx_rx_packet(rx_queue, 2830 rx_queue->removed_count & rx_queue->ptr_mask, 2831 rx_queue->scatter_n, rx_queue->scatter_len, 2832 flags); 2833 rx_queue->removed_count += rx_queue->scatter_n; 2834 } 2835 2836 rx_queue->scatter_n = 0; 2837 rx_queue->scatter_len = 0; 2838 2839 return n_packets; 2840 } 2841 2842 static u32 efx_ef10_extract_event_ts(efx_qword_t *event) 2843 { 2844 u32 tstamp; 2845 2846 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); 2847 tstamp <<= 16; 2848 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); 2849 2850 return tstamp; 2851 } 2852 2853 static void 2854 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 2855 { 2856 struct efx_nic *efx = channel->efx; 2857 struct efx_tx_queue *tx_queue; 2858 unsigned int tx_ev_desc_ptr; 2859 unsigned int tx_ev_q_label; 2860 unsigned int tx_ev_type; 2861 u64 ts_part; 2862 2863 if (unlikely(READ_ONCE(efx->reset_pending))) 2864 return; 2865 2866 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 2867 return; 2868 2869 /* Get the transmit queue */ 2870 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 2871 tx_queue = efx_channel_get_tx_queue(channel, 2872 tx_ev_q_label % EFX_TXQ_TYPES); 2873 2874 if (!tx_queue->timestamping) { 2875 /* Transmit completion */ 2876 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 2877 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 2878 return; 2879 } 2880 2881 /* Transmit timestamps are only available for 8XXX series. They result 2882 * in up to three events per packet. These occur in order, and are: 2883 * - the normal completion event (may be omitted) 2884 * - the low part of the timestamp 2885 * - the high part of the timestamp 2886 * 2887 * It's possible for multiple completion events to appear before the 2888 * corresponding timestamps. So we can for example get: 2889 * COMP N 2890 * COMP N+1 2891 * TS_LO N 2892 * TS_HI N 2893 * TS_LO N+1 2894 * TS_HI N+1 2895 * 2896 * In addition it's also possible for the adjacent completions to be 2897 * merged, so we may not see COMP N above. As such, the completion 2898 * events are not very useful here. 2899 * 2900 * Each part of the timestamp is itself split across two 16 bit 2901 * fields in the event. 2902 */ 2903 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); 2904 2905 switch (tx_ev_type) { 2906 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: 2907 /* Ignore this event - see above. */ 2908 break; 2909 2910 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: 2911 ts_part = efx_ef10_extract_event_ts(event); 2912 tx_queue->completed_timestamp_minor = ts_part; 2913 break; 2914 2915 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: 2916 ts_part = efx_ef10_extract_event_ts(event); 2917 tx_queue->completed_timestamp_major = ts_part; 2918 2919 efx_xmit_done_single(tx_queue); 2920 break; 2921 2922 default: 2923 netif_err(efx, hw, efx->net_dev, 2924 "channel %d unknown tx event type %d (data " 2925 EFX_QWORD_FMT ")\n", 2926 channel->channel, tx_ev_type, 2927 EFX_QWORD_VAL(*event)); 2928 break; 2929 } 2930 } 2931 2932 static void 2933 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 2934 { 2935 struct efx_nic *efx = channel->efx; 2936 int subcode; 2937 2938 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 2939 2940 switch (subcode) { 2941 case ESE_DZ_DRV_TIMER_EV: 2942 case ESE_DZ_DRV_WAKE_UP_EV: 2943 break; 2944 case ESE_DZ_DRV_START_UP_EV: 2945 /* event queue init complete. ok. */ 2946 break; 2947 default: 2948 netif_err(efx, hw, efx->net_dev, 2949 "channel %d unknown driver event type %d" 2950 " (data " EFX_QWORD_FMT ")\n", 2951 channel->channel, subcode, 2952 EFX_QWORD_VAL(*event)); 2953 2954 } 2955 } 2956 2957 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 2958 efx_qword_t *event) 2959 { 2960 struct efx_nic *efx = channel->efx; 2961 u32 subcode; 2962 2963 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 2964 2965 switch (subcode) { 2966 case EFX_EF10_TEST: 2967 channel->event_test_cpu = raw_smp_processor_id(); 2968 break; 2969 case EFX_EF10_REFILL: 2970 /* The queue must be empty, so we won't receive any rx 2971 * events, so efx_process_channel() won't refill the 2972 * queue. Refill it here 2973 */ 2974 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 2975 break; 2976 default: 2977 netif_err(efx, hw, efx->net_dev, 2978 "channel %d unknown driver event type %u" 2979 " (data " EFX_QWORD_FMT ")\n", 2980 channel->channel, (unsigned) subcode, 2981 EFX_QWORD_VAL(*event)); 2982 } 2983 } 2984 2985 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 2986 { 2987 struct efx_nic *efx = channel->efx; 2988 efx_qword_t event, *p_event; 2989 unsigned int read_ptr; 2990 int ev_code; 2991 int spent = 0; 2992 2993 if (quota <= 0) 2994 return spent; 2995 2996 read_ptr = channel->eventq_read_ptr; 2997 2998 for (;;) { 2999 p_event = efx_event(channel, read_ptr); 3000 event = *p_event; 3001 3002 if (!efx_event_present(&event)) 3003 break; 3004 3005 EFX_SET_QWORD(*p_event); 3006 3007 ++read_ptr; 3008 3009 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 3010 3011 netif_vdbg(efx, drv, efx->net_dev, 3012 "processing event on %d " EFX_QWORD_FMT "\n", 3013 channel->channel, EFX_QWORD_VAL(event)); 3014 3015 switch (ev_code) { 3016 case ESE_DZ_EV_CODE_MCDI_EV: 3017 efx_mcdi_process_event(channel, &event); 3018 break; 3019 case ESE_DZ_EV_CODE_RX_EV: 3020 spent += efx_ef10_handle_rx_event(channel, &event); 3021 if (spent >= quota) { 3022 /* XXX can we split a merged event to 3023 * avoid going over-quota? 3024 */ 3025 spent = quota; 3026 goto out; 3027 } 3028 break; 3029 case ESE_DZ_EV_CODE_TX_EV: 3030 efx_ef10_handle_tx_event(channel, &event); 3031 break; 3032 case ESE_DZ_EV_CODE_DRIVER_EV: 3033 efx_ef10_handle_driver_event(channel, &event); 3034 if (++spent == quota) 3035 goto out; 3036 break; 3037 case EFX_EF10_DRVGEN_EV: 3038 efx_ef10_handle_driver_generated_event(channel, &event); 3039 break; 3040 default: 3041 netif_err(efx, hw, efx->net_dev, 3042 "channel %d unknown event type %d" 3043 " (data " EFX_QWORD_FMT ")\n", 3044 channel->channel, ev_code, 3045 EFX_QWORD_VAL(event)); 3046 } 3047 } 3048 3049 out: 3050 channel->eventq_read_ptr = read_ptr; 3051 return spent; 3052 } 3053 3054 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 3055 { 3056 struct efx_nic *efx = channel->efx; 3057 efx_dword_t rptr; 3058 3059 if (EFX_EF10_WORKAROUND_35388(efx)) { 3060 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 3061 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 3062 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 3063 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 3064 3065 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3066 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 3067 ERF_DD_EVQ_IND_RPTR, 3068 (channel->eventq_read_ptr & 3069 channel->eventq_mask) >> 3070 ERF_DD_EVQ_IND_RPTR_WIDTH); 3071 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3072 channel->channel); 3073 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3074 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 3075 ERF_DD_EVQ_IND_RPTR, 3076 channel->eventq_read_ptr & 3077 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 3078 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3079 channel->channel); 3080 } else { 3081 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 3082 channel->eventq_read_ptr & 3083 channel->eventq_mask); 3084 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 3085 } 3086 } 3087 3088 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 3089 { 3090 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3091 struct efx_nic *efx = channel->efx; 3092 efx_qword_t event; 3093 int rc; 3094 3095 EFX_POPULATE_QWORD_2(event, 3096 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3097 ESF_DZ_EV_DATA, EFX_EF10_TEST); 3098 3099 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3100 3101 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3102 * already swapped the data to little-endian order. 3103 */ 3104 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3105 sizeof(efx_qword_t)); 3106 3107 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 3108 NULL, 0, NULL); 3109 if (rc != 0) 3110 goto fail; 3111 3112 return; 3113 3114 fail: 3115 WARN_ON(true); 3116 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 3117 } 3118 3119 static int efx_ef10_fini_dmaq(struct efx_nic *efx) 3120 { 3121 struct efx_tx_queue *tx_queue; 3122 struct efx_rx_queue *rx_queue; 3123 struct efx_channel *channel; 3124 int pending; 3125 3126 /* If the MC has just rebooted, the TX/RX queues will have already been 3127 * torn down, but efx->active_queues needs to be set to zero. 3128 */ 3129 if (efx->must_realloc_vis) { 3130 atomic_set(&efx->active_queues, 0); 3131 return 0; 3132 } 3133 3134 /* Do not attempt to write to the NIC during EEH recovery */ 3135 if (efx->state != STATE_RECOVERY) { 3136 efx_for_each_channel(channel, efx) { 3137 efx_for_each_channel_rx_queue(rx_queue, channel) 3138 efx_mcdi_rx_fini(rx_queue); 3139 efx_for_each_channel_tx_queue(tx_queue, channel) 3140 efx_mcdi_tx_fini(tx_queue); 3141 } 3142 3143 wait_event_timeout(efx->flush_wq, 3144 atomic_read(&efx->active_queues) == 0, 3145 msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); 3146 pending = atomic_read(&efx->active_queues); 3147 if (pending) { 3148 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", 3149 pending); 3150 return -ETIMEDOUT; 3151 } 3152 } 3153 3154 return 0; 3155 } 3156 3157 static void efx_ef10_prepare_flr(struct efx_nic *efx) 3158 { 3159 atomic_set(&efx->active_queues, 0); 3160 } 3161 3162 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 3163 { 3164 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3165 u8 mac_old[ETH_ALEN]; 3166 int rc, rc2; 3167 3168 /* Only reconfigure a PF-created vport */ 3169 if (is_zero_ether_addr(nic_data->vport_mac)) 3170 return 0; 3171 3172 efx_device_detach_sync(efx); 3173 efx_net_stop(efx->net_dev); 3174 down_write(&efx->filter_sem); 3175 efx_mcdi_filter_table_remove(efx); 3176 up_write(&efx->filter_sem); 3177 3178 rc = efx_ef10_vadaptor_free(efx, efx->vport_id); 3179 if (rc) 3180 goto restore_filters; 3181 3182 ether_addr_copy(mac_old, nic_data->vport_mac); 3183 rc = efx_ef10_vport_del_mac(efx, efx->vport_id, 3184 nic_data->vport_mac); 3185 if (rc) 3186 goto restore_vadaptor; 3187 3188 rc = efx_ef10_vport_add_mac(efx, efx->vport_id, 3189 efx->net_dev->dev_addr); 3190 if (!rc) { 3191 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 3192 } else { 3193 rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old); 3194 if (rc2) { 3195 /* Failed to add original MAC, so clear vport_mac */ 3196 eth_zero_addr(nic_data->vport_mac); 3197 goto reset_nic; 3198 } 3199 } 3200 3201 restore_vadaptor: 3202 rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id); 3203 if (rc2) 3204 goto reset_nic; 3205 restore_filters: 3206 down_write(&efx->filter_sem); 3207 rc2 = efx_ef10_filter_table_probe(efx); 3208 up_write(&efx->filter_sem); 3209 if (rc2) 3210 goto reset_nic; 3211 3212 rc2 = efx_net_open(efx->net_dev); 3213 if (rc2) 3214 goto reset_nic; 3215 3216 efx_device_attach_if_not_resetting(efx); 3217 3218 return rc; 3219 3220 reset_nic: 3221 netif_err(efx, drv, efx->net_dev, 3222 "Failed to restore when changing MAC address - scheduling reset\n"); 3223 efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 3224 3225 return rc ? rc : rc2; 3226 } 3227 3228 static int efx_ef10_set_mac_address(struct efx_nic *efx) 3229 { 3230 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 3231 bool was_enabled = efx->port_enabled; 3232 int rc; 3233 3234 efx_device_detach_sync(efx); 3235 efx_net_stop(efx->net_dev); 3236 3237 mutex_lock(&efx->mac_lock); 3238 down_write(&efx->filter_sem); 3239 efx_mcdi_filter_table_remove(efx); 3240 3241 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 3242 efx->net_dev->dev_addr); 3243 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3244 efx->vport_id); 3245 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3246 sizeof(inbuf), NULL, 0, NULL); 3247 3248 efx_ef10_filter_table_probe(efx); 3249 up_write(&efx->filter_sem); 3250 mutex_unlock(&efx->mac_lock); 3251 3252 if (was_enabled) 3253 efx_net_open(efx->net_dev); 3254 efx_device_attach_if_not_resetting(efx); 3255 3256 #ifdef CONFIG_SFC_SRIOV 3257 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 3258 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3259 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3260 3261 if (rc == -EPERM) { 3262 struct efx_nic *efx_pf; 3263 3264 /* Switch to PF and change MAC address on vport */ 3265 efx_pf = pci_get_drvdata(pci_dev_pf); 3266 3267 rc = efx_ef10_sriov_set_vf_mac(efx_pf, 3268 nic_data->vf_index, 3269 efx->net_dev->dev_addr); 3270 } else if (!rc) { 3271 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3272 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 3273 unsigned int i; 3274 3275 /* MAC address successfully changed by VF (with MAC 3276 * spoofing) so update the parent PF if possible. 3277 */ 3278 for (i = 0; i < efx_pf->vf_count; ++i) { 3279 struct ef10_vf *vf = nic_data->vf + i; 3280 3281 if (vf->efx == efx) { 3282 ether_addr_copy(vf->mac, 3283 efx->net_dev->dev_addr); 3284 return 0; 3285 } 3286 } 3287 } 3288 } else 3289 #endif 3290 if (rc == -EPERM) { 3291 netif_err(efx, drv, efx->net_dev, 3292 "Cannot change MAC address; use sfboot to enable" 3293 " mac-spoofing on this interface\n"); 3294 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 3295 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 3296 * fall-back to the method of changing the MAC address on the 3297 * vport. This only applies to PFs because such versions of 3298 * MCFW do not support VFs. 3299 */ 3300 rc = efx_ef10_vport_set_mac_address(efx); 3301 } else if (rc) { 3302 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 3303 sizeof(inbuf), NULL, 0, rc); 3304 } 3305 3306 return rc; 3307 } 3308 3309 static int efx_ef10_mac_reconfigure(struct efx_nic *efx) 3310 { 3311 efx_mcdi_filter_sync_rx_mode(efx); 3312 3313 return efx_mcdi_set_mac(efx); 3314 } 3315 3316 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) 3317 { 3318 efx_mcdi_filter_sync_rx_mode(efx); 3319 3320 return 0; 3321 } 3322 3323 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 3324 { 3325 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 3326 3327 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 3328 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 3329 NULL, 0, NULL); 3330 } 3331 3332 /* MC BISTs follow a different poll mechanism to phy BISTs. 3333 * The BIST is done in the poll handler on the MC, and the MCDI command 3334 * will block until the BIST is done. 3335 */ 3336 static int efx_ef10_poll_bist(struct efx_nic *efx) 3337 { 3338 int rc; 3339 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 3340 size_t outlen; 3341 u32 result; 3342 3343 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 3344 outbuf, sizeof(outbuf), &outlen); 3345 if (rc != 0) 3346 return rc; 3347 3348 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 3349 return -EIO; 3350 3351 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 3352 switch (result) { 3353 case MC_CMD_POLL_BIST_PASSED: 3354 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 3355 return 0; 3356 case MC_CMD_POLL_BIST_TIMEOUT: 3357 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 3358 return -EIO; 3359 case MC_CMD_POLL_BIST_FAILED: 3360 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 3361 return -EIO; 3362 default: 3363 netif_err(efx, hw, efx->net_dev, 3364 "BIST returned unknown result %u", result); 3365 return -EIO; 3366 } 3367 } 3368 3369 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 3370 { 3371 int rc; 3372 3373 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 3374 3375 rc = efx_ef10_start_bist(efx, bist_type); 3376 if (rc != 0) 3377 return rc; 3378 3379 return efx_ef10_poll_bist(efx); 3380 } 3381 3382 static int 3383 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 3384 { 3385 int rc, rc2; 3386 3387 efx_reset_down(efx, RESET_TYPE_WORLD); 3388 3389 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 3390 NULL, 0, NULL, 0, NULL); 3391 if (rc != 0) 3392 goto out; 3393 3394 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 3395 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 3396 3397 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 3398 3399 out: 3400 if (rc == -EPERM) 3401 rc = 0; 3402 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 3403 return rc ? rc : rc2; 3404 } 3405 3406 #ifdef CONFIG_SFC_MTD 3407 3408 struct efx_ef10_nvram_type_info { 3409 u16 type, type_mask; 3410 u8 port; 3411 const char *name; 3412 }; 3413 3414 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 3415 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 3416 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 3417 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 3418 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 3419 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 3420 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 3421 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 3422 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 3423 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 3424 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 3425 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 3426 { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, 3427 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, 3428 { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, 3429 { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, 3430 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, 3431 { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, 3432 { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, 3433 }; 3434 #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) 3435 3436 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 3437 struct efx_mcdi_mtd_partition *part, 3438 unsigned int type, 3439 unsigned long *found) 3440 { 3441 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 3442 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 3443 const struct efx_ef10_nvram_type_info *info; 3444 size_t size, erase_size, outlen; 3445 int type_idx = 0; 3446 bool protected; 3447 int rc; 3448 3449 for (type_idx = 0; ; type_idx++) { 3450 if (type_idx == EF10_NVRAM_PARTITION_COUNT) 3451 return -ENODEV; 3452 info = efx_ef10_nvram_types + type_idx; 3453 if ((type & ~info->type_mask) == info->type) 3454 break; 3455 } 3456 if (info->port != efx_port_num(efx)) 3457 return -ENODEV; 3458 3459 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 3460 if (rc) 3461 return rc; 3462 if (protected && 3463 (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && 3464 type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) 3465 /* Hide protected partitions that don't provide defaults. */ 3466 return -ENODEV; 3467 3468 if (protected) 3469 /* Protected partitions are read only. */ 3470 erase_size = 0; 3471 3472 /* If we've already exposed a partition of this type, hide this 3473 * duplicate. All operations on MTDs are keyed by the type anyway, 3474 * so we can't act on the duplicate. 3475 */ 3476 if (__test_and_set_bit(type_idx, found)) 3477 return -EEXIST; 3478 3479 part->nvram_type = type; 3480 3481 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 3482 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 3483 outbuf, sizeof(outbuf), &outlen); 3484 if (rc) 3485 return rc; 3486 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 3487 return -EIO; 3488 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 3489 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 3490 part->fw_subtype = MCDI_DWORD(outbuf, 3491 NVRAM_METADATA_OUT_SUBTYPE); 3492 3493 part->common.dev_type_name = "EF10 NVRAM manager"; 3494 part->common.type_name = info->name; 3495 3496 part->common.mtd.type = MTD_NORFLASH; 3497 part->common.mtd.flags = MTD_CAP_NORFLASH; 3498 part->common.mtd.size = size; 3499 part->common.mtd.erasesize = erase_size; 3500 /* sfc_status is read-only */ 3501 if (!erase_size) 3502 part->common.mtd.flags |= MTD_NO_ERASE; 3503 3504 return 0; 3505 } 3506 3507 static int efx_ef10_mtd_probe(struct efx_nic *efx) 3508 { 3509 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 3510 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; 3511 struct efx_mcdi_mtd_partition *parts; 3512 size_t outlen, n_parts_total, i, n_parts; 3513 unsigned int type; 3514 int rc; 3515 3516 ASSERT_RTNL(); 3517 3518 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 3519 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 3520 outbuf, sizeof(outbuf), &outlen); 3521 if (rc) 3522 return rc; 3523 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 3524 return -EIO; 3525 3526 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 3527 if (n_parts_total > 3528 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 3529 return -EIO; 3530 3531 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 3532 if (!parts) 3533 return -ENOMEM; 3534 3535 n_parts = 0; 3536 for (i = 0; i < n_parts_total; i++) { 3537 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 3538 i); 3539 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, 3540 found); 3541 if (rc == -EEXIST || rc == -ENODEV) 3542 continue; 3543 if (rc) 3544 goto fail; 3545 n_parts++; 3546 } 3547 3548 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 3549 fail: 3550 if (rc) 3551 kfree(parts); 3552 return rc; 3553 } 3554 3555 #endif /* CONFIG_SFC_MTD */ 3556 3557 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 3558 { 3559 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 3560 } 3561 3562 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 3563 u32 host_time) {} 3564 3565 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 3566 bool temp) 3567 { 3568 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 3569 int rc; 3570 3571 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 3572 channel->sync_events_state == SYNC_EVENTS_VALID || 3573 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 3574 return 0; 3575 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 3576 3577 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 3578 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 3579 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 3580 channel->channel); 3581 3582 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 3583 inbuf, sizeof(inbuf), NULL, 0, NULL); 3584 3585 if (rc != 0) 3586 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 3587 SYNC_EVENTS_DISABLED; 3588 3589 return rc; 3590 } 3591 3592 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 3593 bool temp) 3594 { 3595 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 3596 int rc; 3597 3598 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 3599 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 3600 return 0; 3601 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 3602 channel->sync_events_state = SYNC_EVENTS_DISABLED; 3603 return 0; 3604 } 3605 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 3606 SYNC_EVENTS_DISABLED; 3607 3608 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 3609 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 3610 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 3611 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 3612 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 3613 channel->channel); 3614 3615 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 3616 inbuf, sizeof(inbuf), NULL, 0, NULL); 3617 3618 return rc; 3619 } 3620 3621 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 3622 bool temp) 3623 { 3624 int (*set)(struct efx_channel *channel, bool temp); 3625 struct efx_channel *channel; 3626 3627 set = en ? 3628 efx_ef10_rx_enable_timestamping : 3629 efx_ef10_rx_disable_timestamping; 3630 3631 channel = efx_ptp_channel(efx); 3632 if (channel) { 3633 int rc = set(channel, temp); 3634 if (en && rc != 0) { 3635 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 3636 return rc; 3637 } 3638 } 3639 3640 return 0; 3641 } 3642 3643 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 3644 struct hwtstamp_config *init) 3645 { 3646 return -EOPNOTSUPP; 3647 } 3648 3649 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 3650 struct hwtstamp_config *init) 3651 { 3652 int rc; 3653 3654 switch (init->rx_filter) { 3655 case HWTSTAMP_FILTER_NONE: 3656 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 3657 /* if TX timestamping is still requested then leave PTP on */ 3658 return efx_ptp_change_mode(efx, 3659 init->tx_type != HWTSTAMP_TX_OFF, 0); 3660 case HWTSTAMP_FILTER_ALL: 3661 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 3662 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 3663 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 3664 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 3665 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 3666 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 3667 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 3668 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 3669 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 3670 case HWTSTAMP_FILTER_PTP_V2_EVENT: 3671 case HWTSTAMP_FILTER_PTP_V2_SYNC: 3672 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 3673 case HWTSTAMP_FILTER_NTP_ALL: 3674 init->rx_filter = HWTSTAMP_FILTER_ALL; 3675 rc = efx_ptp_change_mode(efx, true, 0); 3676 if (!rc) 3677 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 3678 if (rc) 3679 efx_ptp_change_mode(efx, false, 0); 3680 return rc; 3681 default: 3682 return -ERANGE; 3683 } 3684 } 3685 3686 static int efx_ef10_get_phys_port_id(struct efx_nic *efx, 3687 struct netdev_phys_item_id *ppid) 3688 { 3689 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3690 3691 if (!is_valid_ether_addr(nic_data->port_id)) 3692 return -EOPNOTSUPP; 3693 3694 ppid->id_len = ETH_ALEN; 3695 memcpy(ppid->id, nic_data->port_id, ppid->id_len); 3696 3697 return 0; 3698 } 3699 3700 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) 3701 { 3702 if (proto != htons(ETH_P_8021Q)) 3703 return -EINVAL; 3704 3705 return efx_ef10_add_vlan(efx, vid); 3706 } 3707 3708 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) 3709 { 3710 if (proto != htons(ETH_P_8021Q)) 3711 return -EINVAL; 3712 3713 return efx_ef10_del_vlan(efx, vid); 3714 } 3715 3716 /* We rely on the MCDI wiping out our TX rings if it made any changes to the 3717 * ports table, ensuring that any TSO descriptors that were made on a now- 3718 * removed tunnel port will be blown away and won't break things when we try 3719 * to transmit them using the new ports table. 3720 */ 3721 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) 3722 { 3723 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3724 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); 3725 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); 3726 bool will_reset = false; 3727 size_t num_entries = 0; 3728 size_t inlen, outlen; 3729 size_t i; 3730 int rc; 3731 efx_dword_t flags_and_num_entries; 3732 3733 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); 3734 3735 nic_data->udp_tunnels_dirty = false; 3736 3737 if (!(nic_data->datapath_caps & 3738 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { 3739 efx_device_attach_if_not_resetting(efx); 3740 return 0; 3741 } 3742 3743 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > 3744 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 3745 3746 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 3747 if (nic_data->udp_tunnels[i].count && 3748 nic_data->udp_tunnels[i].port) { 3749 efx_dword_t entry; 3750 3751 EFX_POPULATE_DWORD_2(entry, 3752 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, 3753 ntohs(nic_data->udp_tunnels[i].port), 3754 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, 3755 nic_data->udp_tunnels[i].type); 3756 *_MCDI_ARRAY_DWORD(inbuf, 3757 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, 3758 num_entries++) = entry; 3759 } 3760 } 3761 3762 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - 3763 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != 3764 EFX_WORD_1_LBN); 3765 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != 3766 EFX_WORD_1_WIDTH); 3767 EFX_POPULATE_DWORD_2(flags_and_num_entries, 3768 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 3769 !!unloading, 3770 EFX_WORD_1, num_entries); 3771 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = 3772 flags_and_num_entries; 3773 3774 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); 3775 3776 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, 3777 inbuf, inlen, outbuf, sizeof(outbuf), &outlen); 3778 if (rc == -EIO) { 3779 /* Most likely the MC rebooted due to another function also 3780 * setting its tunnel port list. Mark the tunnel port list as 3781 * dirty, so it will be pushed upon coming up from the reboot. 3782 */ 3783 nic_data->udp_tunnels_dirty = true; 3784 return 0; 3785 } 3786 3787 if (rc) { 3788 /* expected not available on unprivileged functions */ 3789 if (rc != -EPERM) 3790 netif_warn(efx, drv, efx->net_dev, 3791 "Unable to set UDP tunnel ports; rc=%d.\n", rc); 3792 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & 3793 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { 3794 netif_info(efx, drv, efx->net_dev, 3795 "Rebooting MC due to UDP tunnel port list change\n"); 3796 will_reset = true; 3797 if (unloading) 3798 /* Delay for the MC reset to complete. This will make 3799 * unloading other functions a bit smoother. This is a 3800 * race, but the other unload will work whichever way 3801 * it goes, this just avoids an unnecessary error 3802 * message. 3803 */ 3804 msleep(100); 3805 } 3806 if (!will_reset && !unloading) { 3807 /* The caller will have detached, relying on the MC reset to 3808 * trigger a re-attach. Since there won't be an MC reset, we 3809 * have to do the attach ourselves. 3810 */ 3811 efx_device_attach_if_not_resetting(efx); 3812 } 3813 3814 return rc; 3815 } 3816 3817 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) 3818 { 3819 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3820 int rc = 0; 3821 3822 mutex_lock(&nic_data->udp_tunnels_lock); 3823 if (nic_data->udp_tunnels_dirty) { 3824 /* Make sure all TX are stopped while we modify the table, else 3825 * we might race against an efx_features_check(). 3826 */ 3827 efx_device_detach_sync(efx); 3828 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3829 } 3830 mutex_unlock(&nic_data->udp_tunnels_lock); 3831 return rc; 3832 } 3833 3834 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, 3835 __be16 port) 3836 { 3837 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3838 size_t i; 3839 3840 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 3841 if (!nic_data->udp_tunnels[i].count) 3842 continue; 3843 if (nic_data->udp_tunnels[i].port == port) 3844 return &nic_data->udp_tunnels[i]; 3845 } 3846 return NULL; 3847 } 3848 3849 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, 3850 struct efx_udp_tunnel tnl) 3851 { 3852 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3853 struct efx_udp_tunnel *match; 3854 char typebuf[8]; 3855 size_t i; 3856 int rc; 3857 3858 if (!(nic_data->datapath_caps & 3859 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3860 return 0; 3861 3862 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 3863 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", 3864 typebuf, ntohs(tnl.port)); 3865 3866 mutex_lock(&nic_data->udp_tunnels_lock); 3867 /* Make sure all TX are stopped while we add to the table, else we 3868 * might race against an efx_features_check(). 3869 */ 3870 efx_device_detach_sync(efx); 3871 3872 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 3873 if (match != NULL) { 3874 if (match->type == tnl.type) { 3875 netif_dbg(efx, drv, efx->net_dev, 3876 "Referencing existing tunnel entry\n"); 3877 match->count++; 3878 /* No need to cause an MCDI update */ 3879 rc = 0; 3880 goto unlock_out; 3881 } 3882 efx_get_udp_tunnel_type_name(match->type, 3883 typebuf, sizeof(typebuf)); 3884 netif_dbg(efx, drv, efx->net_dev, 3885 "UDP port %d is already in use by %s\n", 3886 ntohs(tnl.port), typebuf); 3887 rc = -EEXIST; 3888 goto unlock_out; 3889 } 3890 3891 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) 3892 if (!nic_data->udp_tunnels[i].count) { 3893 nic_data->udp_tunnels[i] = tnl; 3894 nic_data->udp_tunnels[i].count = 1; 3895 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3896 goto unlock_out; 3897 } 3898 3899 netif_dbg(efx, drv, efx->net_dev, 3900 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", 3901 typebuf, ntohs(tnl.port)); 3902 3903 rc = -ENOMEM; 3904 3905 unlock_out: 3906 mutex_unlock(&nic_data->udp_tunnels_lock); 3907 return rc; 3908 } 3909 3910 /* Called under the TX lock with the TX queue running, hence no-one can be 3911 * in the middle of updating the UDP tunnels table. However, they could 3912 * have tried and failed the MCDI, in which case they'll have set the dirty 3913 * flag before dropping their locks. 3914 */ 3915 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) 3916 { 3917 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3918 3919 if (!(nic_data->datapath_caps & 3920 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3921 return false; 3922 3923 if (nic_data->udp_tunnels_dirty) 3924 /* SW table may not match HW state, so just assume we can't 3925 * use any UDP tunnel offloads. 3926 */ 3927 return false; 3928 3929 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; 3930 } 3931 3932 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, 3933 struct efx_udp_tunnel tnl) 3934 { 3935 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3936 struct efx_udp_tunnel *match; 3937 char typebuf[8]; 3938 int rc; 3939 3940 if (!(nic_data->datapath_caps & 3941 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 3942 return 0; 3943 3944 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 3945 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", 3946 typebuf, ntohs(tnl.port)); 3947 3948 mutex_lock(&nic_data->udp_tunnels_lock); 3949 /* Make sure all TX are stopped while we remove from the table, else we 3950 * might race against an efx_features_check(). 3951 */ 3952 efx_device_detach_sync(efx); 3953 3954 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 3955 if (match != NULL) { 3956 if (match->type == tnl.type) { 3957 if (--match->count) { 3958 /* Port is still in use, so nothing to do */ 3959 netif_dbg(efx, drv, efx->net_dev, 3960 "UDP tunnel port %d remains active\n", 3961 ntohs(tnl.port)); 3962 rc = 0; 3963 goto out_unlock; 3964 } 3965 rc = efx_ef10_set_udp_tnl_ports(efx, false); 3966 goto out_unlock; 3967 } 3968 efx_get_udp_tunnel_type_name(match->type, 3969 typebuf, sizeof(typebuf)); 3970 netif_warn(efx, drv, efx->net_dev, 3971 "UDP port %d is actually in use by %s, not removing\n", 3972 ntohs(tnl.port), typebuf); 3973 } 3974 rc = -ENOENT; 3975 3976 out_unlock: 3977 mutex_unlock(&nic_data->udp_tunnels_lock); 3978 return rc; 3979 } 3980 3981 /* EF10 may have multiple datapath firmware variants within a 3982 * single version. Report which variants are running. 3983 */ 3984 static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf, 3985 size_t len) 3986 { 3987 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3988 3989 return scnprintf(buf, len, " rx%x tx%x", 3990 nic_data->rx_dpcpu_fw_id, 3991 nic_data->tx_dpcpu_fw_id); 3992 } 3993 3994 static unsigned int ef10_check_caps(const struct efx_nic *efx, 3995 u8 flag, 3996 u32 offset) 3997 { 3998 const struct efx_ef10_nic_data *nic_data = efx->nic_data; 3999 4000 switch (offset) { 4001 case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST): 4002 return nic_data->datapath_caps & BIT_ULL(flag); 4003 case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST): 4004 return nic_data->datapath_caps2 & BIT_ULL(flag); 4005 default: 4006 return 0; 4007 } 4008 } 4009 4010 #define EF10_OFFLOAD_FEATURES \ 4011 (NETIF_F_IP_CSUM | \ 4012 NETIF_F_HW_VLAN_CTAG_FILTER | \ 4013 NETIF_F_IPV6_CSUM | \ 4014 NETIF_F_RXHASH | \ 4015 NETIF_F_NTUPLE) 4016 4017 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 4018 .is_vf = true, 4019 .mem_bar = efx_ef10_vf_mem_bar, 4020 .mem_map_size = efx_ef10_mem_map_size, 4021 .probe = efx_ef10_probe_vf, 4022 .remove = efx_ef10_remove, 4023 .dimension_resources = efx_ef10_dimension_resources, 4024 .init = efx_ef10_init_nic, 4025 .fini = efx_ef10_fini_nic, 4026 .map_reset_reason = efx_ef10_map_reset_reason, 4027 .map_reset_flags = efx_ef10_map_reset_flags, 4028 .reset = efx_ef10_reset, 4029 .probe_port = efx_mcdi_port_probe, 4030 .remove_port = efx_mcdi_port_remove, 4031 .fini_dmaq = efx_ef10_fini_dmaq, 4032 .prepare_flr = efx_ef10_prepare_flr, 4033 .finish_flr = efx_port_dummy_op_void, 4034 .describe_stats = efx_ef10_describe_stats, 4035 .update_stats = efx_ef10_update_stats_vf, 4036 .start_stats = efx_port_dummy_op_void, 4037 .pull_stats = efx_port_dummy_op_void, 4038 .stop_stats = efx_port_dummy_op_void, 4039 .set_id_led = efx_mcdi_set_id_led, 4040 .push_irq_moderation = efx_ef10_push_irq_moderation, 4041 .reconfigure_mac = efx_ef10_mac_reconfigure_vf, 4042 .check_mac_fault = efx_mcdi_mac_check_fault, 4043 .reconfigure_port = efx_mcdi_port_reconfigure, 4044 .get_wol = efx_ef10_get_wol_vf, 4045 .set_wol = efx_ef10_set_wol_vf, 4046 .resume_wol = efx_port_dummy_op_void, 4047 .mcdi_request = efx_ef10_mcdi_request, 4048 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 4049 .mcdi_read_response = efx_ef10_mcdi_read_response, 4050 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4051 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 4052 .irq_enable_master = efx_port_dummy_op_void, 4053 .irq_test_generate = efx_ef10_irq_test_generate, 4054 .irq_disable_non_ev = efx_port_dummy_op_void, 4055 .irq_handle_msi = efx_ef10_msi_interrupt, 4056 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4057 .tx_probe = efx_ef10_tx_probe, 4058 .tx_init = efx_ef10_tx_init, 4059 .tx_remove = efx_mcdi_tx_remove, 4060 .tx_write = efx_ef10_tx_write, 4061 .tx_limit_len = efx_ef10_tx_limit_len, 4062 .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config, 4063 .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, 4064 .rx_probe = efx_mcdi_rx_probe, 4065 .rx_init = efx_mcdi_rx_init, 4066 .rx_remove = efx_mcdi_rx_remove, 4067 .rx_write = efx_ef10_rx_write, 4068 .rx_defer_refill = efx_ef10_rx_defer_refill, 4069 .ev_probe = efx_mcdi_ev_probe, 4070 .ev_init = efx_ef10_ev_init, 4071 .ev_fini = efx_mcdi_ev_fini, 4072 .ev_remove = efx_mcdi_ev_remove, 4073 .ev_process = efx_ef10_ev_process, 4074 .ev_read_ack = efx_ef10_ev_read_ack, 4075 .ev_test_generate = efx_ef10_ev_test_generate, 4076 .filter_table_probe = efx_ef10_filter_table_probe, 4077 .filter_table_restore = efx_mcdi_filter_table_restore, 4078 .filter_table_remove = efx_mcdi_filter_table_remove, 4079 .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, 4080 .filter_insert = efx_mcdi_filter_insert, 4081 .filter_remove_safe = efx_mcdi_filter_remove_safe, 4082 .filter_get_safe = efx_mcdi_filter_get_safe, 4083 .filter_clear_rx = efx_mcdi_filter_clear_rx, 4084 .filter_count_rx_used = efx_mcdi_filter_count_rx_used, 4085 .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, 4086 .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, 4087 #ifdef CONFIG_RFS_ACCEL 4088 .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, 4089 #endif 4090 #ifdef CONFIG_SFC_MTD 4091 .mtd_probe = efx_port_dummy_op_int, 4092 #endif 4093 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 4094 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 4095 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 4096 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 4097 #ifdef CONFIG_SFC_SRIOV 4098 .vswitching_probe = efx_ef10_vswitching_probe_vf, 4099 .vswitching_restore = efx_ef10_vswitching_restore_vf, 4100 .vswitching_remove = efx_ef10_vswitching_remove_vf, 4101 #endif 4102 .get_mac_address = efx_ef10_get_mac_address_vf, 4103 .set_mac_address = efx_ef10_set_mac_address, 4104 4105 .get_phys_port_id = efx_ef10_get_phys_port_id, 4106 .revision = EFX_REV_HUNT_A0, 4107 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4108 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4109 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4110 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4111 .can_rx_scatter = true, 4112 .always_rx_scatter = true, 4113 .min_interrupt_mode = EFX_INT_MODE_MSIX, 4114 .max_interrupt_mode = EFX_INT_MODE_MSIX, 4115 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4116 .offload_features = EF10_OFFLOAD_FEATURES, 4117 .mcdi_max_ver = 2, 4118 .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, 4119 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4120 1 << HWTSTAMP_FILTER_ALL, 4121 .rx_hash_key_size = 40, 4122 .check_caps = ef10_check_caps, 4123 .print_additional_fwver = efx_ef10_print_additional_fwver, 4124 }; 4125 4126 const struct efx_nic_type efx_hunt_a0_nic_type = { 4127 .is_vf = false, 4128 .mem_bar = efx_ef10_pf_mem_bar, 4129 .mem_map_size = efx_ef10_mem_map_size, 4130 .probe = efx_ef10_probe_pf, 4131 .remove = efx_ef10_remove, 4132 .dimension_resources = efx_ef10_dimension_resources, 4133 .init = efx_ef10_init_nic, 4134 .fini = efx_ef10_fini_nic, 4135 .map_reset_reason = efx_ef10_map_reset_reason, 4136 .map_reset_flags = efx_ef10_map_reset_flags, 4137 .reset = efx_ef10_reset, 4138 .probe_port = efx_mcdi_port_probe, 4139 .remove_port = efx_mcdi_port_remove, 4140 .fini_dmaq = efx_ef10_fini_dmaq, 4141 .prepare_flr = efx_ef10_prepare_flr, 4142 .finish_flr = efx_port_dummy_op_void, 4143 .describe_stats = efx_ef10_describe_stats, 4144 .update_stats = efx_ef10_update_stats_pf, 4145 .start_stats = efx_mcdi_mac_start_stats, 4146 .pull_stats = efx_mcdi_mac_pull_stats, 4147 .stop_stats = efx_mcdi_mac_stop_stats, 4148 .set_id_led = efx_mcdi_set_id_led, 4149 .push_irq_moderation = efx_ef10_push_irq_moderation, 4150 .reconfigure_mac = efx_ef10_mac_reconfigure, 4151 .check_mac_fault = efx_mcdi_mac_check_fault, 4152 .reconfigure_port = efx_mcdi_port_reconfigure, 4153 .get_wol = efx_ef10_get_wol, 4154 .set_wol = efx_ef10_set_wol, 4155 .resume_wol = efx_port_dummy_op_void, 4156 .test_chip = efx_ef10_test_chip, 4157 .test_nvram = efx_mcdi_nvram_test_all, 4158 .mcdi_request = efx_ef10_mcdi_request, 4159 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 4160 .mcdi_read_response = efx_ef10_mcdi_read_response, 4161 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4162 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 4163 .irq_enable_master = efx_port_dummy_op_void, 4164 .irq_test_generate = efx_ef10_irq_test_generate, 4165 .irq_disable_non_ev = efx_port_dummy_op_void, 4166 .irq_handle_msi = efx_ef10_msi_interrupt, 4167 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4168 .tx_probe = efx_ef10_tx_probe, 4169 .tx_init = efx_ef10_tx_init, 4170 .tx_remove = efx_mcdi_tx_remove, 4171 .tx_write = efx_ef10_tx_write, 4172 .tx_limit_len = efx_ef10_tx_limit_len, 4173 .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, 4174 .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, 4175 .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, 4176 .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, 4177 .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, 4178 .rx_probe = efx_mcdi_rx_probe, 4179 .rx_init = efx_mcdi_rx_init, 4180 .rx_remove = efx_mcdi_rx_remove, 4181 .rx_write = efx_ef10_rx_write, 4182 .rx_defer_refill = efx_ef10_rx_defer_refill, 4183 .ev_probe = efx_mcdi_ev_probe, 4184 .ev_init = efx_ef10_ev_init, 4185 .ev_fini = efx_mcdi_ev_fini, 4186 .ev_remove = efx_mcdi_ev_remove, 4187 .ev_process = efx_ef10_ev_process, 4188 .ev_read_ack = efx_ef10_ev_read_ack, 4189 .ev_test_generate = efx_ef10_ev_test_generate, 4190 .filter_table_probe = efx_ef10_filter_table_probe, 4191 .filter_table_restore = efx_mcdi_filter_table_restore, 4192 .filter_table_remove = efx_mcdi_filter_table_remove, 4193 .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, 4194 .filter_insert = efx_mcdi_filter_insert, 4195 .filter_remove_safe = efx_mcdi_filter_remove_safe, 4196 .filter_get_safe = efx_mcdi_filter_get_safe, 4197 .filter_clear_rx = efx_mcdi_filter_clear_rx, 4198 .filter_count_rx_used = efx_mcdi_filter_count_rx_used, 4199 .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, 4200 .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, 4201 #ifdef CONFIG_RFS_ACCEL 4202 .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, 4203 #endif 4204 #ifdef CONFIG_SFC_MTD 4205 .mtd_probe = efx_ef10_mtd_probe, 4206 .mtd_rename = efx_mcdi_mtd_rename, 4207 .mtd_read = efx_mcdi_mtd_read, 4208 .mtd_erase = efx_mcdi_mtd_erase, 4209 .mtd_write = efx_mcdi_mtd_write, 4210 .mtd_sync = efx_mcdi_mtd_sync, 4211 #endif 4212 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 4213 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 4214 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 4215 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 4216 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 4217 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, 4218 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, 4219 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, 4220 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, 4221 #ifdef CONFIG_SFC_SRIOV 4222 .sriov_configure = efx_ef10_sriov_configure, 4223 .sriov_init = efx_ef10_sriov_init, 4224 .sriov_fini = efx_ef10_sriov_fini, 4225 .sriov_wanted = efx_ef10_sriov_wanted, 4226 .sriov_reset = efx_ef10_sriov_reset, 4227 .sriov_flr = efx_ef10_sriov_flr, 4228 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 4229 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 4230 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 4231 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 4232 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 4233 .vswitching_probe = efx_ef10_vswitching_probe_pf, 4234 .vswitching_restore = efx_ef10_vswitching_restore_pf, 4235 .vswitching_remove = efx_ef10_vswitching_remove_pf, 4236 #endif 4237 .get_mac_address = efx_ef10_get_mac_address_pf, 4238 .set_mac_address = efx_ef10_set_mac_address, 4239 .tso_versions = efx_ef10_tso_versions, 4240 4241 .get_phys_port_id = efx_ef10_get_phys_port_id, 4242 .revision = EFX_REV_HUNT_A0, 4243 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4244 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4245 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4246 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4247 .can_rx_scatter = true, 4248 .always_rx_scatter = true, 4249 .option_descriptors = true, 4250 .min_interrupt_mode = EFX_INT_MODE_LEGACY, 4251 .max_interrupt_mode = EFX_INT_MODE_MSIX, 4252 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4253 .offload_features = EF10_OFFLOAD_FEATURES, 4254 .mcdi_max_ver = 2, 4255 .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, 4256 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4257 1 << HWTSTAMP_FILTER_ALL, 4258 .rx_hash_key_size = 40, 4259 .check_caps = ef10_check_caps, 4260 .print_additional_fwver = efx_ef10_print_additional_fwver, 4261 }; 4262