/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV	7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256
	struct {
		u8 addr[ETH_ALEN];
		u16 id;
	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;		/* negative for PROMISC */
	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	struct net_device *net_dev = efx->net_dev;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      efx_ef10_mem_map_size(efx) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	if (WARN_ON(efx->max_channels == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
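	 * (The loop below retries up to five times, sleeping one second
	 * between attempts.)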
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;
	net_dev->dev_port = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail5;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */

	/* Check whether firmware supports bug 35388 workaround.
	 * First try to enable it, then if we get EPERM, just
	 * ask if it's already enabled
	 */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0) {
		nic_data->workaround_35388 = true;
	} else if (rc == -EPERM) {
		unsigned int enabled;

		rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
		if (rc)
			goto fail3;
		nic_data->workaround_35388 = enabled &
			MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
	} else if (rc != -ENOSYS && rc != -ENOENT) {
		goto fail5;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
"en" : "dis"); 402 403 rc = efx_mcdi_mon_probe(efx); 404 if (rc && rc != -EPERM) 405 goto fail5; 406 407 efx_ptp_probe(efx, NULL); 408 409 #ifdef CONFIG_SFC_SRIOV 410 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { 411 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 412 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 413 414 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); 415 } else 416 #endif 417 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); 418 419 return 0; 420 421 fail5: 422 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 423 fail4: 424 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 425 fail3: 426 efx_mcdi_fini(efx); 427 fail2: 428 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 429 fail1: 430 kfree(nic_data); 431 efx->nic_data = NULL; 432 return rc; 433 } 434 435 static int efx_ef10_free_vis(struct efx_nic *efx) 436 { 437 MCDI_DECLARE_BUF_ERR(outbuf); 438 size_t outlen; 439 int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, 440 outbuf, sizeof(outbuf), &outlen); 441 442 /* -EALREADY means nothing to free, so ignore */ 443 if (rc == -EALREADY) 444 rc = 0; 445 if (rc) 446 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, 447 rc); 448 return rc; 449 } 450 451 #ifdef EFX_USE_PIO 452 453 static void efx_ef10_free_piobufs(struct efx_nic *efx) 454 { 455 struct efx_ef10_nic_data *nic_data = efx->nic_data; 456 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); 457 unsigned int i; 458 int rc; 459 460 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); 461 462 for (i = 0; i < nic_data->n_piobufs; i++) { 463 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, 464 nic_data->piobuf_handle[i]); 465 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), 466 NULL, 0, NULL); 467 WARN_ON(rc); 468 } 469 470 nic_data->n_piobufs = 0; 471 } 472 473 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 474 { 475 struct efx_ef10_nic_data *nic_data = efx->nic_data; 476 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); 477 unsigned int i; 478 size_t outlen; 479 int rc = 0; 480 481 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); 482 483 for (i = 0; i < n; i++) { 484 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, 485 outbuf, sizeof(outbuf), &outlen); 486 if (rc) 487 break; 488 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 489 rc = -EIO; 490 break; 491 } 492 nic_data->piobuf_handle[i] = 493 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 494 netif_dbg(efx, probe, efx->net_dev, 495 "allocated PIO buffer %u handle %x\n", i, 496 nic_data->piobuf_handle[i]); 497 } 498 499 nic_data->n_piobufs = i; 500 if (rc) 501 efx_ef10_free_piobufs(efx); 502 return rc; 503 } 504 505 static int efx_ef10_link_piobufs(struct efx_nic *efx) 506 { 507 struct efx_ef10_nic_data *nic_data = efx->nic_data; 508 _MCDI_DECLARE_BUF(inbuf, 509 max(MC_CMD_LINK_PIOBUF_IN_LEN, 510 MC_CMD_UNLINK_PIOBUF_IN_LEN)); 511 struct efx_channel *channel; 512 struct efx_tx_queue *tx_queue; 513 unsigned int offset, index; 514 int rc; 515 516 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 517 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 518 519 memset(inbuf, 0, sizeof(inbuf)); 520 521 /* Link a buffer to each VI in the write-combining mapping */ 522 for (index = 0; index < nic_data->n_piobufs; ++index) { 523 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 524 nic_data->piobuf_handle[index]); 525 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, 526 nic_data->pio_write_vi_base + index); 527 rc = efx_mcdi_rpc(efx, 
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (min_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = min_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

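	/* Re-establish any per-function state that may have been lost
	 * across an MC reboot.  The must_* flags below are set by
	 * efx_ef10_reset_mc_allocations() and efx_ef10_mcdi_poll_reboot().
	 */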
	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
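	 * (The FLR case reaches this function as RESET_TYPE_MCDI_TIMEOUT,
	 * which is why that type is checked below alongside RESET_TYPE_ALL.)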
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
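	/* The RXDP counters below, like the PM counters above, are only
	 * reported when the firmware advertises PM_AND_RXDP_COUNTERS
	 * (see HUNT_PM_AND_RXDP_STAT_MASK).
	 */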
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<					\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		return 0;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 raw_mask[2];

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
	} else {
		raw_mask[1] = 0;
	}

#if BITS_PER_LONG == 64
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
#else
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
	mask[3] = raw_mask[1] >> 32;
#endif
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
	}

	return stats_count;
}

static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;
	nic_data = efx->nic_data;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx,
				     &stats[EF10_STAT_port_rx_nodesc_drops]);
	stats[EF10_STAT_port_rx_good_bytes] =
		stats[EF10_STAT_port_rx_bytes] -
		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
	efx_update_sw_stats(efx, stats);
	return 0;
}


static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
			break;
		udelay(100);
	}

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
	struct efx_buffer stats_buf;
	__le64 *dma_stats;
	int rc;

	spin_unlock_bh(&efx->stats_lock);

	if (in_interrupt()) {
		/* If in atomic context, cannot update stats.  Just update the
		 * software stats and return so the caller can continue.
		 */
		spin_lock_bh(&efx->stats_lock);
		efx_update_sw_stats(efx, stats);
		return 0;
	}

	efx_ef10_get_stat_mask(efx, mask);

	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
	if (rc) {
		spin_lock_bh(&efx->stats_lock);
		return rc;
	}

	dma_stats = stats_buf.addr;
	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, 1);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
				NULL, 0, NULL);
	spin_lock_bh(&efx->stats_lock);
	if (rc) {
		/* Expect ENOENT if DMA queues have not been set up */
		if (rc != -ENOENT || atomic_read(&efx->active_queues))
			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
					       sizeof(inbuf), NULL, 0, rc);
		goto out;
	}

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
		WARN_ON_ONCE(1);
		goto out;
	}
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, stats_buf.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start) {
		rc = -EAGAIN;
		goto out;
	}

	efx_update_sw_stats(efx, stats);
out:
	efx_nic_free_buffer(efx, &stats_buf);
	return rc;
}

static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	if (efx_ef10_try_update_nic_stats_vf(efx))
		return 0;

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol_vf(struct efx_nic *efx,
				struct ethtool_wolinfo *wol) {}

static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
{
	return -EOPNOTSUPP;
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	efx_ef10_reset_mc_allocations(efx);

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t inlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;
	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
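	/* csum_offload is derived from the queue's EFX_TXQ_TYPE_OFFLOAD flag
	 * above, so checksum insertion is disabled on the non-offload queue.
	 */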
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
		    tx_queue->queue);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
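			/* The BUILD_BUG_ON above lets the EFX_TX_BUF_CONT
			 * flag value be written straight into the
			 * descriptor's continuation field.
			 */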
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}

static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
				      bool exclusive, unsigned *context_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;
	u32 alloc_type = exclusive ?
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
	unsigned rss_spread = exclusive ?
				efx->rss_spread :
				min(rounddown_pow_of_two(efx->rss_spread),
				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);

	if (!exclusive && rss_spread == 1) {
		*context = EFX_EF10_RSS_CONTEXT_INVALID;
		if (context_size)
			*context_size = 1;
		return 0;
	}

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	if (context_size)
		*context_size = rss_spread;

	return 0;
}

static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}

static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
				       const u32 *rx_indir_table)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
efx->rx_hash_key[i]; 1860 1861 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, 1862 sizeof(keybuf), NULL, 0, NULL); 1863 } 1864 1865 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) 1866 { 1867 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1868 1869 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 1870 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); 1871 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 1872 } 1873 1874 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, 1875 unsigned *context_size) 1876 { 1877 u32 new_rx_rss_context; 1878 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1879 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 1880 false, context_size); 1881 1882 if (rc != 0) 1883 return rc; 1884 1885 nic_data->rx_rss_context = new_rx_rss_context; 1886 nic_data->rx_rss_context_exclusive = false; 1887 efx_set_default_rx_indir_table(efx); 1888 return 0; 1889 } 1890 1891 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, 1892 const u32 *rx_indir_table) 1893 { 1894 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1895 int rc; 1896 u32 new_rx_rss_context; 1897 1898 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || 1899 !nic_data->rx_rss_context_exclusive) { 1900 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 1901 true, NULL); 1902 if (rc == -EOPNOTSUPP) 1903 return rc; 1904 else if (rc != 0) 1905 goto fail1; 1906 } else { 1907 new_rx_rss_context = nic_data->rx_rss_context; 1908 } 1909 1910 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, 1911 rx_indir_table); 1912 if (rc != 0) 1913 goto fail2; 1914 1915 if (nic_data->rx_rss_context != new_rx_rss_context) 1916 efx_ef10_rx_free_indir_table(efx); 1917 nic_data->rx_rss_context = new_rx_rss_context; 1918 nic_data->rx_rss_context_exclusive = true; 1919 if (rx_indir_table != efx->rx_indir_table) 1920 memcpy(efx->rx_indir_table, rx_indir_table, 1921 sizeof(efx->rx_indir_table)); 1922 return 0; 1923 1924 fail2: 1925 if (new_rx_rss_context != nic_data->rx_rss_context) 1926 efx_ef10_free_rss_context(efx, new_rx_rss_context); 1927 fail1: 1928 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1929 return rc; 1930 } 1931 1932 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 1933 const u32 *rx_indir_table) 1934 { 1935 int rc; 1936 1937 if (efx->rss_spread == 1) 1938 return 0; 1939 1940 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); 1941 1942 if (rc == -ENOBUFS && !user) { 1943 unsigned context_size; 1944 bool mismatch = false; 1945 size_t i; 1946 1947 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; 1948 i++) 1949 mismatch = rx_indir_table[i] != 1950 ethtool_rxfh_indir_default(i, efx->rss_spread); 1951 1952 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); 1953 if (rc == 0) { 1954 if (context_size != efx->rss_spread) 1955 netif_warn(efx, probe, efx->net_dev, 1956 "Could not allocate an exclusive RSS" 1957 " context; allocated a shared one of" 1958 " different size." 
1959 " Wanted %u, got %u.\n", 1960 efx->rss_spread, context_size); 1961 else if (mismatch) 1962 netif_warn(efx, probe, efx->net_dev, 1963 "Could not allocate an exclusive RSS" 1964 " context; allocated a shared one but" 1965 " could not apply custom" 1966 " indirection.\n"); 1967 else 1968 netif_info(efx, probe, efx->net_dev, 1969 "Could not allocate an exclusive RSS" 1970 " context; allocated a shared one.\n"); 1971 } 1972 } 1973 return rc; 1974 } 1975 1976 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 1977 const u32 *rx_indir_table 1978 __attribute__ ((unused))) 1979 { 1980 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1981 1982 if (user) 1983 return -EOPNOTSUPP; 1984 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 1985 return 0; 1986 return efx_ef10_rx_push_shared_rss_config(efx, NULL); 1987 } 1988 1989 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) 1990 { 1991 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, 1992 (rx_queue->ptr_mask + 1) * 1993 sizeof(efx_qword_t), 1994 GFP_KERNEL); 1995 } 1996 1997 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) 1998 { 1999 MCDI_DECLARE_BUF(inbuf, 2000 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 2001 EFX_BUF_SIZE)); 2002 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 2003 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; 2004 struct efx_nic *efx = rx_queue->efx; 2005 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2006 size_t inlen; 2007 dma_addr_t dma_addr; 2008 int rc; 2009 int i; 2010 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); 2011 2012 rx_queue->scatter_n = 0; 2013 rx_queue->scatter_len = 0; 2014 2015 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); 2016 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); 2017 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); 2018 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, 2019 efx_rx_queue_index(rx_queue)); 2020 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, 2021 INIT_RXQ_IN_FLAG_PREFIX, 1, 2022 INIT_RXQ_IN_FLAG_TIMESTAMP, 1); 2023 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); 2024 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); 2025 2026 dma_addr = rx_queue->rxd.buf.dma_addr; 2027 2028 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", 2029 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); 2030 2031 for (i = 0; i < entries; ++i) { 2032 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); 2033 dma_addr += EFX_BUF_SIZE; 2034 } 2035 2036 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); 2037 2038 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, 2039 NULL, 0, NULL); 2040 if (rc) 2041 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", 2042 efx_rx_queue_index(rx_queue)); 2043 } 2044 2045 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) 2046 { 2047 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); 2048 MCDI_DECLARE_BUF_ERR(outbuf); 2049 struct efx_nic *efx = rx_queue->efx; 2050 size_t outlen; 2051 int rc; 2052 2053 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, 2054 efx_rx_queue_index(rx_queue)); 2055 2056 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), 2057 outbuf, sizeof(outbuf), &outlen); 2058 2059 if (rc && rc != -EALREADY) 2060 goto fail; 2061 2062 return; 2063 2064 fail: 2065 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, 2066 outbuf, outlen, rc); 2067 } 2068 2069 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) 2070 { 2071 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); 2072 } 2073 2074 /* This creates an entry in the RX descriptor queue */ 2075 static inline void 2076 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 2077 { 2078 struct efx_rx_buffer *rx_buf; 2079 efx_qword_t *rxd; 2080 2081 rxd = efx_rx_desc(rx_queue, index); 2082 rx_buf = efx_rx_buffer(rx_queue, index); 2083 EFX_POPULATE_QWORD_2(*rxd, 2084 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 2085 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 2086 } 2087 2088 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 2089 { 2090 struct efx_nic *efx = rx_queue->efx; 2091 unsigned int write_count; 2092 efx_dword_t reg; 2093 2094 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 2095 write_count = rx_queue->added_count & ~7; 2096 if (rx_queue->notified_count == write_count) 2097 return; 2098 2099 do 2100 efx_ef10_build_rx_desc( 2101 rx_queue, 2102 rx_queue->notified_count & rx_queue->ptr_mask); 2103 while (++rx_queue->notified_count != write_count); 2104 2105 wmb(); 2106 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 2107 write_count & rx_queue->ptr_mask); 2108 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 2109 efx_rx_queue_index(rx_queue)); 2110 } 2111 2112 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 2113 2114 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 2115 { 2116 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 2117 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 2118 efx_qword_t event; 2119 2120 EFX_POPULATE_QWORD_2(event, 2121 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 2122 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 2123 2124 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 2125 2126 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 2127 * already swapped the data to little-endian order. 
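 * A plain memcpy() of the already-converted efx_qword_t into the MCDI
 * request payload is all that is needed.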
2128 */ 2129 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 2130 sizeof(efx_qword_t)); 2131 2132 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 2133 inbuf, sizeof(inbuf), 0, 2134 efx_ef10_rx_defer_refill_complete, 0); 2135 } 2136 2137 static void 2138 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 2139 int rc, efx_dword_t *outbuf, 2140 size_t outlen_actual) 2141 { 2142 /* nothing to do */ 2143 } 2144 2145 static int efx_ef10_ev_probe(struct efx_channel *channel) 2146 { 2147 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 2148 (channel->eventq_mask + 1) * 2149 sizeof(efx_qword_t), 2150 GFP_KERNEL); 2151 } 2152 2153 static int efx_ef10_ev_init(struct efx_channel *channel) 2154 { 2155 MCDI_DECLARE_BUF(inbuf, 2156 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / 2157 EFX_BUF_SIZE)); 2158 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); 2159 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; 2160 struct efx_nic *efx = channel->efx; 2161 struct efx_ef10_nic_data *nic_data; 2162 bool supports_rx_merge; 2163 size_t inlen, outlen; 2164 dma_addr_t dma_addr; 2165 int rc; 2166 int i; 2167 2168 nic_data = efx->nic_data; 2169 supports_rx_merge = 2170 !!(nic_data->datapath_caps & 2171 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 2172 2173 /* Fill event queue with all ones (i.e. empty events) */ 2174 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 2175 2176 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); 2177 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); 2178 /* INIT_EVQ expects index in vector table, not absolute */ 2179 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); 2180 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, 2181 INIT_EVQ_IN_FLAG_INTERRUPTING, 1, 2182 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 2183 INIT_EVQ_IN_FLAG_TX_MERGE, 1, 2184 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); 2185 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, 2186 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 2187 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); 2188 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); 2189 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, 2190 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 2191 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); 2192 2193 dma_addr = channel->eventq.buf.dma_addr; 2194 for (i = 0; i < entries; ++i) { 2195 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); 2196 dma_addr += EFX_BUF_SIZE; 2197 } 2198 2199 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); 2200 2201 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, 2202 outbuf, sizeof(outbuf), &outlen); 2203 /* IRQ return is ignored */ 2204 return rc; 2205 } 2206 2207 static void efx_ef10_ev_fini(struct efx_channel *channel) 2208 { 2209 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); 2210 MCDI_DECLARE_BUF_ERR(outbuf); 2211 struct efx_nic *efx = channel->efx; 2212 size_t outlen; 2213 int rc; 2214 2215 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); 2216 2217 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), 2218 outbuf, sizeof(outbuf), &outlen); 2219 2220 if (rc && rc != -EALREADY) 2221 goto fail; 2222 2223 return; 2224 2225 fail: 2226 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, 2227 outbuf, outlen, rc); 2228 } 2229 2230 static void efx_ef10_ev_remove(struct efx_channel *channel) 2231 { 2232 efx_nic_free_buffer(channel->efx, &channel->eventq.buf); 2233 } 2234 2235 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 2236 
unsigned int rx_queue_label) 2237 { 2238 struct efx_nic *efx = rx_queue->efx; 2239 2240 netif_info(efx, hw, efx->net_dev, 2241 "rx event arrived on queue %d labeled as queue %u\n", 2242 efx_rx_queue_index(rx_queue), rx_queue_label); 2243 2244 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2245 } 2246 2247 static void 2248 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 2249 unsigned int actual, unsigned int expected) 2250 { 2251 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 2252 struct efx_nic *efx = rx_queue->efx; 2253 2254 netif_info(efx, hw, efx->net_dev, 2255 "dropped %d events (index=%d expected=%d)\n", 2256 dropped, actual, expected); 2257 2258 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 2259 } 2260 2261 /* partially received RX was aborted. clean up. */ 2262 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 2263 { 2264 unsigned int rx_desc_ptr; 2265 2266 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 2267 "scattered RX aborted (dropping %u buffers)\n", 2268 rx_queue->scatter_n); 2269 2270 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 2271 2272 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 2273 0, EFX_RX_PKT_DISCARD); 2274 2275 rx_queue->removed_count += rx_queue->scatter_n; 2276 rx_queue->scatter_n = 0; 2277 rx_queue->scatter_len = 0; 2278 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 2279 } 2280 2281 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 2282 const efx_qword_t *event) 2283 { 2284 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; 2285 unsigned int n_descs, n_packets, i; 2286 struct efx_nic *efx = channel->efx; 2287 struct efx_rx_queue *rx_queue; 2288 bool rx_cont; 2289 u16 flags = 0; 2290 2291 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 2292 return 0; 2293 2294 /* Basic packet information */ 2295 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 2296 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 2297 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 2298 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); 2299 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 2300 2301 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 2302 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 2303 EFX_QWORD_FMT "\n", 2304 EFX_QWORD_VAL(*event)); 2305 2306 rx_queue = efx_channel_get_rx_queue(channel); 2307 2308 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 2309 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 2310 2311 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 2312 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2313 2314 if (n_descs != rx_queue->scatter_n + 1) { 2315 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2316 2317 /* detect rx abort */ 2318 if (unlikely(n_descs == rx_queue->scatter_n)) { 2319 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 2320 netdev_WARN(efx->net_dev, 2321 "invalid RX abort: scatter_n=%u event=" 2322 EFX_QWORD_FMT "\n", 2323 rx_queue->scatter_n, 2324 EFX_QWORD_VAL(*event)); 2325 efx_ef10_handle_rx_abort(rx_queue); 2326 return 0; 2327 } 2328 2329 /* Check that RX completion merging is valid, i.e. 2330 * the current firmware supports it and this is a 2331 * non-scattered packet. 
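 * If neither holds, the descriptor pointer has simply got out of step
 * with the driver's count and efx_ef10_handle_rx_bad_lbits() is used
 * to report the discrepancy.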
2332 */ 2333 if (!(nic_data->datapath_caps & 2334 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 2335 rx_queue->scatter_n != 0 || rx_cont) { 2336 efx_ef10_handle_rx_bad_lbits( 2337 rx_queue, next_ptr_lbits, 2338 (rx_queue->removed_count + 2339 rx_queue->scatter_n + 1) & 2340 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 2341 return 0; 2342 } 2343 2344 /* Merged completion for multiple non-scattered packets */ 2345 rx_queue->scatter_n = 1; 2346 rx_queue->scatter_len = 0; 2347 n_packets = n_descs; 2348 ++channel->n_rx_merge_events; 2349 channel->n_rx_merge_packets += n_packets; 2350 flags |= EFX_RX_PKT_PREFIX_LEN; 2351 } else { 2352 ++rx_queue->scatter_n; 2353 rx_queue->scatter_len += rx_bytes; 2354 if (rx_cont) 2355 return 0; 2356 n_packets = 1; 2357 } 2358 2359 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) 2360 flags |= EFX_RX_PKT_DISCARD; 2361 2362 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { 2363 channel->n_rx_ip_hdr_chksum_err += n_packets; 2364 } else if (unlikely(EFX_QWORD_FIELD(*event, 2365 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { 2366 channel->n_rx_tcp_udp_chksum_err += n_packets; 2367 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || 2368 rx_l4_class == ESE_DZ_L4_CLASS_UDP) { 2369 flags |= EFX_RX_PKT_CSUMMED; 2370 } 2371 2372 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) 2373 flags |= EFX_RX_PKT_TCP; 2374 2375 channel->irq_mod_score += 2 * n_packets; 2376 2377 /* Handle received packet(s) */ 2378 for (i = 0; i < n_packets; i++) { 2379 efx_rx_packet(rx_queue, 2380 rx_queue->removed_count & rx_queue->ptr_mask, 2381 rx_queue->scatter_n, rx_queue->scatter_len, 2382 flags); 2383 rx_queue->removed_count += rx_queue->scatter_n; 2384 } 2385 2386 rx_queue->scatter_n = 0; 2387 rx_queue->scatter_len = 0; 2388 2389 return n_packets; 2390 } 2391 2392 static int 2393 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 2394 { 2395 struct efx_nic *efx = channel->efx; 2396 struct efx_tx_queue *tx_queue; 2397 unsigned int tx_ev_desc_ptr; 2398 unsigned int tx_ev_q_label; 2399 int tx_descs = 0; 2400 2401 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 2402 return 0; 2403 2404 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 2405 return 0; 2406 2407 /* Transmit completion */ 2408 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 2409 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 2410 tx_queue = efx_channel_get_tx_queue(channel, 2411 tx_ev_q_label % EFX_TXQ_TYPES); 2412 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & 2413 tx_queue->ptr_mask); 2414 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 2415 2416 return tx_descs; 2417 } 2418 2419 static void 2420 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 2421 { 2422 struct efx_nic *efx = channel->efx; 2423 int subcode; 2424 2425 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 2426 2427 switch (subcode) { 2428 case ESE_DZ_DRV_TIMER_EV: 2429 case ESE_DZ_DRV_WAKE_UP_EV: 2430 break; 2431 case ESE_DZ_DRV_START_UP_EV: 2432 /* event queue init complete. ok. 
*/ 2433 break; 2434 default: 2435 netif_err(efx, hw, efx->net_dev, 2436 "channel %d unknown driver event type %d" 2437 " (data " EFX_QWORD_FMT ")\n", 2438 channel->channel, subcode, 2439 EFX_QWORD_VAL(*event)); 2440 2441 } 2442 } 2443 2444 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 2445 efx_qword_t *event) 2446 { 2447 struct efx_nic *efx = channel->efx; 2448 u32 subcode; 2449 2450 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 2451 2452 switch (subcode) { 2453 case EFX_EF10_TEST: 2454 channel->event_test_cpu = raw_smp_processor_id(); 2455 break; 2456 case EFX_EF10_REFILL: 2457 /* The queue must be empty, so we won't receive any rx 2458 * events, so efx_process_channel() won't refill the 2459 * queue. Refill it here 2460 */ 2461 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 2462 break; 2463 default: 2464 netif_err(efx, hw, efx->net_dev, 2465 "channel %d unknown driver event type %u" 2466 " (data " EFX_QWORD_FMT ")\n", 2467 channel->channel, (unsigned) subcode, 2468 EFX_QWORD_VAL(*event)); 2469 } 2470 } 2471 2472 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 2473 { 2474 struct efx_nic *efx = channel->efx; 2475 efx_qword_t event, *p_event; 2476 unsigned int read_ptr; 2477 int ev_code; 2478 int tx_descs = 0; 2479 int spent = 0; 2480 2481 if (quota <= 0) 2482 return spent; 2483 2484 read_ptr = channel->eventq_read_ptr; 2485 2486 for (;;) { 2487 p_event = efx_event(channel, read_ptr); 2488 event = *p_event; 2489 2490 if (!efx_event_present(&event)) 2491 break; 2492 2493 EFX_SET_QWORD(*p_event); 2494 2495 ++read_ptr; 2496 2497 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 2498 2499 netif_vdbg(efx, drv, efx->net_dev, 2500 "processing event on %d " EFX_QWORD_FMT "\n", 2501 channel->channel, EFX_QWORD_VAL(event)); 2502 2503 switch (ev_code) { 2504 case ESE_DZ_EV_CODE_MCDI_EV: 2505 efx_mcdi_process_event(channel, &event); 2506 break; 2507 case ESE_DZ_EV_CODE_RX_EV: 2508 spent += efx_ef10_handle_rx_event(channel, &event); 2509 if (spent >= quota) { 2510 /* XXX can we split a merged event to 2511 * avoid going over-quota? 
2512 */ 2513 spent = quota; 2514 goto out; 2515 } 2516 break; 2517 case ESE_DZ_EV_CODE_TX_EV: 2518 tx_descs += efx_ef10_handle_tx_event(channel, &event); 2519 if (tx_descs > efx->txq_entries) { 2520 spent = quota; 2521 goto out; 2522 } else if (++spent == quota) { 2523 goto out; 2524 } 2525 break; 2526 case ESE_DZ_EV_CODE_DRIVER_EV: 2527 efx_ef10_handle_driver_event(channel, &event); 2528 if (++spent == quota) 2529 goto out; 2530 break; 2531 case EFX_EF10_DRVGEN_EV: 2532 efx_ef10_handle_driver_generated_event(channel, &event); 2533 break; 2534 default: 2535 netif_err(efx, hw, efx->net_dev, 2536 "channel %d unknown event type %d" 2537 " (data " EFX_QWORD_FMT ")\n", 2538 channel->channel, ev_code, 2539 EFX_QWORD_VAL(event)); 2540 } 2541 } 2542 2543 out: 2544 channel->eventq_read_ptr = read_ptr; 2545 return spent; 2546 } 2547 2548 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 2549 { 2550 struct efx_nic *efx = channel->efx; 2551 efx_dword_t rptr; 2552 2553 if (EFX_EF10_WORKAROUND_35388(efx)) { 2554 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 2555 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 2556 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 2557 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 2558 2559 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 2560 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 2561 ERF_DD_EVQ_IND_RPTR, 2562 (channel->eventq_read_ptr & 2563 channel->eventq_mask) >> 2564 ERF_DD_EVQ_IND_RPTR_WIDTH); 2565 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 2566 channel->channel); 2567 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 2568 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 2569 ERF_DD_EVQ_IND_RPTR, 2570 channel->eventq_read_ptr & 2571 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 2572 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 2573 channel->channel); 2574 } else { 2575 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 2576 channel->eventq_read_ptr & 2577 channel->eventq_mask); 2578 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 2579 } 2580 } 2581 2582 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 2583 { 2584 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 2585 struct efx_nic *efx = channel->efx; 2586 efx_qword_t event; 2587 int rc; 2588 2589 EFX_POPULATE_QWORD_2(event, 2590 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 2591 ESF_DZ_EV_DATA, EFX_EF10_TEST); 2592 2593 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 2594 2595 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 2596 * already swapped the data to little-endian order. 2597 */ 2598 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 2599 sizeof(efx_qword_t)); 2600 2601 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 2602 NULL, 0, NULL); 2603 if (rc != 0) 2604 goto fail; 2605 2606 return; 2607 2608 fail: 2609 WARN_ON(true); 2610 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 2611 } 2612 2613 void efx_ef10_handle_drain_event(struct efx_nic *efx) 2614 { 2615 if (atomic_dec_and_test(&efx->active_queues)) 2616 wake_up(&efx->flush_wq); 2617 2618 WARN_ON(atomic_read(&efx->active_queues) < 0); 2619 } 2620 2621 static int efx_ef10_fini_dmaq(struct efx_nic *efx) 2622 { 2623 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2624 struct efx_channel *channel; 2625 struct efx_tx_queue *tx_queue; 2626 struct efx_rx_queue *rx_queue; 2627 int pending; 2628 2629 /* If the MC has just rebooted, the TX/RX queues will have already been 2630 * torn down, but efx->active_queues needs to be set to zero. 
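 * (must_realloc_vis being set is taken as the indication of this.)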
2631 */ 2632 if (nic_data->must_realloc_vis) { 2633 atomic_set(&efx->active_queues, 0); 2634 return 0; 2635 } 2636 2637 /* Do not attempt to write to the NIC during EEH recovery */ 2638 if (efx->state != STATE_RECOVERY) { 2639 efx_for_each_channel(channel, efx) { 2640 efx_for_each_channel_rx_queue(rx_queue, channel) 2641 efx_ef10_rx_fini(rx_queue); 2642 efx_for_each_channel_tx_queue(tx_queue, channel) 2643 efx_ef10_tx_fini(tx_queue); 2644 } 2645 2646 wait_event_timeout(efx->flush_wq, 2647 atomic_read(&efx->active_queues) == 0, 2648 msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); 2649 pending = atomic_read(&efx->active_queues); 2650 if (pending) { 2651 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", 2652 pending); 2653 return -ETIMEDOUT; 2654 } 2655 } 2656 2657 return 0; 2658 } 2659 2660 static void efx_ef10_prepare_flr(struct efx_nic *efx) 2661 { 2662 atomic_set(&efx->active_queues, 0); 2663 } 2664 2665 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, 2666 const struct efx_filter_spec *right) 2667 { 2668 if ((left->match_flags ^ right->match_flags) | 2669 ((left->flags ^ right->flags) & 2670 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) 2671 return false; 2672 2673 return memcmp(&left->outer_vid, &right->outer_vid, 2674 sizeof(struct efx_filter_spec) - 2675 offsetof(struct efx_filter_spec, outer_vid)) == 0; 2676 } 2677 2678 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) 2679 { 2680 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); 2681 return jhash2((const u32 *)&spec->outer_vid, 2682 (sizeof(struct efx_filter_spec) - 2683 offsetof(struct efx_filter_spec, outer_vid)) / 4, 2684 0); 2685 /* XXX should we randomise the initval? */ 2686 } 2687 2688 /* Decide whether a filter should be exclusive or else should allow 2689 * delivery to additional recipients. Currently we decide that 2690 * filters for specific local unicast MAC and IP addresses are 2691 * exclusive. 
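 * For example, a filter on a specific unicast MAC address is exclusive,
 * whereas multicast filters may be shared by several recipients.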
2692 */ 2693 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) 2694 { 2695 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 2696 !is_multicast_ether_addr(spec->loc_mac)) 2697 return true; 2698 2699 if ((spec->match_flags & 2700 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 2701 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 2702 if (spec->ether_type == htons(ETH_P_IP) && 2703 !ipv4_is_multicast(spec->loc_host[0])) 2704 return true; 2705 if (spec->ether_type == htons(ETH_P_IPV6) && 2706 ((const u8 *)spec->loc_host)[0] != 0xff) 2707 return true; 2708 } 2709 2710 return false; 2711 } 2712 2713 static struct efx_filter_spec * 2714 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, 2715 unsigned int filter_idx) 2716 { 2717 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & 2718 ~EFX_EF10_FILTER_FLAGS); 2719 } 2720 2721 static unsigned int 2722 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, 2723 unsigned int filter_idx) 2724 { 2725 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; 2726 } 2727 2728 static void 2729 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, 2730 unsigned int filter_idx, 2731 const struct efx_filter_spec *spec, 2732 unsigned int flags) 2733 { 2734 table->entry[filter_idx].spec = (unsigned long)spec | flags; 2735 } 2736 2737 static void efx_ef10_filter_push_prep(struct efx_nic *efx, 2738 const struct efx_filter_spec *spec, 2739 efx_dword_t *inbuf, u64 handle, 2740 bool replacing) 2741 { 2742 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2743 2744 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); 2745 2746 if (replacing) { 2747 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 2748 MC_CMD_FILTER_OP_IN_OP_REPLACE); 2749 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 2750 } else { 2751 u32 match_fields = 0; 2752 2753 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 2754 efx_ef10_filter_is_exclusive(spec) ? 2755 MC_CMD_FILTER_OP_IN_OP_INSERT : 2756 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 2757 2758 /* Convert match flags and values. Unlike almost 2759 * everything else in MCDI, these fields are in 2760 * network byte order. 2761 */ 2762 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 2763 match_fields |= 2764 is_multicast_ether_addr(spec->loc_mac) ? 
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
#define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
			match_fields |=					     \
				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
				mcdi_field ## _LBN;			     \
			BUILD_BUG_ON(					     \
				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
				sizeof(spec->gen_field));		     \
			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
			       &spec->gen_field, sizeof(spec->gen_field));   \
		}
		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
#undef COPY_FIELD
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			       match_fields);
	}

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       0 : spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
2812 spec->rss_context : nic_data->rx_rss_context); 2813 } 2814 2815 static int efx_ef10_filter_push(struct efx_nic *efx, 2816 const struct efx_filter_spec *spec, 2817 u64 *handle, bool replacing) 2818 { 2819 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 2820 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); 2821 int rc; 2822 2823 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); 2824 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 2825 outbuf, sizeof(outbuf), NULL); 2826 if (rc == 0) 2827 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 2828 if (rc == -ENOSPC) 2829 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 2830 return rc; 2831 } 2832 2833 static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, 2834 enum efx_filter_match_flags match_flags) 2835 { 2836 unsigned int match_pri; 2837 2838 for (match_pri = 0; 2839 match_pri < table->rx_match_count; 2840 match_pri++) 2841 if (table->rx_match_flags[match_pri] == match_flags) 2842 return match_pri; 2843 2844 return -EPROTONOSUPPORT; 2845 } 2846 2847 static s32 efx_ef10_filter_insert(struct efx_nic *efx, 2848 struct efx_filter_spec *spec, 2849 bool replace_equal) 2850 { 2851 struct efx_ef10_filter_table *table = efx->filter_state; 2852 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 2853 struct efx_filter_spec *saved_spec; 2854 unsigned int match_pri, hash; 2855 unsigned int priv_flags; 2856 bool replacing = false; 2857 int ins_index = -1; 2858 DEFINE_WAIT(wait); 2859 bool is_mc_recip; 2860 s32 rc; 2861 2862 /* For now, only support RX filters */ 2863 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 2864 EFX_FILTER_FLAG_RX) 2865 return -EINVAL; 2866 2867 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); 2868 if (rc < 0) 2869 return rc; 2870 match_pri = rc; 2871 2872 hash = efx_ef10_filter_hash(spec); 2873 is_mc_recip = efx_filter_is_mc_recipient(spec); 2874 if (is_mc_recip) 2875 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 2876 2877 /* Find any existing filters with the same match tuple or 2878 * else a free slot to insert at. If any of them are busy, 2879 * we have to wait and retry. 
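 * The table is probed open-addressing style from the jhash of the
 * match fields, for at most EFX_EF10_FILTER_SEARCH_LIMIT slots.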
2880 */ 2881 for (;;) { 2882 unsigned int depth = 1; 2883 unsigned int i; 2884 2885 spin_lock_bh(&efx->filter_lock); 2886 2887 for (;;) { 2888 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 2889 saved_spec = efx_ef10_filter_entry_spec(table, i); 2890 2891 if (!saved_spec) { 2892 if (ins_index < 0) 2893 ins_index = i; 2894 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 2895 if (table->entry[i].spec & 2896 EFX_EF10_FILTER_FLAG_BUSY) 2897 break; 2898 if (spec->priority < saved_spec->priority && 2899 spec->priority != EFX_FILTER_PRI_AUTO) { 2900 rc = -EPERM; 2901 goto out_unlock; 2902 } 2903 if (!is_mc_recip) { 2904 /* This is the only one */ 2905 if (spec->priority == 2906 saved_spec->priority && 2907 !replace_equal) { 2908 rc = -EEXIST; 2909 goto out_unlock; 2910 } 2911 ins_index = i; 2912 goto found; 2913 } else if (spec->priority > 2914 saved_spec->priority || 2915 (spec->priority == 2916 saved_spec->priority && 2917 replace_equal)) { 2918 if (ins_index < 0) 2919 ins_index = i; 2920 else 2921 __set_bit(depth, mc_rem_map); 2922 } 2923 } 2924 2925 /* Once we reach the maximum search depth, use 2926 * the first suitable slot or return -EBUSY if 2927 * there was none 2928 */ 2929 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 2930 if (ins_index < 0) { 2931 rc = -EBUSY; 2932 goto out_unlock; 2933 } 2934 goto found; 2935 } 2936 2937 ++depth; 2938 } 2939 2940 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 2941 spin_unlock_bh(&efx->filter_lock); 2942 schedule(); 2943 } 2944 2945 found: 2946 /* Create a software table entry if necessary, and mark it 2947 * busy. We might yet fail to insert, but any attempt to 2948 * insert a conflicting filter while we're waiting for the 2949 * firmware must find the busy entry. 2950 */ 2951 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 2952 if (saved_spec) { 2953 if (spec->priority == EFX_FILTER_PRI_AUTO && 2954 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 2955 /* Just make sure it won't be removed */ 2956 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 2957 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 2958 table->entry[ins_index].spec &= 2959 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 2960 rc = ins_index; 2961 goto out_unlock; 2962 } 2963 replacing = true; 2964 priv_flags = efx_ef10_filter_entry_flags(table, ins_index); 2965 } else { 2966 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 2967 if (!saved_spec) { 2968 rc = -ENOMEM; 2969 goto out_unlock; 2970 } 2971 *saved_spec = *spec; 2972 priv_flags = 0; 2973 } 2974 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 2975 priv_flags | EFX_EF10_FILTER_FLAG_BUSY); 2976 2977 /* Mark lower-priority multicast recipients busy prior to removal */ 2978 if (is_mc_recip) { 2979 unsigned int depth, i; 2980 2981 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 2982 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 2983 if (test_bit(depth, mc_rem_map)) 2984 table->entry[i].spec |= 2985 EFX_EF10_FILTER_FLAG_BUSY; 2986 } 2987 } 2988 2989 spin_unlock_bh(&efx->filter_lock); 2990 2991 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, 2992 replacing); 2993 2994 /* Finalise the software table entry */ 2995 spin_lock_bh(&efx->filter_lock); 2996 if (rc == 0) { 2997 if (replacing) { 2998 /* Update the fields that may differ */ 2999 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 3000 saved_spec->flags |= 3001 EFX_FILTER_FLAG_RX_OVER_AUTO; 3002 saved_spec->priority = spec->priority; 3003 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 3004 saved_spec->flags |= 
spec->flags; 3005 saved_spec->rss_context = spec->rss_context; 3006 saved_spec->dmaq_id = spec->dmaq_id; 3007 } 3008 } else if (!replacing) { 3009 kfree(saved_spec); 3010 saved_spec = NULL; 3011 } 3012 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); 3013 3014 /* Remove and finalise entries for lower-priority multicast 3015 * recipients 3016 */ 3017 if (is_mc_recip) { 3018 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 3019 unsigned int depth, i; 3020 3021 memset(inbuf, 0, sizeof(inbuf)); 3022 3023 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 3024 if (!test_bit(depth, mc_rem_map)) 3025 continue; 3026 3027 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 3028 saved_spec = efx_ef10_filter_entry_spec(table, i); 3029 priv_flags = efx_ef10_filter_entry_flags(table, i); 3030 3031 if (rc == 0) { 3032 spin_unlock_bh(&efx->filter_lock); 3033 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 3034 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 3035 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 3036 table->entry[i].handle); 3037 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 3038 inbuf, sizeof(inbuf), 3039 NULL, 0, NULL); 3040 spin_lock_bh(&efx->filter_lock); 3041 } 3042 3043 if (rc == 0) { 3044 kfree(saved_spec); 3045 saved_spec = NULL; 3046 priv_flags = 0; 3047 } else { 3048 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; 3049 } 3050 efx_ef10_filter_set_entry(table, i, saved_spec, 3051 priv_flags); 3052 } 3053 } 3054 3055 /* If successful, return the inserted filter ID */ 3056 if (rc == 0) 3057 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; 3058 3059 wake_up_all(&table->waitq); 3060 out_unlock: 3061 spin_unlock_bh(&efx->filter_lock); 3062 finish_wait(&table->waitq, &wait); 3063 return rc; 3064 } 3065 3066 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 3067 { 3068 /* no need to do anything here on EF10 */ 3069 } 3070 3071 /* Remove a filter. 3072 * If !by_index, remove by ID 3073 * If by_index, remove by index 3074 * Filter ID may come from userland and must be range-checked. 3075 */ 3076 static int efx_ef10_filter_remove_internal(struct efx_nic *efx, 3077 unsigned int priority_mask, 3078 u32 filter_id, bool by_index) 3079 { 3080 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; 3081 struct efx_ef10_filter_table *table = efx->filter_state; 3082 MCDI_DECLARE_BUF(inbuf, 3083 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 3084 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 3085 struct efx_filter_spec *spec; 3086 DEFINE_WAIT(wait); 3087 int rc; 3088 3089 /* Find the software table entry and mark it busy. Don't 3090 * remove it yet; any attempt to update while we're waiting 3091 * for the firmware must find the busy entry. 
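 * The BUSY flag is cleared again, and waiters woken, once the firmware
 * operation has completed.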
3092 */ 3093 for (;;) { 3094 spin_lock_bh(&efx->filter_lock); 3095 if (!(table->entry[filter_idx].spec & 3096 EFX_EF10_FILTER_FLAG_BUSY)) 3097 break; 3098 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 3099 spin_unlock_bh(&efx->filter_lock); 3100 schedule(); 3101 } 3102 3103 spec = efx_ef10_filter_entry_spec(table, filter_idx); 3104 if (!spec || 3105 (!by_index && 3106 efx_ef10_filter_rx_match_pri(table, spec->match_flags) != 3107 filter_id / HUNT_FILTER_TBL_ROWS)) { 3108 rc = -ENOENT; 3109 goto out_unlock; 3110 } 3111 3112 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 3113 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 3114 /* Just remove flags */ 3115 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 3116 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 3117 rc = 0; 3118 goto out_unlock; 3119 } 3120 3121 if (!(priority_mask & (1U << spec->priority))) { 3122 rc = -ENOENT; 3123 goto out_unlock; 3124 } 3125 3126 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 3127 spin_unlock_bh(&efx->filter_lock); 3128 3129 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 3130 /* Reset to an automatic filter */ 3131 3132 struct efx_filter_spec new_spec = *spec; 3133 3134 new_spec.priority = EFX_FILTER_PRI_AUTO; 3135 new_spec.flags = (EFX_FILTER_FLAG_RX | 3136 EFX_FILTER_FLAG_RX_RSS); 3137 new_spec.dmaq_id = 0; 3138 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 3139 rc = efx_ef10_filter_push(efx, &new_spec, 3140 &table->entry[filter_idx].handle, 3141 true); 3142 3143 spin_lock_bh(&efx->filter_lock); 3144 if (rc == 0) 3145 *spec = new_spec; 3146 } else { 3147 /* Really remove the filter */ 3148 3149 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 3150 efx_ef10_filter_is_exclusive(spec) ? 3151 MC_CMD_FILTER_OP_IN_OP_REMOVE : 3152 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 3153 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 3154 table->entry[filter_idx].handle); 3155 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 3156 inbuf, sizeof(inbuf), NULL, 0, NULL); 3157 3158 spin_lock_bh(&efx->filter_lock); 3159 if (rc == 0) { 3160 kfree(spec); 3161 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 3162 } 3163 } 3164 3165 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 3166 wake_up_all(&table->waitq); 3167 out_unlock: 3168 spin_unlock_bh(&efx->filter_lock); 3169 finish_wait(&table->waitq, &wait); 3170 return rc; 3171 } 3172 3173 static int efx_ef10_filter_remove_safe(struct efx_nic *efx, 3174 enum efx_filter_priority priority, 3175 u32 filter_id) 3176 { 3177 return efx_ef10_filter_remove_internal(efx, 1U << priority, 3178 filter_id, false); 3179 } 3180 3181 static int efx_ef10_filter_get_safe(struct efx_nic *efx, 3182 enum efx_filter_priority priority, 3183 u32 filter_id, struct efx_filter_spec *spec) 3184 { 3185 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; 3186 struct efx_ef10_filter_table *table = efx->filter_state; 3187 const struct efx_filter_spec *saved_spec; 3188 int rc; 3189 3190 spin_lock_bh(&efx->filter_lock); 3191 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); 3192 if (saved_spec && saved_spec->priority == priority && 3193 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == 3194 filter_id / HUNT_FILTER_TBL_ROWS) { 3195 *spec = *saved_spec; 3196 rc = 0; 3197 } else { 3198 rc = -ENOENT; 3199 } 3200 spin_unlock_bh(&efx->filter_lock); 3201 return rc; 3202 } 3203 3204 static int efx_ef10_filter_clear_rx(struct efx_nic *efx, 3205 enum efx_filter_priority priority) 3206 { 3207 unsigned int priority_mask; 3208 unsigned int i; 3209 
int rc; 3210 3211 priority_mask = (((1U << (priority + 1)) - 1) & 3212 ~(1U << EFX_FILTER_PRI_AUTO)); 3213 3214 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 3215 rc = efx_ef10_filter_remove_internal(efx, priority_mask, 3216 i, true); 3217 if (rc && rc != -ENOENT) 3218 return rc; 3219 } 3220 3221 return 0; 3222 } 3223 3224 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, 3225 enum efx_filter_priority priority) 3226 { 3227 struct efx_ef10_filter_table *table = efx->filter_state; 3228 unsigned int filter_idx; 3229 s32 count = 0; 3230 3231 spin_lock_bh(&efx->filter_lock); 3232 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 3233 if (table->entry[filter_idx].spec && 3234 efx_ef10_filter_entry_spec(table, filter_idx)->priority == 3235 priority) 3236 ++count; 3237 } 3238 spin_unlock_bh(&efx->filter_lock); 3239 return count; 3240 } 3241 3242 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) 3243 { 3244 struct efx_ef10_filter_table *table = efx->filter_state; 3245 3246 return table->rx_match_count * HUNT_FILTER_TBL_ROWS; 3247 } 3248 3249 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, 3250 enum efx_filter_priority priority, 3251 u32 *buf, u32 size) 3252 { 3253 struct efx_ef10_filter_table *table = efx->filter_state; 3254 struct efx_filter_spec *spec; 3255 unsigned int filter_idx; 3256 s32 count = 0; 3257 3258 spin_lock_bh(&efx->filter_lock); 3259 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 3260 spec = efx_ef10_filter_entry_spec(table, filter_idx); 3261 if (spec && spec->priority == priority) { 3262 if (count == size) { 3263 count = -EMSGSIZE; 3264 break; 3265 } 3266 buf[count++] = (efx_ef10_filter_rx_match_pri( 3267 table, spec->match_flags) * 3268 HUNT_FILTER_TBL_ROWS + 3269 filter_idx); 3270 } 3271 } 3272 spin_unlock_bh(&efx->filter_lock); 3273 return count; 3274 } 3275 3276 #ifdef CONFIG_RFS_ACCEL 3277 3278 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; 3279 3280 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, 3281 struct efx_filter_spec *spec) 3282 { 3283 struct efx_ef10_filter_table *table = efx->filter_state; 3284 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 3285 struct efx_filter_spec *saved_spec; 3286 unsigned int hash, i, depth = 1; 3287 bool replacing = false; 3288 int ins_index = -1; 3289 u64 cookie; 3290 s32 rc; 3291 3292 /* Must be an RX filter without RSS and not for a multicast 3293 * destination address (RFS only works for connected sockets). 3294 * These restrictions allow us to pass only a tiny amount of 3295 * data through to the completion function. 3296 */ 3297 EFX_WARN_ON_PARANOID(spec->flags != 3298 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); 3299 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); 3300 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); 3301 3302 hash = efx_ef10_filter_hash(spec); 3303 3304 spin_lock_bh(&efx->filter_lock); 3305 3306 /* Find any existing filter with the same match tuple or else 3307 * a free slot to insert at. If an existing filter is busy, 3308 * we have to give up. 
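 * Note that this path never sleeps: the allocation below uses
 * GFP_ATOMIC and the MCDI request is issued asynchronously.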
3309 */ 3310 for (;;) { 3311 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 3312 saved_spec = efx_ef10_filter_entry_spec(table, i); 3313 3314 if (!saved_spec) { 3315 if (ins_index < 0) 3316 ins_index = i; 3317 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 3318 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { 3319 rc = -EBUSY; 3320 goto fail_unlock; 3321 } 3322 if (spec->priority < saved_spec->priority) { 3323 rc = -EPERM; 3324 goto fail_unlock; 3325 } 3326 ins_index = i; 3327 break; 3328 } 3329 3330 /* Once we reach the maximum search depth, use the 3331 * first suitable slot or return -EBUSY if there was 3332 * none 3333 */ 3334 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 3335 if (ins_index < 0) { 3336 rc = -EBUSY; 3337 goto fail_unlock; 3338 } 3339 break; 3340 } 3341 3342 ++depth; 3343 } 3344 3345 /* Create a software table entry if necessary, and mark it 3346 * busy. We might yet fail to insert, but any attempt to 3347 * insert a conflicting filter while we're waiting for the 3348 * firmware must find the busy entry. 3349 */ 3350 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 3351 if (saved_spec) { 3352 replacing = true; 3353 } else { 3354 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 3355 if (!saved_spec) { 3356 rc = -ENOMEM; 3357 goto fail_unlock; 3358 } 3359 *saved_spec = *spec; 3360 } 3361 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 3362 EFX_EF10_FILTER_FLAG_BUSY); 3363 3364 spin_unlock_bh(&efx->filter_lock); 3365 3366 /* Pack up the variables needed on completion */ 3367 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; 3368 3369 efx_ef10_filter_push_prep(efx, spec, inbuf, 3370 table->entry[ins_index].handle, replacing); 3371 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 3372 MC_CMD_FILTER_OP_OUT_LEN, 3373 efx_ef10_filter_rfs_insert_complete, cookie); 3374 3375 return ins_index; 3376 3377 fail_unlock: 3378 spin_unlock_bh(&efx->filter_lock); 3379 return rc; 3380 } 3381 3382 static void 3383 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, 3384 int rc, efx_dword_t *outbuf, 3385 size_t outlen_actual) 3386 { 3387 struct efx_ef10_filter_table *table = efx->filter_state; 3388 unsigned int ins_index, dmaq_id; 3389 struct efx_filter_spec *spec; 3390 bool replacing; 3391 3392 /* Unpack the cookie */ 3393 replacing = cookie >> 31; 3394 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); 3395 dmaq_id = cookie & 0xffff; 3396 3397 spin_lock_bh(&efx->filter_lock); 3398 spec = efx_ef10_filter_entry_spec(table, ins_index); 3399 if (rc == 0) { 3400 table->entry[ins_index].handle = 3401 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 3402 if (replacing) 3403 spec->dmaq_id = dmaq_id; 3404 } else if (!replacing) { 3405 kfree(spec); 3406 spec = NULL; 3407 } 3408 efx_ef10_filter_set_entry(table, ins_index, spec, 0); 3409 spin_unlock_bh(&efx->filter_lock); 3410 3411 wake_up_all(&table->waitq); 3412 } 3413 3414 static void 3415 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 3416 unsigned long filter_idx, 3417 int rc, efx_dword_t *outbuf, 3418 size_t outlen_actual); 3419 3420 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 3421 unsigned int filter_idx) 3422 { 3423 struct efx_ef10_filter_table *table = efx->filter_state; 3424 struct efx_filter_spec *spec = 3425 efx_ef10_filter_entry_spec(table, filter_idx); 3426 MCDI_DECLARE_BUF(inbuf, 3427 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 3428 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 3429 3430 if (!spec || 3431 (table->entry[filter_idx].spec 
& EFX_EF10_FILTER_FLAG_BUSY) || 3432 spec->priority != EFX_FILTER_PRI_HINT || 3433 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 3434 flow_id, filter_idx)) 3435 return false; 3436 3437 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 3438 MC_CMD_FILTER_OP_IN_OP_REMOVE); 3439 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 3440 table->entry[filter_idx].handle); 3441 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, 3442 efx_ef10_filter_rfs_expire_complete, filter_idx)) 3443 return false; 3444 3445 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 3446 return true; 3447 } 3448 3449 static void 3450 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 3451 unsigned long filter_idx, 3452 int rc, efx_dword_t *outbuf, 3453 size_t outlen_actual) 3454 { 3455 struct efx_ef10_filter_table *table = efx->filter_state; 3456 struct efx_filter_spec *spec = 3457 efx_ef10_filter_entry_spec(table, filter_idx); 3458 3459 spin_lock_bh(&efx->filter_lock); 3460 if (rc == 0) { 3461 kfree(spec); 3462 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 3463 } 3464 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 3465 wake_up_all(&table->waitq); 3466 spin_unlock_bh(&efx->filter_lock); 3467 } 3468 3469 #endif /* CONFIG_RFS_ACCEL */ 3470 3471 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) 3472 { 3473 int match_flags = 0; 3474 3475 #define MAP_FLAG(gen_flag, mcdi_field) { \ 3476 u32 old_mcdi_flags = mcdi_flags; \ 3477 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 3478 mcdi_field ## _LBN); \ 3479 if (mcdi_flags != old_mcdi_flags) \ 3480 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ 3481 } 3482 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); 3483 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); 3484 MAP_FLAG(REM_HOST, SRC_IP); 3485 MAP_FLAG(LOC_HOST, DST_IP); 3486 MAP_FLAG(REM_MAC, SRC_MAC); 3487 MAP_FLAG(REM_PORT, SRC_PORT); 3488 MAP_FLAG(LOC_MAC, DST_MAC); 3489 MAP_FLAG(LOC_PORT, DST_PORT); 3490 MAP_FLAG(ETHER_TYPE, ETHER_TYPE); 3491 MAP_FLAG(INNER_VID, INNER_VLAN); 3492 MAP_FLAG(OUTER_VID, OUTER_VLAN); 3493 MAP_FLAG(IP_PROTO, IP_PROTO); 3494 #undef MAP_FLAG 3495 3496 /* Did we map them all? 
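 * Any flag still set is a match type the driver does not understand.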
*/ 3497 if (mcdi_flags) 3498 return -EINVAL; 3499 3500 return match_flags; 3501 } 3502 3503 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 3504 { 3505 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); 3506 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); 3507 unsigned int pd_match_pri, pd_match_count; 3508 struct efx_ef10_filter_table *table; 3509 size_t outlen; 3510 int rc; 3511 3512 table = kzalloc(sizeof(*table), GFP_KERNEL); 3513 if (!table) 3514 return -ENOMEM; 3515 3516 /* Find out which RX filter types are supported, and their priorities */ 3517 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, 3518 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); 3519 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, 3520 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), 3521 &outlen); 3522 if (rc) 3523 goto fail; 3524 pd_match_count = MCDI_VAR_ARRAY_LEN( 3525 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); 3526 table->rx_match_count = 0; 3527 3528 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { 3529 u32 mcdi_flags = 3530 MCDI_ARRAY_DWORD( 3531 outbuf, 3532 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, 3533 pd_match_pri); 3534 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); 3535 if (rc < 0) { 3536 netif_dbg(efx, probe, efx->net_dev, 3537 "%s: fw flags %#x pri %u not supported in driver\n", 3538 __func__, mcdi_flags, pd_match_pri); 3539 } else { 3540 netif_dbg(efx, probe, efx->net_dev, 3541 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 3542 __func__, mcdi_flags, pd_match_pri, 3543 rc, table->rx_match_count); 3544 table->rx_match_flags[table->rx_match_count++] = rc; 3545 } 3546 } 3547 3548 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); 3549 if (!table->entry) { 3550 rc = -ENOMEM; 3551 goto fail; 3552 } 3553 3554 efx->filter_state = table; 3555 init_waitqueue_head(&table->waitq); 3556 return 0; 3557 3558 fail: 3559 kfree(table); 3560 return rc; 3561 } 3562 3563 /* Caller must hold efx->filter_sem for read if race against 3564 * efx_ef10_filter_table_remove() is possible 3565 */ 3566 static void efx_ef10_filter_table_restore(struct efx_nic *efx) 3567 { 3568 struct efx_ef10_filter_table *table = efx->filter_state; 3569 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3570 struct efx_filter_spec *spec; 3571 unsigned int filter_idx; 3572 bool failed = false; 3573 int rc; 3574 3575 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 3576 3577 if (!nic_data->must_restore_filters) 3578 return; 3579 3580 if (!table) 3581 return; 3582 3583 spin_lock_bh(&efx->filter_lock); 3584 3585 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 3586 spec = efx_ef10_filter_entry_spec(table, filter_idx); 3587 if (!spec) 3588 continue; 3589 3590 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 3591 spin_unlock_bh(&efx->filter_lock); 3592 3593 rc = efx_ef10_filter_push(efx, spec, 3594 &table->entry[filter_idx].handle, 3595 false); 3596 if (rc) 3597 failed = true; 3598 3599 spin_lock_bh(&efx->filter_lock); 3600 if (rc) { 3601 kfree(spec); 3602 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 3603 } else { 3604 table->entry[filter_idx].spec &= 3605 ~EFX_EF10_FILTER_FLAG_BUSY; 3606 } 3607 } 3608 3609 spin_unlock_bh(&efx->filter_lock); 3610 3611 if (failed) 3612 netif_err(efx, hw, efx->net_dev, 3613 "unable to restore all filters\n"); 3614 else 3615 nic_data->must_restore_filters = false; 3616 } 3617 3618 /* Caller must hold efx->filter_sem for write */ 3619 static void 
efx_ef10_filter_table_remove(struct efx_nic *efx) 3620 { 3621 struct efx_ef10_filter_table *table = efx->filter_state; 3622 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 3623 struct efx_filter_spec *spec; 3624 unsigned int filter_idx; 3625 int rc; 3626 3627 efx->filter_state = NULL; 3628 if (!table) 3629 return; 3630 3631 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 3632 spec = efx_ef10_filter_entry_spec(table, filter_idx); 3633 if (!spec) 3634 continue; 3635 3636 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 3637 efx_ef10_filter_is_exclusive(spec) ? 3638 MC_CMD_FILTER_OP_IN_OP_REMOVE : 3639 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 3640 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 3641 table->entry[filter_idx].handle); 3642 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 3643 NULL, 0, NULL); 3644 if (rc) 3645 netdev_WARN(efx->net_dev, 3646 "filter_idx=%#x handle=%#llx\n", 3647 filter_idx, 3648 table->entry[filter_idx].handle); 3649 kfree(spec); 3650 } 3651 3652 vfree(table->entry); 3653 kfree(table); 3654 } 3655 3656 /* Caller must hold efx->filter_sem for read if race against 3657 * efx_ef10_filter_table_remove() is possible 3658 */ 3659 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) 3660 { 3661 struct efx_ef10_filter_table *table = efx->filter_state; 3662 struct net_device *net_dev = efx->net_dev; 3663 struct efx_filter_spec spec; 3664 bool remove_failed = false; 3665 struct netdev_hw_addr *uc; 3666 struct netdev_hw_addr *mc; 3667 unsigned int filter_idx; 3668 int i, n, rc; 3669 3670 if (!efx_dev_registered(efx)) 3671 return; 3672 3673 if (!table) 3674 return; 3675 3676 /* Mark old filters that may need to be removed */ 3677 spin_lock_bh(&efx->filter_lock); 3678 n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; 3679 for (i = 0; i < n; i++) { 3680 filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; 3681 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; 3682 } 3683 n = table->dev_mc_count < 0 ? 
1 : table->dev_mc_count; 3684 for (i = 0; i < n; i++) { 3685 filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; 3686 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; 3687 } 3688 spin_unlock_bh(&efx->filter_lock); 3689 3690 /* Copy/convert the address lists; add the primary station 3691 * address and broadcast address 3692 */ 3693 netif_addr_lock_bh(net_dev); 3694 if (net_dev->flags & IFF_PROMISC || 3695 netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { 3696 table->dev_uc_count = -1; 3697 } else { 3698 table->dev_uc_count = 1 + netdev_uc_count(net_dev); 3699 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 3700 i = 1; 3701 netdev_for_each_uc_addr(uc, net_dev) { 3702 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 3703 i++; 3704 } 3705 } 3706 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || 3707 netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { 3708 table->dev_mc_count = -1; 3709 } else { 3710 table->dev_mc_count = 1 + netdev_mc_count(net_dev); 3711 eth_broadcast_addr(table->dev_mc_list[0].addr); 3712 i = 1; 3713 netdev_for_each_mc_addr(mc, net_dev) { 3714 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); 3715 i++; 3716 } 3717 } 3718 netif_addr_unlock_bh(net_dev); 3719 3720 /* Insert/renew unicast filters */ 3721 if (table->dev_uc_count >= 0) { 3722 for (i = 0; i < table->dev_uc_count; i++) { 3723 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3724 EFX_FILTER_FLAG_RX_RSS, 3725 0); 3726 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3727 table->dev_uc_list[i].addr); 3728 rc = efx_ef10_filter_insert(efx, &spec, true); 3729 if (rc < 0) { 3730 /* Fall back to unicast-promisc */ 3731 while (i--) 3732 efx_ef10_filter_remove_safe( 3733 efx, EFX_FILTER_PRI_AUTO, 3734 table->dev_uc_list[i].id); 3735 table->dev_uc_count = -1; 3736 break; 3737 } 3738 table->dev_uc_list[i].id = rc; 3739 } 3740 } 3741 if (table->dev_uc_count < 0) { 3742 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3743 EFX_FILTER_FLAG_RX_RSS, 3744 0); 3745 efx_filter_set_uc_def(&spec); 3746 rc = efx_ef10_filter_insert(efx, &spec, true); 3747 if (rc < 0) { 3748 WARN_ON(1); 3749 table->dev_uc_count = 0; 3750 } else { 3751 table->dev_uc_list[0].id = rc; 3752 } 3753 } 3754 3755 /* Insert/renew multicast filters */ 3756 if (table->dev_mc_count >= 0) { 3757 for (i = 0; i < table->dev_mc_count; i++) { 3758 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3759 EFX_FILTER_FLAG_RX_RSS, 3760 0); 3761 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3762 table->dev_mc_list[i].addr); 3763 rc = efx_ef10_filter_insert(efx, &spec, true); 3764 if (rc < 0) { 3765 /* Fall back to multicast-promisc */ 3766 while (i--) 3767 efx_ef10_filter_remove_safe( 3768 efx, EFX_FILTER_PRI_AUTO, 3769 table->dev_mc_list[i].id); 3770 table->dev_mc_count = -1; 3771 break; 3772 } 3773 table->dev_mc_list[i].id = rc; 3774 } 3775 } 3776 if (table->dev_mc_count < 0) { 3777 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3778 EFX_FILTER_FLAG_RX_RSS, 3779 0); 3780 efx_filter_set_mc_def(&spec); 3781 rc = efx_ef10_filter_insert(efx, &spec, true); 3782 if (rc < 0) { 3783 WARN_ON(1); 3784 table->dev_mc_count = 0; 3785 } else { 3786 table->dev_mc_list[0].id = rc; 3787 } 3788 } 3789 3790 /* Remove filters that weren't renewed. Since nothing else 3791 * changes the AUTO_OLD flag or removes these filters, we 3792 * don't need to hold the filter_lock while scanning for 3793 * these filters. 
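 * Anything still flagged AUTO_OLD at this point was not re-inserted by
 * the loops above and is removed now.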
3794 */ 3795 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 3796 if (ACCESS_ONCE(table->entry[i].spec) & 3797 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 3798 if (efx_ef10_filter_remove_internal( 3799 efx, 1U << EFX_FILTER_PRI_AUTO, 3800 i, true) < 0) 3801 remove_failed = true; 3802 } 3803 } 3804 WARN_ON(remove_failed); 3805 } 3806 3807 static int efx_ef10_set_mac_address(struct efx_nic *efx) 3808 { 3809 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 3810 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3811 bool was_enabled = efx->port_enabled; 3812 int rc; 3813 3814 efx_device_detach_sync(efx); 3815 efx_net_stop(efx->net_dev); 3816 down_write(&efx->filter_sem); 3817 efx_ef10_filter_table_remove(efx); 3818 3819 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 3820 efx->net_dev->dev_addr); 3821 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3822 nic_data->vport_id); 3823 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3824 sizeof(inbuf), NULL, 0, NULL); 3825 3826 efx_ef10_filter_table_probe(efx); 3827 up_write(&efx->filter_sem); 3828 if (was_enabled) 3829 efx_net_open(efx->net_dev); 3830 netif_device_attach(efx->net_dev); 3831 3832 #if !defined(CONFIG_SFC_SRIOV) 3833 if (rc == -EPERM) 3834 netif_err(efx, drv, efx->net_dev, 3835 "Cannot change MAC address; use sfboot to enable mac-spoofing" 3836 " on this interface\n"); 3837 #else 3838 if (rc == -EPERM) { 3839 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3840 3841 /* Switch to PF and change MAC address on vport */ 3842 if (efx->pci_dev->is_virtfn && pci_dev_pf) { 3843 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3844 3845 if (!efx_ef10_sriov_set_vf_mac(efx_pf, 3846 nic_data->vf_index, 3847 efx->net_dev->dev_addr)) 3848 return 0; 3849 } 3850 netif_err(efx, drv, efx->net_dev, 3851 "Cannot change MAC address; use sfboot to enable mac-spoofing" 3852 " on this interface\n"); 3853 } else if (efx->pci_dev->is_virtfn) { 3854 /* Successfully changed by VF (with MAC spoofing), so update the 3855 * parent PF if possible. 3856 */ 3857 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3858 3859 if (pci_dev_pf) { 3860 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3861 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 3862 unsigned int i; 3863 3864 for (i = 0; i < efx_pf->vf_count; ++i) { 3865 struct ef10_vf *vf = nic_data->vf + i; 3866 3867 if (vf->efx == efx) { 3868 ether_addr_copy(vf->mac, 3869 efx->net_dev->dev_addr); 3870 return 0; 3871 } 3872 } 3873 } 3874 } 3875 #endif 3876 return rc; 3877 } 3878 3879 static int efx_ef10_mac_reconfigure(struct efx_nic *efx) 3880 { 3881 efx_ef10_filter_sync_rx_mode(efx); 3882 3883 return efx_mcdi_set_mac(efx); 3884 } 3885 3886 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) 3887 { 3888 efx_ef10_filter_sync_rx_mode(efx); 3889 3890 return 0; 3891 } 3892 3893 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 3894 { 3895 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 3896 3897 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 3898 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 3899 NULL, 0, NULL); 3900 } 3901 3902 /* MC BISTs follow a different poll mechanism to phy BISTs. 3903 * The BIST is done in the poll handler on the MC, and the MCDI command 3904 * will block until the BIST is done. 
3905 */ 3906 static int efx_ef10_poll_bist(struct efx_nic *efx) 3907 { 3908 int rc; 3909 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 3910 size_t outlen; 3911 u32 result; 3912 3913 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 3914 outbuf, sizeof(outbuf), &outlen); 3915 if (rc != 0) 3916 return rc; 3917 3918 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 3919 return -EIO; 3920 3921 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 3922 switch (result) { 3923 case MC_CMD_POLL_BIST_PASSED: 3924 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 3925 return 0; 3926 case MC_CMD_POLL_BIST_TIMEOUT: 3927 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 3928 return -EIO; 3929 case MC_CMD_POLL_BIST_FAILED: 3930 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 3931 return -EIO; 3932 default: 3933 netif_err(efx, hw, efx->net_dev, 3934 "BIST returned unknown result %u", result); 3935 return -EIO; 3936 } 3937 } 3938 3939 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 3940 { 3941 int rc; 3942 3943 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 3944 3945 rc = efx_ef10_start_bist(efx, bist_type); 3946 if (rc != 0) 3947 return rc; 3948 3949 return efx_ef10_poll_bist(efx); 3950 } 3951 3952 static int 3953 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 3954 { 3955 int rc, rc2; 3956 3957 efx_reset_down(efx, RESET_TYPE_WORLD); 3958 3959 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 3960 NULL, 0, NULL, 0, NULL); 3961 if (rc != 0) 3962 goto out; 3963 3964 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 3965 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 3966 3967 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 3968 3969 out: 3970 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 3971 return rc ? 
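/* Error-precedence idiom: rc holds the first failure, either from
 * enabling offline BIST or from the final MC reset, while rc2 is the
 * result of efx_reset_up() bringing the port back.  The earlier error
 * is the more informative one, so it wins; only when the BIST sequence
 * itself succeeded is the bring-up result reported.  An equivalent,
 * more explicit form (illustration only):
 *
 *	if (rc)
 *		return rc;
 *	return rc2;
 *
 * The individual BIST outcomes are reported separately through
 * tests->memory and tests->registers (-1 fail / 1 pass) and do not by
 * themselves fail this call.
 */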
rc : rc2; 3972 } 3973 3974 #ifdef CONFIG_SFC_MTD 3975 3976 struct efx_ef10_nvram_type_info { 3977 u16 type, type_mask; 3978 u8 port; 3979 const char *name; 3980 }; 3981 3982 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 3983 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 3984 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 3985 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 3986 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 3987 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 3988 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 3989 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 3990 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 3991 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 3992 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 3993 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 3994 }; 3995 3996 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 3997 struct efx_mcdi_mtd_partition *part, 3998 unsigned int type) 3999 { 4000 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 4001 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 4002 const struct efx_ef10_nvram_type_info *info; 4003 size_t size, erase_size, outlen; 4004 bool protected; 4005 int rc; 4006 4007 for (info = efx_ef10_nvram_types; ; info++) { 4008 if (info == 4009 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) 4010 return -ENODEV; 4011 if ((type & ~info->type_mask) == info->type) 4012 break; 4013 } 4014 if (info->port != efx_port_num(efx)) 4015 return -ENODEV; 4016 4017 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 4018 if (rc) 4019 return rc; 4020 if (protected) 4021 return -ENODEV; /* hide it */ 4022 4023 part->nvram_type = type; 4024 4025 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 4026 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 4027 outbuf, sizeof(outbuf), &outlen); 4028 if (rc) 4029 return rc; 4030 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 4031 return -EIO; 4032 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 4033 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 4034 part->fw_subtype = MCDI_DWORD(outbuf, 4035 NVRAM_METADATA_OUT_SUBTYPE); 4036 4037 part->common.dev_type_name = "EF10 NVRAM manager"; 4038 part->common.type_name = info->name; 4039 4040 part->common.mtd.type = MTD_NORFLASH; 4041 part->common.mtd.flags = MTD_CAP_NORFLASH; 4042 part->common.mtd.size = size; 4043 part->common.mtd.erasesize = erase_size; 4044 4045 return 0; 4046 } 4047 4048 static int efx_ef10_mtd_probe(struct efx_nic *efx) 4049 { 4050 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 4051 struct efx_mcdi_mtd_partition *parts; 4052 size_t outlen, n_parts_total, i, n_parts; 4053 unsigned int type; 4054 int rc; 4055 4056 ASSERT_RTNL(); 4057 4058 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 4059 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 4060 outbuf, sizeof(outbuf), &outlen); 4061 if (rc) 4062 return rc; 4063 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 4064 return -EIO; 4065 4066 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 4067 if (n_parts_total > 4068 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 4069 return -EIO; 4070 4071 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 4072 if (!parts) 4073 return -ENOMEM; 4074 4075 n_parts = 0; 4076 for (i = 
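/* Each partition type reported by MC_CMD_NVRAM_PARTITIONS is probed
 * individually below.  efx_ef10_mtd_probe_partition() returns -ENODEV
 * for partitions that simply should not be exposed on this function
 * (wrong port, protected, or an unrecognised type), so that value is
 * skipped rather than treated as fatal; any other error aborts the
 * probe.  On success the parts[] array is handed over via
 * efx_mtd_add(), which is why it is only kfree()d on the error path.
 */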
0; i < n_parts_total; i++) { 4077 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 4078 i); 4079 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); 4080 if (rc == 0) 4081 n_parts++; 4082 else if (rc != -ENODEV) 4083 goto fail; 4084 } 4085 4086 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 4087 fail: 4088 if (rc) 4089 kfree(parts); 4090 return rc; 4091 } 4092 4093 #endif /* CONFIG_SFC_MTD */ 4094 4095 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 4096 { 4097 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 4098 } 4099 4100 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 4101 u32 host_time) {} 4102 4103 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 4104 bool temp) 4105 { 4106 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 4107 int rc; 4108 4109 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 4110 channel->sync_events_state == SYNC_EVENTS_VALID || 4111 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 4112 return 0; 4113 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 4114 4115 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 4116 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 4117 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 4118 channel->channel); 4119 4120 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 4121 inbuf, sizeof(inbuf), NULL, 0, NULL); 4122 4123 if (rc != 0) 4124 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 4125 SYNC_EVENTS_DISABLED; 4126 4127 return rc; 4128 } 4129 4130 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 4131 bool temp) 4132 { 4133 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 4134 int rc; 4135 4136 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 4137 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 4138 return 0; 4139 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 4140 channel->sync_events_state = SYNC_EVENTS_DISABLED; 4141 return 0; 4142 } 4143 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 4144 SYNC_EVENTS_DISABLED; 4145 4146 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 4147 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 4148 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 4149 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 4150 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 4151 channel->channel); 4152 4153 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 4154 inbuf, sizeof(inbuf), NULL, 0, NULL); 4155 4156 return rc; 4157 } 4158 4159 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 4160 bool temp) 4161 { 4162 int (*set)(struct efx_channel *channel, bool temp); 4163 struct efx_channel *channel; 4164 4165 set = en ? 
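/* en selects which per-channel helper runs below: subscribe or
 * unsubscribe the channel's event queue for PTP time-sync events.
 * The loop is all-or-nothing on enable -- if any channel fails to
 * subscribe, the recursive call with en = false unwinds the channels
 * that did succeed before the error is propagated.  The temp flag
 * distinguishes a temporary disable (state left as
 * SYNC_EVENTS_QUIESCENT so the subscription can be restored later)
 * from a permanent one (SYNC_EVENTS_DISABLED).
 */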
4166 efx_ef10_rx_enable_timestamping : 4167 efx_ef10_rx_disable_timestamping; 4168 4169 efx_for_each_channel(channel, efx) { 4170 int rc = set(channel, temp); 4171 if (en && rc != 0) { 4172 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 4173 return rc; 4174 } 4175 } 4176 4177 return 0; 4178 } 4179 4180 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 4181 struct hwtstamp_config *init) 4182 { 4183 return -EOPNOTSUPP; 4184 } 4185 4186 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 4187 struct hwtstamp_config *init) 4188 { 4189 int rc; 4190 4191 switch (init->rx_filter) { 4192 case HWTSTAMP_FILTER_NONE: 4193 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 4194 /* if TX timestamping is still requested then leave PTP on */ 4195 return efx_ptp_change_mode(efx, 4196 init->tx_type != HWTSTAMP_TX_OFF, 0); 4197 case HWTSTAMP_FILTER_ALL: 4198 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 4199 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 4200 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 4201 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 4202 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 4203 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 4204 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 4205 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 4206 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 4207 case HWTSTAMP_FILTER_PTP_V2_EVENT: 4208 case HWTSTAMP_FILTER_PTP_V2_SYNC: 4209 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 4210 init->rx_filter = HWTSTAMP_FILTER_ALL; 4211 rc = efx_ptp_change_mode(efx, true, 0); 4212 if (!rc) 4213 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 4214 if (rc) 4215 efx_ptp_change_mode(efx, false, 0); 4216 return rc; 4217 default: 4218 return -ERANGE; 4219 } 4220 } 4221 4222 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 4223 .is_vf = true, 4224 .mem_bar = EFX_MEM_VF_BAR, 4225 .mem_map_size = efx_ef10_mem_map_size, 4226 .probe = efx_ef10_probe_vf, 4227 .remove = efx_ef10_remove, 4228 .dimension_resources = efx_ef10_dimension_resources, 4229 .init = efx_ef10_init_nic, 4230 .fini = efx_port_dummy_op_void, 4231 .map_reset_reason = efx_ef10_map_reset_reason, 4232 .map_reset_flags = efx_ef10_map_reset_flags, 4233 .reset = efx_ef10_reset, 4234 .probe_port = efx_mcdi_port_probe, 4235 .remove_port = efx_mcdi_port_remove, 4236 .fini_dmaq = efx_ef10_fini_dmaq, 4237 .prepare_flr = efx_ef10_prepare_flr, 4238 .finish_flr = efx_port_dummy_op_void, 4239 .describe_stats = efx_ef10_describe_stats, 4240 .update_stats = efx_ef10_update_stats_vf, 4241 .start_stats = efx_port_dummy_op_void, 4242 .pull_stats = efx_port_dummy_op_void, 4243 .stop_stats = efx_port_dummy_op_void, 4244 .set_id_led = efx_mcdi_set_id_led, 4245 .push_irq_moderation = efx_ef10_push_irq_moderation, 4246 .reconfigure_mac = efx_ef10_mac_reconfigure_vf, 4247 .check_mac_fault = efx_mcdi_mac_check_fault, 4248 .reconfigure_port = efx_mcdi_port_reconfigure, 4249 .get_wol = efx_ef10_get_wol_vf, 4250 .set_wol = efx_ef10_set_wol_vf, 4251 .resume_wol = efx_port_dummy_op_void, 4252 .mcdi_request = efx_ef10_mcdi_request, 4253 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 4254 .mcdi_read_response = efx_ef10_mcdi_read_response, 4255 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4256 .irq_enable_master = efx_port_dummy_op_void, 4257 .irq_test_generate = efx_ef10_irq_test_generate, 4258 .irq_disable_non_ev = efx_port_dummy_op_void, 4259 .irq_handle_msi = efx_ef10_msi_interrupt, 4260 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4261 .tx_probe = efx_ef10_tx_probe, 4262 .tx_init = efx_ef10_tx_init, 4263 .tx_remove = efx_ef10_tx_remove, 4264 .tx_write = 
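/* The datapath hooks here and below (tx_*, rx_*, ev_* and the filter
 * operations) are the same functions used by the PF variant further
 * down; a VF drives its own queues directly.  Hooks that need PF
 * privilege or differ per function -- statistics, WoL, MTD access,
 * PTP host time, RSS configuration, among others -- point at
 * VF-specific or dummy implementations in this table instead.
 */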
efx_ef10_tx_write, 4265 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, 4266 .rx_probe = efx_ef10_rx_probe, 4267 .rx_init = efx_ef10_rx_init, 4268 .rx_remove = efx_ef10_rx_remove, 4269 .rx_write = efx_ef10_rx_write, 4270 .rx_defer_refill = efx_ef10_rx_defer_refill, 4271 .ev_probe = efx_ef10_ev_probe, 4272 .ev_init = efx_ef10_ev_init, 4273 .ev_fini = efx_ef10_ev_fini, 4274 .ev_remove = efx_ef10_ev_remove, 4275 .ev_process = efx_ef10_ev_process, 4276 .ev_read_ack = efx_ef10_ev_read_ack, 4277 .ev_test_generate = efx_ef10_ev_test_generate, 4278 .filter_table_probe = efx_ef10_filter_table_probe, 4279 .filter_table_restore = efx_ef10_filter_table_restore, 4280 .filter_table_remove = efx_ef10_filter_table_remove, 4281 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 4282 .filter_insert = efx_ef10_filter_insert, 4283 .filter_remove_safe = efx_ef10_filter_remove_safe, 4284 .filter_get_safe = efx_ef10_filter_get_safe, 4285 .filter_clear_rx = efx_ef10_filter_clear_rx, 4286 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 4287 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 4288 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 4289 #ifdef CONFIG_RFS_ACCEL 4290 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 4291 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 4292 #endif 4293 #ifdef CONFIG_SFC_MTD 4294 .mtd_probe = efx_port_dummy_op_int, 4295 #endif 4296 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 4297 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 4298 #ifdef CONFIG_SFC_SRIOV 4299 .vswitching_probe = efx_ef10_vswitching_probe_vf, 4300 .vswitching_restore = efx_ef10_vswitching_restore_vf, 4301 .vswitching_remove = efx_ef10_vswitching_remove_vf, 4302 .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, 4303 #endif 4304 .get_mac_address = efx_ef10_get_mac_address_vf, 4305 .set_mac_address = efx_ef10_set_mac_address, 4306 4307 .revision = EFX_REV_HUNT_A0, 4308 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4309 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4310 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4311 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4312 .can_rx_scatter = true, 4313 .always_rx_scatter = true, 4314 .max_interrupt_mode = EFX_INT_MODE_MSIX, 4315 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4316 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 4317 NETIF_F_RXHASH | NETIF_F_NTUPLE), 4318 .mcdi_max_ver = 2, 4319 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 4320 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4321 1 << HWTSTAMP_FILTER_ALL, 4322 }; 4323 4324 const struct efx_nic_type efx_hunt_a0_nic_type = { 4325 .is_vf = false, 4326 .mem_bar = EFX_MEM_BAR, 4327 .mem_map_size = efx_ef10_mem_map_size, 4328 .probe = efx_ef10_probe_pf, 4329 .remove = efx_ef10_remove, 4330 .dimension_resources = efx_ef10_dimension_resources, 4331 .init = efx_ef10_init_nic, 4332 .fini = efx_port_dummy_op_void, 4333 .map_reset_reason = efx_ef10_map_reset_reason, 4334 .map_reset_flags = efx_ef10_map_reset_flags, 4335 .reset = efx_ef10_reset, 4336 .probe_port = efx_mcdi_port_probe, 4337 .remove_port = efx_mcdi_port_remove, 4338 .fini_dmaq = efx_ef10_fini_dmaq, 4339 .prepare_flr = efx_ef10_prepare_flr, 4340 .finish_flr = efx_port_dummy_op_void, 4341 .describe_stats = efx_ef10_describe_stats, 4342 .update_stats = efx_ef10_update_stats_pf, 4343 .start_stats = efx_mcdi_mac_start_stats, 4344 .pull_stats = efx_mcdi_mac_pull_stats, 4345 .stop_stats = efx_mcdi_mac_stop_stats, 4346 .set_id_led = efx_mcdi_set_id_led, 4347 
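/* Several optional hooks in this table (and in the VF table above)
 * point at efx_port_dummy_op_void / efx_port_dummy_op_int rather than
 * being left NULL, so callers can invoke them unconditionally.  They
 * are trivial stubs, roughly:
 *
 *	void efx_port_dummy_op_void(struct efx_nic *efx) {}
 *	int efx_port_dummy_op_int(struct efx_nic *efx) { return 0; }
 *
 * (shown for illustration; the real definitions live in efx.c.)
 */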
.push_irq_moderation = efx_ef10_push_irq_moderation, 4348 .reconfigure_mac = efx_ef10_mac_reconfigure, 4349 .check_mac_fault = efx_mcdi_mac_check_fault, 4350 .reconfigure_port = efx_mcdi_port_reconfigure, 4351 .get_wol = efx_ef10_get_wol, 4352 .set_wol = efx_ef10_set_wol, 4353 .resume_wol = efx_port_dummy_op_void, 4354 .test_chip = efx_ef10_test_chip, 4355 .test_nvram = efx_mcdi_nvram_test_all, 4356 .mcdi_request = efx_ef10_mcdi_request, 4357 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 4358 .mcdi_read_response = efx_ef10_mcdi_read_response, 4359 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 4360 .irq_enable_master = efx_port_dummy_op_void, 4361 .irq_test_generate = efx_ef10_irq_test_generate, 4362 .irq_disable_non_ev = efx_port_dummy_op_void, 4363 .irq_handle_msi = efx_ef10_msi_interrupt, 4364 .irq_handle_legacy = efx_ef10_legacy_interrupt, 4365 .tx_probe = efx_ef10_tx_probe, 4366 .tx_init = efx_ef10_tx_init, 4367 .tx_remove = efx_ef10_tx_remove, 4368 .tx_write = efx_ef10_tx_write, 4369 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, 4370 .rx_probe = efx_ef10_rx_probe, 4371 .rx_init = efx_ef10_rx_init, 4372 .rx_remove = efx_ef10_rx_remove, 4373 .rx_write = efx_ef10_rx_write, 4374 .rx_defer_refill = efx_ef10_rx_defer_refill, 4375 .ev_probe = efx_ef10_ev_probe, 4376 .ev_init = efx_ef10_ev_init, 4377 .ev_fini = efx_ef10_ev_fini, 4378 .ev_remove = efx_ef10_ev_remove, 4379 .ev_process = efx_ef10_ev_process, 4380 .ev_read_ack = efx_ef10_ev_read_ack, 4381 .ev_test_generate = efx_ef10_ev_test_generate, 4382 .filter_table_probe = efx_ef10_filter_table_probe, 4383 .filter_table_restore = efx_ef10_filter_table_restore, 4384 .filter_table_remove = efx_ef10_filter_table_remove, 4385 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 4386 .filter_insert = efx_ef10_filter_insert, 4387 .filter_remove_safe = efx_ef10_filter_remove_safe, 4388 .filter_get_safe = efx_ef10_filter_get_safe, 4389 .filter_clear_rx = efx_ef10_filter_clear_rx, 4390 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 4391 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 4392 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 4393 #ifdef CONFIG_RFS_ACCEL 4394 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 4395 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 4396 #endif 4397 #ifdef CONFIG_SFC_MTD 4398 .mtd_probe = efx_ef10_mtd_probe, 4399 .mtd_rename = efx_mcdi_mtd_rename, 4400 .mtd_read = efx_mcdi_mtd_read, 4401 .mtd_erase = efx_mcdi_mtd_erase, 4402 .mtd_write = efx_mcdi_mtd_write, 4403 .mtd_sync = efx_mcdi_mtd_sync, 4404 #endif 4405 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 4406 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 4407 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 4408 #ifdef CONFIG_SFC_SRIOV 4409 .sriov_configure = efx_ef10_sriov_configure, 4410 .sriov_init = efx_ef10_sriov_init, 4411 .sriov_fini = efx_ef10_sriov_fini, 4412 .sriov_wanted = efx_ef10_sriov_wanted, 4413 .sriov_reset = efx_ef10_sriov_reset, 4414 .sriov_flr = efx_ef10_sriov_flr, 4415 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 4416 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 4417 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 4418 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 4419 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 4420 .vswitching_probe = efx_ef10_vswitching_probe_pf, 4421 .vswitching_restore = efx_ef10_vswitching_restore_pf, 4422 .vswitching_remove = efx_ef10_vswitching_remove_pf, 4423 #endif 4424 .get_mac_address = 
efx_ef10_get_mac_address_pf, 4425 .set_mac_address = efx_ef10_set_mac_address, 4426 4427 .revision = EFX_REV_HUNT_A0, 4428 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 4429 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 4430 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 4431 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 4432 .can_rx_scatter = true, 4433 .always_rx_scatter = true, 4434 .max_interrupt_mode = EFX_INT_MODE_MSIX, 4435 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 4436 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 4437 NETIF_F_RXHASH | NETIF_F_NTUPLE), 4438 .mcdi_max_ver = 2, 4439 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 4440 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 4441 1 << HWTSTAMP_FILTER_ALL, 4442 }; 4443
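/* Both efx_nic_type tables above are consumed by the generic driver
 * core: the PCI probe path selects one through the id table's
 * driver_data and thereafter reaches hardware-specific code only via
 * efx->type->...  A condensed sketch of that wiring follows; the real
 * table lives in efx.c, and the table name and device IDs shown here
 * are illustrative assumptions, not taken from this file:
 *
 *	static const struct pci_device_id sfc_pci_table_sketch[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),
 *		  .driver_data = (unsigned long)&efx_hunt_a0_nic_type },
 *		{ PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),
 *		  .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type },
 *		{ 0 }
 *	};
 *
 *	// in the probe path, roughly:
 *	//	efx->type = (const struct efx_nic_type *)id->driver_data;
 *	//	rc = efx->type->probe(efx);
 */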