Lines Matching +full:probe +full:- +full:reset

1 // SPDX-License-Identifier: GPL-2.0-only
37 * On Falcon-based NICs, this will:
38 * - Check the on-board hardware monitor;
39 * - Poll the link state and reconfigure the hardware as necessary.
40 * On Siena-based NICs for power systems with EEH support, this will give EEH a
45 /* How often and how many times to poll for a reset while waiting for a
107 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
108 * queued onto this work queue. This is not a per-nic work queue, because
117 printk(KERN_ERR "Failed to create reset workqueue\n"); in efx_siena_create_reset_workqueue()
118 return -ENOMEM; in efx_siena_create_reset_workqueue()
126 queue_work(reset_workqueue, &efx->reset_work); in efx_siena_queue_reset_work()
131 cancel_work_sync(&efx->reset_work); in efx_siena_flush_reset_workqueue()
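
The three fragments above (create, queue, flush) show the usual pattern of a single global, single-threaded workqueue shared by all NICs for reset work. Below is a minimal sketch of that pattern as a standalone test module, not the driver's own code; the demo_* names are hypothetical.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_reset_wq;  /* plays the role of reset_workqueue */
static struct work_struct demo_reset_work;      /* per-NIC work item in the real driver */

static void demo_reset_fn(struct work_struct *work)
{
	pr_info("demo: reset work running\n");
}

static int __init demo_init(void)
{
	demo_reset_wq = create_singlethread_workqueue("demo_reset");
	if (!demo_reset_wq) {
		pr_err("Failed to create reset workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&demo_reset_work, demo_reset_fn);
	queue_work(demo_reset_wq, &demo_reset_work);   /* cf. efx_siena_queue_reset_work() */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_reset_work);            /* cf. efx_siena_flush_reset_workqueue() */
	destroy_workqueue(demo_reset_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
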
142 /* We assume that efx->type->reconfigure_mac will always try to sync RX
143 * filters and therefore needs to read-lock the filter table against freeing
147 if (efx->type->reconfigure_mac) { in efx_siena_mac_reconfigure()
148 down_read(&efx->filter_sem); in efx_siena_mac_reconfigure()
149 efx->type->reconfigure_mac(efx, mtu_only); in efx_siena_mac_reconfigure()
150 up_read(&efx->filter_sem); in efx_siena_mac_reconfigure()
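
The comment and fragment above describe a reader/writer split: MAC reconfiguration only reads the filter table, so it takes filter_sem for read, while the paths that free and rebuild the table (see the down_write()/up_write() calls around reset later in this listing) take it for write. A userspace sketch of the same discipline, with pthread_rwlock_t standing in for the kernel rw_semaphore; all names here are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;
static int filter_table_valid = 1;   /* stands in for the RX filter table */

/* Reader: like efx_siena_mac_reconfigure() syncing RX filters. */
static void *mac_reconfigure(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&filter_sem);
	printf("reconfigure: table %s\n", filter_table_valid ? "present" : "gone");
	pthread_rwlock_unlock(&filter_sem);
	return NULL;
}

/* Writer: like the reset path that tears down and restores the table. */
static void *reset_path(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&filter_sem);
	filter_table_valid = 0;   /* free ... */
	filter_table_valid = 1;   /* ... and restore; readers never observe the gap */
	pthread_rwlock_unlock(&filter_sem);
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, mac_reconfigure, NULL);
	pthread_create(&w, NULL, reset_path, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}
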
162 mutex_lock(&efx->mac_lock); in efx_mac_work()
163 if (efx->port_enabled) in efx_mac_work()
165 mutex_unlock(&efx->mac_lock); in efx_mac_work()
172 u8 *new_addr = addr->sa_data; in efx_siena_set_mac_address()
177 netif_err(efx, drv, efx->net_dev, in efx_siena_set_mac_address()
180 return -EADDRNOTAVAIL; in efx_siena_set_mac_address()
184 ether_addr_copy(old_addr, net_dev->dev_addr); in efx_siena_set_mac_address()
186 if (efx->type->set_mac_address) { in efx_siena_set_mac_address()
187 rc = efx->type->set_mac_address(efx); in efx_siena_set_mac_address()
195 mutex_lock(&efx->mac_lock); in efx_siena_set_mac_address()
197 mutex_unlock(&efx->mac_lock); in efx_siena_set_mac_address()
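
efx_siena_set_mac_address() above rejects addresses that are not valid unicast MACs (returning -EADDRNOTAVAIL) and copies the old address first, so the unmatched lines can roll back if the type-specific call fails. A userspace sketch of just the validity rule; demo_valid_ether_addr() is a made-up helper that mirrors the kernel's is_valid_ether_addr() check (unicast and non-zero).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the is_valid_ether_addr() rule: I/G bit clear and not all-zero. */
static int demo_valid_ether_addr(const uint8_t a[6])
{
	static const uint8_t zero[6];

	return !(a[0] & 0x01) && memcmp(a, zero, 6) != 0;
}

int main(void)
{
	const uint8_t good[6]  = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const uint8_t allzero[6] = { 0 };

	printf("%d %d %d\n", demo_valid_ether_addr(good),
	       demo_valid_ether_addr(mcast), demo_valid_ether_addr(allzero));
	return 0;   /* prints: 1 0 0 */
}
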
207 if (efx->port_enabled) in efx_siena_set_rx_mode()
208 queue_work(efx->workqueue, &efx->mac_work); in efx_siena_set_rx_mode()
217 /* If disabling RX n-tuple filtering, clear existing filters */ in efx_siena_set_features()
218 if (net_dev->features & ~data & NETIF_F_NTUPLE) { in efx_siena_set_features()
219 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); in efx_siena_set_features()
225 * If rx-fcs is changed, mac_reconfigure updates that too. in efx_siena_set_features()
227 if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | in efx_siena_set_features()
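
The two tests above use standard netdev feature-mask idioms: old & ~new & FLAG is true only when FLAG is being turned off, and (old ^ new) & MASK is true when any bit in MASK changes in either direction. A small demo with made-up flag values; the real NETIF_F_* bits live in the kernel headers.

#include <stdio.h>
#include <stdint.h>

#define F_NTUPLE       (1u << 0)   /* illustrative values, not the real NETIF_F_* bits */
#define F_VLAN_FILTER  (1u << 1)
#define F_RXFCS        (1u << 2)

int main(void)
{
	uint32_t features = F_NTUPLE | F_VLAN_FILTER;   /* current net_dev->features */
	uint32_t data     = F_VLAN_FILTER | F_RXFCS;    /* requested feature set */

	/* "Feature being disabled": bit set in features, clear in data. */
	if (features & ~data & F_NTUPLE)
		printf("n-tuple filtering turned off: clear existing filters\n");

	/* "Feature changed either way": XOR picks up both enable and disable. */
	if ((features ^ data) & (F_VLAN_FILTER | F_RXFCS))
		printf("VLAN filter or RX-FCS changed: reconfigure the MAC\n");

	return 0;
}
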
244 struct efx_link_state *link_state = &efx->link_state; in efx_siena_link_status_changed()
251 if (!netif_running(efx->net_dev)) in efx_siena_link_status_changed()
254 if (link_state->up != netif_carrier_ok(efx->net_dev)) { in efx_siena_link_status_changed()
255 efx->n_link_state_changes++; in efx_siena_link_status_changed()
257 if (link_state->up) in efx_siena_link_status_changed()
258 netif_carrier_on(efx->net_dev); in efx_siena_link_status_changed()
260 netif_carrier_off(efx->net_dev); in efx_siena_link_status_changed()
264 if (link_state->up) in efx_siena_link_status_changed()
265 netif_info(efx, link, efx->net_dev, in efx_siena_link_status_changed()
266 "link up at %uMbps %s-duplex (MTU %d)\n", in efx_siena_link_status_changed()
267 link_state->speed, link_state->fd ? "full" : "half", in efx_siena_link_status_changed()
268 efx->net_dev->mtu); in efx_siena_link_status_changed()
270 netif_info(efx, link, efx->net_dev, "link down\n"); in efx_siena_link_status_changed()
279 efx->rx_prefix_size + efx->type->rx_buffer_padding + in efx_siena_xdp_max_mtu()
280 efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM; in efx_siena_xdp_max_mtu()
282 return PAGE_SIZE - overhead; in efx_siena_xdp_max_mtu()
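
efx_siena_xdp_max_mtu() above caps the MTU so that one RX buffer, a single page, still has room for the hardware RX prefix, the NIC's buffer padding, the IP alignment offset, and the XDP head/tailroom. Note that the listing shows only the tail of the overhead expression; the unmatched first line adds further framing and per-page overhead. A sketch of the arithmetic with purely illustrative numbers.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; the driver takes these from its own headers. */
	int page_size = 4096;
	int rx_prefix_size = 16;   /* hardware prefix written before the frame */
	int rx_buffer_padding = 0;
	int rx_ip_align = 2;
	int xdp_headroom = 256;    /* e.g. XDP packet headroom */
	int xdp_tailroom = 320;    /* room for the shared-info tail, rounded */

	/* The first line of the driver's sum is not among the matched lines
	 * above (it adds framing and per-page state); placeholder here. */
	int unmatched_overhead = 0;

	int overhead = unmatched_overhead + rx_prefix_size + rx_buffer_padding +
		       rx_ip_align + xdp_headroom + xdp_tailroom;

	printf("max XDP MTU on a %d-byte page: %d\n",
	       page_size, page_size - overhead);
	return 0;
}
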
295 if (rtnl_dereference(efx->xdp_prog) && in efx_siena_change_mtu()
297 netif_err(efx, drv, efx->net_dev, in efx_siena_change_mtu()
300 return -EINVAL; in efx_siena_change_mtu()
303 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); in efx_siena_change_mtu()
308 mutex_lock(&efx->mac_lock); in efx_siena_change_mtu()
309 WRITE_ONCE(net_dev->mtu, new_mtu); in efx_siena_change_mtu()
311 mutex_unlock(&efx->mac_lock); in efx_siena_change_mtu()
330 netif_vdbg(efx, timer, efx->net_dev, in efx_monitor()
333 BUG_ON(efx->type->monitor == NULL); in efx_monitor()
339 if (mutex_trylock(&efx->mac_lock)) { in efx_monitor()
340 if (efx->port_enabled && efx->type->monitor) in efx_monitor()
341 efx->type->monitor(efx); in efx_monitor()
342 mutex_unlock(&efx->mac_lock); in efx_monitor()
350 if (efx->type->monitor) in efx_siena_start_monitor()
351 queue_delayed_work(efx->workqueue, &efx->monitor_work, in efx_siena_start_monitor()
367 netdev_features_t old_features = efx->net_dev->features; in efx_start_datapath()
368 bool old_rx_scatter = efx->rx_scatter; in efx_start_datapath()
375 efx->rx_dma_len = (efx->rx_prefix_size + in efx_start_datapath()
376 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + in efx_start_datapath()
377 efx->type->rx_buffer_padding); in efx_start_datapath()
379 efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM); in efx_start_datapath()
382 efx->rx_scatter = efx->type->always_rx_scatter; in efx_start_datapath()
383 efx->rx_buffer_order = 0; in efx_start_datapath()
384 } else if (efx->type->can_rx_scatter) { in efx_start_datapath()
390 efx->rx_scatter = true; in efx_start_datapath()
391 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; in efx_start_datapath()
392 efx->rx_buffer_order = 0; in efx_start_datapath()
394 efx->rx_scatter = false; in efx_start_datapath()
395 efx->rx_buffer_order = get_order(rx_buf_len); in efx_start_datapath()
399 if (efx->rx_buffer_order) in efx_start_datapath()
400 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
402 efx->rx_dma_len, efx->rx_buffer_order, in efx_start_datapath()
403 efx->rx_pages_per_batch); in efx_start_datapath()
405 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
407 efx->rx_dma_len, efx->rx_page_buf_step, in efx_start_datapath()
408 efx->rx_bufs_per_page, efx->rx_pages_per_batch); in efx_start_datapath()
413 efx->net_dev->hw_features |= efx->net_dev->features; in efx_start_datapath()
414 efx->net_dev->hw_features &= ~efx->fixed_features; in efx_start_datapath()
415 efx->net_dev->features |= efx->fixed_features; in efx_start_datapath()
416 if (efx->net_dev->features != old_features) in efx_start_datapath()
417 netdev_features_change(efx->net_dev); in efx_start_datapath()
419 /* RX filters may also have scatter-enabled flags */ in efx_start_datapath()
420 if ((efx->rx_scatter != old_rx_scatter) && in efx_start_datapath()
421 efx->type->filter_update_rx_scatter) in efx_start_datapath()
422 efx->type->filter_update_rx_scatter(efx); in efx_start_datapath()
431 efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx); in efx_start_datapath()
432 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; in efx_start_datapath()
439 if (netif_device_present(efx->net_dev)) in efx_start_datapath()
440 netif_tx_wake_all_queues(efx->net_dev); in efx_start_datapath()
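
The efx_start_datapath() fragments above size RX DMA buffers from the MTU: if the required buffer length fits the fixed user-buffer size the driver stays with order-0 pages; if it does not fit but the NIC can scatter, it clamps the DMA length and scatters large frames over order-0 buffers; otherwise it allocates higher-order pages via get_order(). They also derive the TX queue stop/wake thresholds. A simplified userspace sketch of those calculations; constants are illustrative and demo_get_order() mimics the kernel's get_order().

#include <stdio.h>

#define DEMO_PAGE_SIZE     4096
#define DEMO_USR_BUF_SIZE  2048   /* stands in for EFX_RX_USR_BUF_SIZE */

/* Smallest page order that covers len, like the kernel's get_order(). */
static int demo_get_order(unsigned int len)
{
	int order = 0;

	while ((unsigned int)(DEMO_PAGE_SIZE << order) < len)
		order++;
	return order;
}

int main(void)
{
	/* Illustrative inputs; the driver gets these from the MTU and NIC type. */
	unsigned int mtu = 9000, rx_prefix = 16, rx_padding = 0;
	unsigned int rx_ip_align = 2, xdp_tailroom = 320;
	int can_scatter = 1, rx_scatter, order;

	unsigned int rx_dma_len = rx_prefix + (mtu + 18 /* approx. L2 framing */) + rx_padding;
	unsigned int rx_buf_len = rx_ip_align + rx_dma_len + xdp_tailroom;

	if (rx_buf_len <= DEMO_USR_BUF_SIZE) {
		rx_scatter = 0;                      /* or always_rx_scatter on some NICs */
		order = 0;
	} else if (can_scatter) {
		rx_scatter = 1;                      /* scatter big frames over order-0 buffers */
		rx_dma_len = DEMO_USR_BUF_SIZE;
		order = 0;
	} else {
		rx_scatter = 0;
		order = demo_get_order(rx_buf_len);  /* one large contiguous buffer */
	}
	printf("rx_dma_len=%u rx_scatter=%d rx_buffer_order=%d\n",
	       rx_dma_len, rx_scatter, order);

	/* TX flow control: stop when a worst-case skb might no longer fit,
	 * wake again once the queue has drained well below that. */
	unsigned int txq_entries = 1024, max_skb_descs = 18;   /* illustrative */
	unsigned int stop_thresh = txq_entries - max_skb_descs;
	printf("txq_stop_thresh=%u txq_wake_thresh=%u\n", stop_thresh, stop_thresh / 2);
	return 0;
}
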
446 BUG_ON(efx->port_enabled); in efx_stop_datapath()
459 /* Equivalent to efx_siena_link_set_advertising with all-zeroes, except does not
464 bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); in efx_siena_link_clear_advertising()
465 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); in efx_siena_link_clear_advertising()
470 efx->wanted_fc = wanted_fc; in efx_siena_link_set_wanted_fc()
471 if (efx->link_advertising[0]) { in efx_siena_link_set_wanted_fc()
473 efx->link_advertising[0] |= (ADVERTISED_Pause | in efx_siena_link_set_wanted_fc()
476 efx->link_advertising[0] &= ~(ADVERTISED_Pause | in efx_siena_link_set_wanted_fc()
479 efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; in efx_siena_link_set_wanted_fc()
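
efx_siena_link_set_wanted_fc() above translates the requested RX/TX pause settings into the standard 802.3 pause advertisement bits: RX pause sets both Pause and Asym_Pause, and TX pause then toggles Asym_Pause, which produces the usual (Pause, Asym) encodings for all four RX/TX combinations. A table-building sketch with local stand-ins for the EFX_FC_* and ADVERTISED_* constants.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins; the real values come from the driver and ethtool headers. */
#define FC_TX           (1u << 0)
#define FC_RX           (1u << 1)
#define ADV_PAUSE       (1u << 13)
#define ADV_ASYM_PAUSE  (1u << 14)

static uint32_t fc_to_advertising(uint32_t wanted_fc)
{
	uint32_t adv = 0;

	if (wanted_fc & FC_RX)
		adv |= ADV_PAUSE | ADV_ASYM_PAUSE;
	else
		adv &= ~(ADV_PAUSE | ADV_ASYM_PAUSE);
	if (wanted_fc & FC_TX)
		adv ^= ADV_ASYM_PAUSE;
	return adv;
}

int main(void)
{
	for (uint32_t fc = 0; fc <= (FC_TX | FC_RX); fc++) {
		uint32_t adv = fc_to_advertising(fc);

		printf("rx=%d tx=%d -> Pause=%d Asym_Pause=%d\n",
		       !!(fc & FC_RX), !!(fc & FC_TX),
		       !!(adv & ADV_PAUSE), !!(adv & ADV_ASYM_PAUSE));
	}
	return 0;
}
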
485 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); in efx_start_port()
486 BUG_ON(efx->port_enabled); in efx_start_port()
488 mutex_lock(&efx->mac_lock); in efx_start_port()
489 efx->port_enabled = true; in efx_start_port()
494 mutex_unlock(&efx->mac_lock); in efx_start_port()
498 * and the async self-test, wait for them to finish and prevent them
504 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); in efx_stop_port()
508 mutex_lock(&efx->mac_lock); in efx_stop_port()
509 efx->port_enabled = false; in efx_stop_port()
510 mutex_unlock(&efx->mac_lock); in efx_stop_port()
513 netif_addr_lock_bh(efx->net_dev); in efx_stop_port()
514 netif_addr_unlock_bh(efx->net_dev); in efx_stop_port()
516 cancel_delayed_work_sync(&efx->monitor_work); in efx_stop_port()
518 cancel_work_sync(&efx->mac_work); in efx_stop_port()
531 BUG_ON(efx->state == STATE_DISABLED); in efx_siena_start_all()
536 if (efx->port_enabled || !netif_running(efx->net_dev) || in efx_siena_start_all()
537 efx->reset_pending) in efx_siena_start_all()
546 /* Link state detection is normally event-driven; we have in efx_siena_start_all()
549 mutex_lock(&efx->mac_lock); in efx_siena_start_all()
552 mutex_unlock(&efx->mac_lock); in efx_siena_start_all()
554 if (efx->type->start_stats) { in efx_siena_start_all()
555 efx->type->start_stats(efx); in efx_siena_start_all()
556 efx->type->pull_stats(efx); in efx_siena_start_all()
557 spin_lock_bh(&efx->stats_lock); in efx_siena_start_all()
558 efx->type->update_stats(efx, NULL, NULL); in efx_siena_start_all()
559 spin_unlock_bh(&efx->stats_lock); in efx_siena_start_all()
573 if (!efx->port_enabled) in efx_siena_stop_all()
576 if (efx->type->update_stats) { in efx_siena_stop_all()
580 efx->type->pull_stats(efx); in efx_siena_stop_all()
581 spin_lock_bh(&efx->stats_lock); in efx_siena_stop_all()
582 efx->type->update_stats(efx, NULL, NULL); in efx_siena_stop_all()
583 spin_unlock_bh(&efx->stats_lock); in efx_siena_stop_all()
584 efx->type->stop_stats(efx); in efx_siena_stop_all()
593 WARN_ON(netif_running(efx->net_dev) && in efx_siena_stop_all()
594 netif_device_present(efx->net_dev)); in efx_siena_stop_all()
595 netif_tx_disable(efx->net_dev); in efx_siena_stop_all()
603 if (efx->type->update_stats_atomic) in efx_siena_update_stats_atomic()
604 return efx->type->update_stats_atomic(efx, full_stats, core_stats); in efx_siena_update_stats_atomic()
605 return efx->type->update_stats(efx, full_stats, core_stats); in efx_siena_update_stats_atomic()
608 /* Context: process, rcu_read_lock or RTNL held, non-blocking. */
614 spin_lock_bh(&efx->stats_lock); in efx_siena_net_stats()
616 spin_unlock_bh(&efx->stats_lock); in efx_siena_net_stats()
621 * through phy_op->set_settings(), and pushed asynchronously to the MAC
631 WARN_ON(!mutex_is_locked(&efx->mac_lock)); in __efx_siena_reconfigure_port()
634 phy_mode = efx->phy_mode; in __efx_siena_reconfigure_port()
636 efx->phy_mode |= PHY_MODE_TX_DISABLED; in __efx_siena_reconfigure_port()
638 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; in __efx_siena_reconfigure_port()
640 if (efx->type->reconfigure_port) in __efx_siena_reconfigure_port()
641 rc = efx->type->reconfigure_port(efx); in __efx_siena_reconfigure_port()
644 efx->phy_mode = phy_mode; in __efx_siena_reconfigure_port()
658 mutex_lock(&efx->mac_lock); in efx_siena_reconfigure_port()
660 mutex_unlock(&efx->mac_lock); in efx_siena_reconfigure_port()
667 * Device reset and suspend
681 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); in efx_wait_for_bist_end()
686 efx->mc_bist_for_other_fn = false; in efx_wait_for_bist_end()
692 * Returns a non-zero value otherwise.
699 * schedule a 'recover or reset', leading to this recovery handler. in efx_siena_try_recovery()
702 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); in efx_siena_try_recovery()
704 /* The EEH mechanisms will handle the error and reset the in efx_siena_try_recovery()
714 * before reset.
721 efx->type->prepare_flr(efx); in efx_siena_reset_down()
726 mutex_lock(&efx->mac_lock); in efx_siena_reset_down()
727 down_write(&efx->filter_sem); in efx_siena_reset_down()
728 efx->type->fini(efx); in efx_siena_reset_down()
736 netif_err(efx, tx_err, efx->net_dev, in efx_siena_watchdog()
738 efx->port_enabled); in efx_siena_watchdog()
756 efx->type->finish_flr(efx); in efx_siena_reset_up()
759 rc = efx->type->init(efx); in efx_siena_reset_up()
761 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); in efx_siena_reset_up()
768 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && in efx_siena_reset_up()
771 if (rc && rc != -EPERM) in efx_siena_reset_up()
772 netif_err(efx, drv, efx->net_dev, in efx_siena_reset_up()
781 rc = efx->type->vswitching_restore(efx); in efx_siena_reset_up()
783 netif_warn(efx, probe, efx->net_dev, in efx_siena_reset_up()
788 efx->type->filter_table_restore(efx); in efx_siena_reset_up()
789 up_write(&efx->filter_sem); in efx_siena_reset_up()
790 if (efx->type->sriov_reset) in efx_siena_reset_up()
791 efx->type->sriov_reset(efx); in efx_siena_reset_up()
793 mutex_unlock(&efx->mac_lock); in efx_siena_reset_up()
797 if (efx->type->udp_tnl_push_ports) in efx_siena_reset_up()
798 efx->type->udp_tnl_push_ports(efx); in efx_siena_reset_up()
803 efx->port_initialized = false; in efx_siena_reset_up()
805 up_write(&efx->filter_sem); in efx_siena_reset_up()
806 mutex_unlock(&efx->mac_lock); in efx_siena_reset_up()
811 /* Reset the NIC using the specified method. Note that the reset may
821 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", in efx_siena_reset()
826 * EF100 reset is handled in the efx_nic_type callback below. in efx_siena_reset()
831 rc = efx->type->reset(efx, method); in efx_siena_reset()
833 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); in efx_siena_reset()
841 efx->reset_pending &= -(1 << (method + 1)); in efx_siena_reset()
842 else /* it doesn't fit into the well-ordered scope hierarchy */ in efx_siena_reset()
843 __clear_bit(method, &efx->reset_pending); in efx_siena_reset()
845 /* Reinitialise bus-mastering, which may have been turned off before in efx_siena_reset()
846 * the reset was scheduled. This is still appropriate, even in the in efx_siena_reset()
850 pci_set_master(efx->pci_dev); in efx_siena_reset()
866 dev_close(efx->net_dev); in efx_siena_reset()
867 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); in efx_siena_reset()
868 efx->state = STATE_DISABLED; in efx_siena_reset()
870 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); in efx_siena_reset()
877 * schedule a reset for later.
885 pending = READ_ONCE(efx->reset_pending); in efx_reset_work()
886 method = fls(pending) - 1; in efx_reset_work()
905 if (efx->state == STATE_READY) in efx_reset_work()
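
The reset machinery above keeps one pending bit per reset method in efx->reset_pending: efx_siena_schedule_reset() sets a bit, efx_reset_work() services the largest-scope one via fls(pending) - 1, and after a successful reset efx_siena_reset() clears every method of equal or smaller scope with the &= -(1 << (method + 1)) trick (methods outside the ordered scope hierarchy are cleared individually instead). A userspace demonstration of the bit manipulation; the method numbers are arbitrary.

#include <stdio.h>

/* Highest set bit, 1-based, like the kernel's fls(); returns 0 for 0. */
static int demo_fls(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long pending = 0;

	pending |= 1UL << 2;                 /* schedule method 2 */
	pending |= 1UL << 5;                 /* schedule method 5 (larger scope) */

	int method = demo_fls(pending) - 1;  /* service the largest-scope reset first */
	printf("servicing method %d, pending=%#lx\n", method, pending);

	/* -(1 << (method + 1)) has only the bits above 'method' set, so this
	 * clears the serviced method and everything of smaller scope. */
	pending &= -(1UL << (method + 1));
	printf("after reset, pending=%#lx\n", pending);
	return 0;
}
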
915 if (efx->state == STATE_RECOVERY) { in efx_siena_schedule_reset()
916 netif_dbg(efx, drv, efx->net_dev, in efx_siena_schedule_reset()
917 "recovering: skip scheduling %s reset\n", in efx_siena_schedule_reset()
933 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", in efx_siena_schedule_reset()
937 method = efx->type->map_reset_reason(type); in efx_siena_schedule_reset()
938 netif_dbg(efx, drv, efx->net_dev, in efx_siena_schedule_reset()
939 "scheduling %s reset for %s\n", in efx_siena_schedule_reset()
944 set_bit(method, &efx->reset_pending); in efx_siena_schedule_reset()
948 * to abort probing or reschedule the reset later. in efx_siena_schedule_reset()
950 if (READ_ONCE(efx->state) != STATE_READY) in efx_siena_schedule_reset()
954 * reset is scheduled. So switch back to poll'd MCDI completions. in efx_siena_schedule_reset()
984 * efx_nic (including all sub-structures).
989 int rc = -ENOMEM; in efx_siena_init_struct()
992 INIT_LIST_HEAD(&efx->node); in efx_siena_init_struct()
993 INIT_LIST_HEAD(&efx->secondary_list); in efx_siena_init_struct()
994 spin_lock_init(&efx->biu_lock); in efx_siena_init_struct()
996 INIT_LIST_HEAD(&efx->mtd_list); in efx_siena_init_struct()
998 INIT_WORK(&efx->reset_work, efx_reset_work); in efx_siena_init_struct()
999 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); in efx_siena_init_struct()
1001 efx->pci_dev = pci_dev; in efx_siena_init_struct()
1002 efx->msg_enable = debug; in efx_siena_init_struct()
1003 efx->state = STATE_UNINIT; in efx_siena_init_struct()
1004 strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); in efx_siena_init_struct()
1006 efx->net_dev = net_dev; in efx_siena_init_struct()
1007 efx->rx_prefix_size = efx->type->rx_prefix_size; in efx_siena_init_struct()
1008 efx->rx_ip_align = in efx_siena_init_struct()
1009 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; in efx_siena_init_struct()
1010 efx->rx_packet_hash_offset = in efx_siena_init_struct()
1011 efx->type->rx_hash_offset - efx->type->rx_prefix_size; in efx_siena_init_struct()
1012 efx->rx_packet_ts_offset = in efx_siena_init_struct()
1013 efx->type->rx_ts_offset - efx->type->rx_prefix_size; in efx_siena_init_struct()
1014 efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; in efx_siena_init_struct()
1015 efx->vport_id = EVB_PORT_ID_ASSIGNED; in efx_siena_init_struct()
1016 spin_lock_init(&efx->stats_lock); in efx_siena_init_struct()
1017 efx->vi_stride = EFX_DEFAULT_VI_STRIDE; in efx_siena_init_struct()
1018 efx->num_mac_stats = MC_CMD_MAC_NSTATS; in efx_siena_init_struct()
1019 BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); in efx_siena_init_struct()
1020 mutex_init(&efx->mac_lock); in efx_siena_init_struct()
1021 init_rwsem(&efx->filter_sem); in efx_siena_init_struct()
1023 mutex_init(&efx->rps_mutex); in efx_siena_init_struct()
1024 spin_lock_init(&efx->rps_hash_lock); in efx_siena_init_struct()
1026 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, in efx_siena_init_struct()
1027 sizeof(*efx->rps_hash_table), GFP_KERNEL); in efx_siena_init_struct()
1029 efx->mdio.dev = net_dev; in efx_siena_init_struct()
1030 INIT_WORK(&efx->mac_work, efx_mac_work); in efx_siena_init_struct()
1031 init_waitqueue_head(&efx->flush_wq); in efx_siena_init_struct()
1033 efx->tx_queues_per_channel = 1; in efx_siena_init_struct()
1034 efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; in efx_siena_init_struct()
1035 efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; in efx_siena_init_struct()
1037 efx->mem_bar = UINT_MAX; in efx_siena_init_struct()
1044 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", in efx_siena_init_struct()
1046 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); in efx_siena_init_struct()
1047 if (!efx->workqueue) { in efx_siena_init_struct()
1048 rc = -ENOMEM; in efx_siena_init_struct()
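
In the efx_siena_init_struct() fragments above, the receive-path offsets are all derived from the hardware RX prefix: rx_ip_align looks intended to pad the DMA start so the frame that follows the prefix keeps the usual NET_IP_ALIGN alignment, and the hash and timestamp offsets are converted from prefix-relative to packet-relative values by subtracting the prefix size. A quick sketch of that arithmetic, assuming NET_IP_ALIGN == 2 and illustrative prefix layout values.

#include <stdio.h>

int main(void)
{
	/* Illustrative values; the real ones come from the NIC type descriptor. */
	int net_ip_align = 2;
	int rx_prefix_size = 16;   /* bytes of hardware prefix before the frame */
	int rx_hash_offset = 4;    /* hash field offset within the prefix */
	int rx_ts_offset = 8;      /* timestamp field offset within the prefix */

	int rx_ip_align = net_ip_align ?
		(rx_prefix_size + net_ip_align) % 4 : 0;
	printf("rx_ip_align = %d\n", rx_ip_align);

	/* Offsets are stored relative to the start of the packet, which sits
	 * rx_prefix_size bytes into the buffer; negative results mean the
	 * field lives in the prefix, before the frame. */
	printf("hash at packet offset %d, timestamp at %d\n",
	       rx_hash_offset - rx_prefix_size,
	       rx_ts_offset - rx_prefix_size);
	return 0;
}
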
1062 kfree(efx->rps_hash_table); in efx_siena_fini_struct()
1067 kfree(efx->vpd_sn); in efx_siena_fini_struct()
1069 if (efx->workqueue) { in efx_siena_fini_struct()
1070 destroy_workqueue(efx->workqueue); in efx_siena_fini_struct()
1071 efx->workqueue = NULL; in efx_siena_fini_struct()
1079 struct pci_dev *pci_dev = efx->pci_dev; in efx_siena_init_io()
1082 efx->mem_bar = UINT_MAX; in efx_siena_init_io()
1084 netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar); in efx_siena_init_io()
1088 netif_err(efx, probe, efx->net_dev, in efx_siena_init_io()
1095 rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); in efx_siena_init_io()
1097 netif_err(efx, probe, efx->net_dev, in efx_siena_init_io()
1101 netif_dbg(efx, probe, efx->net_dev, in efx_siena_init_io()
1104 efx->membase_phys = pci_resource_start(efx->pci_dev, bar); in efx_siena_init_io()
1105 if (!efx->membase_phys) { in efx_siena_init_io()
1106 netif_err(efx, probe, efx->net_dev, in efx_siena_init_io()
1109 rc = -ENODEV; in efx_siena_init_io()
1115 netif_err(efx, probe, efx->net_dev, in efx_siena_init_io()
1117 rc = -EIO; in efx_siena_init_io()
1120 efx->mem_bar = bar; in efx_siena_init_io()
1121 efx->membase = ioremap(efx->membase_phys, mem_map_size); in efx_siena_init_io()
1122 if (!efx->membase) { in efx_siena_init_io()
1123 netif_err(efx, probe, efx->net_dev, in efx_siena_init_io()
1125 (unsigned long long)efx->membase_phys, mem_map_size); in efx_siena_init_io()
1126 rc = -ENOMEM; in efx_siena_init_io()
1129 netif_dbg(efx, probe, efx->net_dev, in efx_siena_init_io()
1131 (unsigned long long)efx->membase_phys, mem_map_size, in efx_siena_init_io()
1132 efx->membase); in efx_siena_init_io()
1137 pci_release_region(efx->pci_dev, bar); in efx_siena_init_io()
1139 efx->membase_phys = 0; in efx_siena_init_io()
1141 pci_disable_device(efx->pci_dev); in efx_siena_init_io()
1148 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); in efx_siena_fini_io()
1150 if (efx->membase) { in efx_siena_fini_io()
1151 iounmap(efx->membase); in efx_siena_fini_io()
1152 efx->membase = NULL; in efx_siena_fini_io()
1155 if (efx->membase_phys) { in efx_siena_fini_io()
1156 pci_release_region(efx->pci_dev, efx->mem_bar); in efx_siena_fini_io()
1157 efx->membase_phys = 0; in efx_siena_fini_io()
1158 efx->mem_bar = UINT_MAX; in efx_siena_fini_io()
1161 /* Don't disable bus-mastering if VFs are assigned */ in efx_siena_fini_io()
1162 if (!pci_vfs_assigned(efx->pci_dev)) in efx_siena_fini_io()
1163 pci_disable_device(efx->pci_dev); in efx_siena_fini_io()
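
The efx_siena_init_io()/efx_siena_fini_io() fragments above follow the standard PCI bring-up sequence: enable the device, claim the BAR, set the DMA mask, ioremap the BAR, and unwind everything in reverse on the error paths and at shutdown (skipping pci_disable_device() when VFs are still assigned). A stripped-down sketch of that sequence as a hypothetical driver; the device IDs, BAR number, and DMA mask width are placeholders, not the driver's real values.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#define DEMO_BAR 0   /* placeholder BAR index */

static void __iomem *demo_membase;

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_region(pdev, DEMO_BAR, "demo_io");
	if (rc)
		goto err_disable;
	/* Pick the widest DMA mask the device supports; 46 bits is a placeholder. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
	if (rc)
		goto err_release;
	demo_membase = ioremap(pci_resource_start(pdev, DEMO_BAR),
			       pci_resource_len(pdev, DEMO_BAR));
	if (!demo_membase) {
		rc = -ENOMEM;
		goto err_release;
	}
	return 0;

err_release:
	pci_release_region(pdev, DEMO_BAR);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void demo_remove(struct pci_dev *pdev)
{
	iounmap(demo_membase);
	pci_release_region(pdev, DEMO_BAR);
	pci_disable_device(pdev);
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },   /* placeholder vendor/device IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_pci_driver = {
	.name     = "demo_io",
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};
module_pci_driver(demo_pci_driver);
MODULE_LICENSE("GPL");
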
1174 return sysfs_emit(buf, "%d\n", mcdi->logging_enabled); in mcdi_logging_show()
1185 mcdi->logging_enabled = enable; in mcdi_logging_store()
1193 int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); in efx_siena_init_mcdi_logging()
1196 netif_warn(efx, drv, efx->net_dev, in efx_siena_init_mcdi_logging()
1203 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); in efx_siena_fini_mcdi_logging()
1209 * Stop the software path and request a slot reset.
1222 if (efx->state != STATE_DISABLED) { in efx_io_error_detected()
1223 efx->state = STATE_RECOVERY; in efx_io_error_detected()
1224 efx->reset_pending = 0; in efx_io_error_detected()
1246 /* Fake a successful reset, which will be performed later in efx_io_resume. */
1253 netif_err(efx, hw, efx->net_dev, in efx_io_slot_reset()
1254 "Cannot re-enable PCI device after reset.\n"); in efx_io_slot_reset()
1261 /* Perform the actual reset and resume I/O operations. */
1269 if (efx->state == STATE_DISABLED) in efx_io_resume()
1274 netif_err(efx, hw, efx->net_dev, in efx_io_resume()
1277 efx->state = STATE_READY; in efx_io_resume()
1278 netif_dbg(efx, hw, efx->net_dev, in efx_io_resume()
1286 /* For simplicity and reliability, we always require a slot reset and try to
1287 * reset the hardware when a pci error affecting the device is detected.
1289 * with our request for slot reset the mmio_enabled callback will never be
1311 if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port)) in efx_can_encap_offloads()
1315 switch (skb->protocol) { in efx_can_encap_offloads()
1317 ipproto = ip_hdr(skb)->protocol; in efx_can_encap_offloads()
1323 ipproto = ipv6_hdr(skb)->nexthdr; in efx_can_encap_offloads()
1340 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER) in efx_can_encap_offloads()
1342 if (ntohs(skb->inner_protocol) != ETH_P_TEB) in efx_can_encap_offloads()
1344 if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8) in efx_can_encap_offloads()
1347 return !(greh->flags & (GRE_CSUM | GRE_SEQ)); in efx_can_encap_offloads()
1353 dst_port = udp_hdr(skb)->dest; in efx_can_encap_offloads()
1354 return efx->type->udp_tnl_has_port(efx, dst_port); in efx_can_encap_offloads()
1366 if (skb->encapsulation) { in efx_siena_features_check()
1387 if (efx->type->get_phys_port_id) in efx_siena_get_phys_port_id()
1388 return efx->type->get_phys_port_id(efx, ppid); in efx_siena_get_phys_port_id()
1390 return -EOPNOTSUPP; in efx_siena_get_phys_port_id()
1398 if (snprintf(name, len, "p%u", efx->port_num) >= len) in efx_siena_get_phys_port_name()
1399 return -EINVAL; in efx_siena_get_phys_port_name()
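
The final fragment relies on the C guarantee that snprintf() returns the length it would have written: a return value greater than or equal to the buffer size means the name was truncated, so the driver reports -EINVAL rather than handing back a clipped port name. A minimal userspace demonstration of that check; the helper name is made up.

#include <stdio.h>

static int format_port_name(char *name, size_t len, unsigned int port_num)
{
	if ((size_t)snprintf(name, len, "p%u", port_num) >= len)
		return -1;   /* would be -EINVAL in the driver */
	return 0;
}

int main(void)
{
	char big[8], tiny[2];

	printf("big buffer:  %d\n", format_port_name(big, sizeof(big), 42));   /* 0 */
	printf("tiny buffer: %d\n", format_port_name(tiny, sizeof(tiny), 42)); /* -1 */
	return 0;
}
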