Lines Matching refs:priv

44 static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
368 mlx5e_update_carrier(struct mlx5e_priv *priv) in mlx5e_update_carrier() argument
370 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_update_carrier()
385 priv->media_status_last |= IFM_ACTIVE; in mlx5e_update_carrier()
387 priv->media_status_last &= ~IFM_ACTIVE; in mlx5e_update_carrier()
388 priv->media_active_last = IFM_ETHER; in mlx5e_update_carrier()
389 if_link_state_change(priv->ifp, LINK_STATE_DOWN); in mlx5e_update_carrier()
396 priv->media_active_last = IFM_ETHER; in mlx5e_update_carrier()
397 if_setbaudrate(priv->ifp, 1); in mlx5e_update_carrier()
398 mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n", in mlx5e_update_carrier()
419 mlx5_en_err(priv->ifp, in mlx5e_update_carrier()
428 mlx5_en_err(priv->ifp, in mlx5e_update_carrier()
437 mlx5_en_err(priv->ifp, in mlx5e_update_carrier()
444 priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX; in mlx5e_update_carrier()
445 if_setbaudrate(priv->ifp, media_entry.baudrate); in mlx5e_update_carrier()
447 if_link_state_change(priv->ifp, LINK_STATE_UP); in mlx5e_update_carrier()
453 struct mlx5e_priv *priv = if_getsoftc(dev); in mlx5e_media_status() local
455 ifmr->ifm_status = priv->media_status_last; in mlx5e_media_status()
456 ifmr->ifm_current = ifmr->ifm_active = priv->media_active_last | in mlx5e_media_status()
457 (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | in mlx5e_media_status()
458 (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0); in mlx5e_media_status()
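Note: matched lines 453-458 above cover the ifmedia status callback almost in full. Reassembled here for readability; the callback prototype, braces, and blank line are assumptions, the statements are taken from the matches:

        static void
        mlx5e_media_status(if_t dev, struct ifmediareq *ifmr)
        {
                struct mlx5e_priv *priv = if_getsoftc(dev);

                /* Report the last recorded link state and active media word. */
                ifmr->ifm_status = priv->media_status_last;
                ifmr->ifm_current = ifmr->ifm_active = priv->media_active_last |
                    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
                    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
        }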
498 mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv) in mlx5e_set_port_pause_and_pfc() argument
500 return (mlx5_set_port_pause_and_pfc(priv->mdev, 1, in mlx5e_set_port_pause_and_pfc()
501 priv->params.rx_pauseframe_control, in mlx5e_set_port_pause_and_pfc()
502 priv->params.tx_pauseframe_control, in mlx5e_set_port_pause_and_pfc()
503 priv->params.rx_priority_flow_control, in mlx5e_set_port_pause_and_pfc()
504 priv->params.tx_priority_flow_control)); in mlx5e_set_port_pause_and_pfc()
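Note: lines 498-504 show this helper essentially in full; it only forwards the four cached pause/PFC parameters to the core API. Reassembled below, with the storage class, braces, and comment assumed:

        static int
        mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
        {
                /* Push cached global pause and per-priority flow control settings to the port. */
                return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
                    priv->params.rx_pauseframe_control,
                    priv->params.tx_pauseframe_control,
                    priv->params.rx_priority_flow_control,
                    priv->params.tx_priority_flow_control));
        }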
508 mlx5e_set_port_pfc(struct mlx5e_priv *priv) in mlx5e_set_port_pfc() argument
512 if (priv->gone != 0) { in mlx5e_set_port_pfc()
514 } else if (priv->params.rx_pauseframe_control || in mlx5e_set_port_pfc()
515 priv->params.tx_pauseframe_control) { in mlx5e_set_port_pfc()
516 mlx5_en_err(priv->ifp, in mlx5e_set_port_pfc()
520 error = mlx5e_set_port_pause_and_pfc(priv); in mlx5e_set_port_pfc()
528 struct mlx5e_priv *priv = if_getsoftc(dev); in mlx5e_media_change() local
529 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_media_change()
538 locked = PRIV_LOCKED(priv); in mlx5e_media_change()
540 PRIV_LOCK(priv); in mlx5e_media_change()
542 if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) { in mlx5e_media_change()
555 link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext); in mlx5e_media_change()
562 if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) { in mlx5e_media_change()
577 if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { in mlx5e_media_change()
579 if (priv->params.rx_priority_flow_control || in mlx5e_media_change()
580 priv->params.tx_priority_flow_control) { in mlx5e_media_change()
587 priv->params.rx_pauseframe_control = in mlx5e_media_change()
588 (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; in mlx5e_media_change()
589 priv->params.tx_pauseframe_control = in mlx5e_media_change()
590 (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0; in mlx5e_media_change()
593 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); in mlx5e_media_change()
598 error = -mlx5e_set_port_pause_and_pfc(priv); in mlx5e_media_change()
604 PRIV_UNLOCK(priv); in mlx5e_media_change()
611 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, in mlx5e_update_carrier_work() local
614 PRIV_LOCK(priv); in mlx5e_update_carrier_work()
615 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) in mlx5e_update_carrier_work()
616 mlx5e_update_carrier(priv); in mlx5e_update_carrier_work()
617 PRIV_UNLOCK(priv); in mlx5e_update_carrier_work()
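Note: lines 611-617 give the carrier work handler end to end; only the usual workqueue callback prototype and braces are assumed here:

        static void
        mlx5e_update_carrier_work(struct work_struct *work)
        {
                struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                    update_carrier_work);

                /* Re-read link state only while the interface is opened. */
                PRIV_LOCK(priv);
                if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                        mlx5e_update_carrier(priv);
                PRIV_UNLOCK(priv);
        }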
627 mlx5e_update_pcie_counters(struct mlx5e_priv *priv) in mlx5e_update_pcie_counters() argument
629 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_update_pcie_counters()
630 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; in mlx5e_update_pcie_counters()
677 mlx5e_update_pport_counters(struct mlx5e_priv *priv) in mlx5e_update_pport_counters() argument
679 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_update_pport_counters()
680 struct mlx5e_pport_stats *s = &priv->stats.pport; in mlx5e_update_pport_counters()
681 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; in mlx5e_update_pport_counters()
752 mlx5e_update_pcie_counters(priv); in mlx5e_update_pport_counters()
775 mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) in mlx5e_grp_vnic_env_update_stats() argument
780 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) in mlx5e_grp_vnic_env_update_stats()
788 if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0) in mlx5e_grp_vnic_env_update_stats()
791 priv->stats.vport.rx_steer_missed_packets = in mlx5e_grp_vnic_env_update_stats()
803 mlx5e_update_stats_locked(struct mlx5e_priv *priv) in mlx5e_update_stats_locked() argument
805 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_update_stats_locked()
806 struct mlx5e_vport_stats *s = &priv->stats.vport; in mlx5e_update_stats_locked()
836 for (i = 0; i < priv->params.num_channels; i++) { in mlx5e_update_stats_locked()
837 struct mlx5e_channel *pch = priv->channel + i; in mlx5e_update_stats_locked()
855 for (j = 0; j < priv->num_tc; j++) { in mlx5e_update_stats_locked()
869 for (j = 0; j < priv->rl.param.tx_worker_threads_def; j++) { in mlx5e_update_stats_locked()
870 struct mlx5e_rl_worker *rlw = priv->rl.workers + j; in mlx5e_update_stats_locked()
872 for (i = 0; i < priv->rl.param.tx_channels_per_worker_def; i++) { in mlx5e_update_stats_locked()
907 mlx5e_grp_vnic_env_update_stats(priv); in mlx5e_update_stats_locked()
920 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && in mlx5e_update_stats_locked()
921 mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, in mlx5e_update_stats_locked()
978 mlx5e_update_pport_counters(priv); in mlx5e_update_stats_locked()
981 priv->stats.port_stats_debug.tx_stat_p1519to2047octets + in mlx5e_update_stats_locked()
982 priv->stats.port_stats_debug.tx_stat_p2048to4095octets + in mlx5e_update_stats_locked()
983 priv->stats.port_stats_debug.tx_stat_p4096to8191octets + in mlx5e_update_stats_locked()
984 priv->stats.port_stats_debug.tx_stat_p8192to10239octets; in mlx5e_update_stats_locked()
990 if (priv->params_ethtool.diag_pci_enable || in mlx5e_update_stats_locked()
991 priv->params_ethtool.diag_general_enable) { in mlx5e_update_stats_locked()
993 priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL, in mlx5e_update_stats_locked()
994 priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL); in mlx5e_update_stats_locked()
996 mlx5_en_err(priv->ifp, in mlx5e_update_stats_locked()
1001 error = mlx5e_fec_update(priv); in mlx5e_update_stats_locked()
1003 mlx5_en_err(priv->ifp, in mlx5e_update_stats_locked()
1008 if (priv->params_ethtool.hw_num_temp != 0) { in mlx5e_update_stats_locked()
1009 error = mlx5e_hw_temperature_update(priv); in mlx5e_update_stats_locked()
1011 mlx5_en_err(priv->ifp, in mlx5e_update_stats_locked()
1020 struct mlx5e_priv *priv; in mlx5e_update_stats_work() local
1022 priv = container_of(work, struct mlx5e_priv, update_stats_work); in mlx5e_update_stats_work()
1023 PRIV_LOCK(priv); in mlx5e_update_stats_work()
1024 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && in mlx5e_update_stats_work()
1025 !test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &priv->mdev->intf_state)) in mlx5e_update_stats_work()
1026 mlx5e_update_stats_locked(priv); in mlx5e_update_stats_work()
1027 PRIV_UNLOCK(priv); in mlx5e_update_stats_work()
1033 struct mlx5e_priv *priv = arg; in mlx5e_update_stats() local
1035 queue_work(priv->wq, &priv->update_stats_work); in mlx5e_update_stats()
1037 callout_reset(&priv->watchdog, hz / 4, &mlx5e_update_stats, priv); in mlx5e_update_stats()
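Note: the watchdog handler at lines 1033-1037 is equally short; the callout handler prototype and the comments filling the gaps are assumptions:

        static void
        mlx5e_update_stats(void *arg)
        {
                struct mlx5e_priv *priv = arg;

                /* Defer the heavy statistics query to the work queue. */
                queue_work(priv->wq, &priv->update_stats_work);

                /* Rearm the watchdog to fire again in a quarter second. */
                callout_reset(&priv->watchdog, hz / 4, &mlx5e_update_stats, priv);
        }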
1041 mlx5e_async_event_sub(struct mlx5e_priv *priv, in mlx5e_async_event_sub() argument
1047 queue_work(priv->wq, &priv->update_carrier_work); in mlx5e_async_event_sub()
1059 struct mlx5e_priv *priv = vpriv; in mlx5e_async_event() local
1061 mtx_lock(&priv->async_events_mtx); in mlx5e_async_event()
1062 if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) in mlx5e_async_event()
1063 mlx5e_async_event_sub(priv, event); in mlx5e_async_event()
1064 mtx_unlock(&priv->async_events_mtx); in mlx5e_async_event()
1068 mlx5e_enable_async_events(struct mlx5e_priv *priv) in mlx5e_enable_async_events() argument
1070 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); in mlx5e_enable_async_events()
1074 mlx5e_disable_async_events(struct mlx5e_priv *priv) in mlx5e_disable_async_events() argument
1076 mtx_lock(&priv->async_events_mtx); in mlx5e_disable_async_events()
1077 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); in mlx5e_disable_async_events()
1078 mtx_unlock(&priv->async_events_mtx); in mlx5e_disable_async_events()
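Note: lines 1068-1078 give both async-event toggles in full. The asymmetry pairs with mlx5e_async_event() at lines 1061-1064, which takes the same mutex before testing the bit; braces, storage class, and comments below are assumptions:

        static void
        mlx5e_enable_async_events(struct mlx5e_priv *priv)
        {
                /* Setting the bit needs no lock; handlers tolerate seeing it late. */
                set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        }

        static void
        mlx5e_disable_async_events(struct mlx5e_priv *priv)
        {
                /*
                 * Clearing the bit under async_events_mtx synchronizes with
                 * mlx5e_async_event(), which checks the bit under the same mutex.
                 */
                mtx_lock(&priv->async_events_mtx);
                clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
                mtx_unlock(&priv->async_events_mtx);
        }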
1103 mlx5e_reset_calibration_callout(struct mlx5e_priv *priv) in mlx5e_reset_calibration_callout() argument
1106 if (priv->clbr_done == 0) in mlx5e_reset_calibration_callout()
1107 mlx5e_calibration_callout(priv); in mlx5e_reset_calibration_callout()
1109 callout_reset_sbt_curcpu(&priv->tstmp_clbr, (priv->clbr_done < in mlx5e_reset_calibration_callout()
1112 mlx5e_calibration_callout, priv, C_DIRECT_EXEC); in mlx5e_reset_calibration_callout()
1123 mlx5e_hw_clock(struct mlx5e_priv *priv) in mlx5e_hw_clock() argument
1128 iseg = priv->mdev->iseg; in mlx5e_hw_clock()
1147 struct mlx5e_priv *priv; in mlx5e_calibration_callout() local
1152 priv = arg; in mlx5e_calibration_callout()
1153 curr = &priv->clbr_points[priv->clbr_curr]; in mlx5e_calibration_callout()
1154 clbr_curr_next = priv->clbr_curr + 1; in mlx5e_calibration_callout()
1155 if (clbr_curr_next >= nitems(priv->clbr_points)) in mlx5e_calibration_callout()
1157 next = &priv->clbr_points[clbr_curr_next]; in mlx5e_calibration_callout()
1162 next->clbr_hw_curr = mlx5e_hw_clock(priv); in mlx5e_calibration_callout()
1165 if (priv->clbr_done != 0) { in mlx5e_calibration_callout()
1166 mlx5_en_err(priv->ifp, in mlx5e_calibration_callout()
1169 priv->clbr_done = 0; in mlx5e_calibration_callout()
1180 priv->clbr_curr = clbr_curr_next; in mlx5e_calibration_callout()
1181 atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen)); in mlx5e_calibration_callout()
1183 if (priv->clbr_done < mlx5e_calibration_duration) in mlx5e_calibration_callout()
1184 priv->clbr_done++; in mlx5e_calibration_callout()
1185 mlx5e_reset_calibration_callout(priv); in mlx5e_calibration_callout()
1197 struct mlx5e_priv *priv = c->priv; in mlx5e_create_rq() local
1198 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_create_rq()
1207 err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); in mlx5e_create_rq()
1234 err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs); in mlx5e_create_rq()
1240 err = -tcp_lro_init_args(&rq->lro, priv->ifp, TCP_LRO_ENTRIES, wq_sz); in mlx5e_create_rq()
1259 wqe->data[j].lkey = cpu_to_be32(priv->mr.key); in mlx5e_create_rq()
1263 if (priv->params.rx_cq_moderation_mode < 2) { in mlx5e_create_rq()
1282 rq->ifp = priv->ifp; in mlx5e_create_rq()
1287 mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_rq()
1334 struct mlx5e_priv *priv = c->priv; in mlx5e_enable_rq() local
1335 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_enable_rq()
1358 if (priv->counter_set_id >= 0) in mlx5e_enable_rq()
1359 MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); in mlx5e_enable_rq()
1378 struct mlx5e_priv *priv = c->priv; in mlx5e_modify_rq() local
1379 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_modify_rq()
1408 struct mlx5e_priv *priv = c->priv; in mlx5e_disable_rq() local
1409 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_disable_rq()
1501 mlx5e_open_drop_rq(struct mlx5e_priv *priv, in mlx5e_open_drop_rq() argument
1510 drop_rq->channel = priv->channel; in mlx5e_open_drop_rq()
1514 MLX5_SET(cqc, param_cq.cqc, uar_page, priv->mdev->priv.uar->index); in mlx5e_open_drop_rq()
1517 err = mlx5e_open_cq(priv, &param_cq, &drop_rq->cq, in mlx5e_open_drop_rq()
1527 MLX5_SET(wq, rqc_wq, pd, priv->pdn); in mlx5e_open_drop_rq()
1531 err = mlx5_wq_ll_create(priv->mdev, &param_rq.wq, rqc_wq, &drop_rq->wq, in mlx5e_open_drop_rq()
1596 mlx5_dev_domainset(sq->priv->mdev), M_WAITOK | M_ZERO); in mlx5e_alloc_sq_db()
1618 sq->max_inline = sq->priv->params.tx_max_inline; in mlx5e_update_sq_inline()
1619 sq->min_inline_mode = sq->priv->params.tx_min_inline_mode; in mlx5e_update_sq_inline()
1625 if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP || in mlx5e_update_sq_inline()
1627 if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert)) in mlx5e_update_sq_inline()
1637 mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) in mlx5e_refresh_sq_inline_sub() argument
1641 for (i = 0; i != priv->num_tc; i++) { in mlx5e_refresh_sq_inline_sub()
1649 mlx5e_refresh_sq_inline(struct mlx5e_priv *priv) in mlx5e_refresh_sq_inline() argument
1654 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_refresh_sq_inline()
1657 for (i = 0; i < priv->params.num_channels; i++) in mlx5e_refresh_sq_inline()
1658 mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]); in mlx5e_refresh_sq_inline()
1667 struct mlx5e_priv *priv = c->priv; in mlx5e_create_sq() local
1668 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_create_sq()
1690 sq->mkey_be = cpu_to_be32(priv->mr.key); in mlx5e_create_sq()
1691 sq->ifp = priv->ifp; in mlx5e_create_sq()
1692 sq->priv = priv; in mlx5e_create_sq()
1709 mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_sq()
1754 ts_format = mlx5_get_sq_default_ts(sq->priv->mdev); in mlx5e_enable_sq()
1777 err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); in mlx5e_enable_sq()
1803 err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); in mlx5e_modify_sq()
1814 mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); in mlx5e_disable_sq()
1825 sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; in mlx5e_open_sq()
1835 err = mlx5e_enable_sq(sq, param, &c->bfreg, c->priv->tisn[tc]); in mlx5e_open_sq()
1910 struct mlx5_core_dev *mdev = sq->priv->mdev; in mlx5e_drain_sq()
1941 (sq->priv->media_status_last & IFM_ACTIVE) != 0 && in mlx5e_drain_sq()
1981 mlx5e_create_cq(struct mlx5e_priv *priv, in mlx5e_create_cq() argument
1987 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_create_cq()
2019 cq->priv = priv; in mlx5e_create_cq()
2055 mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used); in mlx5e_enable_cq()
2062 err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen, out, sizeof(out)); in mlx5e_enable_cq()
2069 mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); in mlx5e_enable_cq()
2078 mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); in mlx5e_disable_cq()
2082 mlx5e_open_cq(struct mlx5e_priv *priv, in mlx5e_open_cq() argument
2090 err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); in mlx5e_open_cq()
2120 for (tc = 0; tc < c->priv->num_tc; tc++) { in mlx5e_open_tx_cqs()
2122 err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, in mlx5e_open_tx_cqs()
2141 for (tc = 0; tc < c->priv->num_tc; tc++) in mlx5e_close_tx_cqs()
2152 for (tc = 0; tc < c->priv->num_tc; tc++) { in mlx5e_open_sqs()
2172 for (tc = 0; tc < c->priv->num_tc; tc++) in mlx5e_close_sqs_wait()
2177 mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix) in mlx5e_chan_static_init() argument
2182 c->priv = priv; in mlx5e_chan_static_init()
2186 m_snd_tag_init(&c->tag, c->priv->ifp, &mlx5e_ul_snd_tag_sw); in mlx5e_chan_static_init()
2217 mlx5e_priv_wait_for_completion(struct mlx5e_priv *priv, const uint32_t channels) in mlx5e_priv_wait_for_completion() argument
2222 mlx5e_chan_wait_for_completion(&priv->channel[x]); in mlx5e_priv_wait_for_completion()
2244 mlx5e_open_channel(struct mlx5e_priv *priv, in mlx5e_open_channel() argument
2253 for (i = 0; i != priv->num_tc; i++) in mlx5e_open_channel()
2263 err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, in mlx5e_open_channel()
2319 mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs) in mlx5e_get_wqe_sz() argument
2323 maxs = priv->params.hw_lro_en ? priv->params.lro_wqe_sz : in mlx5e_get_wqe_sz()
2324 MLX5E_SW2MB_MTU(if_getmtu(priv->ifp)); in mlx5e_get_wqe_sz()
2342 mlx5e_build_rq_param(struct mlx5e_priv *priv, in mlx5e_build_rq_param() argument
2349 mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); in mlx5e_build_rq_param()
2354 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); in mlx5e_build_rq_param()
2355 MLX5_SET(wq, wq, pd, priv->pdn); in mlx5e_build_rq_param()
2361 mlx5e_build_sq_param(struct mlx5e_priv *priv, in mlx5e_build_sq_param() argument
2367 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); in mlx5e_build_sq_param()
2369 MLX5_SET(wq, wq, pd, priv->pdn); in mlx5e_build_sq_param()
2375 mlx5e_build_common_cq_param(struct mlx5e_priv *priv, in mlx5e_build_common_cq_param() argument
2380 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); in mlx5e_build_common_cq_param()
2384 mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) in mlx5e_get_default_profile() argument
2390 if (priv->params.hw_lro_en && in mlx5e_get_default_profile()
2397 mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, in mlx5e_build_rx_cq_param() argument
2410 if (priv->params.cqe_zipping_en) { in mlx5e_build_rx_cq_param()
2415 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); in mlx5e_build_rx_cq_param()
2417 switch (priv->params.rx_cq_moderation_mode) { in mlx5e_build_rx_cq_param()
2419 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); in mlx5e_build_rx_cq_param()
2420 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); in mlx5e_build_rx_cq_param()
2424 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); in mlx5e_build_rx_cq_param()
2425 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); in mlx5e_build_rx_cq_param()
2426 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) in mlx5e_build_rx_cq_param()
2432 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); in mlx5e_build_rx_cq_param()
2438 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); in mlx5e_build_rx_cq_param()
2441 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) in mlx5e_build_rx_cq_param()
2450 mlx5e_dim_build_cq_param(priv, param); in mlx5e_build_rx_cq_param()
2452 mlx5e_build_common_cq_param(priv, param); in mlx5e_build_rx_cq_param()
2456 mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, in mlx5e_build_tx_cq_param() argument
2461 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); in mlx5e_build_tx_cq_param()
2462 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); in mlx5e_build_tx_cq_param()
2463 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); in mlx5e_build_tx_cq_param()
2465 switch (priv->params.tx_cq_moderation_mode) { in mlx5e_build_tx_cq_param()
2470 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) in mlx5e_build_tx_cq_param()
2477 mlx5e_build_common_cq_param(priv, param); in mlx5e_build_tx_cq_param()
2481 mlx5e_build_channel_param(struct mlx5e_priv *priv, in mlx5e_build_channel_param() argument
2486 mlx5e_build_rq_param(priv, &cparam->rq); in mlx5e_build_channel_param()
2487 mlx5e_build_sq_param(priv, &cparam->sq); in mlx5e_build_channel_param()
2488 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); in mlx5e_build_channel_param()
2489 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); in mlx5e_build_channel_param()
2493 mlx5e_open_channels(struct mlx5e_priv *priv) in mlx5e_open_channels() argument
2501 mlx5e_build_channel_param(priv, cparam); in mlx5e_open_channels()
2502 for (i = 0; i < priv->params.num_channels; i++) { in mlx5e_open_channels()
2503 err = mlx5e_open_channel(priv, cparam, &priv->channel[i]); in mlx5e_open_channels()
2508 if (priv->params_ethtool.irq_cpu_base > -1) { in mlx5e_open_channels()
2515 err = mlx5_vector2eqn(priv->mdev, i, in mlx5e_open_channels()
2522 irq = priv->mdev->priv.msix_arr[nirq].vector; in mlx5e_open_channels()
2523 cpu = (unsigned)(priv->params_ethtool.irq_cpu_base + in mlx5e_open_channels()
2524 i * priv->params_ethtool.irq_cpu_stride) % (unsigned)mp_ncpus; in mlx5e_open_channels()
2536 mlx5e_close_channel(&priv->channel[i]); in mlx5e_open_channels()
2537 mlx5e_close_channel_wait(&priv->channel[i]); in mlx5e_open_channels()
2544 mlx5e_close_channels(struct mlx5e_priv *priv) in mlx5e_close_channels() argument
2548 for (i = 0; i < priv->params.num_channels; i++) in mlx5e_close_channels()
2549 mlx5e_close_channel(&priv->channel[i]); in mlx5e_close_channels()
2550 for (i = 0; i < priv->params.num_channels; i++) in mlx5e_close_channels()
2551 mlx5e_close_channel_wait(&priv->channel[i]); in mlx5e_close_channels()
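Note: lines 2544-2551 show the full two-pass teardown of the channels; only the "int i;" declaration, braces, and comments are assumed:

        static void
        mlx5e_close_channels(struct mlx5e_priv *priv)
        {
                int i;

                /* First ask every channel to stop ... */
                for (i = 0; i < priv->params.num_channels; i++)
                        mlx5e_close_channel(&priv->channel[i]);
                /* ... then wait for each one to drain in a second pass. */
                for (i = 0; i < priv->params.num_channels; i++)
                        mlx5e_close_channel_wait(&priv->channel[i]);
        }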
2555 mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) in mlx5e_refresh_sq_params() argument
2558 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { in mlx5e_refresh_sq_params()
2561 switch (priv->params.tx_cq_moderation_mode) { in mlx5e_refresh_sq_params()
2571 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, in mlx5e_refresh_sq_params()
2572 priv->params.tx_cq_moderation_usec, in mlx5e_refresh_sq_params()
2573 priv->params.tx_cq_moderation_pkts, in mlx5e_refresh_sq_params()
2577 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, in mlx5e_refresh_sq_params()
2578 priv->params.tx_cq_moderation_usec, in mlx5e_refresh_sq_params()
2579 priv->params.tx_cq_moderation_pkts)); in mlx5e_refresh_sq_params()
2583 mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) in mlx5e_refresh_rq_params() argument
2586 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { in mlx5e_refresh_rq_params()
2591 switch (priv->params.rx_cq_moderation_mode) { in mlx5e_refresh_rq_params()
2611 if (priv->params.rx_cq_moderation_mode >= 2) { in mlx5e_refresh_rq_params()
2614 mlx5e_get_default_profile(priv, dim_mode, &curr); in mlx5e_refresh_rq_params()
2616 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, in mlx5e_refresh_rq_params()
2626 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, in mlx5e_refresh_rq_params()
2627 priv->params.rx_cq_moderation_usec, in mlx5e_refresh_rq_params()
2628 priv->params.rx_cq_moderation_pkts, in mlx5e_refresh_rq_params()
2634 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, in mlx5e_refresh_rq_params()
2635 priv->params.rx_cq_moderation_usec, in mlx5e_refresh_rq_params()
2636 priv->params.rx_cq_moderation_pkts)); in mlx5e_refresh_rq_params()
2640 mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) in mlx5e_refresh_channel_params_sub() argument
2645 err = mlx5e_refresh_rq_params(priv, &c->rq); in mlx5e_refresh_channel_params_sub()
2649 for (i = 0; i != priv->num_tc; i++) { in mlx5e_refresh_channel_params_sub()
2650 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); in mlx5e_refresh_channel_params_sub()
2659 mlx5e_refresh_channel_params(struct mlx5e_priv *priv) in mlx5e_refresh_channel_params() argument
2664 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_refresh_channel_params()
2667 for (i = 0; i < priv->params.num_channels; i++) { in mlx5e_refresh_channel_params()
2670 err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); in mlx5e_refresh_channel_params()
2678 mlx5e_open_tis(struct mlx5e_priv *priv, int tc) in mlx5e_open_tis() argument
2680 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_open_tis()
2687 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); in mlx5e_open_tis()
2689 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); in mlx5e_open_tis()
2693 mlx5e_close_tis(struct mlx5e_priv *priv, int tc) in mlx5e_close_tis() argument
2695 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc], 0); in mlx5e_close_tis()
2699 mlx5e_open_tises(struct mlx5e_priv *priv) in mlx5e_open_tises() argument
2701 int num_tc = priv->num_tc; in mlx5e_open_tises()
2706 err = mlx5e_open_tis(priv, tc); in mlx5e_open_tises()
2715 mlx5e_close_tis(priv, tc); in mlx5e_open_tises()
2721 mlx5e_close_tises(struct mlx5e_priv *priv) in mlx5e_close_tises() argument
2723 int num_tc = priv->num_tc; in mlx5e_close_tises()
2727 mlx5e_close_tis(priv, tc); in mlx5e_close_tises()
2731 mlx5e_open_default_rqt(struct mlx5e_priv *priv, u32 *prqtn, int sz) in mlx5e_open_default_rqt() argument
2749 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); in mlx5e_open_default_rqt()
2751 err = mlx5_core_create_rqt(priv->mdev, in, inlen, prqtn); in mlx5e_open_default_rqt()
2758 mlx5e_open_rqts(struct mlx5e_priv *priv) in mlx5e_open_rqts() argument
2763 err = mlx5e_open_default_rqt(priv, &priv->rqtn, in mlx5e_open_rqts()
2764 1 << priv->params.rx_hash_log_tbl_sz); in mlx5e_open_rqts()
2768 for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) { in mlx5e_open_rqts()
2769 err = mlx5e_open_default_rqt(priv, &priv->channel[i].rqtn, 1); in mlx5e_open_rqts()
2777 mlx5_core_destroy_rqt(priv->mdev, priv->channel[i].rqtn, 0); in mlx5e_open_rqts()
2779 mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0); in mlx5e_open_rqts()
2786 mlx5e_close_rqts(struct mlx5e_priv *priv) in mlx5e_close_rqts() argument
2790 for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) in mlx5e_close_rqts()
2791 mlx5_core_destroy_rqt(priv->mdev, priv->channel[i].rqtn, 0); in mlx5e_close_rqts()
2793 mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0); in mlx5e_close_rqts()
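Note: lines 2786-2793 cover RQT teardown completely and mirror mlx5e_open_rqts() at lines 2758-2779; the loop counter declaration, braces, and comments are assumed:

        static void
        mlx5e_close_rqts(struct mlx5e_priv *priv)
        {
                int i;

                /* Destroy the per-channel (direct) RQTs first ... */
                for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++)
                        mlx5_core_destroy_rqt(priv->mdev, priv->channel[i].rqtn, 0);

                /* ... then the main RQT created in mlx5e_open_rqts(). */
                mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0);
        }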
2797 mlx5e_activate_rqt(struct mlx5e_priv *priv) in mlx5e_activate_rqt() argument
2806 sz = 1 << priv->params.rx_hash_log_tbl_sz; in mlx5e_activate_rqt()
2826 ix %= priv->params.num_channels; in mlx5e_activate_rqt()
2829 ix -= ix % (int)priv->params.channels_rsss; in mlx5e_activate_rqt()
2831 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); in mlx5e_activate_rqt()
2834 err = mlx5_core_modify_rqt(priv->mdev, priv->rqtn, in, inlen); in mlx5e_activate_rqt()
2842 for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) { in mlx5e_activate_rqt()
2850 ix %= priv->params.num_channels; in mlx5e_activate_rqt()
2853 ix -= ix % (int)priv->params.channels_rsss; in mlx5e_activate_rqt()
2855 MLX5_SET(rqtc, rqtc, rq_num[0], priv->channel[ix].rq.rqn); in mlx5e_activate_rqt()
2857 err = mlx5_core_modify_rqt(priv->mdev, priv->channel[i].rqtn, in, inlen); in mlx5e_activate_rqt()
2868 mlx5e_deactivate_rqt(struct mlx5e_priv *priv) in mlx5e_deactivate_rqt() argument
2877 sz = 1 << priv->params.rx_hash_log_tbl_sz; in mlx5e_deactivate_rqt()
2890 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); in mlx5e_deactivate_rqt()
2892 err = mlx5_core_modify_rqt(priv->mdev, priv->rqtn, in, inlen); in mlx5e_deactivate_rqt()
2900 for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) { in mlx5e_deactivate_rqt()
2901 MLX5_SET(rqtc, rqtc, rq_num[0], priv->drop_rq.rqn); in mlx5e_deactivate_rqt()
2903 err = mlx5_core_modify_rqt(priv->mdev, priv->channel[i].rqtn, in, inlen); in mlx5e_deactivate_rqt()
2939 mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz(struct mlx5e_priv *priv, u32 *tirc) in mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz() argument
2941 MLX5_SET(tirc, tirc, lro_max_msg_sz, (priv->params.lro_wqe_sz >> 8) - in mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz()
2942 (MLX5_CAP_ETH(priv->mdev, lro_max_msg_sz_mode) == 0 ? 1 : 0)); in mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz()
2946 mlx5e_hw_lro_set_tir_ctx(struct mlx5e_priv *priv, u32 *tirc) in mlx5e_hw_lro_set_tir_ctx() argument
2953 MLX5_CAP_ETH(priv->mdev, lro_timer_supported_periods[2])); in mlx5e_hw_lro_set_tir_ctx()
2954 mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz(priv, tirc); in mlx5e_hw_lro_set_tir_ctx()
2958 mlx5e_hw_lro_update_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan) in mlx5e_hw_lro_update_tir() argument
2960 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_hw_lro_update_tir()
2974 priv->tirn_inner_vxlan[tt] : priv->tirn[tt]); in mlx5e_hw_lro_update_tir()
2979 if (priv->params.hw_lro_en) in mlx5e_hw_lro_update_tir()
2980 mlx5e_hw_lro_set_tir_ctx(priv, tirc); in mlx5e_hw_lro_update_tir()
2989 mlx5e_hw_lro_update_tirs(struct mlx5e_priv *priv) in mlx5e_hw_lro_update_tirs() argument
2995 err1 = mlx5e_hw_lro_update_tir(priv, i / 2, (i % 2) ? true : in mlx5e_hw_lro_update_tirs()
3004 mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt, bool inner_vxlan) in mlx5e_build_tir_ctx() argument
3011 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); in mlx5e_build_tir_ctx()
3025 if (priv->params.hw_lro_en) in mlx5e_build_tir_ctx()
3026 mlx5e_hw_lro_set_tir_ctx(priv, tirc); in mlx5e_build_tir_ctx()
3040 priv->rqtn); in mlx5e_build_tir_ctx()
3167 mlx5e_open_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan) in mlx5e_open_tir() argument
3169 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_open_tir()
3181 mlx5e_build_tir_ctx(priv, tirc, tt, inner_vxlan); in mlx5e_open_tir()
3184 &priv->tirn_inner_vxlan[tt] : &priv->tirn[tt]); in mlx5e_open_tir()
3192 mlx5e_close_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan) in mlx5e_close_tir() argument
3194 mlx5_core_destroy_tir(priv->mdev, inner_vxlan ? in mlx5e_close_tir()
3195 priv->tirn_inner_vxlan[tt] : priv->tirn[tt], 0); in mlx5e_close_tir()
3199 mlx5e_open_tirs(struct mlx5e_priv *priv) in mlx5e_open_tirs() argument
3205 err = mlx5e_open_tir(priv, i / 2, (i % 2) ? true : false); in mlx5e_open_tirs()
3214 mlx5e_close_tir(priv, i / 2, (i % 2) ? true : false); in mlx5e_open_tirs()
3220 mlx5e_close_tirs(struct mlx5e_priv *priv) in mlx5e_close_tirs() argument
3225 mlx5e_close_tir(priv, i / 2, (i % 2) ? true : false); in mlx5e_close_tirs()
3235 struct mlx5e_priv *priv = if_getsoftc(ifp); in mlx5e_set_dev_port_mtu() local
3236 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_set_dev_port_mtu()
3279 priv->params_ethtool.hw_mtu = hw_mtu; in mlx5e_set_dev_port_mtu()
3284 priv->params_ethtool.hw_mtu_msb = hw_mtu; in mlx5e_set_dev_port_mtu()
3292 struct mlx5e_priv *priv = if_getsoftc(ifp); in mlx5e_open_locked() local
3297 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) in mlx5e_open_locked()
3301 if (rss_getnumbuckets() > priv->params.num_channels) { in mlx5e_open_locked()
3304 rss_getnumbuckets(), priv->params.num_channels); in mlx5e_open_locked()
3307 err = mlx5e_open_tises(priv); in mlx5e_open_locked()
3312 err = mlx5_vport_alloc_q_counter(priv->mdev, in mlx5e_open_locked()
3315 mlx5_en_err(priv->ifp, in mlx5e_open_locked()
3320 priv->counter_set_id = set_id; in mlx5e_open_locked()
3322 err = mlx5e_open_channels(priv); in mlx5e_open_locked()
3328 err = mlx5e_activate_rqt(priv); in mlx5e_open_locked()
3334 set_bit(MLX5E_STATE_OPENED, &priv->state); in mlx5e_open_locked()
3336 mlx5e_update_carrier(priv); in mlx5e_open_locked()
3341 mlx5e_close_channels(priv); in mlx5e_open_locked()
3344 mlx5_vport_dealloc_q_counter(priv->mdev, in mlx5e_open_locked()
3345 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); in mlx5e_open_locked()
3348 mlx5e_close_tises(priv); in mlx5e_open_locked()
3356 struct mlx5e_priv *priv = arg; in mlx5e_open() local
3358 PRIV_LOCK(priv); in mlx5e_open()
3359 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) in mlx5e_open()
3360 mlx5_en_err(priv->ifp, in mlx5e_open()
3363 mlx5e_open_locked(priv->ifp); in mlx5e_open()
3364 if_setdrvflagbits(priv->ifp, IFF_DRV_RUNNING, 0); in mlx5e_open()
3365 PRIV_UNLOCK(priv); in mlx5e_open()
3371 struct mlx5e_priv *priv = if_getsoftc(ifp); in mlx5e_close_locked() local
3374 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_close_locked()
3377 clear_bit(MLX5E_STATE_OPENED, &priv->state); in mlx5e_close_locked()
3379 if_link_state_change(priv->ifp, LINK_STATE_DOWN); in mlx5e_close_locked()
3381 mlx5e_deactivate_rqt(priv); in mlx5e_close_locked()
3382 mlx5e_close_channels(priv); in mlx5e_close_locked()
3383 mlx5_vport_dealloc_q_counter(priv->mdev, in mlx5e_close_locked()
3384 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); in mlx5e_close_locked()
3385 mlx5e_close_tises(priv); in mlx5e_close_locked()
3393 struct mlx5e_priv *priv = if_getsoftc(ifp); in mlx5e_get_counter() local
3399 retval = priv->stats.vport.rx_packets; in mlx5e_get_counter()
3402 retval = priv->stats.pport.in_range_len_errors + in mlx5e_get_counter()
3403 priv->stats.pport.out_of_range_len + in mlx5e_get_counter()
3404 priv->stats.pport.too_long_errors + in mlx5e_get_counter()
3405 priv->stats.pport.check_seq_err + in mlx5e_get_counter()
3406 priv->stats.pport.alignment_err; in mlx5e_get_counter()
3409 retval = priv->stats.vport.rx_out_of_buffer; in mlx5e_get_counter()
3412 retval = priv->stats.vport.tx_packets; in mlx5e_get_counter()
3415 retval = priv->stats.port_stats_debug.out_discards; in mlx5e_get_counter()
3418 retval = priv->stats.vport.rx_bytes; in mlx5e_get_counter()
3421 retval = priv->stats.vport.tx_bytes; in mlx5e_get_counter()
3424 retval = priv->stats.vport.rx_multicast_packets; in mlx5e_get_counter()
3427 retval = priv->stats.vport.tx_multicast_packets; in mlx5e_get_counter()
3430 retval = priv->stats.vport.tx_queue_dropped; in mlx5e_get_counter()
3433 retval = priv->stats.pport.collisions; in mlx5e_get_counter()
3446 struct mlx5e_priv *priv = if_getsoftc(ifp); in mlx5e_set_rx_mode() local
3448 queue_work(priv->wq, &priv->set_rx_mode_work); in mlx5e_set_rx_mode()
3499 struct mlx5e_priv *priv; in mlx5e_ioctl() local
3514 priv = if_getsoftc(ifp); in mlx5e_ioctl()
3517 if (priv == NULL || priv->gone != 0) in mlx5e_ioctl()
3524 PRIV_LOCK(priv); in mlx5e_ioctl()
3525 mlx5_query_port_max_mtu(priv->mdev, &max_mtu); in mlx5e_ioctl()
3531 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); in mlx5e_ioctl()
3546 PRIV_UNLOCK(priv); in mlx5e_ioctl()
3554 PRIV_LOCK(priv); in mlx5e_ioctl()
3557 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_ioctl()
3560 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); in mlx5e_ioctl()
3564 mlx5_set_port_status(priv->mdev, in mlx5e_ioctl()
3566 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) in mlx5e_ioctl()
3568 mlx5e_update_carrier(priv); in mlx5e_ioctl()
3572 PRIV_UNLOCK(priv); in mlx5e_ioctl()
3582 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); in mlx5e_ioctl()
3591 PRIV_LOCK(priv); in mlx5e_ioctl()
3597 PRIV_LOCK(priv); in mlx5e_ioctl()
3599 if (!mlx5e_is_tlstx_capable(priv->mdev)) { in mlx5e_ioctl()
3603 if (!mlx5e_is_tlsrx_capable(priv->mdev)) { in mlx5e_ioctl()
3608 if (!mlx5e_is_ipsec_capable(priv->mdev)) { in mlx5e_ioctl()
3612 if (!mlx5e_is_ratelimit_capable(priv->mdev)) { in mlx5e_ioctl()
3683 mlx5e_disable_vlan_filter(priv); in mlx5e_ioctl()
3685 mlx5e_enable_vlan_filter(priv); in mlx5e_ioctl()
3697 mlx5e_del_all_vxlan_rules(priv); in mlx5e_ioctl()
3703 int err = mlx5e_add_all_vxlan_rules(priv); in mlx5e_ioctl()
3725 if (priv->clbr_done == 0) in mlx5e_ioctl()
3726 mlx5e_reset_calibration_callout(priv); in mlx5e_ioctl()
3728 callout_drain(&priv->tstmp_clbr); in mlx5e_ioctl()
3729 priv->clbr_done = 0; in mlx5e_ioctl()
3743 ipsec_accel_on_ifdown(priv->ifp); in mlx5e_ioctl()
3750 PRIV_UNLOCK(priv); in mlx5e_ioctl()
3769 PRIV_LOCK(priv); in mlx5e_ioctl()
3771 error = mlx5_query_module_num(priv->mdev, &module_num); in mlx5e_ioctl()
3779 module_status = mlx5_query_module_status(priv->mdev, module_num); in mlx5e_ioctl()
3804 error = mlx5_query_eeprom(priv->mdev, in mlx5e_ioctl()
3816 error = mlx5_query_eeprom(priv->mdev, in mlx5e_ioctl()
3831 PRIV_UNLOCK(priv); in mlx5e_ioctl()
3836 PRIV_LOCK(priv); in mlx5e_ioctl()
3837 error = -mlx5_query_pddr_troubleshooting_info(priv->mdev, NULL, in mlx5e_ioctl()
3839 PRIV_UNLOCK(priv); in mlx5e_ioctl()
3910 struct mlx5e_priv *priv, in mlx5e_build_ifp_priv() argument
3919 priv->params.log_sq_size = in mlx5e_build_ifp_priv()
3921 priv->params.log_rq_size = in mlx5e_build_ifp_priv()
3923 priv->params.rx_cq_moderation_usec = in mlx5e_build_ifp_priv()
3927 priv->params.rx_cq_moderation_mode = in mlx5e_build_ifp_priv()
3929 priv->params.rx_cq_moderation_pkts = in mlx5e_build_ifp_priv()
3931 priv->params.tx_cq_moderation_usec = in mlx5e_build_ifp_priv()
3933 priv->params.tx_cq_moderation_pkts = in mlx5e_build_ifp_priv()
3935 priv->params.rx_hash_log_tbl_sz = in mlx5e_build_ifp_priv()
3940 priv->params.num_tc = 1; in mlx5e_build_ifp_priv()
3941 priv->params.default_vlan_prio = 0; in mlx5e_build_ifp_priv()
3942 priv->counter_set_id = -1; in mlx5e_build_ifp_priv()
3943 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); in mlx5e_build_ifp_priv()
3945 err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); in mlx5e_build_ifp_priv()
3953 priv->params.hw_lro_en = false; in mlx5e_build_ifp_priv()
3954 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; in mlx5e_build_ifp_priv()
3962 priv->params.cqe_zipping_en = false; in mlx5e_build_ifp_priv()
3964 priv->mdev = mdev; in mlx5e_build_ifp_priv()
3965 priv->params.num_channels = num_comp_vectors; in mlx5e_build_ifp_priv()
3966 priv->params.channels_rsss = 1; in mlx5e_build_ifp_priv()
3967 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); in mlx5e_build_ifp_priv()
3968 priv->queue_mapping_channel_mask = in mlx5e_build_ifp_priv()
3970 priv->num_tc = priv->params.num_tc; in mlx5e_build_ifp_priv()
3971 priv->default_vlan_prio = priv->params.default_vlan_prio; in mlx5e_build_ifp_priv()
3973 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); in mlx5e_build_ifp_priv()
3974 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); in mlx5e_build_ifp_priv()
3975 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); in mlx5e_build_ifp_priv()
3993 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, in mlx5e_create_mkey() argument
3996 if_t ifp = priv->ifp; in mlx5e_create_mkey()
3997 struct mlx5_core_dev *mdev = priv->mdev; in mlx5e_create_mkey()
4037 mlx5e_priv_static_init(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev, in mlx5e_priv_static_init() argument
4043 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); in mlx5e_priv_static_init()
4044 sx_init(&priv->state_lock, "mlx5state"); in mlx5e_priv_static_init()
4045 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); in mlx5e_priv_static_init()
4046 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); in mlx5e_priv_static_init()
4048 mlx5e_chan_static_init(priv, &priv->channel[x], x); in mlx5e_priv_static_init()
4051 err = mlx5_alloc_bfreg(mdev, &priv->channel[x].bfreg, false, false); in mlx5e_priv_static_init()
4059 mlx5_free_bfreg(mdev, &priv->channel[x].bfreg); in mlx5e_priv_static_init()
4062 mlx5e_chan_static_destroy(&priv->channel[x]); in mlx5e_priv_static_init()
4063 callout_drain(&priv->watchdog); in mlx5e_priv_static_init()
4064 mtx_destroy(&priv->async_events_mtx); in mlx5e_priv_static_init()
4065 sx_destroy(&priv->state_lock); in mlx5e_priv_static_init()
4070 mlx5e_priv_static_destroy(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev, in mlx5e_priv_static_destroy() argument
4076 mlx5_free_bfreg(mdev, &priv->channel[x].bfreg); in mlx5e_priv_static_destroy()
4078 mlx5e_chan_static_destroy(&priv->channel[x]); in mlx5e_priv_static_destroy()
4079 callout_drain(&priv->watchdog); in mlx5e_priv_static_destroy()
4080 mtx_destroy(&priv->async_events_mtx); in mlx5e_priv_static_destroy()
4081 sx_destroy(&priv->state_lock); in mlx5e_priv_static_destroy()
4095 struct mlx5e_priv *priv = arg1; in sysctl_firmware() local
4098 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), in sysctl_firmware()
4099 fw_rev_sub(priv->mdev)); in sysctl_firmware()
4109 for (i = 0; i < ch->priv->num_tc; i++) in mlx5e_disable_tx_dma()
4160 for (i = 0; i < ch->priv->num_tc; i++) in mlx5e_enable_tx_dma()
4223 mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) in mlx5e_modify_tx_dma() argument
4227 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_modify_tx_dma()
4230 for (i = 0; i < priv->params.num_channels; i++) { in mlx5e_modify_tx_dma()
4232 mlx5e_disable_tx_dma(&priv->channel[i]); in mlx5e_modify_tx_dma()
4234 mlx5e_enable_tx_dma(&priv->channel[i]); in mlx5e_modify_tx_dma()
4239 mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) in mlx5e_modify_rx_dma() argument
4243 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) in mlx5e_modify_rx_dma()
4246 for (i = 0; i < priv->params.num_channels; i++) { in mlx5e_modify_rx_dma()
4248 mlx5e_disable_rx_dma(&priv->channel[i]); in mlx5e_modify_rx_dma()
4250 mlx5e_enable_rx_dma(&priv->channel[i]); in mlx5e_modify_rx_dma()
4255 mlx5e_add_hw_stats(struct mlx5e_priv *priv) in mlx5e_add_hw_stats() argument
4257 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), in mlx5e_add_hw_stats()
4259 priv, 0, sysctl_firmware, "A", "HCA firmware version"); in mlx5e_add_hw_stats()
4261 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), in mlx5e_add_hw_stats()
4262 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, in mlx5e_add_hw_stats()
4269 struct mlx5e_priv *priv = arg1; in mlx5e_sysctl_tx_priority_flow_control() local
4275 PRIV_LOCK(priv); in mlx5e_sysctl_tx_priority_flow_control()
4277 tx_pfc = priv->params.tx_priority_flow_control; in mlx5e_sysctl_tx_priority_flow_control()
4289 priv->params.tx_priority_flow_control = 0; in mlx5e_sysctl_tx_priority_flow_control()
4297 priv->params.tx_priority_flow_control |= (temp[i] << i); in mlx5e_sysctl_tx_priority_flow_control()
4301 if (tx_pfc != priv->params.tx_priority_flow_control) in mlx5e_sysctl_tx_priority_flow_control()
4302 err = -mlx5e_set_port_pfc(priv); in mlx5e_sysctl_tx_priority_flow_control()
4305 priv->params.tx_priority_flow_control = tx_pfc; in mlx5e_sysctl_tx_priority_flow_control()
4306 PRIV_UNLOCK(priv); in mlx5e_sysctl_tx_priority_flow_control()
4314 struct mlx5e_priv *priv = arg1; in mlx5e_sysctl_rx_priority_flow_control() local
4320 PRIV_LOCK(priv); in mlx5e_sysctl_rx_priority_flow_control()
4322 rx_pfc = priv->params.rx_priority_flow_control; in mlx5e_sysctl_rx_priority_flow_control()
4334 priv->params.rx_priority_flow_control = 0; in mlx5e_sysctl_rx_priority_flow_control()
4342 priv->params.rx_priority_flow_control |= (temp[i] << i); in mlx5e_sysctl_rx_priority_flow_control()
4346 if (rx_pfc != priv->params.rx_priority_flow_control) { in mlx5e_sysctl_rx_priority_flow_control()
4347 err = -mlx5e_set_port_pfc(priv); in mlx5e_sysctl_rx_priority_flow_control()
4348 if (err == 0 && priv->sw_is_port_buf_owner) in mlx5e_sysctl_rx_priority_flow_control()
4349 err = mlx5e_update_buf_lossy(priv); in mlx5e_sysctl_rx_priority_flow_control()
4353 priv->params.rx_priority_flow_control= rx_pfc; in mlx5e_sysctl_rx_priority_flow_control()
4354 PRIV_UNLOCK(priv); in mlx5e_sysctl_rx_priority_flow_control()
4360 mlx5e_setup_pauseframes(struct mlx5e_priv *priv) in mlx5e_setup_pauseframes() argument
4365 priv->params.tx_pauseframe_control = 1; in mlx5e_setup_pauseframes()
4366 priv->params.rx_pauseframe_control = 1; in mlx5e_setup_pauseframes()
4369 priv->params.tx_priority_flow_control = 0; in mlx5e_setup_pauseframes()
4370 priv->params.rx_priority_flow_control = 0; in mlx5e_setup_pauseframes()
4373 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_setup_pauseframes()
4375 &priv->params.tx_pauseframe_control, 0, in mlx5e_setup_pauseframes()
4378 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_setup_pauseframes()
4380 &priv->params.rx_pauseframe_control, 0, in mlx5e_setup_pauseframes()
4384 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_setup_pauseframes()
4386 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", in mlx5e_setup_pauseframes()
4389 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_setup_pauseframes()
4391 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", in mlx5e_setup_pauseframes()
4394 PRIV_LOCK(priv); in mlx5e_setup_pauseframes()
4397 priv->params.tx_pauseframe_control = in mlx5e_setup_pauseframes()
4398 priv->params.tx_pauseframe_control ? 1 : 0; in mlx5e_setup_pauseframes()
4399 priv->params.rx_pauseframe_control = in mlx5e_setup_pauseframes()
4400 priv->params.rx_pauseframe_control ? 1 : 0; in mlx5e_setup_pauseframes()
4403 error = mlx5e_set_port_pause_and_pfc(priv); in mlx5e_setup_pauseframes()
4405 mlx5_en_err(priv->ifp, in mlx5e_setup_pauseframes()
4407 priv->params.rx_priority_flow_control = 0; in mlx5e_setup_pauseframes()
4408 priv->params.tx_priority_flow_control = 0; in mlx5e_setup_pauseframes()
4411 (void) mlx5e_set_port_pause_and_pfc(priv); in mlx5e_setup_pauseframes()
4413 PRIV_UNLOCK(priv); in mlx5e_setup_pauseframes()
4421 struct mlx5e_priv *priv; in mlx5e_ul_snd_tag_alloc() local
4424 priv = if_getsoftc(ifp); in mlx5e_ul_snd_tag_alloc()
4426 if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) { in mlx5e_ul_snd_tag_alloc()
4430 u32 ch = priv->params.num_channels; in mlx5e_ul_snd_tag_alloc()
4447 pch = priv->channel + ch; in mlx5e_ul_snd_tag_alloc()
4551 mlx5e_ifm_add(struct mlx5e_priv *priv, int type) in mlx5e_ifm_add() argument
4553 ifmedia_add(&priv->media, type | IFM_ETHER, 0, NULL); in mlx5e_ifm_add()
4554 ifmedia_add(&priv->media, type | IFM_ETHER | in mlx5e_ifm_add()
4556 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_ETH_RXPAUSE, 0, NULL); in mlx5e_ifm_add()
4557 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_ETH_TXPAUSE, 0, NULL); in mlx5e_ifm_add()
4558 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_FDX, 0, NULL); in mlx5e_ifm_add()
4559 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_FDX | in mlx5e_ifm_add()
4561 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_FDX | in mlx5e_ifm_add()
4563 ifmedia_add(&priv->media, type | IFM_ETHER | IFM_FDX | in mlx5e_ifm_add()
4571 struct mlx5e_priv *priv; in mlx5e_create_ifp() local
4574 int ncv = mdev->priv.eq_table.num_comp_vectors; in mlx5e_create_ifp()
4592 priv = malloc_domainset(sizeof(*priv) + in mlx5e_create_ifp()
4593 (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), in mlx5e_create_ifp()
4596 ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev); in mlx5e_create_ifp()
4598 if (mlx5e_priv_static_init(priv, mdev, mdev->priv.eq_table.num_comp_vectors)) { in mlx5e_create_ifp()
4603 if_setsoftc(ifp, priv); in mlx5e_create_ifp()
4666 sysctl_ctx_init(&priv->sysctl_ctx); in mlx5e_create_ifp()
4667 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), in mlx5e_create_ifp()
4670 if (priv->sysctl_ifnet == NULL) { in mlx5e_create_ifp()
4675 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_ifp()
4678 if (priv->sysctl_ifnet == NULL) { in mlx5e_create_ifp()
4685 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, in mlx5e_create_ifp()
4688 if (priv->sysctl_hw == NULL) { in mlx5e_create_ifp()
4693 err = mlx5e_build_ifp_priv(mdev, priv, ncv); in mlx5e_create_ifp()
4700 priv->wq = mdev->priv.health.wq_watchdog; in mlx5e_create_ifp()
4702 err = mlx5_core_alloc_pd(mdev, &priv->pdn, 0); in mlx5e_create_ifp()
4707 err = mlx5_alloc_transport_domain(mdev, &priv->tdn, 0); in mlx5e_create_ifp()
4713 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); in mlx5e_create_ifp()
4718 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); in mlx5e_create_ifp()
4721 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && in mlx5e_create_ifp()
4727 err = mlx5e_rl_init(priv); in mlx5e_create_ifp()
4733 err = mlx5e_tls_init(priv); in mlx5e_create_ifp()
4740 err = mlx5e_ipsec_init(priv); in mlx5e_create_ifp()
4747 err = mlx5e_open_drop_rq(priv, &priv->drop_rq); in mlx5e_create_ifp()
4753 err = mlx5e_open_rqts(priv); in mlx5e_create_ifp()
4759 err = mlx5e_open_tirs(priv); in mlx5e_create_ifp()
4765 err = mlx5e_open_flow_tables(priv); in mlx5e_create_ifp()
4771 err = mlx5e_tls_rx_init(priv); in mlx5e_create_ifp()
4781 priv->media_status_last = IFM_AVALID; in mlx5e_create_ifp()
4782 priv->media_active_last = IFM_ETHER | IFM_AUTO | IFM_FDX; in mlx5e_create_ifp()
4785 mlx5e_setup_pauseframes(priv); in mlx5e_create_ifp()
4799 ifmedia_init(&priv->media, IFM_IMASK, in mlx5e_create_ifp()
4821 mlx5e_ifm_add(priv, media_entry.subtype); in mlx5e_create_ifp()
4837 mlx5e_ifm_add(priv, media_entry.subtype); in mlx5e_create_ifp()
4841 mlx5e_ifm_add(priv, IFM_10G_LR); in mlx5e_create_ifp()
4846 mlx5e_ifm_add(priv, IFM_AUTO); in mlx5e_create_ifp()
4849 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | in mlx5e_create_ifp()
4857 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, in mlx5e_create_ifp()
4858 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); in mlx5e_create_ifp()
4859 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, in mlx5e_create_ifp()
4860 mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); in mlx5e_create_ifp()
4863 priv->vxlan_start = EVENTHANDLER_REGISTER(vxlan_start, in mlx5e_create_ifp()
4864 mlx5e_vxlan_start, priv, EVENTHANDLER_PRI_ANY); in mlx5e_create_ifp()
4865 priv->vxlan_stop = EVENTHANDLER_REGISTER(vxlan_stop, in mlx5e_create_ifp()
4866 mlx5e_vxlan_stop, priv, EVENTHANDLER_PRI_ANY); in mlx5e_create_ifp()
4871 mlx5e_enable_async_events(priv); in mlx5e_create_ifp()
4873 mlx5e_add_hw_stats(priv); in mlx5e_create_ifp()
4875 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_ifp()
4877 priv->stats.vport.arg); in mlx5e_create_ifp()
4879 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_ifp()
4881 priv->stats.pport.arg); in mlx5e_create_ifp()
4883 mlx5e_create_ethtool(priv); in mlx5e_create_ifp()
4885 mtx_lock(&priv->async_events_mtx); in mlx5e_create_ifp()
4886 mlx5e_update_stats(priv); in mlx5e_create_ifp()
4887 mtx_unlock(&priv->async_events_mtx); in mlx5e_create_ifp()
4889 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), in mlx5e_create_ifp()
4891 &priv->clbr_done, 0, in mlx5e_create_ifp()
4893 callout_init(&priv->tstmp_clbr, 1); in mlx5e_create_ifp()
4895 priv->cclk = (uint64_t)MLX5_CAP_GEN(mdev, device_frequency_khz) * 1000ULL; in mlx5e_create_ifp()
4896 mlx5e_reset_calibration_callout(priv); in mlx5e_create_ifp()
4902 priv->pfil = pfil_head_register(&pa); in mlx5e_create_ifp()
4904 PRIV_LOCK(priv); in mlx5e_create_ifp()
4905 err = mlx5e_open_flow_rules(priv); in mlx5e_create_ifp()
4910 PRIV_UNLOCK(priv); in mlx5e_create_ifp()
4912 return (priv); in mlx5e_create_ifp()
4915 mlx5e_close_flow_tables(priv); in mlx5e_create_ifp()
4918 mlx5e_close_tirs(priv); in mlx5e_create_ifp()
4921 mlx5e_close_rqts(priv); in mlx5e_create_ifp()
4924 mlx5e_close_drop_rq(&priv->drop_rq); in mlx5e_create_ifp()
4927 mlx5e_ipsec_cleanup(priv); in mlx5e_create_ifp()
4930 mlx5e_tls_cleanup(priv); in mlx5e_create_ifp()
4933 mlx5e_rl_cleanup(priv); in mlx5e_create_ifp()
4936 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); in mlx5e_create_ifp()
4939 mlx5_dealloc_transport_domain(mdev, priv->tdn, 0); in mlx5e_create_ifp()
4942 mlx5_core_dealloc_pd(mdev, priv->pdn, 0); in mlx5e_create_ifp()
4945 flush_workqueue(priv->wq); in mlx5e_create_ifp()
4948 sysctl_ctx_free(&priv->sysctl_ctx); in mlx5e_create_ifp()
4949 if (priv->sysctl_debug) in mlx5e_create_ifp()
4950 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); in mlx5e_create_ifp()
4951 mlx5e_priv_static_destroy(priv, mdev, mdev->priv.eq_table.num_comp_vectors); in mlx5e_create_ifp()
4955 free(priv, M_MLX5EN); in mlx5e_create_ifp()
4962 struct mlx5e_priv *priv = vpriv; in mlx5e_destroy_ifp() local
4963 if_t ifp = priv->ifp; in mlx5e_destroy_ifp()
4966 priv->gone = 1; in mlx5e_destroy_ifp()
4982 while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) { in mlx5e_destroy_ifp()
4983 mlx5_en_err(priv->ifp, in mlx5e_destroy_ifp()
4991 while (priv->tls.init != 0 && in mlx5e_destroy_ifp()
4992 uma_zone_get_cur(priv->tls.zone) != 0) { in mlx5e_destroy_ifp()
4993 mlx5_en_err(priv->ifp, in mlx5e_destroy_ifp()
4999 while (priv->tls_rx.init != 0 && in mlx5e_destroy_ifp()
5000 uma_zone_get_cur(priv->tls_rx.zone) != 0) { in mlx5e_destroy_ifp()
5001 mlx5_en_err(priv->ifp, in mlx5e_destroy_ifp()
5007 mlx5e_priv_wait_for_completion(priv, mdev->priv.eq_table.num_comp_vectors); in mlx5e_destroy_ifp()
5010 callout_drain(&priv->watchdog); in mlx5e_destroy_ifp()
5012 callout_drain(&priv->tstmp_clbr); in mlx5e_destroy_ifp()
5014 if (priv->vlan_attach != NULL) in mlx5e_destroy_ifp()
5015 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); in mlx5e_destroy_ifp()
5016 if (priv->vlan_detach != NULL) in mlx5e_destroy_ifp()
5017 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); in mlx5e_destroy_ifp()
5018 if (priv->vxlan_start != NULL) in mlx5e_destroy_ifp()
5019 EVENTHANDLER_DEREGISTER(vxlan_start, priv->vxlan_start); in mlx5e_destroy_ifp()
5020 if (priv->vxlan_stop != NULL) in mlx5e_destroy_ifp()
5021 EVENTHANDLER_DEREGISTER(vxlan_stop, priv->vxlan_stop); in mlx5e_destroy_ifp()
5024 PRIV_LOCK(priv); in mlx5e_destroy_ifp()
5026 mlx5e_close_flow_rules(priv); in mlx5e_destroy_ifp()
5027 PRIV_UNLOCK(priv); in mlx5e_destroy_ifp()
5030 if (priv->pfil != NULL) { in mlx5e_destroy_ifp()
5031 pfil_head_unregister(priv->pfil); in mlx5e_destroy_ifp()
5032 priv->pfil = NULL; in mlx5e_destroy_ifp()
5036 ifmedia_removeall(&priv->media); in mlx5e_destroy_ifp()
5039 mlx5e_tls_rx_cleanup(priv); in mlx5e_destroy_ifp()
5041 ipsec_accel_on_ifdown(priv->ifp); in mlx5e_destroy_ifp()
5043 mlx5e_close_flow_tables(priv); in mlx5e_destroy_ifp()
5044 mlx5e_close_tirs(priv); in mlx5e_destroy_ifp()
5045 mlx5e_close_rqts(priv); in mlx5e_destroy_ifp()
5046 mlx5e_close_drop_rq(&priv->drop_rq); in mlx5e_destroy_ifp()
5047 mlx5e_ipsec_cleanup(priv); in mlx5e_destroy_ifp()
5048 mlx5e_tls_cleanup(priv); in mlx5e_destroy_ifp()
5049 mlx5e_rl_cleanup(priv); in mlx5e_destroy_ifp()
5052 sysctl_ctx_free(&priv->stats.vport.ctx); in mlx5e_destroy_ifp()
5053 sysctl_ctx_free(&priv->stats.pport.ctx); in mlx5e_destroy_ifp()
5054 if (priv->sysctl_debug) in mlx5e_destroy_ifp()
5055 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); in mlx5e_destroy_ifp()
5056 sysctl_ctx_free(&priv->sysctl_ctx); in mlx5e_destroy_ifp()
5058 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); in mlx5e_destroy_ifp()
5059 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn, 0); in mlx5e_destroy_ifp()
5060 mlx5_core_dealloc_pd(priv->mdev, priv->pdn, 0); in mlx5e_destroy_ifp()
5061 mlx5e_disable_async_events(priv); in mlx5e_destroy_ifp()
5062 flush_workqueue(priv->wq); in mlx5e_destroy_ifp()
5063 mlx5e_priv_static_destroy(priv, mdev, mdev->priv.eq_table.num_comp_vectors); in mlx5e_destroy_ifp()
5065 free(priv, M_MLX5EN); in mlx5e_destroy_ifp()
5072 struct mlx5e_priv *priv = if_getsoftc(dev); in mlx5_en_debugnet_init() local
5074 PRIV_LOCK(priv); in mlx5_en_debugnet_init()
5075 *nrxr = priv->params.num_channels; in mlx5_en_debugnet_init()
5078 PRIV_UNLOCK(priv); in mlx5_en_debugnet_init()
5089 struct mlx5e_priv *priv = if_getsoftc(dev); in mlx5_en_debugnet_transmit() local
5094 IFF_DRV_RUNNING || (priv->media_status_last & IFM_ACTIVE) == 0) in mlx5_en_debugnet_transmit()
5097 sq = &priv->channel[0].sq[0]; in mlx5_en_debugnet_transmit()
5119 struct mlx5e_priv *priv = if_getsoftc(dev); in mlx5_en_debugnet_poll() local
5122 (priv->media_status_last & IFM_ACTIVE) == 0) in mlx5_en_debugnet_poll()
5125 mlx5_poll_interrupts(priv->mdev); in mlx5_en_debugnet_poll()
5134 struct mlx5e_priv *priv = vpriv; in mlx5e_get_ifp() local
5136 return (priv->ifp); in mlx5e_get_ifp()