Lines matching full:tx — bna Ethernet driver, TX state machine and Tx object lifecycle (lines elided between matches are shown as /* ... */)
/* TX */

#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;	/* one-shot: clear before calling */ \
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)
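Every handler below is installed and dispatched through the driver family's shared FSM helpers. A paraphrased sketch of those helpers (patterned on bfa_cs.h, not a verbatim copy) shows why each state comes as a pair of functions, bna_tx_sm_<state>() and bna_tx_sm_<state>_entry():

/* Paraphrase of the bfa_cs.h FSM convention, for reading this listing:
 * the object stores its current state as a function pointer, and
 * entering a state also runs that state's _entry() hook.
 */
typedef void (*bfa_fsm_t)(void *fsm, int event);

#define bfa_fsm_set_state(_fsm, _state) do {				\
	(_fsm)->fsm = (bfa_fsm_t)(_state);	/* remember handler */	\
	_state##_entry(_fsm);			/* run entry action */	\
} while (0)

#define bfa_fsm_send_event(_fsm, _event)				\
	((_fsm)->fsm((_fsm), (_event)))		/* dispatch to state */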
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;
	/* ... TX_E_FAIL, TX_E_BW_UPDATE: no-op ... */
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~BNA_TX_F_BW_UPDATED;
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		tx->flags &= ~BNA_TX_F_BW_UPDATED;
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	case TX_E_STARTED:
		if (tx->flags & BNA_TX_F_BW_UPDATED) {
			/* BW changed while starting: restart the queues */
			tx->flags &= ~BNA_TX_F_BW_UPDATED;
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;
	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;	/* latch for later */
		break;
	/* ... */
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each_entry(txq, &tx->txq_q, qe) {
		txq->tcb->priority = txq->priority;
		bna_ib_start(tx->bna, &txq->ib, is_regular);	/* start IB */
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;
	/* ... */
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STARTED:
		/* late start response: we got here via start_wait ->
		 * stop_wait on TX_E_STOP, so stop the enet now
		 */
		bna_tx_enet_stop(tx);
		break;
	/* ... */
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	/* ... TX_E_FAIL, TX_E_BW_UPDATE: no-op ... */
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;
	/* ... */
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_CLEANUP_DONE:
		/* restart with the new configuration */
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	/* ... */
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	/* ... */
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	/* ... */
	}
}
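Read together, the handlers above give the following transition graph, reconstructed only from the transitions visible in this listing:

/*
 * Transition summary (TX_E_ prefix dropped from event names):
 *
 *   stopped           --START-->          start_wait
 *   start_wait        --STARTED-->        started (or prio_stop_wait if
 *                                         BW_UPDATED was latched meanwhile)
 *   start_wait        --STOP-->           stop_wait
 *   started           --STOP-->           stop_wait
 *   started           --BW_UPDATE-->      prio_stop_wait
 *   started           --FAIL-->           failed  (start_wait: -> stopped)
 *   stop_wait         --STOPPED|FAIL-->   cleanup_wait
 *   prio_stop_wait    --STOPPED-->        prio_cleanup_wait
 *   prio_cleanup_wait --CLEANUP_DONE-->   start_wait   (restart)
 *   cleanup_wait      --CLEANUP_DONE-->   stopped
 *   failed            --START-->          quiesce_wait
 *   quiesce_wait      --CLEANUP_DONE-->   start_wait
 */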
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	/* ... */
	cfg_req->num_queues = tx->num_txq;
	for (i = 0; i < tx->num_txq; i++) {
		txq = txq ? list_next_entry(txq, qe)
			: list_first_entry(&tx->txq_q, struct bna_txq, qe);
		/* ... per-queue qpt, priority and IB address setup ... */
	}
	/* ... */
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	/* ... */
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	/* ... */
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;

	/* stop the IBs first, then ask firmware to tear the Tx down */
	list_for_each_entry(txq, &tx->txq_q, qe)
		bna_ib_stop(tx->bna, &txq->ib);

	bna_bfi_tx_enet_stop(tx);
}
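Both directions of the firmware handshake follow the same mailbox pattern: fill a BFI request in the tx's preallocated bfi_enet_cmd area, stamp the header with bfi_msgq_mhdr_set(), bind the buffer to tx->msgq_cmd with bfa_msgq_cmd_set(), and post it with bfa_msgq_cmd_post(). Condensed into one illustrative helper (bna_bfi_tx_post() is a hypothetical name, not from the driver):

/* Hypothetical condensation of the pattern used by
 * bna_bfi_tx_enet_start()/bna_bfi_tx_enet_stop() above.
 */
static void
bna_bfi_tx_post(struct bna_tx *tx, struct bfi_msgq_mhdr *mh,
		u8 msg_id, size_t req_len)
{
	bfi_msgq_mhdr_set(*mh, BFI_MC_ENET, msg_id, 0, tx->rid);
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, req_len, mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

The command completes asynchronously; the *_rsp() handlers further down convert the firmware's reply back into TX_E_STARTED/TX_E_STOPPED events.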
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR)
		/* regular Tx comes from the low-rid end of the free list */
		tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
	else
		/* loopback Tx comes from the high-rid end */
		tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
	list_del(&tx->qe);
	tx->type = type;

	return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *qe;

	/* return the txqs to the module's free pool */
	while (!list_empty(&tx->txq_q)) {
		txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_move_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	/* unlink from the active list */
	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	/* re-insert into tx_free_q, keeping it sorted by rid */
	list_for_each_prev(qe, &tx_mod->tx_free_q)
		if (((struct bna_tx *)qe)->rid < tx->rid)
			break;
	list_add(&tx->qe, qe);
}
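The tail of bna_tx_free() keeps tx_free_q ordered by rid, which is what lets bna_tx_get() take regular Tx objects from the low-rid front and loopback ones from the high-rid back. The same insert-in-order idiom in isolation (generic names; a sketch, not driver code):

#include <linux/list.h>

struct rid_node {
	struct list_head qe;
	int rid;
};

/* Walk backwards to the last node with a smaller rid and link the new
 * node right after it; if none exists, qe ends up at the list head and
 * the node becomes the new front.
 */
static void rid_list_insert_sorted(struct rid_node *node,
				   struct list_head *head)
{
	struct list_head *qe;

	list_for_each_prev(qe, head)
		if (list_entry(qe, struct rid_node, qe)->rid < node->rid)
			break;
	list_add(&node->qe, qe);
}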
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
	     i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
		/* set up doorbells from the per-queue handles in the reply */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		/* ... hw_id and producer/consumer index init elided ... */
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}
void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;

	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
}
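bna_bfi_bw_update_aen() is the fan-out point for the bandwidth-update async event: it simply broadcasts TX_E_BW_UPDATE to every active Tx and lets each FSM decide what that means in its current state; started restarts its queues via prio_stop_wait, start_wait latches BNA_TX_F_BW_UPDATED for later, and the teardown states ignore it.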
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
	      struct bna_tx_config *tx_cfg,
	      const struct bna_tx_event_cbfn *tx_cbfn,
	      struct bna_res_info *res_info, void *priv)
{
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	int i;

	/* ... resource sanity checks elided ... */

	/* Tx */
	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQ: claim num_txq queues from the module's free pool */
	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;
		txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
		list_move_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/* Initialize Tx: install the driver callbacks */
	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ: per-queue IB, queue-page-table and tcb setup */
	list_for_each_entry(txq, &tx->txq_q, qe) {
		/* ... IB and qpt initialization elided ... */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= BIT(tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
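For orientation, a sketch of the lifecycle a caller would drive through these entry points; the real call sites live in the bnad layer, so treat the exact ordering as illustrative (stopped_cb is a hypothetical callback name):

/* Illustrative only: create, enable, disable and destroy a Tx object,
 * assuming tx_cfg, tx_cbfn, res_info and priv are already set up.
 */
struct bna_tx *tx;

tx = bna_tx_create(bna, bnad, tx_cfg, tx_cbfn, res_info, priv);
if (!tx)
	return;				/* free list or txq pool exhausted */

bna_tx_enable(tx);			/* fires TX_E_START once ENET is up */
/* ... datapath runs while the FSM sits in bna_tx_sm_started ... */
bna_tx_disable(tx, BNA_HARD_CLEANUP, stopped_cb);	/* fires TX_E_STOP */
/* stopped_cb runs from bna_tx_sm_stopped_entry() via call_tx_stop_cbfn() */
bna_tx_destroy(tx);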
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;

	list_for_each_entry(txq, &tx->txq_q, qe)
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);

	tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
	       void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* soft cleanup: complete immediately, leave the FSM alone */
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
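Note the two stop paths: module-driven stop (bna_tx_stop()) hardwires stop_cbfn to bna_tx_mod_cb_tx_stopped so the Tx module can account for each stopped Tx, while caller-driven bna_tx_disable() installs the caller's callback instead. In both cases the callback is consumed one-shot by call_tx_stop_cbfn() when the FSM reaches stopped; BNA_SOFT_CLEANUP short-circuits all of that and completes synchronously without sending TX_E_STOP.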
void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}

static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	/* ... */
	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	/* ... */
	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		/* ... txq free-list population elided ... */
	}
	/* ... */
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
		if (tx->type == type)
			bna_tx_start(tx);
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;

	/* ... flag clearing and stop wait-counter init elided ... */
	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	/* ... */
}

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
		bna_tx_fail(tx);
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;

	list_for_each_entry(txq, &tx->txq_q, qe)
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
}