Lines Matching full:tcm

837 low_latency = mvm->tcm.result.low_latency[mvmvif->id]; in iwl_mvm_tcm_iter()
839 if (!mvm->tcm.result.change[mvmvif->id] && in iwl_mvm_tcm_iter()
877 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) in iwl_mvm_tcm_uapsd_nonagg_detected_wk()
908 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) in iwl_mvm_uapsd_agg_disconnect()
911 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; in iwl_mvm_uapsd_agg_disconnect()
922 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; in iwl_mvm_check_uapsd_agg_expected_tpt()
927 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); in iwl_mvm_check_uapsd_agg_expected_tpt()
929 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || in iwl_mvm_check_uapsd_agg_expected_tpt()
930 mvm->tcm.data[mac].uapsd_nonagg_detect.detected) in iwl_mvm_check_uapsd_agg_expected_tpt()
977 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); in iwl_mvm_calc_tcm_stats()
979 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); in iwl_mvm_calc_tcm_stats()
986 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); in iwl_mvm_calc_tcm_stats()
989 mvm->tcm.ll_ts = ts; in iwl_mvm_calc_tcm_stats()
991 mvm->tcm.uapsd_nonagg_ts = ts; in iwl_mvm_calc_tcm_stats()
993 mvm->tcm.result.elapsed = elapsed; in iwl_mvm_calc_tcm_stats()
1001 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; in iwl_mvm_calc_tcm_stats()
1009 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; in iwl_mvm_calc_tcm_stats()
1010 mvm->tcm.result.load[mac] = load; in iwl_mvm_calc_tcm_stats()
1011 mvm->tcm.result.airtime[mac] = airtime; in iwl_mvm_calc_tcm_stats()
1019 mvm->tcm.result.low_latency[mac] = true; in iwl_mvm_calc_tcm_stats()
1021 mvm->tcm.result.low_latency[mac] = false; in iwl_mvm_calc_tcm_stats()
1028 low_latency |= mvm->tcm.result.low_latency[mac]; in iwl_mvm_calc_tcm_stats()
1030 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) in iwl_mvm_calc_tcm_stats()
1041 mvm->tcm.result.global_load = load; in iwl_mvm_calc_tcm_stats()
1045 mvm->tcm.result.band_load[i] = band_load; in iwl_mvm_calc_tcm_stats()
1050 * in the TCM period, so that we can return to low load if there in iwl_mvm_calc_tcm_stats()
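The iwl_mvm_calc_tcm_stats() hits above show per-MAC airtime over the elapsed period being turned into a load classification, with result.change[mac] recording whether that classification differs from the previous period's. A minimal sketch of that kind of bookkeeping follows; the struct, thresholds and helper names are invented for illustration and are not the driver's actual definitions.

/*
 * Illustrative only: the result struct, thresholds and helper names below
 * are invented; they only mirror the bookkeeping the hits above suggest.
 */
#include <linux/types.h>

enum traffic_load {
	TRAFFIC_LOW,
	TRAFFIC_MEDIUM,
	TRAFFIC_HIGH,
};

struct traffic_result {
	bool change;			/* classification changed this period */
	enum traffic_load load;
	u32 airtime;			/* usecs of airtime in the period */
};

static enum traffic_load classify_load(u32 airtime_usec, u32 elapsed_ms)
{
	/* made-up thresholds: percentage of the period spent on the air */
	u32 pct = elapsed_ms ? airtime_usec / (10 * elapsed_ms) : 0;

	if (pct > 50)
		return TRAFFIC_HIGH;
	if (pct > 10)
		return TRAFFIC_MEDIUM;
	return TRAFFIC_LOW;
}

static void update_result(struct traffic_result *res,
			  u32 airtime_usec, u32 elapsed_ms)
{
	enum traffic_load load = classify_load(airtime_usec, elapsed_ms);

	/* remember whether the classification changed since last period */
	res->change = load != res->load;
	res->load = load;
	res->airtime = airtime_usec;
}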
1081 time_after(ts, mvm->tcm.uapsd_nonagg_ts + in iwl_mvm_recalc_tcm()
1084 spin_lock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1085 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { in iwl_mvm_recalc_tcm()
1086 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1089 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1097 spin_lock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1099 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { in iwl_mvm_recalc_tcm()
1106 mvm->tcm.ts = ts; in iwl_mvm_recalc_tcm()
1108 schedule_delayed_work(&mvm->tcm.work, work_delay); in iwl_mvm_recalc_tcm()
1110 spin_unlock(&mvm->tcm.lock); in iwl_mvm_recalc_tcm()
1119 tcm.work); in iwl_mvm_tcm_work()
1126 spin_lock_bh(&mvm->tcm.lock); in iwl_mvm_pause_tcm()
1127 mvm->tcm.paused = true; in iwl_mvm_pause_tcm()
1128 spin_unlock_bh(&mvm->tcm.lock); in iwl_mvm_pause_tcm()
1130 cancel_delayed_work_sync(&mvm->tcm.work); in iwl_mvm_pause_tcm()
1138 spin_lock_bh(&mvm->tcm.lock); in iwl_mvm_resume_tcm()
1139 mvm->tcm.ts = jiffies; in iwl_mvm_resume_tcm()
1140 mvm->tcm.ll_ts = jiffies; in iwl_mvm_resume_tcm()
1142 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; in iwl_mvm_resume_tcm()
1149 if (mvm->tcm.result.low_latency[mac]) in iwl_mvm_resume_tcm()
1152 /* The TCM data needs to be reset before "paused" flag changes */ in iwl_mvm_resume_tcm()
1154 mvm->tcm.paused = false; in iwl_mvm_resume_tcm()
1160 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) in iwl_mvm_resume_tcm()
1161 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); in iwl_mvm_resume_tcm()
1163 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); in iwl_mvm_resume_tcm()
1165 spin_unlock_bh(&mvm->tcm.lock); in iwl_mvm_resume_tcm()
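Taken together, the iwl_mvm_recalc_tcm(), iwl_mvm_tcm_work(), iwl_mvm_pause_tcm() and iwl_mvm_resume_tcm() hits outline a common pattern: a periodic delayed work item whose timestamps and paused flag are guarded by a spinlock; the work re-checks both under the lock before committing anything, pausing sets the flag and then cancels the work synchronously, and resuming resets the timestamps before clearing the flag and rescheduling. A self-contained sketch of that pattern follows; the struct and function names are invented for illustration and are not the iwlwifi definitions.

/*
 * Illustrative only: struct and function names are invented; the pattern
 * (spinlock-guarded paused flag and timestamps around a delayed work item)
 * is what the hits above show.
 */
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define MON_PERIOD	msecs_to_jiffies(5000)	/* made-up period */

struct traffic_mon {
	spinlock_t lock;		/* guards ts and paused */
	unsigned long ts;		/* start of the current period */
	bool paused;
	struct delayed_work work;
};

static void traffic_mon_work(struct work_struct *wk)
{
	struct traffic_mon *mon =
		container_of(wk, struct traffic_mon, work.work);
	unsigned long ts = jiffies;

	spin_lock_bh(&mon->lock);
	/* re-check under the lock: a pause may race with a queued run */
	if (!mon->paused) {
		if (time_after(ts, mon->ts + MON_PERIOD)) {
			/* ... recompute per-period statistics here ... */
			mon->ts = ts;
		}
		schedule_delayed_work(&mon->work, MON_PERIOD);
	}
	spin_unlock_bh(&mon->lock);
}

static void traffic_mon_init(struct traffic_mon *mon)
{
	spin_lock_init(&mon->lock);
	mon->ts = jiffies;
	INIT_DELAYED_WORK(&mon->work, traffic_mon_work);
}

static void traffic_mon_pause(struct traffic_mon *mon)
{
	spin_lock_bh(&mon->lock);
	mon->paused = true;
	spin_unlock_bh(&mon->lock);
	/* with the flag set no new run reschedules itself; now wait for
	 * any run already in flight before the caller touches the data
	 */
	cancel_delayed_work_sync(&mon->work);
}

static void traffic_mon_resume(struct traffic_mon *mon)
{
	spin_lock_bh(&mon->lock);
	mon->ts = jiffies;
	/* reset per-period counters before clearing "paused" */
	mon->paused = false;
	schedule_delayed_work(&mon->work, MON_PERIOD);
	spin_unlock_bh(&mon->lock);
}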