Lines Matching defs:tid (definitions and uses of the identifier tid, apparently in iwlwifi's mvm/sta.c; the number leading each match is that source file's own line number)

300 sta->addr, ba_data->tid);
355 int sta_id, u16 *queueptr, u8 tid)
375 if (tid == IWL_MAX_TID_COUNT)
376 tid = IWL_MGMT_TID;
378 remove_cmd.u.remove.tid = cpu_to_le32(tid);
396 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
413 cmd.tid = mvm->queue_info[queue].txq_tid;
425 iwl_mvm_txq_from_tid(sta, tid);
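The remap at 375-376 above recurs when a queue is enabled (855-856 below): mac80211 has no TID for management frames, so the driver tracks them under the out-of-range value IWL_MAX_TID_COUNT (8) and converts to the firmware's dedicated management TID (15) when building commands. A minimal standalone sketch of the pattern; the constant values are as defined in iwlwifi, while the fw_tid() wrapper is hypothetical:

#include <stdio.h>

#define IWL_MAX_TID_COUNT 8   /* driver-internal pseudo TID for mgmt frames */
#define IWL_MGMT_TID      15  /* TID the firmware expects for mgmt frames */

/* hypothetical helper wrapping the remap seen at lines 375-376 / 855-856 */
static int fw_tid(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		tid = IWL_MGMT_TID;
	return tid;
}

int main(void)
{
	printf("data TID 3 -> %d, mgmt TID 8 -> %d\n", fw_tid(3), fw_tid(8));
	return 0;
}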
454 int tid;
473 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
474 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
475 agg_tids |= BIT(tid);
494 int tid;
517 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
519 iwl_mvm_txq_from_tid(sta, tid);
521 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
522 disable_agg_tids |= BIT(tid);
523 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
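The two loops above (454-475 and 494-523) walk a queue's TID bitmap and collect the TIDs that currently have a TX aggregation session, so those sessions can be torn down before the queue is reused. A compilable sketch of the pattern, with a plain loop standing in for the kernel's for_each_set_bit() and the IWL_AGG_* enum reduced to the two states that matter here:

#define IWL_MAX_TID_COUNT 8
#define BIT(n) (1u << (n))

enum agg_state { IWL_AGG_OFF, IWL_AGG_ON }; /* simplified; the driver has more states */

/* collect the TIDs mapped on this queue whose BA session is active;
 * the bound includes the mgmt pseudo TID, matching IWL_MAX_TID_COUNT + 1 */
static unsigned int get_agg_tids(unsigned int tid_bitmap,
				 const enum agg_state state[])
{
	unsigned int agg_tids = 0;
	int tid;

	for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
		if (!(tid_bitmap & BIT(tid)))
			continue;
		if (state[tid] == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	return agg_tids;
}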
555 u8 sta_id, tid;
567 tid = mvm->queue_info[queue].txq_tid;
581 ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
667 int sta_id, int tid, int frame_limit, u16 ssn)
678 .tid = tid,
704 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
735 cmd.tid = mvm->queue_info[queue].txq_tid;
763 mvm->queue_info[queue].txq_tid = tid;
769 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
850 u8 sta_id, u8 tid, unsigned int timeout)
855 if (tid == IWL_MAX_TID_COUNT) {
856 tid = IWL_MGMT_TID;
888 tid, size, timeout);
892 "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
893 queue, sta_mask, tid);
900 int tid)
904 iwl_mvm_txq_from_tid(sta, tid);
912 "Allocating queue for sta %d on tid %d\n",
913 mvmsta->deflink.sta_id, tid);
915 tid, wdg_timeout);
920 mvm->tvqm_info[queue].txq_tid = tid;
926 mvmsta->tid_data[tid].txq_id = queue;
934 int queue, u8 sta_id, u8 tid)
939 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
941 queue, tid);
949 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
953 if (tid != IWL_MAX_TID_COUNT)
955 tid_to_mac80211_ac[tid];
959 mvm->queue_info[queue].txq_tid = tid;
964 iwl_mvm_txq_from_tid(sta, tid);
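Several matches (953-955 above, and 1034, 1091, 3130, 3271 below) index tid_to_mac80211_ac[] to derive a queue's access category from its TID. The table itself is outside the listing; it is the standard 802.11 user-priority-to-AC mapping, with the driver's pseudo management TID pinned to voice. A sketch, with a stand-in enum ordered like mac80211's IEEE80211_AC_*:

enum { AC_VO, AC_VI, AC_BE, AC_BK }; /* same order as IEEE80211_AC_* */

static const unsigned char tid_to_ac[] = {
	AC_BE, /* TID 0 */
	AC_BK, /* TID 1 */
	AC_BK, /* TID 2 */
	AC_BE, /* TID 3 */
	AC_VI, /* TID 4 */
	AC_VI, /* TID 5 */
	AC_VO, /* TID 6 */
	AC_VO, /* TID 7 */
	AC_VO, /* TID 8: management pseudo TID, treated as voice */
};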
989 .tid = cfg->tid,
997 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
1017 int tid;
1032 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1033 cmd.tid = tid;
1034 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1043 mvm->queue_info[queue].txq_tid = tid;
1044 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
1045 queue, tid);
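When the owning TID of a shared queue goes away (1032 above, and again at 1069 below), the driver promotes the lowest TID still set in the queue's bitmap. A userspace analogue of that find_first_bit() call, assuming the bitmap fits in an unsigned int (it does: at most 9 TID bits); returning -1 for an empty bitmap is a simplification, the kernel helper returns the bitmap size instead:

#include <strings.h> /* ffs() */

/* lowest set bit in the TID bitmap becomes the queue's new owner */
static int new_owner_tid(unsigned int tid_bitmap)
{
	return tid_bitmap ? ffs((int)tid_bitmap) - 1 : -1;
}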
1053 int tid = -1;
1069 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1070 if (tid_bitmap != BIT(tid)) {
1076 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
1077 tid);
1088 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1090 ret = iwl_mvm_redirect_queue(mvm, queue, tid,
1091 tid_to_mac80211_ac[tid], ssn,
1093 iwl_mvm_txq_from_tid(sta, tid));
1100 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1103 mvmsta->tid_disable_agg &= ~BIT(tid);
1140 unsigned int tid;
1149 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1151 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1152 tid_bitmap &= ~BIT(tid);
1155 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1156 tid_bitmap &= ~BIT(tid);
1169 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1172 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1173 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1193 tid, queue);
1197 "TXQ #%d left with tid bitmap 0x%x\n", queue,
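The reclaim path above (1140-1197) may unmap a TID from an inactive queue only if nothing is queued on it and its BA session is fully off; the loop at 1149-1156 drops every other TID from the candidate bitmap before 1169-1173 clears the mappings. A condensed sketch of that filter, with the tid_queued() and agg_off() callbacks standing in for the driver's iwl_mvm_tid_queued() and its IWL_AGG_OFF state check:

#define IWL_MAX_TID_COUNT 8
#define BIT(n) (1u << (n))

/* keep only TIDs that are drained and have no (pending) BA session */
static unsigned int reclaimable_tids(unsigned int tid_bitmap,
				     int (*tid_queued)(int tid),
				     int (*agg_off)(int tid))
{
	int tid;

	for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
		if (!(tid_bitmap & BIT(tid)))
			continue;
		if (tid_queued(tid) || !agg_off(tid))
			tid_bitmap &= ~BIT(tid);
	}
	return tid_bitmap;
}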
1248 int tid;
1262 for_each_set_bit(tid, &queue_tid_bitmap,
1264 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1268 inactive_tid_bitmap |= BIT(tid);
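The check above (1262-1268) is what classifies a TID as inactive in the first place: it stays active while its last frame is newer than the inactivity window, compared wraparound-safely. A standalone analogue; time_after() matches the kernel macro, and the driver's IWL_MVM_DQA_QUEUE_TIMEOUT window is passed in as a parameter here:

/* jiffies-style wraparound-safe "a is after b" comparison */
#define time_after(a, b) ((long)((b) - (a)) < 0)

/* a TID is inactive once its last frame falls outside the window */
static int tid_inactive(unsigned long last_frame_time, unsigned long now,
			unsigned long timeout)
{
	return !time_after(last_frame_time + timeout, now);
}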
1326 struct ieee80211_sta *sta, u8 ac, int tid)
1332 .tid = tid,
1349 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1353 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1356 if (tid == IWL_MAX_TID_COUNT) {
1404 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1405 tid, cfg.sta_id);
1419 "Allocating %squeue #%d to sta %d on tid %d\n",
1421 mvmsta->deflink.sta_id, tid);
1449 * this ra/tid in our Tx path since we stop the Qdisc when we
1453 mvmsta->tid_data[tid].seq_number += 0x10;
1456 mvmsta->tid_data[tid].txq_id = queue;
1458 queue_state = mvmsta->tid_data[tid].state;
1471 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1477 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1479 iwl_mvm_txq_from_tid(sta, tid));
1488 iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);
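The seq_number += 0x10 at 1453 above looks odd until you recall the 802.11 sequence-control layout: bits 0-3 hold the fragment number and bits 4-15 the 12-bit sequence number, so adding 0x10 to the raw field advances the sequence number by exactly one. That is also why 1088 and 1353 extract the SN with IEEE80211_SEQ_TO_SN. A runnable check:

#include <stdio.h>

#define IEEE80211_SCTL_SEQ 0xFFF0
#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)

int main(void)
{
	unsigned short seq = 0x0420; /* SN 66, fragment 0 */

	seq += 0x10; /* the bump from line 1453 */
	printf("SN is now %d\n", IEEE80211_SEQ_TO_SN(seq)); /* prints 67 */
	return 0;
}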
1506 if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
1533 u8 tid;
1540 tid = txq->tid;
1541 if (tid == IEEE80211_NUM_TIDS)
1542 tid = IWL_MAX_TID_COUNT;
1550 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
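1540-1542 above are the other half of the TID renumbering sketched earlier: mac80211 tags its management TXQ with tid == IEEE80211_NUM_TIDS (16), the driver folds that into its internal pseudo TID (8), and the firmware ultimately sees 15. A sketch; driver_tid() is a hypothetical wrapper around the two lines shown in the listing:

#define IEEE80211_NUM_TIDS 16 /* mac80211's mgmt TXQ marker */
#define IWL_MAX_TID_COUNT  8  /* driver-internal mgmt pseudo TID */

/* mirror of lines 1540-1542 */
static int driver_tid(int txq_tid)
{
	return txq_tid == IEEE80211_NUM_TIDS ? IWL_MAX_TID_COUNT : txq_tid;
}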
1652 "Re-mapping sta %d tid %d\n",
1676 cfg.tid = i;
1683 "Re-mapping sta %d tid %d to queue %d\n",
2210 .tid = IWL_MAX_TID_COUNT,
2374 .tid = IWL_MAX_TID_COUNT,
2575 .tid = 0,
2786 bool start, int tid, u16 ssn,
2799 cmd.add_immediate_ba_tid = tid;
2804 cmd.remove_immediate_ba_tid = tid;
2835 bool start, int tid, u16 ssn,
2855 cmd.alloc.tid = tid;
2865 cmd.remove.tid = cpu_to_le32(tid);
2887 bool start, int tid, u16 ssn, u16 buf_size,
2893 tid, ssn, buf_size, baid);
2896 tid, ssn, buf_size);
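2786-2896 above show two generations of RX block-ack (BAID) programming: an older path that piggybacks on the station command (add/remove_immediate_ba_tid, 2799/2804) and a newer dedicated allocation command (the alloc side at 2855 stores the TID directly, the remove side at 2865 as a little-endian u32); the wrapper at 2887-2896 picks between them. A hedged shape of that dispatch; the selector flag and every name not present in the listing is an assumption:

typedef unsigned short u16;

extern int fw_baid_op_sta(int start, int tid, u16 ssn, u16 buf_size);
extern int fw_baid_op_cmd(int start, int tid, u16 ssn, u16 buf_size);

/* route BAID setup/teardown to whichever API the firmware speaks */
static int fw_baid_op(int new_baid_api, int start, int tid, u16 ssn,
		      u16 buf_size)
{
	if (new_baid_api)
		return fw_baid_op_cmd(start, tid, ssn, buf_size);
	return fw_baid_op_sta(start, tid, ssn, buf_size);
}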
2900 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2957 baid = mvm_sta->tid_to_baid[tid];
2965 baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
2986 baid_data->tid = tid;
2990 mvm_sta->tid_to_baid[tid] = baid;
3003 mvm_sta->deflink.sta_id, tid, baid);
3007 baid = mvm_sta->tid_to_baid[tid];
3037 int tid, u8 queue, bool start)
3048 mvm_sta->tid_disable_agg &= ~BIT(tid);
3051 mvm_sta->tid_disable_agg |= BIT(tid);
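3037-3051 above maintain tid_disable_agg, a per-station bitmap of TIDs on which the firmware must not aggregate: starting a TX BA session clears the TID's bit and stopping it sets the bit again. Reduced to its core:

#define BIT(n) (1u << (n))

/* flip one TID in the per-station "aggregation disabled" bitmap */
static void set_agg_enabled(unsigned int *tid_disable_agg, int tid, int start)
{
	if (start)
		*tid_disable_agg &= ~BIT(tid);
	else
		*tid_disable_agg |= BIT(tid);
}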
3107 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3115 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
3118 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
3119 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
3122 mvmsta->tid_data[tid].state);
3128 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
3130 u8 ac = tid_to_mac80211_ac[tid];
3132 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
3145 txq_id = mvmsta->tid_data[tid].txq_id;
3162 tid, IWL_MAX_HW_QUEUES - 1);
3169 "Can't start tid %d agg on shared queue!\n",
3170 tid);
3175 "AGG for tid %d will be on queue #%d\n",
3176 tid, txq_id);
3178 tid_data = &mvmsta->tid_data[tid];
3184 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3185 mvmsta->deflink.sta_id, tid, txq_id,
3212 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3216 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3226 .tid = tid,
3245 mvmsta->agg_tids |= BIT(tid);
3252 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3265 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3271 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3297 mvmsta->deflink.sta_id, tid,
3312 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3334 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3335 sta->addr, tid);
3365 struct ieee80211_sta *sta, u16 tid)
3368 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3377 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3385 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3386 mvmsta->deflink.sta_id, tid, txq_id,
3389 mvmsta->agg_tids &= ~BIT(tid);
3405 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3407 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3419 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3426 mvmsta->deflink.sta_id, tid, tid_data->state);
3438 struct ieee80211_sta *sta, u16 tid)
3441 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3451 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3452 mvmsta->deflink.sta_id, tid, txq_id,
3456 mvmsta->agg_tids &= ~BIT(tid);
3466 BIT(tid)))
3477 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
4083 int tid, ret;
4090 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
4091 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
4105 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
4109 tid_data = &mvmsta->tid_data[tid];