xref: /linux/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2014 Intel Mobile Communications GmbH
9  * Copyright(c) 2017 Intel Deutschland GmbH
10  * Copyright(C) 2018 Intel Corporation
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.
23  *
24  * The full GNU General Public License is included in this distribution
25  * in the file called COPYING.
26  *
27  * Contact Information:
28  *  Intel Linux Wireless <linuxwifi@intel.com>
29  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30  *
31  * BSD LICENSE
32  *
33  * Copyright(c) 2014 Intel Mobile Communications GmbH
34  * Copyright(c) 2017 Intel Deutschland GmbH
35  * Copyright(C) 2018 Intel Corporation
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  *
42  *  * Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  *  * Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in
46  *    the documentation and/or other materials provided with the
47  *    distribution.
48  *  * Neither the name Intel Corporation nor the names of its
49  *    contributors may be used to endorse or promote products derived
50  *    from this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *****************************************************************************/
65 
66 #include <linux/etherdevice.h>
67 #include "mvm.h"
68 #include "time-event.h"
69 #include "iwl-io.h"
70 #include "iwl-prph.h"
71 
72 #define TU_TO_US(x) ((x) * 1024)
73 #define TU_TO_MS(x) (TU_TO_US(x) / 1000)
74 
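/*
 * Request a teardown (via mac80211) for every TDLS peer currently tracked
 * in fw_id_to_mac_id, using WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED.
 * Must be called with mvm->mutex held.
 */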
75 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
76 {
77 	struct ieee80211_sta *sta;
78 	struct iwl_mvm_sta *mvmsta;
79 	int i;
80 
81 	lockdep_assert_held(&mvm->mutex);
82 
83 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
84 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
85 						lockdep_is_held(&mvm->mutex));
86 		if (!sta || IS_ERR(sta) || !sta->tdls)
87 			continue;
88 
89 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
90 		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
91 				NL80211_TDLS_TEARDOWN,
92 				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
93 				GFP_KERNEL);
94 	}
95 }
96 
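/*
 * Return the number of connected TDLS stations. If @vif is non-NULL, only
 * peers belonging to that interface are counted.
 * Must be called with mvm->mutex held.
 */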
97 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
98 {
99 	struct ieee80211_sta *sta;
100 	struct iwl_mvm_sta *mvmsta;
101 	int count = 0;
102 	int i;
103 
104 	lockdep_assert_held(&mvm->mutex);
105 
106 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
107 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
108 						lockdep_is_held(&mvm->mutex));
109 		if (!sta || IS_ERR(sta) || !sta->tdls)
110 			continue;
111 
112 		if (vif) {
113 			mvmsta = iwl_mvm_sta_from_mac80211(sta);
114 			if (mvmsta->vif != vif)
115 				continue;
116 		}
117 
118 		count++;
119 	}
120 
121 	return count;
122 }
123 
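/*
 * Send the TDLS_CONFIG_CMD to the FW: the vif's id/color, the Tx-to-AP TID
 * and the list of current TDLS peers (station id, TID, SSN, initiator role).
 * The response is only sanity-checked for length and then freed.
 */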
124 static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
125 {
126 	struct iwl_rx_packet *pkt;
127 	struct iwl_tdls_config_res *resp;
128 	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
129 	struct iwl_host_cmd cmd = {
130 		.id = TDLS_CONFIG_CMD,
131 		.flags = CMD_WANT_SKB,
132 		.data = { &tdls_cfg_cmd, },
133 		.len = { sizeof(struct iwl_tdls_config_cmd), },
134 	};
135 	struct ieee80211_sta *sta;
136 	int ret, i, cnt;
137 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
138 
139 	lockdep_assert_held(&mvm->mutex);
140 
141 	tdls_cfg_cmd.id_and_color =
142 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
143 	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
144 	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
145 
146 	/* for now the Tx cmd is empty and unused */
147 
148 	/* populate TDLS peer data */
149 	cnt = 0;
150 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
151 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
152 						lockdep_is_held(&mvm->mutex));
153 		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
154 			continue;
155 
156 		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
157 		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
158 							IWL_MVM_TDLS_FW_TID;
159 		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
160 		tdls_cfg_cmd.sta_info[cnt].is_initiator =
161 				cpu_to_le32(sta->tdls_initiator ? 1 : 0);
162 
163 		cnt++;
164 	}
165 
166 	tdls_cfg_cmd.tdls_peer_count = cnt;
167 	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
168 
169 	ret = iwl_mvm_send_cmd(mvm, &cmd);
170 	if (WARN_ON_ONCE(ret))
171 		return;
172 
173 	pkt = cmd.resp_pkt;
174 
175 	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
176 
177 	/* we don't really care about the response at this point */
178 
179 	iwl_free_resp(&cmd);
180 }
181 
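/*
 * Called when a TDLS peer was added to or removed from the station table.
 * Sends a power update when the first peer joins or the last one leaves,
 * and refreshes the FW TDLS configuration if the FW supports TDLS channel
 * switching.
 */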
182 void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
183 			       bool sta_added)
184 {
185 	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
186 
187 	/* when the first peer joins, send a power update first */
188 	if (tdls_sta_cnt == 1 && sta_added)
189 		iwl_mvm_power_update_mac(mvm);
190 
191 	/* Configure the FW with TDLS peer info only if the TDLS channel
192 	 * switch capability is set.
193 	 * TDLS config data is currently used only by the TDLS channel switch
194 	 * code. It is also supposed to serve the TDLS buffer station, which
195 	 * is not yet implemented in the FW. */
196 	if (fw_has_capa(&mvm->fw->ucode_capa,
197 			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
198 		iwl_mvm_tdls_config(mvm, vif);
199 
200 	/* when the last peer leaves, send a power update last */
201 	if (tdls_sta_cnt == 0 && !sta_added)
202 		iwl_mvm_power_update_mac(mvm);
203 }
204 
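/*
 * mac80211 ->mgd_protect_tdls_discover() handler: protect the session on the
 * base channel for two DTIM intervals so the TDLS setup/discovery response
 * from the peer can be heard.
 */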
205 void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
206 					   struct ieee80211_vif *vif)
207 {
208 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
209 	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
210 
211 	/*
212 	 * iwl_mvm_protect_session() reads directly from the device
213 	 * (the system time), so make sure it is available.
214 	 */
215 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
216 		return;
217 
218 	mutex_lock(&mvm->mutex);
219 	/* Protect the session to hear the TDLS setup response on the channel */
220 	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
221 	mutex_unlock(&mvm->mutex);
222 
223 	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
224 }
225 
226 static const char *
227 iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
228 {
229 	switch (state) {
230 	case IWL_MVM_TDLS_SW_IDLE:
231 		return "IDLE";
232 	case IWL_MVM_TDLS_SW_REQ_SENT:
233 		return "REQ SENT";
234 	case IWL_MVM_TDLS_SW_RESP_RCVD:
235 		return "RESP RECEIVED";
236 	case IWL_MVM_TDLS_SW_REQ_RCVD:
237 		return "REQ RECEIVED";
238 	case IWL_MVM_TDLS_SW_ACTIVE:
239 		return "ACTIVE";
240 	}
241 
242 	return NULL;
243 }
244 
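/*
 * Move the TDLS channel-switch state machine to @state. Records the request
 * timestamp when entering REQ_SENT and clears the current station id when
 * returning to IDLE.
 */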
245 static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
246 					 enum iwl_mvm_tdls_cs_state state)
247 {
248 	if (mvm->tdls_cs.state == state)
249 		return;
250 
251 	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
252 		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
253 		       iwl_mvm_tdls_cs_state_str(state));
254 	mvm->tdls_cs.state = state;
255 
256 	/* we only send requests to our switching peer - update sent time */
257 	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
258 		mvm->tdls_cs.peer.sent_timestamp =
259 			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
260 
261 	if (state == IWL_MVM_TDLS_SW_IDLE)
262 		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
263 }
264 
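/*
 * Handle the TDLS channel switch notification from the FW. A zero status
 * means the switch failed, so return to IDLE. Otherwise mark the switch as
 * active and re-arm the channel-switch work one DTIM interval later.
 */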
265 void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
266 {
267 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
268 	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
269 	struct ieee80211_sta *sta;
270 	unsigned int delay;
271 	struct iwl_mvm_sta *mvmsta;
272 	struct ieee80211_vif *vif;
273 	u32 sta_id = le32_to_cpu(notif->sta_id);
274 
275 	lockdep_assert_held(&mvm->mutex);
276 
277 	/* a zero status means the FW failed the switch - go back to idle */
278 	if (!le32_to_cpu(notif->status)) {
279 		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
280 		return;
281 	}
282 
283 	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
284 		return;
285 
286 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
287 					lockdep_is_held(&mvm->mutex));
288 	/* the station may not be here, but if it is, it must be a TDLS peer */
289 	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
290 		return;
291 
292 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
293 	vif = mvmsta->vif;
294 
295 	/*
296 	 * Update state and possibly switch again after this is over (DTIM).
297 	 * Also convert TU to msec.
298 	 */
299 	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
300 	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
301 			 msecs_to_jiffies(delay));
302 
303 	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
304 }
305 
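/*
 * Validate a requested channel-switch action against the current state
 * machine state and the peer we are currently switching with.
 * Returns 0 if the action is allowed, -EBUSY/-EINVAL otherwise.
 */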
306 static int
307 iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
308 			  enum iwl_tdls_channel_switch_type type,
309 			  const u8 *peer, bool peer_initiator, u32 timestamp)
310 {
311 	bool same_peer = false;
312 	int ret = 0;
313 
314 	/* get the existing peer if it's there */
315 	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
316 	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
317 		struct ieee80211_sta *sta = rcu_dereference_protected(
318 				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
319 				lockdep_is_held(&mvm->mutex));
320 		if (!IS_ERR_OR_NULL(sta))
321 			same_peer = ether_addr_equal(peer, sta->addr);
322 	}
323 
324 	switch (mvm->tdls_cs.state) {
325 	case IWL_MVM_TDLS_SW_IDLE:
326 		/*
327 		 * might be spurious packet from the peer after the switch is
328 		 * already done
329 		 */
330 		if (type == TDLS_MOVE_CH)
331 			ret = -EINVAL;
332 		break;
333 	case IWL_MVM_TDLS_SW_REQ_SENT:
334 		/* only allow requests from the same peer */
335 		if (!same_peer)
336 			ret = -EBUSY;
337 		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
338 			 !peer_initiator)
339 			/*
340 			 * We received a ch-switch request while an outgoing
341 			 * one is pending. Allow it if the peer is the link
342 			 * initiator.
343 			 */
344 			ret = -EBUSY;
345 		else if (type == TDLS_SEND_CHAN_SW_REQ)
346 			/* wait for idle before sending another request */
347 			ret = -EBUSY;
348 		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
349 			/* we got a stale response - ignore it */
350 			ret = -EINVAL;
351 		break;
352 	case IWL_MVM_TDLS_SW_RESP_RCVD:
353 		/*
354 		 * we are waiting for the FW to give an "active" notification,
355 		 * so ignore requests in the meantime
356 		 */
357 		ret = -EBUSY;
358 		break;
359 	case IWL_MVM_TDLS_SW_REQ_RCVD:
360 		/* as above, allow the link initiator to proceed */
361 		if (type == TDLS_SEND_CHAN_SW_REQ) {
362 			if (!same_peer)
363 				ret = -EBUSY;
364 			else if (peer_initiator) /* they are the initiator */
365 				ret = -EBUSY;
366 		} else if (type == TDLS_MOVE_CH) {
367 			ret = -EINVAL;
368 		}
369 		break;
370 	case IWL_MVM_TDLS_SW_ACTIVE:
371 		/*
372 		 * the only valid request when active is a request to return
373 		 * to the base channel by the current off-channel peer
374 		 */
375 		if (type != TDLS_MOVE_CH || !same_peer)
376 			ret = -EBUSY;
377 		break;
378 	}
379 
380 	if (ret)
381 		IWL_DEBUG_TDLS(mvm,
382 			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
383 			       type, mvm->tdls_cs.state, peer, same_peer,
384 			       peer_initiator);
385 
386 	return ret;
387 }
388 
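/*
 * Build and send the TDLS_CHANNEL_SWITCH_CMD to the FW: switch type, timing,
 * target channel (or the base channel when moving back) and the channel-
 * switch frame template, optionally CCMP-protected. The state machine is
 * advanced on success; failures after the action was validated drop it back
 * to IDLE.
 */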
389 static int
390 iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
391 				   struct ieee80211_vif *vif,
392 				   enum iwl_tdls_channel_switch_type type,
393 				   const u8 *peer, bool peer_initiator,
394 				   u8 oper_class,
395 				   struct cfg80211_chan_def *chandef,
396 				   u32 timestamp, u16 switch_time,
397 				   u16 switch_timeout, struct sk_buff *skb,
398 				   u32 ch_sw_tm_ie)
399 {
400 	struct ieee80211_sta *sta;
401 	struct iwl_mvm_sta *mvmsta;
402 	struct ieee80211_tx_info *info;
403 	struct ieee80211_hdr *hdr;
404 	struct iwl_tdls_channel_switch_cmd cmd = {0};
405 	int ret;
406 
407 	lockdep_assert_held(&mvm->mutex);
408 
409 	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
410 					timestamp);
411 	if (ret)
412 		return ret;
413 
414 	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
415 		ret = -EINVAL;
416 		goto out;
417 	}
418 
419 	cmd.switch_type = type;
420 	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
421 	cmd.timing.switch_time = cpu_to_le32(switch_time);
422 	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
423 
424 	rcu_read_lock();
425 	sta = ieee80211_find_sta(vif, peer);
426 	if (!sta) {
427 		rcu_read_unlock();
428 		ret = -ENOENT;
429 		goto out;
430 	}
431 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
432 	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
433 
434 	if (!chandef) {
435 		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
436 		    mvm->tdls_cs.peer.chandef.chan) {
437 			/* actually moving to the channel */
438 			chandef = &mvm->tdls_cs.peer.chandef;
439 		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
440 			   type == TDLS_MOVE_CH) {
441 			/* we need to return to base channel */
442 			struct ieee80211_chanctx_conf *chanctx =
443 					rcu_dereference(vif->chanctx_conf);
444 
445 			if (WARN_ON_ONCE(!chanctx)) {
446 				rcu_read_unlock();
				ret = -EINVAL;
447 				goto out;
448 			}
449 
450 			chandef = &chanctx->def;
451 		}
452 	}
453 
454 	if (chandef) {
455 		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
456 			       PHY_BAND_24 : PHY_BAND_5);
457 		cmd.ci.channel = chandef->chan->hw_value;
458 		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
459 		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
460 	}
461 
462 	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
463 	cmd.timing.max_offchan_duration =
464 			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
465 					     vif->bss_conf.beacon_int) / 2);
466 
467 	/* Switch time is the first element in the switch-timing IE. */
468 	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
469 
470 	info = IEEE80211_SKB_CB(skb);
471 	hdr = (void *)skb->data;
472 	if (info->control.hw_key) {
473 		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
474 			rcu_read_unlock();
475 			ret = -EINVAL;
476 			goto out;
477 		}
478 		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
479 	}
480 
481 	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
482 			   mvmsta->sta_id);
483 
484 	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
485 				hdr->frame_control);
486 	rcu_read_unlock();
487 
488 	memcpy(cmd.frame.data, skb->data, skb->len);
489 
490 	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
491 				   sizeof(cmd), &cmd);
492 	if (ret) {
493 		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
494 			ret);
495 		goto out;
496 	}
497 
498 	/* channel switch has started, update state */
499 	if (type != TDLS_MOVE_CH) {
500 		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
501 		iwl_mvm_tdls_update_cs_state(mvm,
502 					     type == TDLS_SEND_CHAN_SW_REQ ?
503 					     IWL_MVM_TDLS_SW_REQ_SENT :
504 					     IWL_MVM_TDLS_SW_REQ_RCVD);
505 	} else {
506 		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
507 	}
508 
509 out:
510 
511 	/* channel switch failed - we are idle */
512 	if (ret)
513 		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
514 
515 	return ret;
516 }
517 
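/*
 * Delayed work that runs after a channel switch has completed or timed out.
 * It returns the state machine to IDLE and, if a channel-switch peer is
 * still configured, sends a new switch request and re-arms itself one DTIM
 * interval later.
 */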
518 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
519 {
520 	struct iwl_mvm *mvm;
521 	struct ieee80211_sta *sta;
522 	struct iwl_mvm_sta *mvmsta;
523 	struct ieee80211_vif *vif;
524 	unsigned int delay;
525 	int ret;
526 
527 	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
528 	mutex_lock(&mvm->mutex);
529 
530 	/* called after an active channel switch has finished or timed out */
531 	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
532 
533 	/* station might be gone, in that case do nothing */
534 	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
535 		goto out;
536 
537 	sta = rcu_dereference_protected(
538 				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
539 				lockdep_is_held(&mvm->mutex));
540 	/* the station may not be here, but if it is, it must be a TDLS peer */
541 	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
542 		goto out;
543 
544 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
545 	vif = mvmsta->vif;
546 	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
547 						 TDLS_SEND_CHAN_SW_REQ,
548 						 sta->addr,
549 						 mvm->tdls_cs.peer.initiator,
550 						 mvm->tdls_cs.peer.op_class,
551 						 &mvm->tdls_cs.peer.chandef,
552 						 0, 0, 0,
553 						 mvm->tdls_cs.peer.skb,
554 						 mvm->tdls_cs.peer.ch_sw_tm_ie);
555 	if (ret)
556 		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
557 
558 	/* retry after a DTIM if we failed sending now */
559 	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
560 	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
561 out:
562 	mutex_unlock(&mvm->mutex);
563 }
564 
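/*
 * mac80211 ->tdls_channel_switch() handler: start a channel switch with the
 * given TDLS peer. The frame template and peer parameters are stored so the
 * delayed work can retry the switch; only a single channel-switch peer is
 * supported at a time.
 */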
565 int
566 iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
567 			    struct ieee80211_vif *vif,
568 			    struct ieee80211_sta *sta, u8 oper_class,
569 			    struct cfg80211_chan_def *chandef,
570 			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
571 {
572 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
573 	struct iwl_mvm_sta *mvmsta;
574 	unsigned int delay;
575 	int ret;
576 
577 	mutex_lock(&mvm->mutex);
578 
579 	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
580 		       sta->addr, chandef->chan->center_freq, chandef->width);
581 
582 	/* we only support a single peer for channel switching */
583 	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
584 		IWL_DEBUG_TDLS(mvm,
585 			       "Existing peer. Can't start switch with %pM\n",
586 			       sta->addr);
587 		ret = -EBUSY;
588 		goto out;
589 	}
590 
591 	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
592 						 TDLS_SEND_CHAN_SW_REQ,
593 						 sta->addr, sta->tdls_initiator,
594 						 oper_class, chandef, 0, 0, 0,
595 						 tmpl_skb, ch_sw_tm_ie);
596 	if (ret)
597 		goto out;
598 
599 	/*
600 	 * Mark the peer as "in tdls switch" for this vif. We only allow a
601 	 * single such peer per vif.
602 	 */
603 	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
604 	if (!mvm->tdls_cs.peer.skb) {
605 		ret = -ENOMEM;
606 		goto out;
607 	}
608 
609 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
610 	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
611 	mvm->tdls_cs.peer.chandef = *chandef;
612 	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
613 	mvm->tdls_cs.peer.op_class = oper_class;
614 	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
615 
616 	/*
617 	 * Wait for 2 DTIM periods before attempting the next switch. The next
618 	 * switch will be made sooner if the current one completes before that.
619 	 */
620 	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
621 			     vif->bss_conf.beacon_int);
622 	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
623 			 msecs_to_jiffies(delay));
624 
625 out:
626 	mutex_unlock(&mvm->mutex);
627 	return ret;
628 }
629 
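/*
 * mac80211 ->tdls_cancel_channel_switch() handler: forget the configured
 * channel-switch peer and flush the channel-switch work. If a switch with
 * that peer is still in progress, wait one DTIM interval first so the PHY
 * can return to the base channel.
 */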
630 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
631 					struct ieee80211_vif *vif,
632 					struct ieee80211_sta *sta)
633 {
634 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
635 	struct ieee80211_sta *cur_sta;
636 	bool wait_for_phy = false;
637 
638 	mutex_lock(&mvm->mutex);
639 
640 	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
641 
642 	/* we only support a single peer for channel switching */
643 	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
644 		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
645 		goto out;
646 	}
647 
648 	cur_sta = rcu_dereference_protected(
649 				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
650 				lockdep_is_held(&mvm->mutex));
651 	/* make sure it's the same peer */
652 	if (cur_sta != sta)
653 		goto out;
654 
655 	/*
656 	 * If we're currently in a switch because of the now canceled peer,
657 	 * wait a DTIM here to make sure the phy is back on the base channel.
658 	 * We can't otherwise force it.
659 	 */
660 	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
661 	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
662 		wait_for_phy = true;
663 
664 	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
665 	dev_kfree_skb(mvm->tdls_cs.peer.skb);
666 	mvm->tdls_cs.peer.skb = NULL;
667 
668 out:
669 	mutex_unlock(&mvm->mutex);
670 
671 	/* make sure the phy is on the base channel */
672 	if (wait_for_phy)
673 		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
674 				vif->bss_conf.beacon_int));
675 
676 	/* flush the channel switch state */
677 	flush_delayed_work(&mvm->tdls_cs.dwork);
678 
679 	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
680 }
681 
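/*
 * mac80211 ->tdls_recv_channel_switch() handler: handle a channel-switch
 * request/response frame received from a TDLS peer. A failed response moves
 * the state machine back to IDLE; otherwise the FW is configured to send a
 * response and/or move channel. The delayed work is armed as a timeout.
 */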
682 void
683 iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
684 				 struct ieee80211_vif *vif,
685 				 struct ieee80211_tdls_ch_sw_params *params)
686 {
687 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
688 	enum iwl_tdls_channel_switch_type type;
689 	unsigned int delay;
690 	const char *action_str =
691 		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
692 		"REQ" : "RESP";
693 
694 	mutex_lock(&mvm->mutex);
695 
696 	IWL_DEBUG_TDLS(mvm,
697 		       "Received TDLS ch switch action %s from %pM status %d\n",
698 		       action_str, params->sta->addr, params->status);
699 
700 	/*
701 	 * we got a non-zero status from a peer we were switching to - move to
702 	 * the idle state and retry later
703 	 */
704 	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
705 	    params->status != 0 &&
706 	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
707 	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
708 		struct ieee80211_sta *cur_sta;
709 
710 		/* make sure it's the same peer */
711 		cur_sta = rcu_dereference_protected(
712 				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
713 				lockdep_is_held(&mvm->mutex));
714 		if (cur_sta == params->sta) {
715 			iwl_mvm_tdls_update_cs_state(mvm,
716 						     IWL_MVM_TDLS_SW_IDLE);
717 			goto retry;
718 		}
719 	}
720 
721 	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
722 	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
723 
724 	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
725 					   params->sta->tdls_initiator, 0,
726 					   params->chandef, params->timestamp,
727 					   params->switch_time,
728 					   params->switch_timeout,
729 					   params->tmpl_skb,
730 					   params->ch_sw_tm_ie);
731 
732 retry:
733 	/* register a timeout in case we don't succeed in switching */
734 	delay = TU_TO_MS(vif->bss_conf.dtim_period *
735 			 vif->bss_conf.beacon_int);
736 	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
737 			 msecs_to_jiffies(delay));
738 	mutex_unlock(&mvm->mutex);
739 }
740