xref: /linux/drivers/net/wireless/ath/ath9k/mci.c (revision f3539c12d8196ce0a1993364d30b3a18908470d1)
1 /*
2  * Copyright (c) 2010-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 
20 #include "ath9k.h"
21 #include "mci.h"
22 
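/*
 * BT duty cycle (percentage of the coex period reserved for BT),
 * indexed by the number of active BT profiles.
 */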
23 static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
24 
25 static struct ath_mci_profile_info*
26 ath_mci_find_profile(struct ath_mci_profile *mci,
27 		     struct ath_mci_profile_info *info)
28 {
29 	struct ath_mci_profile_info *entry;
30 
31 	if (list_empty(&mci->info))
32 		return NULL;
33 
34 	list_for_each_entry(entry, &mci->info, list) {
35 		if (entry->conn_handle == info->conn_handle)
36 			return entry;
37 	}
38 	return NULL;
39 }
40 
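/*
 * Track a newly reported BT profile. At most ATH_MCI_MAX_SCO_PROFILE
 * voice profiles and ATH_MCI_MAX_ACL_PROFILE other profiles are kept;
 * for voice profiles, the stomp TX priority is derived from the
 * reported voice type.
 */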
41 static bool ath_mci_add_profile(struct ath_common *common,
42 				struct ath_mci_profile *mci,
43 				struct ath_mci_profile_info *info)
44 {
45 	struct ath_mci_profile_info *entry;
46 	u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
47 
48 	if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
49 	    (info->type == MCI_GPM_COEX_PROFILE_VOICE))
50 		return false;
51 
52 	if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
53 	    (info->type != MCI_GPM_COEX_PROFILE_VOICE))
54 		return false;
55 
56 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
57 	if (!entry)
58 		return false;
59 
60 	memcpy(entry, info, 10); /* copy the data fields, not the list head */
61 	INC_PROF(mci, info);
62 	list_add_tail(&entry->list, &mci->info);
63 	if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
64 		if (info->voice_type < sizeof(voice_priority))
65 			mci->voice_priority = voice_priority[info->voice_type];
66 		else
67 			mci->voice_priority = 110;
68 	}
69 
70 	return true;
71 }
72 
73 static void ath_mci_del_profile(struct ath_common *common,
74 				struct ath_mci_profile *mci,
75 				struct ath_mci_profile_info *entry)
76 {
77 	if (!entry)
78 		return;
79 
80 	DEC_PROF(mci, entry);
81 	list_del(&entry->list);
82 	kfree(entry);
83 }
84 
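/*
 * Drop all tracked BT profiles and reset the aggregation limit and the
 * count of critical ("management") links.
 */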
85 void ath_mci_flush_profile(struct ath_mci_profile *mci)
86 {
87 	struct ath_mci_profile_info *info, *tinfo;
88 
89 	mci->aggr_limit = 0;
90 	mci->num_mgmt = 0;
91 
92 	if (list_empty(&mci->info))
93 		return;
94 
95 	list_for_each_entry_safe(info, tinfo, &mci->info, list) {
96 		list_del(&info->list);
97 		DEC_PROF(mci, info);
98 		kfree(info);
99 	}
100 }
101 
102 static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
103 {
104 	struct ath_mci_profile *mci = &btcoex->mci;
105 	u32 wlan_airtime = btcoex->btcoex_period *
106 				(100 - btcoex->duty_cycle) / 100;
107 
108 	/*
109 	 * Scale: wlan_airtime is in ms, aggr_limit is in units of 0.25 ms.
110 	 * When wlan_airtime is less than 4 ms, cap the aggregation limit at
111 	 * half of wlan_airtime (aggr_limit = 2 * wlan_airtime; e.g. 2 ms of
112 	 * airtime gives aggr_limit 4, i.e. 1 ms) so it fits around BT traffic.
113 	 */
114 	if ((wlan_airtime <= 4) &&
115 	    (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
116 		mci->aggr_limit = 2 * wlan_airtime;
117 }
118 
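/*
 * Recompute the coex tuning (BT duty cycle, coex period, aggregation
 * limit and stomp type) from the current mix of BT profiles, then
 * restart btcoex with the new parameters. On 5 GHz channels btcoex is
 * left disabled.
 */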
119 static void ath_mci_update_scheme(struct ath_softc *sc)
120 {
121 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
122 	struct ath_btcoex *btcoex = &sc->btcoex;
123 	struct ath_mci_profile *mci = &btcoex->mci;
124 	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
125 	struct ath_mci_profile_info *info;
126 	u32 num_profile = NUM_PROF(mci);
127 
128 	if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
129 		goto skip_tuning;
130 
131 	mci->aggr_limit = 0;
132 	btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
133 	btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
134 	if (NUM_PROF(mci))
135 		btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
136 	else
137 		btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
138 							ATH_BTCOEX_STOMP_LOW;
139 
140 	if (num_profile == 1) {
141 		info = list_first_entry(&mci->info,
142 					struct ath_mci_profile_info,
143 					list);
144 		if (mci->num_sco) {
145 			if (info->T == 12)
146 				mci->aggr_limit = 8;
147 			else if (info->T == 6) {
148 				mci->aggr_limit = 6;
149 				btcoex->duty_cycle = 30;
150 			} else
151 				mci->aggr_limit = 6;
152 			ath_dbg(common, MCI,
153 				"Single SCO, aggregation limit %d 1/4 ms\n",
154 				mci->aggr_limit);
155 		} else if (mci->num_pan || mci->num_other_acl) {
156 			/*
157 			 * For a single PAN/FTP profile, allocate 35% (40% on
158 			 * AR9565) for BT to improve WLAN throughput.
159 			 */
160 			btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
161 			btcoex->btcoex_period = 53;
162 			ath_dbg(common, MCI,
163 				"Single PAN/FTP bt period %d ms dutycycle %d\n",
164 				btcoex->btcoex_period, btcoex->duty_cycle);
165 		} else if (mci->num_hid) {
166 			btcoex->duty_cycle = 30;
167 			mci->aggr_limit = 6;
168 			ath_dbg(common, MCI,
169 				"Multiple attempt/timeout single HID "
170 				"aggregation limit 1.5 ms dutycycle 30%%\n");
171 		}
172 	} else if (num_profile == 2) {
173 		if (mci->num_hid == 2)
174 			btcoex->duty_cycle = 30;
175 		mci->aggr_limit = 6;
176 		ath_dbg(common, MCI,
177 			"Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
178 			btcoex->duty_cycle);
179 	} else if (num_profile >= 3) {
180 		mci->aggr_limit = 4;
181 		ath_dbg(common, MCI,
182 			"Three or more profiles aggregation limit 1 ms\n");
183 	}
184 
185 skip_tuning:
186 	if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
187 		if (IS_CHAN_HT(sc->sc_ah->curchan))
188 			ath_mci_adjust_aggr_limit(btcoex);
189 		else
190 			btcoex->btcoex_period >>= 1;
191 	}
192 
193 	ath9k_btcoex_timer_pause(sc);
194 	ath9k_hw_btcoex_disable(sc->sc_ah);
195 
196 	if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
197 		return;
198 
199 	btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
200 	if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
201 		btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
202 
203 	btcoex->btcoex_no_stomp = btcoex->btcoex_period *
204 		(100 - btcoex->duty_cycle) / 100;
205 
206 	ath9k_hw_btcoex_enable(sc->sc_ah);
207 	ath9k_btcoex_timer_resume(sc);
208 }
209 
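/*
 * Handle BT calibration GPM messages: a calibration request from BT
 * moves the MCI state to MCI_BT_CAL_START and queues a chip reset,
 * while a calibration grant is answered with MCI_GPM_WLAN_CAL_DONE.
 */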
210 static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
211 {
212 	struct ath_hw *ah = sc->sc_ah;
213 	struct ath_common *common = ath9k_hw_common(ah);
214 	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
215 	u32 payload[4] = {0, 0, 0, 0};
216 
217 	switch (opcode) {
218 	case MCI_GPM_BT_CAL_REQ:
219 		if (mci_hw->bt_state == MCI_BT_AWAKE) {
220 			mci_hw->bt_state = MCI_BT_CAL_START;
221 			ath9k_queue_reset(sc, RESET_TYPE_MCI);
222 		}
223 		ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
224 		break;
225 	case MCI_GPM_BT_CAL_GRANT:
226 		MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
227 		ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
228 					16, false, true);
229 		break;
230 	default:
231 		ath_dbg(common, MCI, "Unknown GPM CAL message\n");
232 		break;
233 	}
234 }
235 
236 static void ath9k_mci_work(struct work_struct *work)
237 {
238 	struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
239 
240 	ath_mci_update_scheme(sc);
241 }
242 
243 static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
244 {
245 	if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
246 		stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
247 
248 	if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
249 		stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
250 
251 	if ((cur_txprio > ATH_MCI_HI_PRIO) &&
252 	    (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
253 		stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
254 }
255 
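/*
 * Compute the BT TX priority thresholds for each WLAN stomp type from
 * the active BT profiles (or from the inquiry priority while critical
 * management traffic is pending) and program them into the hardware.
 */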
256 static void ath_mci_set_concur_txprio(struct ath_softc *sc)
257 {
258 	struct ath_btcoex *btcoex = &sc->btcoex;
259 	struct ath_mci_profile *mci = &btcoex->mci;
260 	u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX];
261 
262 	memset(stomp_txprio, 0, sizeof(stomp_txprio));
263 	if (mci->num_mgmt) {
264 		stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
265 		if (!mci->num_pan && !mci->num_other_acl)
266 			stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
267 				ATH_MCI_INQUIRY_PRIO;
268 	} else {
269 		u8 prof_prio[] = { 50, 90, 94, 52 }; /* RFCOMM, A2DP, HID, PAN */
270 
271 		stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
272 		stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
273 
274 		if (mci->num_sco)
275 			ath_mci_update_stomp_txprio(mci->voice_priority,
276 						    stomp_txprio);
277 		if (mci->num_other_acl)
278 			ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
279 		if (mci->num_a2dp)
280 			ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
281 		if (mci->num_hid)
282 			ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
283 		if (mci->num_pan)
284 			ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
285 
286 		if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
287 			stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
288 
289 		if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
290 			stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
291 	}
292 	ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
293 }
294 
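/*
 * Add, update or remove the BT profile described by @info.
 * Returns 1 if the coex scheme should be updated, 0 otherwise.
 */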
295 static u8 ath_mci_process_profile(struct ath_softc *sc,
296 				  struct ath_mci_profile_info *info)
297 {
298 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
299 	struct ath_btcoex *btcoex = &sc->btcoex;
300 	struct ath_mci_profile *mci = &btcoex->mci;
301 	struct ath_mci_profile_info *entry = NULL;
302 
303 	entry = ath_mci_find_profile(mci, info);
304 	if (entry) {
305 		/*
306 		 * Two MCI interrupts are generated while connecting to a
307 		 * headset and A2DP profile, but only one MCI interrupt
308 		 * (carrying the last added profile type) is generated when
309 		 * both profiles are disconnected.
310 		 * So when the second profile type is added, decrement the
311 		 * count of the first one.
312 		 */
313 		if (entry->type != info->type) {
314 			DEC_PROF(mci, entry);
315 			INC_PROF(mci, info);
316 		}
317 		memcpy(entry, info, 10); /* keep entry->list intact */
318 	}
319 
320 	if (info->start) {
321 		if (!entry && !ath_mci_add_profile(common, mci, info))
322 			return 0;
323 	} else
324 		ath_mci_del_profile(common, mci, entry);
325 
326 	ath_mci_set_concur_txprio(sc);
327 	return 1;
328 }
329 
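/*
 * Track per-connection "critical" status updates and recount them into
 * num_mgmt. Returns 1 if that count changed (so the coex scheme should
 * be updated), 0 otherwise.
 */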
330 static u8 ath_mci_process_status(struct ath_softc *sc,
331 				 struct ath_mci_profile_status *status)
332 {
333 	struct ath_btcoex *btcoex = &sc->btcoex;
334 	struct ath_mci_profile *mci = &btcoex->mci;
335 	struct ath_mci_profile_info info;
336 	int i = 0, old_num_mgmt = mci->num_mgmt;
337 
338 	/* Link status types are not handled */
339 	if (status->is_link)
340 		return 0;
341 
342 	info.conn_handle = status->conn_handle; /* only used for the lookup */
343 	if (ath_mci_find_profile(mci, &info))
344 		return 0;
345 
346 	if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
347 		return 0;
348 
349 	if (status->is_critical)
350 		__set_bit(status->conn_handle, mci->status);
351 	else
352 		__clear_bit(status->conn_handle, mci->status);
353 
354 	mci->num_mgmt = 0;
355 	do {
356 		if (test_bit(i, mci->status))
357 			mci->num_mgmt++;
358 	} while (++i < ATH_MCI_MAX_PROFILE);
359 
360 	ath_mci_set_concur_txprio(sc);
361 	if (old_num_mgmt != mci->num_mgmt)
362 		return 1;
363 
364 	return 0;
365 }
366 
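/*
 * Dispatch a coex GPM message from the BT controller, flushing the
 * profile list first if the hardware asks for it. Profile and status
 * updates may schedule the MCI work to retune the coex scheme.
 */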
367 static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
368 {
369 	struct ath_hw *ah = sc->sc_ah;
370 	struct ath_mci_profile_info profile_info;
371 	struct ath_mci_profile_status profile_status;
372 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
373 	u8 major, minor, update_scheme = 0;
374 	u32 seq_num;
375 
376 	if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
377 	    ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
378 		ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
379 		ath_mci_flush_profile(&sc->btcoex.mci);
380 		ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
381 	}
382 
383 	switch (opcode) {
384 	case MCI_GPM_COEX_VERSION_QUERY:
385 		ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
386 		break;
387 	case MCI_GPM_COEX_VERSION_RESPONSE:
388 		major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
389 		minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
390 		ar9003_mci_set_bt_version(ah, major, minor);
391 		break;
392 	case MCI_GPM_COEX_STATUS_QUERY:
393 		ar9003_mci_send_wlan_channels(ah);
394 		break;
395 	case MCI_GPM_COEX_BT_PROFILE_INFO:
396 		memcpy(&profile_info,
397 		       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
398 
399 		if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
400 		    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
401 			ath_dbg(common, MCI,
402 				"Illegal profile type = %d, state = %d\n",
403 				profile_info.type,
404 				profile_info.start);
405 			break;
406 		}
407 
408 		update_scheme += ath_mci_process_profile(sc, &profile_info);
409 		break;
410 	case MCI_GPM_COEX_BT_STATUS_UPDATE:
411 		profile_status.is_link = *(rx_payload +
412 					   MCI_GPM_COEX_B_STATUS_TYPE);
413 		profile_status.conn_handle = *(rx_payload +
414 					       MCI_GPM_COEX_B_STATUS_LINKID);
415 		profile_status.is_critical = *(rx_payload +
416 					       MCI_GPM_COEX_B_STATUS_STATE);
417 
418 		seq_num = *((u32 *)(rx_payload + 12));
419 		ath_dbg(common, MCI,
420 			"BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
421 			profile_status.is_link, profile_status.conn_handle,
422 			profile_status.is_critical, seq_num);
423 
424 		update_scheme += ath_mci_process_status(sc, &profile_status);
425 		break;
426 	default:
427 		ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
428 		break;
429 	}
430 	if (update_scheme)
431 		ieee80211_queue_work(sc->hw, &sc->mci_work);
432 }
433 
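/*
 * Allocate a single DMA-coherent buffer holding both the MCI scheduler
 * buffer and the GPM ring, and register it with the ar9003 MCI layer.
 */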
434 int ath_mci_setup(struct ath_softc *sc)
435 {
436 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
437 	struct ath_mci_coex *mci = &sc->mci_coex;
438 	struct ath_mci_buf *buf = &mci->sched_buf;
439 	int ret;
440 
441 	buf->bf_addr = dmam_alloc_coherent(sc->dev,
442 				  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
443 				  &buf->bf_paddr, GFP_KERNEL);
444 
445 	if (buf->bf_addr == NULL) {
446 		ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
447 		return -ENOMEM;
448 	}
449 
450 	memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
451 	       ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);
452 
453 	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
454 
455 	mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
456 	mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
457 	mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
458 
459 	ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
460 			       mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
461 			       mci->sched_buf.bf_paddr);
462 	if (ret) {
463 		ath_err(common, "Failed to initialize MCI\n");
464 		return ret;
465 	}
466 
467 	INIT_WORK(&sc->mci_work, ath9k_mci_work);
468 	ath_dbg(common, MCI, "MCI Initialized\n");
469 
470 	return 0;
471 }
472 
473 void ath_mci_cleanup(struct ath_softc *sc)
474 {
475 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
476 	struct ath_hw *ah = sc->sc_ah;
477 
478 	ar9003_mci_cleanup(ah);
479 
480 	ath_dbg(common, MCI, "MCI De-Initialized\n");
481 }
482 
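/*
 * MCI interrupt handler: track the BT power state from the REQ_WAKE,
 * SYS_WAKING and SYS_SLEEPING messages, drain the GPM ring and dispatch
 * calibration and coex messages, and trigger RX recovery on invalid
 * header or continuous-info timeout interrupts.
 */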
483 void ath_mci_intr(struct ath_softc *sc)
484 {
485 	struct ath_mci_coex *mci = &sc->mci_coex;
486 	struct ath_hw *ah = sc->sc_ah;
487 	struct ath_common *common = ath9k_hw_common(ah);
488 	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
489 	u32 mci_int, mci_int_rxmsg;
490 	u32 offset, subtype, opcode;
491 	u32 *pgpm;
492 	u32 more_data = MCI_GPM_MORE;
493 	bool skip_gpm = false;
494 
495 	ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
496 
497 	if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
498 		ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
499 		return;
500 	}
501 
502 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
503 		u32 payload[4] = { 0xffffffff, 0xffffffff,
504 				   0xffffffff, 0xffffff00};
505 
506 		/*
507 		 * The following REMOTE_RESET and SYS_WAKING messages used to
508 		 * be sent only when BT woke up. Now they are always sent, as
509 		 * a recovery method to reset BT MCI's RX alignment.
510 		 */
511 		ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
512 					payload, 16, true, false);
513 		ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
514 					NULL, 0, true, false);
515 
516 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
517 		ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
518 
519 		/*
520 		 * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
521 		 */
522 		ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
523 	}
524 
525 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
526 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
527 
528 		if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
529 		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
530 		     MCI_BT_SLEEP))
531 			ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
532 	}
533 
534 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
535 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
536 
537 		if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
538 		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
539 		     MCI_BT_AWAKE))
540 			mci_hw->bt_state = MCI_BT_SLEEP;
541 	}
542 
543 	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
544 	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
545 		ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
546 		skip_gpm = true;
547 	}
548 
549 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
550 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
551 		offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
552 	}
553 
554 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
555 		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
556 
557 		while (more_data == MCI_GPM_MORE) {
558 			if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
559 				return;
560 
561 			pgpm = mci->gpm_buf.bf_addr;
562 			offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
563 
564 			if (offset == MCI_GPM_INVALID)
565 				break;
566 
567 			pgpm += (offset >> 2);
568 
569 			/*
570 			 * The first dword is a timer value;
571 			 * the real data starts from the 2nd dword.
572 			 */
573 			subtype = MCI_GPM_TYPE(pgpm);
574 			opcode = MCI_GPM_OPCODE(pgpm);
575 
576 			if (skip_gpm)
577 				goto recycle;
578 
579 			if (MCI_GPM_IS_CAL_TYPE(subtype)) {
580 				ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
581 			} else {
582 				switch (subtype) {
583 				case MCI_GPM_COEX_AGENT:
584 					ath_mci_msg(sc, opcode, (u8 *)pgpm);
585 					break;
586 				default:
587 					break;
588 				}
589 			}
590 		recycle:
591 			MCI_GPM_RECYCLE(pgpm);
592 		}
593 	}
594 
595 	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
596 		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
597 			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
598 
599 		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
600 			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
601 
602 		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
603 			int value_dbm = MS(mci_hw->cont_status,
604 					   AR_MCI_CONT_RSSI_POWER);
605 
606 			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
607 
608 			ath_dbg(common, MCI,
609 				"MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
610 				MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
611 				"tx" : "rx",
612 				MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
613 				value_dbm);
614 		}
615 
616 		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
617 			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
618 
619 		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
620 			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
621 	}
622 
623 	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
624 	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
625 		mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
626 			     AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
627 		ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
628 	}
629 }
630 
631 void ath_mci_enable(struct ath_softc *sc)
632 {
633 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
634 
635 	if (!common->btcoex_enabled)
636 		return;
637 
638 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
639 		sc->sc_ah->imask |= ATH9K_INT_MCI;
640 }
641 
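/*
 * Update the WLAN channel map sent to the BT controller. Unless
 * @allow_all is set, the BT channels overlapping the current 2.4 GHz
 * WLAN channel (plus a guard band) are masked out first.
 */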
642 void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
643 {
644 	struct ath_hw *ah = sc->sc_ah;
645 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
646 	struct ath9k_channel *chan = ah->curchan;
647 	u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
648 	int i;
649 	s16 chan_start, chan_end;
650 	u16 wlan_chan;
651 
652 	if (!chan || !IS_CHAN_2GHZ(chan))
653 		return;
654 
655 	if (allow_all)
656 		goto send_wlan_chan;
657 
658 	wlan_chan = chan->channel - 2402;
659 
660 	chan_start = wlan_chan - 10;
661 	chan_end = wlan_chan + 10;
662 
663 	if (IS_CHAN_HT40PLUS(chan))
664 		chan_end += 20;
665 	else if (IS_CHAN_HT40MINUS(chan))
666 		chan_start -= 20;
667 
668 	/* adjust side band */
669 	chan_start -= 7;
670 	chan_end += 7;
671 
672 	if (chan_start <= 0)
673 		chan_start = 0;
674 	if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
675 		chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
676 
677 	ath_dbg(ath9k_hw_common(ah), MCI,
678 		"WLAN current channel %d mask BT channel %d - %d\n",
679 		wlan_chan, chan_start, chan_end);
680 
681 	for (i = chan_start; i < chan_end; i++)
682 		MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
683 
684 send_wlan_chan:
685 	/* update and send wlan channels info to BT */
686 	for (i = 0; i < 4; i++)
687 		mci->wlan_channels[i] = channelmap[i];
688 	ar9003_mci_send_wlan_channels(ah);
689 	ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
690 }
691 
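/*
 * Enable or disable BT/WLAN concurrent TX (2.4 GHz only, and only if
 * ATH_MCI_CONFIG_CONCUR_TX is configured). When the state changes, the
 * TX power limit is reprogrammed.
 */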
692 void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
693 			   bool concur_tx)
694 {
695 	struct ath_hw *ah = sc->sc_ah;
696 	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
697 	bool old_concur_tx = mci_hw->concur_tx;
698 
699 	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
700 		mci_hw->concur_tx = false;
701 		return;
702 	}
703 
704 	if (!IS_CHAN_2GHZ(ah->curchan))
705 		return;
706 
707 	if (setchannel) {
708 		struct ath9k_hw_cal_data *caldata = &sc->cur_chan->caldata;
709 		if (IS_CHAN_HT40PLUS(ah->curchan) &&
710 		    (ah->curchan->channel > caldata->channel) &&
711 		    (ah->curchan->channel <= caldata->channel + 20))
712 			return;
713 		if (IS_CHAN_HT40MINUS(ah->curchan) &&
714 		    (ah->curchan->channel < caldata->channel) &&
715 		    (ah->curchan->channel >= caldata->channel - 20))
716 			return;
717 		mci_hw->concur_tx = false;
718 	} else
719 		mci_hw->concur_tx = concur_tx;
720 
721 	if (old_concur_tx != mci_hw->concur_tx)
722 		ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
723 }
724 
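/*
 * While an SCO or A2DP profile is active, bump the audio stomp counter
 * whenever the average beacon RSSI is weak (avgbrssi <= 25); the
 * counter is cleared once the RSSI recovers.
 */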
725 static void ath9k_mci_stomp_audio(struct ath_softc *sc)
726 {
727 	struct ath_hw *ah = sc->sc_ah;
728 	struct ath_btcoex *btcoex = &sc->btcoex;
729 	struct ath_mci_profile *mci = &btcoex->mci;
730 
731 	if (!mci->num_sco && !mci->num_a2dp)
732 		return;
733 
734 	if (ah->stats.avgbrssi > 25) {
735 		btcoex->stomp_audio = 0;
736 		return;
737 	}
738 
739 	btcoex->stomp_audio++;
740 }
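
/*
 * Apply hysteresis on the average beacon RSSI: after
 * ATH_MCI_CONCUR_TX_SWITCH consecutive strong (>= 40) samples enable
 * concurrent TX, after as many weak samples disable it. Also updates
 * the audio stomp counter.
 */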
741 void ath9k_mci_update_rssi(struct ath_softc *sc)
742 {
743 	struct ath_hw *ah = sc->sc_ah;
744 	struct ath_btcoex *btcoex = &sc->btcoex;
745 	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
746 
747 	ath9k_mci_stomp_audio(sc);
748 
749 	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
750 		return;
751 
752 	if (ah->stats.avgbrssi >= 40) {
753 		if (btcoex->rssi_count < 0)
754 			btcoex->rssi_count = 0;
755 		if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
756 			btcoex->rssi_count = 0;
757 			ath9k_mci_set_txpower(sc, false, true);
758 		}
759 	} else {
760 		if (btcoex->rssi_count > 0)
761 			btcoex->rssi_count = 0;
762 		if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
763 			btcoex->rssi_count = 0;
764 			ath9k_mci_set_txpower(sc, false, false);
765 		}
766 	}
767 }
768