xref: /linux/net/bluetooth/l2cap_core.c (revision 498d319bb512992ef0784c278fa03679f2f5649d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 bool disable_ertm;
45 
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48 
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51 
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 				       u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 			   void *data);
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 		     struct sk_buff_head *skbs, u8 event);
61 
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 	if (hcon->type == LE_LINK) {
65 		if (type == ADDR_LE_DEV_PUBLIC)
66 			return BDADDR_LE_PUBLIC;
67 		else
68 			return BDADDR_LE_RANDOM;
69 	}
70 
71 	return BDADDR_BREDR;
72 }
73 
74 /* ---- L2CAP channels ---- */
75 
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 						   u16 cid)
78 {
79 	struct l2cap_chan *c;
80 
81 	list_for_each_entry(c, &conn->chan_l, list) {
82 		if (c->dcid == cid)
83 			return c;
84 	}
85 	return NULL;
86 }
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->scid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 /* Find channel with given SCID.
101  * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						 u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	mutex_lock(&conn->chan_lock);
108 	c = __l2cap_get_chan_by_scid(conn, cid);
109 	if (c)
110 		l2cap_chan_lock(c);
111 	mutex_unlock(&conn->chan_lock);
112 
113 	return c;
114 }
115 
116 /* Find channel with given DCID.
117  * Returns locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_dcid(conn, cid);
126 	if (c)
127 		l2cap_chan_lock(c);
128 	mutex_unlock(&conn->chan_lock);
129 
130 	return c;
131 }
132 
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 						    u8 ident)
135 {
136 	struct l2cap_chan *c;
137 
138 	list_for_each_entry(c, &conn->chan_l, list) {
139 		if (c->ident == ident)
140 			return c;
141 	}
142 	return NULL;
143 }
144 
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						  u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	mutex_lock(&conn->chan_lock);
151 	c = __l2cap_get_chan_by_ident(conn, ident);
152 	if (c)
153 		l2cap_chan_lock(c);
154 	mutex_unlock(&conn->chan_lock);
155 
156 	return c;
157 }
158 
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &chan_list, global_l) {
164 		if (c->sport == psm && !bacmp(&c->src, src))
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 	int err;
173 
174 	write_lock(&chan_list_lock);
175 
176 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 		err = -EADDRINUSE;
178 		goto done;
179 	}
180 
181 	if (psm) {
182 		chan->psm = psm;
183 		chan->sport = psm;
184 		err = 0;
185 	} else {
186 		u16 p;
187 
188 		err = -EINVAL;
189 		for (p = 0x1001; p < 0x1100; p += 2)
190 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 				chan->psm   = cpu_to_le16(p);
192 				chan->sport = cpu_to_le16(p);
193 				err = 0;
194 				break;
195 			}
196 	}
197 
198 done:
199 	write_unlock(&chan_list_lock);
200 	return err;
201 }
202 
203 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
204 {
205 	write_lock(&chan_list_lock);
206 
207 	chan->scid = scid;
208 
209 	write_unlock(&chan_list_lock);
210 
211 	return 0;
212 }
213 
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
215 {
216 	u16 cid = L2CAP_CID_DYN_START;
217 
218 	for (; cid < L2CAP_CID_DYN_END; cid++) {
219 		if (!__l2cap_get_chan_by_scid(conn, cid))
220 			return cid;
221 	}
222 
223 	return 0;
224 }
225 
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
227 {
228 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 	       state_to_string(state));
230 
231 	chan->state = state;
232 	chan->ops->state_change(chan, state, 0);
233 }
234 
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
236 						int state, int err)
237 {
238 	chan->state = state;
239 	chan->ops->state_change(chan, chan->state, err);
240 }
241 
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
243 {
244 	chan->ops->state_change(chan, chan->state, err);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
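
/* Illustrative example (an assumption for clarity, not in the original
 * source): with a transmit window of 8, l2cap_seq_list_init() allocates 8
 * slots and indexes them by (seq & mask).  Appending txseqs 5 and 6 and
 * then popping behaves as follows:
 *
 *	l2cap_seq_list_append(&list, 5);  head = 5, tail = 5,
 *					  list[5] = L2CAP_SEQ_LIST_TAIL
 *	l2cap_seq_list_append(&list, 6);  head = 5, tail = 6, list[5] = 6,
 *					  list[6] = L2CAP_SEQ_LIST_TAIL
 *	l2cap_seq_list_contains(&list, 6) is true (list[6] != CLEAR)
 *	l2cap_seq_list_pop(&list);        returns 5, head becomes 6
 */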
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) into a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
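
/* Hedged lifecycle sketch (illustrative, not part of the original file):
 * l2cap_chan_create() returns a channel holding its initial kref, every
 * l2cap_chan_hold() must be balanced by an l2cap_chan_put(), and the final
 * put ends up in l2cap_chan_destroy(), which unlinks and frees the channel.
 *
 *	struct l2cap_chan *chan = l2cap_chan_create();
 *	if (!chan)
 *		return -ENOMEM;
 *
 *	l2cap_chan_hold(chan);		extra reference, e.g. while queued
 *	...
 *	l2cap_chan_put(chan);		drops the extra reference
 *	l2cap_chan_put(chan);		drops the initial one, frees chan
 */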
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			if (chan->dcid == L2CAP_CID_ATT)
508 				chan->scid = L2CAP_CID_ATT;
509 			else
510 				chan->scid = l2cap_alloc_cid(conn);
511 		} else {
512 			/* Alloc CID for connection-oriented socket */
513 			chan->scid = l2cap_alloc_cid(conn);
514 			chan->omtu = L2CAP_DEFAULT_MTU;
515 		}
516 		break;
517 
518 	case L2CAP_CHAN_CONN_LESS:
519 		/* Connectionless socket */
520 		chan->scid = L2CAP_CID_CONN_LESS;
521 		chan->dcid = L2CAP_CID_CONN_LESS;
522 		chan->omtu = L2CAP_DEFAULT_MTU;
523 		break;
524 
525 	case L2CAP_CHAN_CONN_FIX_A2MP:
526 		chan->scid = L2CAP_CID_A2MP;
527 		chan->dcid = L2CAP_CID_A2MP;
528 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
530 		break;
531 
532 	default:
533 		/* Raw socket can send/recv signalling messages only */
534 		chan->scid = L2CAP_CID_SIGNALING;
535 		chan->dcid = L2CAP_CID_SIGNALING;
536 		chan->omtu = L2CAP_DEFAULT_MTU;
537 	}
538 
539 	chan->local_id		= L2CAP_BESTEFFORT_ID;
540 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
541 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
542 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
543 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
544 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
545 
546 	l2cap_chan_hold(chan);
547 
548 	hci_conn_hold(conn->hcon);
549 
550 	list_add(&chan->list, &conn->chan_l);
551 }
552 
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
554 {
555 	mutex_lock(&conn->chan_lock);
556 	__l2cap_chan_add(conn, chan);
557 	mutex_unlock(&conn->chan_lock);
558 }
559 
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
561 {
562 	struct l2cap_conn *conn = chan->conn;
563 
564 	__clear_chan_timer(chan);
565 
566 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
567 
568 	if (conn) {
569 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 		/* Delete from channel list */
571 		list_del(&chan->list);
572 
573 		l2cap_chan_put(chan);
574 
575 		chan->conn = NULL;
576 
577 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 			hci_conn_drop(conn->hcon);
579 
580 		if (mgr && mgr->bredr_chan == chan)
581 			mgr->bredr_chan = NULL;
582 	}
583 
584 	if (chan->hs_hchan) {
585 		struct hci_chan *hs_hchan = chan->hs_hchan;
586 
587 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 		amp_disconnect_logical_link(hs_hchan);
589 	}
590 
591 	chan->ops->teardown(chan, err);
592 
593 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 		return;
595 
596 	switch (chan->mode) {
597 	case L2CAP_MODE_BASIC:
598 		break;
599 
600 	case L2CAP_MODE_ERTM:
601 		__clear_retrans_timer(chan);
602 		__clear_monitor_timer(chan);
603 		__clear_ack_timer(chan);
604 
605 		skb_queue_purge(&chan->srej_q);
606 
607 		l2cap_seq_list_free(&chan->srej_list);
608 		l2cap_seq_list_free(&chan->retrans_list);
609 
610 		/* fall through */
611 
612 	case L2CAP_MODE_STREAMING:
613 		skb_queue_purge(&chan->tx_q);
614 		break;
615 	}
616 
617 	return;
618 }
619 
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
621 {
622 	struct l2cap_conn *conn = chan->conn;
623 
624 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
625 
626 	switch (chan->state) {
627 	case BT_LISTEN:
628 		chan->ops->teardown(chan, 0);
629 		break;
630 
631 	case BT_CONNECTED:
632 	case BT_CONFIG:
633 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 		    conn->hcon->type == ACL_LINK) {
635 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
636 			l2cap_send_disconn_req(chan, reason);
637 		} else
638 			l2cap_chan_del(chan, reason);
639 		break;
640 
641 	case BT_CONNECT2:
642 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
643 		    conn->hcon->type == ACL_LINK) {
644 			struct l2cap_conn_rsp rsp;
645 			__u16 result;
646 
647 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
648 				result = L2CAP_CR_SEC_BLOCK;
649 			else
650 				result = L2CAP_CR_BAD_PSM;
651 
652 			l2cap_state_change(chan, BT_DISCONN);
653 
654 			rsp.scid   = cpu_to_le16(chan->dcid);
655 			rsp.dcid   = cpu_to_le16(chan->scid);
656 			rsp.result = cpu_to_le16(result);
657 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
658 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
659 				       sizeof(rsp), &rsp);
660 		}
661 
662 		l2cap_chan_del(chan, reason);
663 		break;
664 
665 	case BT_CONNECT:
666 	case BT_DISCONN:
667 		l2cap_chan_del(chan, reason);
668 		break;
669 
670 	default:
671 		chan->ops->teardown(chan, 0);
672 		break;
673 	}
674 }
675 
676 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
677 {
678 	switch (chan->chan_type) {
679 	case L2CAP_CHAN_RAW:
680 		switch (chan->sec_level) {
681 		case BT_SECURITY_HIGH:
682 			return HCI_AT_DEDICATED_BONDING_MITM;
683 		case BT_SECURITY_MEDIUM:
684 			return HCI_AT_DEDICATED_BONDING;
685 		default:
686 			return HCI_AT_NO_BONDING;
687 		}
688 		break;
689 	case L2CAP_CHAN_CONN_LESS:
690 		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
691 			if (chan->sec_level == BT_SECURITY_LOW)
692 				chan->sec_level = BT_SECURITY_SDP;
693 		}
694 		if (chan->sec_level == BT_SECURITY_HIGH)
695 			return HCI_AT_NO_BONDING_MITM;
696 		else
697 			return HCI_AT_NO_BONDING;
698 		break;
699 	case L2CAP_CHAN_CONN_ORIENTED:
700 		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
701 			if (chan->sec_level == BT_SECURITY_LOW)
702 				chan->sec_level = BT_SECURITY_SDP;
703 
704 			if (chan->sec_level == BT_SECURITY_HIGH)
705 				return HCI_AT_NO_BONDING_MITM;
706 			else
707 				return HCI_AT_NO_BONDING;
708 		}
709 		/* fall through */
710 	default:
711 		switch (chan->sec_level) {
712 		case BT_SECURITY_HIGH:
713 			return HCI_AT_GENERAL_BONDING_MITM;
714 		case BT_SECURITY_MEDIUM:
715 			return HCI_AT_GENERAL_BONDING;
716 		default:
717 			return HCI_AT_NO_BONDING;
718 		}
719 		break;
720 	}
721 }
722 
723 /* Service level security */
724 int l2cap_chan_check_security(struct l2cap_chan *chan)
725 {
726 	struct l2cap_conn *conn = chan->conn;
727 	__u8 auth_type;
728 
729 	auth_type = l2cap_get_auth_type(chan);
730 
731 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
732 }
733 
734 static u8 l2cap_get_ident(struct l2cap_conn *conn)
735 {
736 	u8 id;
737 
738 	/* Get next available identifier.
739 	 *    1 - 128 are used by kernel.
740 	 *  129 - 199 are reserved.
741 	 *  200 - 254 are used by utilities like l2ping, etc.
742 	 */
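	/* Illustrative note (not in the original source): the counter wraps
	 * from 128 back to 1, so a conn whose tx_ident is currently 128 hands
	 * out ident 1 on the next call; ident 0 is never used.
	 */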
743 
744 	spin_lock(&conn->lock);
745 
746 	if (++conn->tx_ident > 128)
747 		conn->tx_ident = 1;
748 
749 	id = conn->tx_ident;
750 
751 	spin_unlock(&conn->lock);
752 
753 	return id;
754 }
755 
756 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
757 			   void *data)
758 {
759 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
760 	u8 flags;
761 
762 	BT_DBG("code 0x%2.2x", code);
763 
764 	if (!skb)
765 		return;
766 
767 	if (lmp_no_flush_capable(conn->hcon->hdev))
768 		flags = ACL_START_NO_FLUSH;
769 	else
770 		flags = ACL_START;
771 
772 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
773 	skb->priority = HCI_PRIO_MAX;
774 
775 	hci_send_acl(conn->hchan, skb, flags);
776 }
777 
778 static bool __chan_is_moving(struct l2cap_chan *chan)
779 {
780 	return chan->move_state != L2CAP_MOVE_STABLE &&
781 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
782 }
783 
784 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
785 {
786 	struct hci_conn *hcon = chan->conn->hcon;
787 	u16 flags;
788 
789 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
790 	       skb->priority);
791 
792 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
793 		if (chan->hs_hchan)
794 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
795 		else
796 			kfree_skb(skb);
797 
798 		return;
799 	}
800 
801 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
802 	    lmp_no_flush_capable(hcon->hdev))
803 		flags = ACL_START_NO_FLUSH;
804 	else
805 		flags = ACL_START;
806 
807 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
808 	hci_send_acl(chan->conn->hchan, skb, flags);
809 }
810 
811 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
812 {
813 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
814 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
815 
816 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
817 		/* S-Frame */
818 		control->sframe = 1;
819 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
820 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
821 
822 		control->sar = 0;
823 		control->txseq = 0;
824 	} else {
825 		/* I-Frame */
826 		control->sframe = 0;
827 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
828 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
829 
830 		control->poll = 0;
831 		control->super = 0;
832 	}
833 }
834 
835 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
836 {
837 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
838 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
839 
840 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
841 		/* S-Frame */
842 		control->sframe = 1;
843 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
844 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
845 
846 		control->sar = 0;
847 		control->txseq = 0;
848 	} else {
849 		/* I-Frame */
850 		control->sframe = 0;
851 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
852 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
853 
854 		control->poll = 0;
855 		control->super = 0;
856 	}
857 }
858 
859 static inline void __unpack_control(struct l2cap_chan *chan,
860 				    struct sk_buff *skb)
861 {
862 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
863 		__unpack_extended_control(get_unaligned_le32(skb->data),
864 					  &bt_cb(skb)->control);
865 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
866 	} else {
867 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
868 					  &bt_cb(skb)->control);
869 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
870 	}
871 }
872 
873 static u32 __pack_extended_control(struct l2cap_ctrl *control)
874 {
875 	u32 packed;
876 
877 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
878 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
879 
880 	if (control->sframe) {
881 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
882 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
883 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
884 	} else {
885 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
886 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
887 	}
888 
889 	return packed;
890 }
891 
892 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
893 {
894 	u16 packed;
895 
896 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
897 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
898 
899 	if (control->sframe) {
900 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
901 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
902 		packed |= L2CAP_CTRL_FRAME_TYPE;
903 	} else {
904 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
905 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
906 	}
907 
908 	return packed;
909 }
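
/* Hedged sanity example (not in the original source): packing and then
 * unpacking a control word should reproduce the same logical fields, as
 * long as each value fits its bit field.  For an RR S-frame:
 *
 *	struct l2cap_ctrl in = { .sframe = 1, .super = L2CAP_SUPER_RR,
 *				 .poll = 1, .reqseq = 5 };
 *	struct l2cap_ctrl out;
 *
 *	__unpack_enhanced_control(__pack_enhanced_control(&in), &out);
 *
 *	out now has sframe == 1, super == L2CAP_SUPER_RR, poll == 1,
 *	reqseq == 5 and final == 0, matching the input.
 */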
910 
911 static inline void __pack_control(struct l2cap_chan *chan,
912 				  struct l2cap_ctrl *control,
913 				  struct sk_buff *skb)
914 {
915 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
916 		put_unaligned_le32(__pack_extended_control(control),
917 				   skb->data + L2CAP_HDR_SIZE);
918 	} else {
919 		put_unaligned_le16(__pack_enhanced_control(control),
920 				   skb->data + L2CAP_HDR_SIZE);
921 	}
922 }
923 
924 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
925 {
926 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
927 		return L2CAP_EXT_HDR_SIZE;
928 	else
929 		return L2CAP_ENH_HDR_SIZE;
930 }
931 
932 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
933 					       u32 control)
934 {
935 	struct sk_buff *skb;
936 	struct l2cap_hdr *lh;
937 	int hlen = __ertm_hdr_size(chan);
938 
939 	if (chan->fcs == L2CAP_FCS_CRC16)
940 		hlen += L2CAP_FCS_SIZE;
941 
942 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
943 
944 	if (!skb)
945 		return ERR_PTR(-ENOMEM);
946 
947 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
948 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
949 	lh->cid = cpu_to_le16(chan->dcid);
950 
951 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
952 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
953 	else
954 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
955 
956 	if (chan->fcs == L2CAP_FCS_CRC16) {
957 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
958 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
959 	}
960 
961 	skb->priority = HCI_PRIO_MAX;
962 	return skb;
963 }
964 
965 static void l2cap_send_sframe(struct l2cap_chan *chan,
966 			      struct l2cap_ctrl *control)
967 {
968 	struct sk_buff *skb;
969 	u32 control_field;
970 
971 	BT_DBG("chan %p, control %p", chan, control);
972 
973 	if (!control->sframe)
974 		return;
975 
976 	if (__chan_is_moving(chan))
977 		return;
978 
979 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
980 	    !control->poll)
981 		control->final = 1;
982 
983 	if (control->super == L2CAP_SUPER_RR)
984 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
985 	else if (control->super == L2CAP_SUPER_RNR)
986 		set_bit(CONN_RNR_SENT, &chan->conn_state);
987 
988 	if (control->super != L2CAP_SUPER_SREJ) {
989 		chan->last_acked_seq = control->reqseq;
990 		__clear_ack_timer(chan);
991 	}
992 
993 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
994 	       control->final, control->poll, control->super);
995 
996 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
997 		control_field = __pack_extended_control(control);
998 	else
999 		control_field = __pack_enhanced_control(control);
1000 
1001 	skb = l2cap_create_sframe_pdu(chan, control_field);
1002 	if (!IS_ERR(skb))
1003 		l2cap_do_send(chan, skb);
1004 }
1005 
1006 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1007 {
1008 	struct l2cap_ctrl control;
1009 
1010 	BT_DBG("chan %p, poll %d", chan, poll);
1011 
1012 	memset(&control, 0, sizeof(control));
1013 	control.sframe = 1;
1014 	control.poll = poll;
1015 
1016 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1017 		control.super = L2CAP_SUPER_RNR;
1018 	else
1019 		control.super = L2CAP_SUPER_RR;
1020 
1021 	control.reqseq = chan->buffer_seq;
1022 	l2cap_send_sframe(chan, &control);
1023 }
1024 
1025 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1026 {
1027 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1028 }
1029 
1030 static bool __amp_capable(struct l2cap_chan *chan)
1031 {
1032 	struct l2cap_conn *conn = chan->conn;
1033 	struct hci_dev *hdev;
1034 	bool amp_available = false;
1035 
1036 	if (!conn->hs_enabled)
1037 		return false;
1038 
1039 	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1040 		return false;
1041 
1042 	read_lock(&hci_dev_list_lock);
1043 	list_for_each_entry(hdev, &hci_dev_list, list) {
1044 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1045 		    test_bit(HCI_UP, &hdev->flags)) {
1046 			amp_available = true;
1047 			break;
1048 		}
1049 	}
1050 	read_unlock(&hci_dev_list_lock);
1051 
1052 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1053 		return amp_available;
1054 
1055 	return false;
1056 }
1057 
1058 static bool l2cap_check_efs(struct l2cap_chan *chan)
1059 {
1060 	/* Check EFS parameters */
1061 	return true;
1062 }
1063 
1064 void l2cap_send_conn_req(struct l2cap_chan *chan)
1065 {
1066 	struct l2cap_conn *conn = chan->conn;
1067 	struct l2cap_conn_req req;
1068 
1069 	req.scid = cpu_to_le16(chan->scid);
1070 	req.psm  = chan->psm;
1071 
1072 	chan->ident = l2cap_get_ident(conn);
1073 
1074 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1075 
1076 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1077 }
1078 
1079 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1080 {
1081 	struct l2cap_create_chan_req req;
1082 	req.scid = cpu_to_le16(chan->scid);
1083 	req.psm  = chan->psm;
1084 	req.amp_id = amp_id;
1085 
1086 	chan->ident = l2cap_get_ident(chan->conn);
1087 
1088 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1089 		       sizeof(req), &req);
1090 }
1091 
1092 static void l2cap_move_setup(struct l2cap_chan *chan)
1093 {
1094 	struct sk_buff *skb;
1095 
1096 	BT_DBG("chan %p", chan);
1097 
1098 	if (chan->mode != L2CAP_MODE_ERTM)
1099 		return;
1100 
1101 	__clear_retrans_timer(chan);
1102 	__clear_monitor_timer(chan);
1103 	__clear_ack_timer(chan);
1104 
1105 	chan->retry_count = 0;
1106 	skb_queue_walk(&chan->tx_q, skb) {
1107 		if (bt_cb(skb)->control.retries)
1108 			bt_cb(skb)->control.retries = 1;
1109 		else
1110 			break;
1111 	}
1112 
1113 	chan->expected_tx_seq = chan->buffer_seq;
1114 
1115 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1116 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1117 	l2cap_seq_list_clear(&chan->retrans_list);
1118 	l2cap_seq_list_clear(&chan->srej_list);
1119 	skb_queue_purge(&chan->srej_q);
1120 
1121 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1122 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1123 
1124 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1125 }
1126 
1127 static void l2cap_move_done(struct l2cap_chan *chan)
1128 {
1129 	u8 move_role = chan->move_role;
1130 	BT_DBG("chan %p", chan);
1131 
1132 	chan->move_state = L2CAP_MOVE_STABLE;
1133 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1134 
1135 	if (chan->mode != L2CAP_MODE_ERTM)
1136 		return;
1137 
1138 	switch (move_role) {
1139 	case L2CAP_MOVE_ROLE_INITIATOR:
1140 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1141 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1142 		break;
1143 	case L2CAP_MOVE_ROLE_RESPONDER:
1144 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1145 		break;
1146 	}
1147 }
1148 
1149 static void l2cap_chan_ready(struct l2cap_chan *chan)
1150 {
1151 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1152 	chan->conf_state = 0;
1153 	__clear_chan_timer(chan);
1154 
1155 	chan->state = BT_CONNECTED;
1156 
1157 	chan->ops->ready(chan);
1158 }
1159 
1160 static void l2cap_start_connection(struct l2cap_chan *chan)
1161 {
1162 	if (__amp_capable(chan)) {
1163 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1164 		a2mp_discover_amp(chan);
1165 	} else {
1166 		l2cap_send_conn_req(chan);
1167 	}
1168 }
1169 
1170 static void l2cap_do_start(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 
1174 	if (conn->hcon->type == LE_LINK) {
1175 		l2cap_chan_ready(chan);
1176 		return;
1177 	}
1178 
1179 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1180 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1181 			return;
1182 
1183 		if (l2cap_chan_check_security(chan) &&
1184 		    __l2cap_no_conn_pending(chan)) {
1185 			l2cap_start_connection(chan);
1186 		}
1187 	} else {
1188 		struct l2cap_info_req req;
1189 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1190 
1191 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1192 		conn->info_ident = l2cap_get_ident(conn);
1193 
1194 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1195 
1196 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1197 			       sizeof(req), &req);
1198 	}
1199 }
1200 
1201 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1202 {
1203 	u32 local_feat_mask = l2cap_feat_mask;
1204 	if (!disable_ertm)
1205 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1206 
1207 	switch (mode) {
1208 	case L2CAP_MODE_ERTM:
1209 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1210 	case L2CAP_MODE_STREAMING:
1211 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1212 	default:
1213 		return 0x00;
1214 	}
1215 }
1216 
1217 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1218 {
1219 	struct l2cap_conn *conn = chan->conn;
1220 	struct l2cap_disconn_req req;
1221 
1222 	if (!conn)
1223 		return;
1224 
1225 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1226 		__clear_retrans_timer(chan);
1227 		__clear_monitor_timer(chan);
1228 		__clear_ack_timer(chan);
1229 	}
1230 
1231 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1232 		l2cap_state_change(chan, BT_DISCONN);
1233 		return;
1234 	}
1235 
1236 	req.dcid = cpu_to_le16(chan->dcid);
1237 	req.scid = cpu_to_le16(chan->scid);
1238 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1239 		       sizeof(req), &req);
1240 
1241 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1242 }
1243 
1244 /* ---- L2CAP connections ---- */
1245 static void l2cap_conn_start(struct l2cap_conn *conn)
1246 {
1247 	struct l2cap_chan *chan, *tmp;
1248 
1249 	BT_DBG("conn %p", conn);
1250 
1251 	mutex_lock(&conn->chan_lock);
1252 
1253 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1254 		l2cap_chan_lock(chan);
1255 
1256 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1257 			l2cap_chan_unlock(chan);
1258 			continue;
1259 		}
1260 
1261 		if (chan->state == BT_CONNECT) {
1262 			if (!l2cap_chan_check_security(chan) ||
1263 			    !__l2cap_no_conn_pending(chan)) {
1264 				l2cap_chan_unlock(chan);
1265 				continue;
1266 			}
1267 
1268 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1269 			    && test_bit(CONF_STATE2_DEVICE,
1270 					&chan->conf_state)) {
1271 				l2cap_chan_close(chan, ECONNRESET);
1272 				l2cap_chan_unlock(chan);
1273 				continue;
1274 			}
1275 
1276 			l2cap_start_connection(chan);
1277 
1278 		} else if (chan->state == BT_CONNECT2) {
1279 			struct l2cap_conn_rsp rsp;
1280 			char buf[128];
1281 			rsp.scid = cpu_to_le16(chan->dcid);
1282 			rsp.dcid = cpu_to_le16(chan->scid);
1283 
1284 			if (l2cap_chan_check_security(chan)) {
1285 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1286 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1287 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1288 					chan->ops->defer(chan);
1289 
1290 				} else {
1291 					l2cap_state_change(chan, BT_CONFIG);
1292 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1293 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1294 				}
1295 			} else {
1296 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1297 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1298 			}
1299 
1300 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1301 				       sizeof(rsp), &rsp);
1302 
1303 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1304 			    rsp.result != L2CAP_CR_SUCCESS) {
1305 				l2cap_chan_unlock(chan);
1306 				continue;
1307 			}
1308 
1309 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1310 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1311 				       l2cap_build_conf_req(chan, buf), buf);
1312 			chan->num_conf_req++;
1313 		}
1314 
1315 		l2cap_chan_unlock(chan);
1316 	}
1317 
1318 	mutex_unlock(&conn->chan_lock);
1319 }
1320 
1321 /* Find socket with cid and source/destination bdaddr.
1322  * Returns closest match.
1323  */
1324 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1325 						    bdaddr_t *src,
1326 						    bdaddr_t *dst)
1327 {
1328 	struct l2cap_chan *c, *c1 = NULL;
1329 
1330 	read_lock(&chan_list_lock);
1331 
1332 	list_for_each_entry(c, &chan_list, global_l) {
1333 		if (state && c->state != state)
1334 			continue;
1335 
1336 		if (c->scid == cid) {
1337 			int src_match, dst_match;
1338 			int src_any, dst_any;
1339 
1340 			/* Exact match. */
1341 			src_match = !bacmp(&c->src, src);
1342 			dst_match = !bacmp(&c->dst, dst);
1343 			if (src_match && dst_match) {
1344 				read_unlock(&chan_list_lock);
1345 				return c;
1346 			}
1347 
1348 			/* Closest match */
1349 			src_any = !bacmp(&c->src, BDADDR_ANY);
1350 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1351 			if ((src_match && dst_any) || (src_any && dst_match) ||
1352 			    (src_any && dst_any))
1353 				c1 = c;
1354 		}
1355 	}
1356 
1357 	read_unlock(&chan_list_lock);
1358 
1359 	return c1;
1360 }
1361 
1362 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1363 {
1364 	struct hci_conn *hcon = conn->hcon;
1365 	struct l2cap_chan *chan, *pchan;
1366 	u8 dst_type;
1367 
1368 	BT_DBG("");
1369 
1370 	/* Check if we have socket listening on cid */
1371 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1372 					  &hcon->src, &hcon->dst);
1373 	if (!pchan)
1374 		return;
1375 
1376 	/* Client ATT sockets should override the server one */
1377 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1378 		return;
1379 
1380 	dst_type = bdaddr_type(hcon, hcon->dst_type);
1381 
1382 	/* If device is blocked, do not create a channel for it */
1383 	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1384 		return;
1385 
1386 	l2cap_chan_lock(pchan);
1387 
1388 	chan = pchan->ops->new_connection(pchan);
1389 	if (!chan)
1390 		goto clean;
1391 
1392 	chan->dcid = L2CAP_CID_ATT;
1393 
1394 	bacpy(&chan->src, &hcon->src);
1395 	bacpy(&chan->dst, &hcon->dst);
1396 	chan->src_type = bdaddr_type(hcon, hcon->src_type);
1397 	chan->dst_type = dst_type;
1398 
1399 	__l2cap_chan_add(conn, chan);
1400 
1401 clean:
1402 	l2cap_chan_unlock(pchan);
1403 }
1404 
1405 static void l2cap_conn_ready(struct l2cap_conn *conn)
1406 {
1407 	struct l2cap_chan *chan;
1408 	struct hci_conn *hcon = conn->hcon;
1409 
1410 	BT_DBG("conn %p", conn);
1411 
1412 	/* For outgoing pairing which doesn't necessarily have an
1413 	 * associated socket (e.g. mgmt_pair_device).
1414 	 */
1415 	if (hcon->out && hcon->type == LE_LINK)
1416 		smp_conn_security(hcon, hcon->pending_sec_level);
1417 
1418 	mutex_lock(&conn->chan_lock);
1419 
1420 	if (hcon->type == LE_LINK)
1421 		l2cap_le_conn_ready(conn);
1422 
1423 	list_for_each_entry(chan, &conn->chan_l, list) {
1424 
1425 		l2cap_chan_lock(chan);
1426 
1427 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1428 			l2cap_chan_unlock(chan);
1429 			continue;
1430 		}
1431 
1432 		if (hcon->type == LE_LINK) {
1433 			if (smp_conn_security(hcon, chan->sec_level))
1434 				l2cap_chan_ready(chan);
1435 
1436 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1437 			l2cap_chan_ready(chan);
1438 
1439 		} else if (chan->state == BT_CONNECT) {
1440 			l2cap_do_start(chan);
1441 		}
1442 
1443 		l2cap_chan_unlock(chan);
1444 	}
1445 
1446 	mutex_unlock(&conn->chan_lock);
1447 }
1448 
1449 /* Notify sockets that we cannot guarantee reliability anymore */
1450 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1451 {
1452 	struct l2cap_chan *chan;
1453 
1454 	BT_DBG("conn %p", conn);
1455 
1456 	mutex_lock(&conn->chan_lock);
1457 
1458 	list_for_each_entry(chan, &conn->chan_l, list) {
1459 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1460 			l2cap_chan_set_err(chan, err);
1461 	}
1462 
1463 	mutex_unlock(&conn->chan_lock);
1464 }
1465 
1466 static void l2cap_info_timeout(struct work_struct *work)
1467 {
1468 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1469 					       info_timer.work);
1470 
1471 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1472 	conn->info_ident = 0;
1473 
1474 	l2cap_conn_start(conn);
1475 }
1476 
1477 /*
1478  * l2cap_user
1479  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1480  * callback is called during registration. The ->remove callback is called
1481  * during unregistration.
1482  * An l2cap_user object is unregistered either explicitly or automatically when
1483  * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1484  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1485  * External modules must own a reference to the l2cap_conn object if they intend
1486  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1487  * any time if they don't.
1488  */
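
/* Hedged usage sketch (the names below are made up for illustration): an
 * external module is expected to provide ->probe/->remove callbacks, hold a
 * reference on the l2cap_conn, and use the register/unregister helpers
 * defined below.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		... start using conn->hcon / conn->hchan ...
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		... stop using the connection, it may go away after this ...
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */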
1489 
1490 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1491 {
1492 	struct hci_dev *hdev = conn->hcon->hdev;
1493 	int ret;
1494 
1495 	/* We need to check whether l2cap_conn is registered. If it is not, we
1496 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1497 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1498 	 * relies on the parent hci_conn object to be locked. This itself relies
1499 	 * on the hci_dev object to be locked. So we must lock the hci device
1500 	 * here, too. */
1501 
1502 	hci_dev_lock(hdev);
1503 
1504 	if (user->list.next || user->list.prev) {
1505 		ret = -EINVAL;
1506 		goto out_unlock;
1507 	}
1508 
1509 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1510 	if (!conn->hchan) {
1511 		ret = -ENODEV;
1512 		goto out_unlock;
1513 	}
1514 
1515 	ret = user->probe(conn, user);
1516 	if (ret)
1517 		goto out_unlock;
1518 
1519 	list_add(&user->list, &conn->users);
1520 	ret = 0;
1521 
1522 out_unlock:
1523 	hci_dev_unlock(hdev);
1524 	return ret;
1525 }
1526 EXPORT_SYMBOL(l2cap_register_user);
1527 
1528 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1529 {
1530 	struct hci_dev *hdev = conn->hcon->hdev;
1531 
1532 	hci_dev_lock(hdev);
1533 
1534 	if (!user->list.next || !user->list.prev)
1535 		goto out_unlock;
1536 
1537 	list_del(&user->list);
1538 	user->list.next = NULL;
1539 	user->list.prev = NULL;
1540 	user->remove(conn, user);
1541 
1542 out_unlock:
1543 	hci_dev_unlock(hdev);
1544 }
1545 EXPORT_SYMBOL(l2cap_unregister_user);
1546 
1547 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1548 {
1549 	struct l2cap_user *user;
1550 
1551 	while (!list_empty(&conn->users)) {
1552 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1553 		list_del(&user->list);
1554 		user->list.next = NULL;
1555 		user->list.prev = NULL;
1556 		user->remove(conn, user);
1557 	}
1558 }
1559 
1560 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1561 {
1562 	struct l2cap_conn *conn = hcon->l2cap_data;
1563 	struct l2cap_chan *chan, *l;
1564 
1565 	if (!conn)
1566 		return;
1567 
1568 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1569 
1570 	kfree_skb(conn->rx_skb);
1571 
1572 	l2cap_unregister_all_users(conn);
1573 
1574 	mutex_lock(&conn->chan_lock);
1575 
1576 	/* Kill channels */
1577 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1578 		l2cap_chan_hold(chan);
1579 		l2cap_chan_lock(chan);
1580 
1581 		l2cap_chan_del(chan, err);
1582 
1583 		l2cap_chan_unlock(chan);
1584 
1585 		chan->ops->close(chan);
1586 		l2cap_chan_put(chan);
1587 	}
1588 
1589 	mutex_unlock(&conn->chan_lock);
1590 
1591 	hci_chan_del(conn->hchan);
1592 
1593 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1594 		cancel_delayed_work_sync(&conn->info_timer);
1595 
1596 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1597 		cancel_delayed_work_sync(&conn->security_timer);
1598 		smp_chan_destroy(conn);
1599 	}
1600 
1601 	hcon->l2cap_data = NULL;
1602 	conn->hchan = NULL;
1603 	l2cap_conn_put(conn);
1604 }
1605 
1606 static void security_timeout(struct work_struct *work)
1607 {
1608 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1609 					       security_timer.work);
1610 
1611 	BT_DBG("conn %p", conn);
1612 
1613 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1614 		smp_chan_destroy(conn);
1615 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1616 	}
1617 }
1618 
1619 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1620 {
1621 	struct l2cap_conn *conn = hcon->l2cap_data;
1622 	struct hci_chan *hchan;
1623 
1624 	if (conn)
1625 		return conn;
1626 
1627 	hchan = hci_chan_create(hcon);
1628 	if (!hchan)
1629 		return NULL;
1630 
1631 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1632 	if (!conn) {
1633 		hci_chan_del(hchan);
1634 		return NULL;
1635 	}
1636 
1637 	kref_init(&conn->ref);
1638 	hcon->l2cap_data = conn;
1639 	conn->hcon = hcon;
1640 	hci_conn_get(conn->hcon);
1641 	conn->hchan = hchan;
1642 
1643 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1644 
1645 	switch (hcon->type) {
1646 	case LE_LINK:
1647 		if (hcon->hdev->le_mtu) {
1648 			conn->mtu = hcon->hdev->le_mtu;
1649 			break;
1650 		}
1651 		/* fall through */
1652 	default:
1653 		conn->mtu = hcon->hdev->acl_mtu;
1654 		break;
1655 	}
1656 
1657 	conn->feat_mask = 0;
1658 
1659 	if (hcon->type == ACL_LINK)
1660 		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1661 					    &hcon->hdev->dev_flags);
1662 
1663 	spin_lock_init(&conn->lock);
1664 	mutex_init(&conn->chan_lock);
1665 
1666 	INIT_LIST_HEAD(&conn->chan_l);
1667 	INIT_LIST_HEAD(&conn->users);
1668 
1669 	if (hcon->type == LE_LINK)
1670 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1671 	else
1672 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1673 
1674 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1675 
1676 	return conn;
1677 }
1678 
1679 static void l2cap_conn_free(struct kref *ref)
1680 {
1681 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1682 
1683 	hci_conn_put(conn->hcon);
1684 	kfree(conn);
1685 }
1686 
1687 void l2cap_conn_get(struct l2cap_conn *conn)
1688 {
1689 	kref_get(&conn->ref);
1690 }
1691 EXPORT_SYMBOL(l2cap_conn_get);
1692 
1693 void l2cap_conn_put(struct l2cap_conn *conn)
1694 {
1695 	kref_put(&conn->ref, l2cap_conn_free);
1696 }
1697 EXPORT_SYMBOL(l2cap_conn_put);
1698 
1699 /* ---- Socket interface ---- */
1700 
1701 /* Find socket with psm and source / destination bdaddr.
1702  * Returns closest match.
1703  */
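/* Illustrative example (addresses are made up): with two BT_LISTEN channels
 * registered for the same PSM, an exact source/destination match returns
 * immediately, while a channel bound to BDADDR_ANY only serves as the
 * wildcard "closest match" fallback:
 *
 *	chan A: src = 00:11:22:33:44:55, dst = AA:BB:CC:DD:EE:FF
 *	chan B: src = BDADDR_ANY,        dst = BDADDR_ANY
 *
 *	lookup (src 00:11:22:33:44:55, dst AA:BB:CC:DD:EE:FF) -> chan A
 *	lookup (src 66:77:88:99:AA:BB, dst AA:BB:CC:DD:EE:FF) -> chan B
 */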
1704 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1705 						   bdaddr_t *src,
1706 						   bdaddr_t *dst)
1707 {
1708 	struct l2cap_chan *c, *c1 = NULL;
1709 
1710 	read_lock(&chan_list_lock);
1711 
1712 	list_for_each_entry(c, &chan_list, global_l) {
1713 		if (state && c->state != state)
1714 			continue;
1715 
1716 		if (c->psm == psm) {
1717 			int src_match, dst_match;
1718 			int src_any, dst_any;
1719 
1720 			/* Exact match. */
1721 			src_match = !bacmp(&c->src, src);
1722 			dst_match = !bacmp(&c->dst, dst);
1723 			if (src_match && dst_match) {
1724 				read_unlock(&chan_list_lock);
1725 				return c;
1726 			}
1727 
1728 			/* Closest match */
1729 			src_any = !bacmp(&c->src, BDADDR_ANY);
1730 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1731 			if ((src_match && dst_any) || (src_any && dst_match) ||
1732 			    (src_any && dst_any))
1733 				c1 = c;
1734 		}
1735 	}
1736 
1737 	read_unlock(&chan_list_lock);
1738 
1739 	return c1;
1740 }
1741 
1742 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1743 		       bdaddr_t *dst, u8 dst_type)
1744 {
1745 	struct l2cap_conn *conn;
1746 	struct hci_conn *hcon;
1747 	struct hci_dev *hdev;
1748 	__u8 auth_type;
1749 	int err;
1750 
1751 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1752 	       dst_type, __le16_to_cpu(psm));
1753 
1754 	hdev = hci_get_route(dst, &chan->src);
1755 	if (!hdev)
1756 		return -EHOSTUNREACH;
1757 
1758 	hci_dev_lock(hdev);
1759 
1760 	l2cap_chan_lock(chan);
1761 
1762 	/* PSM must be odd and lsb of upper byte must be 0 */
1763 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1764 	    chan->chan_type != L2CAP_CHAN_RAW) {
1765 		err = -EINVAL;
1766 		goto done;
1767 	}
1768 
1769 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1770 		err = -EINVAL;
1771 		goto done;
1772 	}
1773 
1774 	switch (chan->mode) {
1775 	case L2CAP_MODE_BASIC:
1776 		break;
1777 	case L2CAP_MODE_ERTM:
1778 	case L2CAP_MODE_STREAMING:
1779 		if (!disable_ertm)
1780 			break;
1781 		/* fall through */
1782 	default:
1783 		err = -ENOTSUPP;
1784 		goto done;
1785 	}
1786 
1787 	switch (chan->state) {
1788 	case BT_CONNECT:
1789 	case BT_CONNECT2:
1790 	case BT_CONFIG:
1791 		/* Already connecting */
1792 		err = 0;
1793 		goto done;
1794 
1795 	case BT_CONNECTED:
1796 		/* Already connected */
1797 		err = -EISCONN;
1798 		goto done;
1799 
1800 	case BT_OPEN:
1801 	case BT_BOUND:
1802 		/* Can connect */
1803 		break;
1804 
1805 	default:
1806 		err = -EBADFD;
1807 		goto done;
1808 	}
1809 
1810 	/* Set destination address and psm */
1811 	bacpy(&chan->dst, dst);
1812 	chan->dst_type = dst_type;
1813 
1814 	chan->psm = psm;
1815 	chan->dcid = cid;
1816 
1817 	auth_type = l2cap_get_auth_type(chan);
1818 
1819 	if (bdaddr_type_is_le(dst_type))
1820 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1821 				   chan->sec_level, auth_type);
1822 	else
1823 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1824 				   chan->sec_level, auth_type);
1825 
1826 	if (IS_ERR(hcon)) {
1827 		err = PTR_ERR(hcon);
1828 		goto done;
1829 	}
1830 
1831 	conn = l2cap_conn_add(hcon);
1832 	if (!conn) {
1833 		hci_conn_drop(hcon);
1834 		err = -ENOMEM;
1835 		goto done;
1836 	}
1837 
1838 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1839 		hci_conn_drop(hcon);
1840 		err = -EBUSY;
1841 		goto done;
1842 	}
1843 
1844 	/* Update source addr of the socket */
1845 	bacpy(&chan->src, &hcon->src);
1846 	chan->src_type = bdaddr_type(hcon, hcon->src_type);
1847 
1848 	l2cap_chan_unlock(chan);
1849 	l2cap_chan_add(conn, chan);
1850 	l2cap_chan_lock(chan);
1851 
1852 	/* l2cap_chan_add takes its own ref so we can drop this one */
1853 	hci_conn_drop(hcon);
1854 
1855 	l2cap_state_change(chan, BT_CONNECT);
1856 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1857 
1858 	if (hcon->state == BT_CONNECTED) {
1859 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1860 			__clear_chan_timer(chan);
1861 			if (l2cap_chan_check_security(chan))
1862 				l2cap_state_change(chan, BT_CONNECTED);
1863 		} else
1864 			l2cap_do_start(chan);
1865 	}
1866 
1867 	err = 0;
1868 
1869 done:
1870 	l2cap_chan_unlock(chan);
1871 	hci_dev_unlock(hdev);
1872 	hci_dev_put(hdev);
1873 	return err;
1874 }
1875 
1876 static void l2cap_monitor_timeout(struct work_struct *work)
1877 {
1878 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1879 					       monitor_timer.work);
1880 
1881 	BT_DBG("chan %p", chan);
1882 
1883 	l2cap_chan_lock(chan);
1884 
1885 	if (!chan->conn) {
1886 		l2cap_chan_unlock(chan);
1887 		l2cap_chan_put(chan);
1888 		return;
1889 	}
1890 
1891 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1892 
1893 	l2cap_chan_unlock(chan);
1894 	l2cap_chan_put(chan);
1895 }
1896 
1897 static void l2cap_retrans_timeout(struct work_struct *work)
1898 {
1899 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1900 					       retrans_timer.work);
1901 
1902 	BT_DBG("chan %p", chan);
1903 
1904 	l2cap_chan_lock(chan);
1905 
1906 	if (!chan->conn) {
1907 		l2cap_chan_unlock(chan);
1908 		l2cap_chan_put(chan);
1909 		return;
1910 	}
1911 
1912 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1913 	l2cap_chan_unlock(chan);
1914 	l2cap_chan_put(chan);
1915 }
1916 
1917 static void l2cap_streaming_send(struct l2cap_chan *chan,
1918 				 struct sk_buff_head *skbs)
1919 {
1920 	struct sk_buff *skb;
1921 	struct l2cap_ctrl *control;
1922 
1923 	BT_DBG("chan %p, skbs %p", chan, skbs);
1924 
1925 	if (__chan_is_moving(chan))
1926 		return;
1927 
1928 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1929 
1930 	while (!skb_queue_empty(&chan->tx_q)) {
1931 
1932 		skb = skb_dequeue(&chan->tx_q);
1933 
1934 		bt_cb(skb)->control.retries = 1;
1935 		control = &bt_cb(skb)->control;
1936 
1937 		control->reqseq = 0;
1938 		control->txseq = chan->next_tx_seq;
1939 
1940 		__pack_control(chan, control, skb);
1941 
1942 		if (chan->fcs == L2CAP_FCS_CRC16) {
1943 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1944 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1945 		}
1946 
1947 		l2cap_do_send(chan, skb);
1948 
1949 		BT_DBG("Sent txseq %u", control->txseq);
1950 
1951 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1952 		chan->frames_sent++;
1953 	}
1954 }
1955 
1956 static int l2cap_ertm_send(struct l2cap_chan *chan)
1957 {
1958 	struct sk_buff *skb, *tx_skb;
1959 	struct l2cap_ctrl *control;
1960 	int sent = 0;
1961 
1962 	BT_DBG("chan %p", chan);
1963 
1964 	if (chan->state != BT_CONNECTED)
1965 		return -ENOTCONN;
1966 
1967 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1968 		return 0;
1969 
1970 	if (__chan_is_moving(chan))
1971 		return 0;
1972 
1973 	while (chan->tx_send_head &&
1974 	       chan->unacked_frames < chan->remote_tx_win &&
1975 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1976 
1977 		skb = chan->tx_send_head;
1978 
1979 		bt_cb(skb)->control.retries = 1;
1980 		control = &bt_cb(skb)->control;
1981 
1982 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1983 			control->final = 1;
1984 
1985 		control->reqseq = chan->buffer_seq;
1986 		chan->last_acked_seq = chan->buffer_seq;
1987 		control->txseq = chan->next_tx_seq;
1988 
1989 		__pack_control(chan, control, skb);
1990 
1991 		if (chan->fcs == L2CAP_FCS_CRC16) {
1992 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1993 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1994 		}
1995 
1996 		/* Clone after data has been modified. Data is assumed to be
1997 		 * read-only (for locking purposes) on cloned sk_buffs.
1998 		 */
1999 		tx_skb = skb_clone(skb, GFP_KERNEL);
2000 
2001 		if (!tx_skb)
2002 			break;
2003 
2004 		__set_retrans_timer(chan);
2005 
2006 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2007 		chan->unacked_frames++;
2008 		chan->frames_sent++;
2009 		sent++;
2010 
2011 		if (skb_queue_is_last(&chan->tx_q, skb))
2012 			chan->tx_send_head = NULL;
2013 		else
2014 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2015 
2016 		l2cap_do_send(chan, tx_skb);
2017 		BT_DBG("Sent txseq %u", control->txseq);
2018 	}
2019 
2020 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2021 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2022 
2023 	return sent;
2024 }
2025 
2026 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2027 {
2028 	struct l2cap_ctrl control;
2029 	struct sk_buff *skb;
2030 	struct sk_buff *tx_skb;
2031 	u16 seq;
2032 
2033 	BT_DBG("chan %p", chan);
2034 
2035 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2036 		return;
2037 
2038 	if (__chan_is_moving(chan))
2039 		return;
2040 
2041 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2042 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2043 
2044 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2045 		if (!skb) {
2046 			BT_DBG("Error: Can't retransmit seq %u, frame missing",
2047 			       seq);
2048 			continue;
2049 		}
2050 
2051 		bt_cb(skb)->control.retries++;
2052 		control = bt_cb(skb)->control;
2053 
2054 		if (chan->max_tx != 0 &&
2055 		    bt_cb(skb)->control.retries > chan->max_tx) {
2056 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2057 			l2cap_send_disconn_req(chan, ECONNRESET);
2058 			l2cap_seq_list_clear(&chan->retrans_list);
2059 			break;
2060 		}
2061 
2062 		control.reqseq = chan->buffer_seq;
2063 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2064 			control.final = 1;
2065 		else
2066 			control.final = 0;
2067 
2068 		if (skb_cloned(skb)) {
2069 			/* Cloned sk_buffs are read-only, so we need a
2070 			 * writeable copy
2071 			 */
2072 			tx_skb = skb_copy(skb, GFP_KERNEL);
2073 		} else {
2074 			tx_skb = skb_clone(skb, GFP_KERNEL);
2075 		}
2076 
2077 		if (!tx_skb) {
2078 			l2cap_seq_list_clear(&chan->retrans_list);
2079 			break;
2080 		}
2081 
2082 		/* Update skb contents */
2083 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2084 			put_unaligned_le32(__pack_extended_control(&control),
2085 					   tx_skb->data + L2CAP_HDR_SIZE);
2086 		} else {
2087 			put_unaligned_le16(__pack_enhanced_control(&control),
2088 					   tx_skb->data + L2CAP_HDR_SIZE);
2089 		}
2090 
2091 		if (chan->fcs == L2CAP_FCS_CRC16) {
2092 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2093 			put_unaligned_le16(fcs, skb_put(tx_skb,
2094 							L2CAP_FCS_SIZE));
2095 		}
2096 
2097 		l2cap_do_send(chan, tx_skb);
2098 
2099 		BT_DBG("Resent txseq %u", control.txseq);
2100 
2101 		chan->last_acked_seq = chan->buffer_seq;
2102 	}
2103 }
2104 
2105 static void l2cap_retransmit(struct l2cap_chan *chan,
2106 			     struct l2cap_ctrl *control)
2107 {
2108 	BT_DBG("chan %p, control %p", chan, control);
2109 
2110 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2111 	l2cap_ertm_resend(chan);
2112 }
2113 
2114 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2115 				 struct l2cap_ctrl *control)
2116 {
2117 	struct sk_buff *skb;
2118 
2119 	BT_DBG("chan %p, control %p", chan, control);
2120 
2121 	if (control->poll)
2122 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2123 
2124 	l2cap_seq_list_clear(&chan->retrans_list);
2125 
2126 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2127 		return;
2128 
2129 	if (chan->unacked_frames) {
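		/* Walk to the first frame the peer has not acknowledged
		 * (txseq == reqseq), then queue every frame from there up
		 * to tx_send_head for retransmission.
		 */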
2130 		skb_queue_walk(&chan->tx_q, skb) {
2131 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2132 			    skb == chan->tx_send_head)
2133 				break;
2134 		}
2135 
2136 		skb_queue_walk_from(&chan->tx_q, skb) {
2137 			if (skb == chan->tx_send_head)
2138 				break;
2139 
2140 			l2cap_seq_list_append(&chan->retrans_list,
2141 					      bt_cb(skb)->control.txseq);
2142 		}
2143 
2144 		l2cap_ertm_resend(chan);
2145 	}
2146 }
2147 
2148 static void l2cap_send_ack(struct l2cap_chan *chan)
2149 {
2150 	struct l2cap_ctrl control;
2151 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2152 					 chan->last_acked_seq);
2153 	int threshold;
2154 
2155 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2156 	       chan, chan->last_acked_seq, chan->buffer_seq);
2157 
2158 	memset(&control, 0, sizeof(control));
2159 	control.sframe = 1;
2160 
2161 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2162 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2163 		__clear_ack_timer(chan);
2164 		control.super = L2CAP_SUPER_RNR;
2165 		control.reqseq = chan->buffer_seq;
2166 		l2cap_send_sframe(chan, &control);
2167 	} else {
2168 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2169 			l2cap_ertm_send(chan);
2170 			/* If any i-frames were sent, they included an ack */
2171 			if (chan->buffer_seq == chan->last_acked_seq)
2172 				frames_to_ack = 0;
2173 		}
2174 
2175 		/* Ack now if the window is 3/4ths full.
2176 		 * Calculate without mul or div
2177 		 */
2178 		threshold = chan->ack_win;
2179 		threshold += threshold << 1;
2180 		threshold >>= 2;
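		/* threshold is now (3 * ack_win) / 4, computed with a shift
		 * and an add instead of a multiply and a divide; e.g. an
		 * ack_win of 63 yields a threshold of 47.
		 */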
2181 
2182 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2183 		       threshold);
2184 
2185 		if (frames_to_ack >= threshold) {
2186 			__clear_ack_timer(chan);
2187 			control.super = L2CAP_SUPER_RR;
2188 			control.reqseq = chan->buffer_seq;
2189 			l2cap_send_sframe(chan, &control);
2190 			frames_to_ack = 0;
2191 		}
2192 
2193 		if (frames_to_ack)
2194 			__set_ack_timer(chan);
2195 	}
2196 }
2197 
2198 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2199 					 struct msghdr *msg, int len,
2200 					 int count, struct sk_buff *skb)
2201 {
2202 	struct l2cap_conn *conn = chan->conn;
2203 	struct sk_buff **frag;
2204 	int sent = 0;
2205 
2206 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2207 		return -EFAULT;
2208 
2209 	sent += count;
2210 	len  -= count;
2211 
2212 	/* Continuation fragments (no L2CAP header) */
2213 	frag = &skb_shinfo(skb)->frag_list;
2214 	while (len) {
2215 		struct sk_buff *tmp;
2216 
2217 		count = min_t(unsigned int, conn->mtu, len);
2218 
2219 		tmp = chan->ops->alloc_skb(chan, count,
2220 					   msg->msg_flags & MSG_DONTWAIT);
2221 		if (IS_ERR(tmp))
2222 			return PTR_ERR(tmp);
2223 
2224 		*frag = tmp;
2225 
2226 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2227 			return -EFAULT;
2228 
2229 		(*frag)->priority = skb->priority;
2230 
2231 		sent += count;
2232 		len  -= count;
2233 
2234 		skb->len += (*frag)->len;
2235 		skb->data_len += (*frag)->len;
2236 
2237 		frag = &(*frag)->next;
2238 	}
2239 
2240 	return sent;
2241 }
2242 
2243 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2244 						 struct msghdr *msg, size_t len,
2245 						 u32 priority)
2246 {
2247 	struct l2cap_conn *conn = chan->conn;
2248 	struct sk_buff *skb;
2249 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2250 	struct l2cap_hdr *lh;
2251 
2252 	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2253 	       __le16_to_cpu(chan->psm), len, priority);
2254 
2255 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2256 
2257 	skb = chan->ops->alloc_skb(chan, count + hlen,
2258 				   msg->msg_flags & MSG_DONTWAIT);
2259 	if (IS_ERR(skb))
2260 		return skb;
2261 
2262 	skb->priority = priority;
2263 
2264 	/* Create L2CAP header */
2265 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2266 	lh->cid = cpu_to_le16(chan->dcid);
2267 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2268 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2269 
2270 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2271 	if (unlikely(err < 0)) {
2272 		kfree_skb(skb);
2273 		return ERR_PTR(err);
2274 	}
2275 	return skb;
2276 }
2277 
2278 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2279 					      struct msghdr *msg, size_t len,
2280 					      u32 priority)
2281 {
2282 	struct l2cap_conn *conn = chan->conn;
2283 	struct sk_buff *skb;
2284 	int err, count;
2285 	struct l2cap_hdr *lh;
2286 
2287 	BT_DBG("chan %p len %zu", chan, len);
2288 
2289 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2290 
2291 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2292 				   msg->msg_flags & MSG_DONTWAIT);
2293 	if (IS_ERR(skb))
2294 		return skb;
2295 
2296 	skb->priority = priority;
2297 
2298 	/* Create L2CAP header */
2299 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2300 	lh->cid = cpu_to_le16(chan->dcid);
2301 	lh->len = cpu_to_le16(len);
2302 
2303 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2304 	if (unlikely(err < 0)) {
2305 		kfree_skb(skb);
2306 		return ERR_PTR(err);
2307 	}
2308 	return skb;
2309 }
2310 
2311 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2312 					       struct msghdr *msg, size_t len,
2313 					       u16 sdulen)
2314 {
2315 	struct l2cap_conn *conn = chan->conn;
2316 	struct sk_buff *skb;
2317 	int err, count, hlen;
2318 	struct l2cap_hdr *lh;
2319 
2320 	BT_DBG("chan %p len %zu", chan, len);
2321 
2322 	if (!conn)
2323 		return ERR_PTR(-ENOTCONN);
2324 
2325 	hlen = __ertm_hdr_size(chan);
2326 
2327 	if (sdulen)
2328 		hlen += L2CAP_SDULEN_SIZE;
2329 
2330 	if (chan->fcs == L2CAP_FCS_CRC16)
2331 		hlen += L2CAP_FCS_SIZE;
2332 
2333 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2334 
2335 	skb = chan->ops->alloc_skb(chan, count + hlen,
2336 				   msg->msg_flags & MSG_DONTWAIT);
2337 	if (IS_ERR(skb))
2338 		return skb;
2339 
2340 	/* Create L2CAP header */
2341 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2342 	lh->cid = cpu_to_le16(chan->dcid);
2343 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2344 
2345 	/* Control header is populated later */
2346 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2347 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2348 	else
2349 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2350 
2351 	if (sdulen)
2352 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2353 
2354 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2355 	if (unlikely(err < 0)) {
2356 		kfree_skb(skb);
2357 		return ERR_PTR(err);
2358 	}
2359 
2360 	bt_cb(skb)->control.fcs = chan->fcs;
2361 	bt_cb(skb)->control.retries = 0;
2362 	return skb;
2363 }
2364 
2365 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2366 			     struct sk_buff_head *seg_queue,
2367 			     struct msghdr *msg, size_t len)
2368 {
2369 	struct sk_buff *skb;
2370 	u16 sdu_len;
2371 	size_t pdu_len;
2372 	u8 sar;
2373 
2374 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2375 
2376 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2377 	 * so fragmented skbs are not used.  The HCI layer's handling
2378 	 * of fragmented skbs is not compatible with ERTM's queueing.
2379 	 */
2380 
2381 	/* PDU size is derived from the HCI MTU */
2382 	pdu_len = chan->conn->mtu;
2383 
2384 	/* Constrain PDU size for BR/EDR connections */
2385 	if (!chan->hs_hcon)
2386 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2387 
2388 	/* Adjust for largest possible L2CAP overhead. */
2389 	if (chan->fcs)
2390 		pdu_len -= L2CAP_FCS_SIZE;
2391 
2392 	pdu_len -= __ertm_hdr_size(chan);
2393 
2394 	/* Remote device may have requested smaller PDUs */
2395 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2396 
2397 	if (len <= pdu_len) {
2398 		sar = L2CAP_SAR_UNSEGMENTED;
2399 		sdu_len = 0;
2400 		pdu_len = len;
2401 	} else {
2402 		sar = L2CAP_SAR_START;
2403 		sdu_len = len;
2404 		pdu_len -= L2CAP_SDULEN_SIZE;
2405 	}
2406 
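	/* Only the first PDU of a segmented SDU (SAR start) carries the
	 * 2-byte SDU length field, so its payload is reduced by
	 * L2CAP_SDULEN_SIZE; the size is added back for the continuation
	 * and end PDUs below.
	 */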
2407 	while (len > 0) {
2408 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2409 
2410 		if (IS_ERR(skb)) {
2411 			__skb_queue_purge(seg_queue);
2412 			return PTR_ERR(skb);
2413 		}
2414 
2415 		bt_cb(skb)->control.sar = sar;
2416 		__skb_queue_tail(seg_queue, skb);
2417 
2418 		len -= pdu_len;
2419 		if (sdu_len) {
2420 			sdu_len = 0;
2421 			pdu_len += L2CAP_SDULEN_SIZE;
2422 		}
2423 
2424 		if (len <= pdu_len) {
2425 			sar = L2CAP_SAR_END;
2426 			pdu_len = len;
2427 		} else {
2428 			sar = L2CAP_SAR_CONTINUE;
2429 		}
2430 	}
2431 
2432 	return 0;
2433 }
2434 
2435 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2436 		    u32 priority)
2437 {
2438 	struct sk_buff *skb;
2439 	int err;
2440 	struct sk_buff_head seg_queue;
2441 
2442 	/* Connectionless channel */
2443 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2444 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2445 		if (IS_ERR(skb))
2446 			return PTR_ERR(skb);
2447 
2448 		l2cap_do_send(chan, skb);
2449 		return len;
2450 	}
2451 
2452 	switch (chan->mode) {
2453 	case L2CAP_MODE_BASIC:
2454 		/* Check outgoing MTU */
2455 		if (len > chan->omtu)
2456 			return -EMSGSIZE;
2457 
2458 		/* Create a basic PDU */
2459 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2460 		if (IS_ERR(skb))
2461 			return PTR_ERR(skb);
2462 
2463 		l2cap_do_send(chan, skb);
2464 		err = len;
2465 		break;
2466 
2467 	case L2CAP_MODE_ERTM:
2468 	case L2CAP_MODE_STREAMING:
2469 		/* Check outgoing MTU */
2470 		if (len > chan->omtu) {
2471 			err = -EMSGSIZE;
2472 			break;
2473 		}
2474 
2475 		__skb_queue_head_init(&seg_queue);
2476 
2477 		/* Do segmentation before calling in to the state machine,
2478 		 * since it's possible to block while waiting for memory
2479 		 * allocation.
2480 		 */
2481 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2482 
2483 		/* The channel could have been closed while segmenting,
2484 		 * check that it is still connected.
2485 		 */
2486 		if (chan->state != BT_CONNECTED) {
2487 			__skb_queue_purge(&seg_queue);
2488 			err = -ENOTCONN;
2489 		}
2490 
2491 		if (err)
2492 			break;
2493 
2494 		if (chan->mode == L2CAP_MODE_ERTM)
2495 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2496 		else
2497 			l2cap_streaming_send(chan, &seg_queue);
2498 
2499 		err = len;
2500 
2501 		/* If the skbs were not queued for sending, they'll still be in
2502 		 * seg_queue and need to be purged.
2503 		 */
2504 		__skb_queue_purge(&seg_queue);
2505 		break;
2506 
2507 	default:
2508 		BT_DBG("bad mode %1.1x", chan->mode);
2509 		err = -EBADFD;
2510 	}
2511 
2512 	return err;
2513 }
2514 
2515 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2516 {
2517 	struct l2cap_ctrl control;
2518 	u16 seq;
2519 
2520 	BT_DBG("chan %p, txseq %u", chan, txseq);
2521 
2522 	memset(&control, 0, sizeof(control));
2523 	control.sframe = 1;
2524 	control.super = L2CAP_SUPER_SREJ;
2525 
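	/* Request selective retransmission of every missing frame between
	 * the next expected txseq and the frame just received, skipping
	 * frames already buffered in the SREJ queue.
	 */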
2526 	for (seq = chan->expected_tx_seq; seq != txseq;
2527 	     seq = __next_seq(chan, seq)) {
2528 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2529 			control.reqseq = seq;
2530 			l2cap_send_sframe(chan, &control);
2531 			l2cap_seq_list_append(&chan->srej_list, seq);
2532 		}
2533 	}
2534 
2535 	chan->expected_tx_seq = __next_seq(chan, txseq);
2536 }
2537 
2538 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2539 {
2540 	struct l2cap_ctrl control;
2541 
2542 	BT_DBG("chan %p", chan);
2543 
2544 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2545 		return;
2546 
2547 	memset(&control, 0, sizeof(control));
2548 	control.sframe = 1;
2549 	control.super = L2CAP_SUPER_SREJ;
2550 	control.reqseq = chan->srej_list.tail;
2551 	l2cap_send_sframe(chan, &control);
2552 }
2553 
2554 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2555 {
2556 	struct l2cap_ctrl control;
2557 	u16 initial_head;
2558 	u16 seq;
2559 
2560 	BT_DBG("chan %p, txseq %u", chan, txseq);
2561 
2562 	memset(&control, 0, sizeof(control));
2563 	control.sframe = 1;
2564 	control.super = L2CAP_SUPER_SREJ;
2565 
2566 	/* Capture initial list head to allow only one pass through the list. */
2567 	initial_head = chan->srej_list.head;
2568 
2569 	do {
2570 		seq = l2cap_seq_list_pop(&chan->srej_list);
2571 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2572 			break;
2573 
2574 		control.reqseq = seq;
2575 		l2cap_send_sframe(chan, &control);
2576 		l2cap_seq_list_append(&chan->srej_list, seq);
2577 	} while (chan->srej_list.head != initial_head);
2578 }
2579 
2580 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2581 {
2582 	struct sk_buff *acked_skb;
2583 	u16 ackseq;
2584 
2585 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2586 
2587 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2588 		return;
2589 
2590 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2591 	       chan->expected_ack_seq, chan->unacked_frames);
2592 
2593 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2594 	     ackseq = __next_seq(chan, ackseq)) {
2595 
2596 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2597 		if (acked_skb) {
2598 			skb_unlink(acked_skb, &chan->tx_q);
2599 			kfree_skb(acked_skb);
2600 			chan->unacked_frames--;
2601 		}
2602 	}
2603 
2604 	chan->expected_ack_seq = reqseq;
2605 
2606 	if (chan->unacked_frames == 0)
2607 		__clear_retrans_timer(chan);
2608 
2609 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2610 }
2611 
2612 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2613 {
2614 	BT_DBG("chan %p", chan);
2615 
2616 	chan->expected_tx_seq = chan->buffer_seq;
2617 	l2cap_seq_list_clear(&chan->srej_list);
2618 	skb_queue_purge(&chan->srej_q);
2619 	chan->rx_state = L2CAP_RX_STATE_RECV;
2620 }
2621 
2622 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2623 				struct l2cap_ctrl *control,
2624 				struct sk_buff_head *skbs, u8 event)
2625 {
2626 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2627 	       event);
2628 
2629 	switch (event) {
2630 	case L2CAP_EV_DATA_REQUEST:
2631 		if (chan->tx_send_head == NULL)
2632 			chan->tx_send_head = skb_peek(skbs);
2633 
2634 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2635 		l2cap_ertm_send(chan);
2636 		break;
2637 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2638 		BT_DBG("Enter LOCAL_BUSY");
2639 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2640 
2641 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2642 			/* The SREJ_SENT state must be aborted if we are to
2643 			 * enter the LOCAL_BUSY state.
2644 			 */
2645 			l2cap_abort_rx_srej_sent(chan);
2646 		}
2647 
2648 		l2cap_send_ack(chan);
2649 
2650 		break;
2651 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2652 		BT_DBG("Exit LOCAL_BUSY");
2653 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2654 
2655 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2656 			struct l2cap_ctrl local_control;
2657 
2658 			memset(&local_control, 0, sizeof(local_control));
2659 			local_control.sframe = 1;
2660 			local_control.super = L2CAP_SUPER_RR;
2661 			local_control.poll = 1;
2662 			local_control.reqseq = chan->buffer_seq;
2663 			l2cap_send_sframe(chan, &local_control);
2664 
2665 			chan->retry_count = 1;
2666 			__set_monitor_timer(chan);
2667 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2668 		}
2669 		break;
2670 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2671 		l2cap_process_reqseq(chan, control->reqseq);
2672 		break;
2673 	case L2CAP_EV_EXPLICIT_POLL:
2674 		l2cap_send_rr_or_rnr(chan, 1);
2675 		chan->retry_count = 1;
2676 		__set_monitor_timer(chan);
2677 		__clear_ack_timer(chan);
2678 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2679 		break;
2680 	case L2CAP_EV_RETRANS_TO:
2681 		l2cap_send_rr_or_rnr(chan, 1);
2682 		chan->retry_count = 1;
2683 		__set_monitor_timer(chan);
2684 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2685 		break;
2686 	case L2CAP_EV_RECV_FBIT:
2687 		/* Nothing to process */
2688 		break;
2689 	default:
2690 		break;
2691 	}
2692 }
2693 
2694 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2695 				  struct l2cap_ctrl *control,
2696 				  struct sk_buff_head *skbs, u8 event)
2697 {
2698 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2699 	       event);
2700 
2701 	switch (event) {
2702 	case L2CAP_EV_DATA_REQUEST:
2703 		if (chan->tx_send_head == NULL)
2704 			chan->tx_send_head = skb_peek(skbs);
2705 		/* Queue data, but don't send. */
2706 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2707 		break;
2708 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2709 		BT_DBG("Enter LOCAL_BUSY");
2710 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2711 
2712 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2713 			/* The SREJ_SENT state must be aborted if we are to
2714 			 * enter the LOCAL_BUSY state.
2715 			 */
2716 			l2cap_abort_rx_srej_sent(chan);
2717 		}
2718 
2719 		l2cap_send_ack(chan);
2720 
2721 		break;
2722 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2723 		BT_DBG("Exit LOCAL_BUSY");
2724 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2725 
2726 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2727 			struct l2cap_ctrl local_control;
2728 			memset(&local_control, 0, sizeof(local_control));
2729 			local_control.sframe = 1;
2730 			local_control.super = L2CAP_SUPER_RR;
2731 			local_control.poll = 1;
2732 			local_control.reqseq = chan->buffer_seq;
2733 			l2cap_send_sframe(chan, &local_control);
2734 
2735 			chan->retry_count = 1;
2736 			__set_monitor_timer(chan);
2737 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2738 		}
2739 		break;
2740 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2741 		l2cap_process_reqseq(chan, control->reqseq);
2742 
2743 		/* Fall through */
2744 
2745 	case L2CAP_EV_RECV_FBIT:
2746 		if (control && control->final) {
2747 			__clear_monitor_timer(chan);
2748 			if (chan->unacked_frames > 0)
2749 				__set_retrans_timer(chan);
2750 			chan->retry_count = 0;
2751 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2752 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2753 		}
2754 		break;
2755 	case L2CAP_EV_EXPLICIT_POLL:
2756 		/* Ignore */
2757 		break;
2758 	case L2CAP_EV_MONITOR_TO:
2759 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2760 			l2cap_send_rr_or_rnr(chan, 1);
2761 			__set_monitor_timer(chan);
2762 			chan->retry_count++;
2763 		} else {
2764 			l2cap_send_disconn_req(chan, ECONNABORTED);
2765 		}
2766 		break;
2767 	default:
2768 		break;
2769 	}
2770 }
2771 
2772 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2773 		     struct sk_buff_head *skbs, u8 event)
2774 {
2775 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2776 	       chan, control, skbs, event, chan->tx_state);
2777 
2778 	switch (chan->tx_state) {
2779 	case L2CAP_TX_STATE_XMIT:
2780 		l2cap_tx_state_xmit(chan, control, skbs, event);
2781 		break;
2782 	case L2CAP_TX_STATE_WAIT_F:
2783 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2784 		break;
2785 	default:
2786 		/* Ignore event */
2787 		break;
2788 	}
2789 }
2790 
2791 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2792 			     struct l2cap_ctrl *control)
2793 {
2794 	BT_DBG("chan %p, control %p", chan, control);
2795 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2796 }
2797 
2798 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2799 				  struct l2cap_ctrl *control)
2800 {
2801 	BT_DBG("chan %p, control %p", chan, control);
2802 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2803 }
2804 
2805 /* Copy frame to all raw sockets on that connection */
2806 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2807 {
2808 	struct sk_buff *nskb;
2809 	struct l2cap_chan *chan;
2810 
2811 	BT_DBG("conn %p", conn);
2812 
2813 	mutex_lock(&conn->chan_lock);
2814 
2815 	list_for_each_entry(chan, &conn->chan_l, list) {
2816 		if (chan->chan_type != L2CAP_CHAN_RAW)
2817 			continue;
2818 
2819 		/* Don't send frame to the channel it came from */
2820 		if (bt_cb(skb)->chan == chan)
2821 			continue;
2822 
2823 		nskb = skb_clone(skb, GFP_KERNEL);
2824 		if (!nskb)
2825 			continue;
2826 		if (chan->ops->recv(chan, nskb))
2827 			kfree_skb(nskb);
2828 	}
2829 
2830 	mutex_unlock(&conn->chan_lock);
2831 }
2832 
2833 /* ---- L2CAP signalling commands ---- */
2834 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2835 				       u8 ident, u16 dlen, void *data)
2836 {
2837 	struct sk_buff *skb, **frag;
2838 	struct l2cap_cmd_hdr *cmd;
2839 	struct l2cap_hdr *lh;
2840 	int len, count;
2841 
2842 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2843 	       conn, code, ident, dlen);
2844 
2845 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2846 		return NULL;
2847 
2848 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2849 	count = min_t(unsigned int, conn->mtu, len);
2850 
2851 	skb = bt_skb_alloc(count, GFP_KERNEL);
2852 	if (!skb)
2853 		return NULL;
2854 
2855 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2856 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2857 
2858 	if (conn->hcon->type == LE_LINK)
2859 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2860 	else
2861 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2862 
2863 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2864 	cmd->code  = code;
2865 	cmd->ident = ident;
2866 	cmd->len   = cpu_to_le16(dlen);
2867 
2868 	if (dlen) {
2869 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2870 		memcpy(skb_put(skb, count), data, count);
2871 		data += count;
2872 	}
2873 
2874 	len -= skb->len;
2875 
2876 	/* Continuation fragments (no L2CAP header) */
2877 	frag = &skb_shinfo(skb)->frag_list;
2878 	while (len) {
2879 		count = min_t(unsigned int, conn->mtu, len);
2880 
2881 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2882 		if (!*frag)
2883 			goto fail;
2884 
2885 		memcpy(skb_put(*frag, count), data, count);
2886 
2887 		len  -= count;
2888 		data += count;
2889 
2890 		frag = &(*frag)->next;
2891 	}
2892 
2893 	return skb;
2894 
2895 fail:
2896 	kfree_skb(skb);
2897 	return NULL;
2898 }
2899 
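/* Configuration options are encoded as a type/length/value sequence:
 * a 1-byte type (including the hint bit), a 1-byte length and a
 * variable-length value.  1-, 2- and 4-byte values are returned by
 * value in *val; longer values are returned as a pointer to the
 * option data.
 */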
2900 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2901 				     unsigned long *val)
2902 {
2903 	struct l2cap_conf_opt *opt = *ptr;
2904 	int len;
2905 
2906 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2907 	*ptr += len;
2908 
2909 	*type = opt->type;
2910 	*olen = opt->len;
2911 
2912 	switch (opt->len) {
2913 	case 1:
2914 		*val = *((u8 *) opt->val);
2915 		break;
2916 
2917 	case 2:
2918 		*val = get_unaligned_le16(opt->val);
2919 		break;
2920 
2921 	case 4:
2922 		*val = get_unaligned_le32(opt->val);
2923 		break;
2924 
2925 	default:
2926 		*val = (unsigned long) opt->val;
2927 		break;
2928 	}
2929 
2930 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2931 	return len;
2932 }
2933 
2934 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2935 {
2936 	struct l2cap_conf_opt *opt = *ptr;
2937 
2938 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2939 
2940 	opt->type = type;
2941 	opt->len  = len;
2942 
2943 	switch (len) {
2944 	case 1:
2945 		*((u8 *) opt->val)  = val;
2946 		break;
2947 
2948 	case 2:
2949 		put_unaligned_le16(val, opt->val);
2950 		break;
2951 
2952 	case 4:
2953 		put_unaligned_le32(val, opt->val);
2954 		break;
2955 
2956 	default:
2957 		memcpy(opt->val, (void *) val, len);
2958 		break;
2959 	}
2960 
2961 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2962 }
2963 
2964 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2965 {
2966 	struct l2cap_conf_efs efs;
2967 
2968 	switch (chan->mode) {
2969 	case L2CAP_MODE_ERTM:
2970 		efs.id		= chan->local_id;
2971 		efs.stype	= chan->local_stype;
2972 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2973 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2974 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2975 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2976 		break;
2977 
2978 	case L2CAP_MODE_STREAMING:
2979 		efs.id		= 1;
2980 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2981 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2982 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2983 		efs.acc_lat	= 0;
2984 		efs.flush_to	= 0;
2985 		break;
2986 
2987 	default:
2988 		return;
2989 	}
2990 
2991 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2992 			   (unsigned long) &efs);
2993 }
2994 
2995 static void l2cap_ack_timeout(struct work_struct *work)
2996 {
2997 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2998 					       ack_timer.work);
2999 	u16 frames_to_ack;
3000 
3001 	BT_DBG("chan %p", chan);
3002 
3003 	l2cap_chan_lock(chan);
3004 
3005 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3006 				     chan->last_acked_seq);
3007 
3008 	if (frames_to_ack)
3009 		l2cap_send_rr_or_rnr(chan, 0);
3010 
3011 	l2cap_chan_unlock(chan);
3012 	l2cap_chan_put(chan);
3013 }
3014 
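/* Reset all ERTM/streaming transmit and receive state for a channel.
 * For ERTM, also (re)initialize the retransmission, monitor and ack
 * timers and the SREJ and retransmission sequence lists.
 */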
3015 int l2cap_ertm_init(struct l2cap_chan *chan)
3016 {
3017 	int err;
3018 
3019 	chan->next_tx_seq = 0;
3020 	chan->expected_tx_seq = 0;
3021 	chan->expected_ack_seq = 0;
3022 	chan->unacked_frames = 0;
3023 	chan->buffer_seq = 0;
3024 	chan->frames_sent = 0;
3025 	chan->last_acked_seq = 0;
3026 	chan->sdu = NULL;
3027 	chan->sdu_last_frag = NULL;
3028 	chan->sdu_len = 0;
3029 
3030 	skb_queue_head_init(&chan->tx_q);
3031 
3032 	chan->local_amp_id = AMP_ID_BREDR;
3033 	chan->move_id = AMP_ID_BREDR;
3034 	chan->move_state = L2CAP_MOVE_STABLE;
3035 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3036 
3037 	if (chan->mode != L2CAP_MODE_ERTM)
3038 		return 0;
3039 
3040 	chan->rx_state = L2CAP_RX_STATE_RECV;
3041 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3042 
3043 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3044 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3045 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3046 
3047 	skb_queue_head_init(&chan->srej_q);
3048 
3049 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3050 	if (err < 0)
3051 		return err;
3052 
3053 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3054 	if (err < 0)
3055 		l2cap_seq_list_free(&chan->srej_list);
3056 
3057 	return err;
3058 }
3059 
3060 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3061 {
3062 	switch (mode) {
3063 	case L2CAP_MODE_STREAMING:
3064 	case L2CAP_MODE_ERTM:
3065 		if (l2cap_mode_supported(mode, remote_feat_mask))
3066 			return mode;
3067 		/* fall through */
3068 	default:
3069 		return L2CAP_MODE_BASIC;
3070 	}
3071 }
3072 
3073 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3074 {
3075 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3076 }
3077 
3078 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3079 {
3080 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3081 }
3082 
3083 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3084 				      struct l2cap_conf_rfc *rfc)
3085 {
3086 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3087 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3088 
3089 		/* Class 1 devices must have ERTM timeouts
3090 		 * exceeding the Link Supervision Timeout.  The
3091 		 * default Link Supervision Timeout for AMP
3092 		 * controllers is 10 seconds.
3093 		 *
3094 		 * Class 1 devices use 0xffffffff for their
3095 		 * best-effort flush timeout, so the clamping logic
3096 		 * will result in a timeout that meets the above
3097 		 * requirement.  ERTM timeouts are 16-bit values, so
3098 		 * the maximum timeout is 65.535 seconds.
3099 		 */
3100 
3101 		/* Convert timeout to milliseconds and round */
3102 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3103 
3104 		/* This is the recommended formula for class 2 devices
3105 		 * that start ERTM timers when packets are sent to the
3106 		 * controller.
3107 		 */
3108 		ertm_to = 3 * ertm_to + 500;
3109 
3110 		if (ertm_to > 0xffff)
3111 			ertm_to = 0xffff;
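		/* Example: the Class 1 value of 0xffffffff rounds up to
		 * 4294968 after the conversion above; 3 * 4294968 + 500
		 * overflows 16 bits and is clamped to the 65535 ms
		 * (65.535 s) ERTM maximum.
		 */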
3112 
3113 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3114 		rfc->monitor_timeout = rfc->retrans_timeout;
3115 	} else {
3116 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3117 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3118 	}
3119 }
3120 
3121 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3122 {
3123 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3124 	    __l2cap_ews_supported(chan->conn)) {
3125 		/* use extended control field */
3126 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3127 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3128 	} else {
3129 		chan->tx_win = min_t(u16, chan->tx_win,
3130 				     L2CAP_DEFAULT_TX_WINDOW);
3131 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3132 	}
3133 	chan->ack_win = chan->tx_win;
3134 }
3135 
3136 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3137 {
3138 	struct l2cap_conf_req *req = data;
3139 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3140 	void *ptr = req->data;
3141 	u16 size;
3142 
3143 	BT_DBG("chan %p", chan);
3144 
3145 	if (chan->num_conf_req || chan->num_conf_rsp)
3146 		goto done;
3147 
3148 	switch (chan->mode) {
3149 	case L2CAP_MODE_STREAMING:
3150 	case L2CAP_MODE_ERTM:
3151 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3152 			break;
3153 
3154 		if (__l2cap_efs_supported(chan->conn))
3155 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3156 
3157 		/* fall through */
3158 	default:
3159 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3160 		break;
3161 	}
3162 
3163 done:
3164 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3165 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3166 
3167 	switch (chan->mode) {
3168 	case L2CAP_MODE_BASIC:
3169 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3170 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3171 			break;
3172 
3173 		rfc.mode            = L2CAP_MODE_BASIC;
3174 		rfc.txwin_size      = 0;
3175 		rfc.max_transmit    = 0;
3176 		rfc.retrans_timeout = 0;
3177 		rfc.monitor_timeout = 0;
3178 		rfc.max_pdu_size    = 0;
3179 
3180 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3181 				   (unsigned long) &rfc);
3182 		break;
3183 
3184 	case L2CAP_MODE_ERTM:
3185 		rfc.mode            = L2CAP_MODE_ERTM;
3186 		rfc.max_transmit    = chan->max_tx;
3187 
3188 		__l2cap_set_ertm_timeouts(chan, &rfc);
3189 
3190 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3191 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3192 			     L2CAP_FCS_SIZE);
3193 		rfc.max_pdu_size = cpu_to_le16(size);
3194 
3195 		l2cap_txwin_setup(chan);
3196 
3197 		rfc.txwin_size = min_t(u16, chan->tx_win,
3198 				       L2CAP_DEFAULT_TX_WINDOW);
3199 
3200 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3201 				   (unsigned long) &rfc);
3202 
3203 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3204 			l2cap_add_opt_efs(&ptr, chan);
3205 
3206 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3207 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3208 					   chan->tx_win);
3209 
3210 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3211 			if (chan->fcs == L2CAP_FCS_NONE ||
3212 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3213 				chan->fcs = L2CAP_FCS_NONE;
3214 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3215 						   chan->fcs);
3216 			}
3217 		break;
3218 
3219 	case L2CAP_MODE_STREAMING:
3220 		l2cap_txwin_setup(chan);
3221 		rfc.mode            = L2CAP_MODE_STREAMING;
3222 		rfc.txwin_size      = 0;
3223 		rfc.max_transmit    = 0;
3224 		rfc.retrans_timeout = 0;
3225 		rfc.monitor_timeout = 0;
3226 
3227 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3228 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3229 			     L2CAP_FCS_SIZE);
3230 		rfc.max_pdu_size = cpu_to_le16(size);
3231 
3232 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3233 				   (unsigned long) &rfc);
3234 
3235 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3236 			l2cap_add_opt_efs(&ptr, chan);
3237 
3238 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3239 			if (chan->fcs == L2CAP_FCS_NONE ||
3240 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3241 				chan->fcs = L2CAP_FCS_NONE;
3242 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3243 						   chan->fcs);
3244 			}
3245 		break;
3246 	}
3247 
3248 	req->dcid  = cpu_to_le16(chan->dcid);
3249 	req->flags = __constant_cpu_to_le16(0);
3250 
3251 	return ptr - data;
3252 }
3253 
3254 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3255 {
3256 	struct l2cap_conf_rsp *rsp = data;
3257 	void *ptr = rsp->data;
3258 	void *req = chan->conf_req;
3259 	int len = chan->conf_len;
3260 	int type, hint, olen;
3261 	unsigned long val;
3262 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3263 	struct l2cap_conf_efs efs;
3264 	u8 remote_efs = 0;
3265 	u16 mtu = L2CAP_DEFAULT_MTU;
3266 	u16 result = L2CAP_CONF_SUCCESS;
3267 	u16 size;
3268 
3269 	BT_DBG("chan %p", chan);
3270 
3271 	while (len >= L2CAP_CONF_OPT_SIZE) {
3272 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3273 
3274 		hint  = type & L2CAP_CONF_HINT;
3275 		type &= L2CAP_CONF_MASK;
3276 
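		/* Unknown options with the hint bit set are silently
		 * ignored; unknown non-hint options set the response
		 * result to L2CAP_CONF_UNKNOWN and echo the offending
		 * type back to the remote side.
		 */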
3277 		switch (type) {
3278 		case L2CAP_CONF_MTU:
3279 			mtu = val;
3280 			break;
3281 
3282 		case L2CAP_CONF_FLUSH_TO:
3283 			chan->flush_to = val;
3284 			break;
3285 
3286 		case L2CAP_CONF_QOS:
3287 			break;
3288 
3289 		case L2CAP_CONF_RFC:
3290 			if (olen == sizeof(rfc))
3291 				memcpy(&rfc, (void *) val, olen);
3292 			break;
3293 
3294 		case L2CAP_CONF_FCS:
3295 			if (val == L2CAP_FCS_NONE)
3296 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3297 			break;
3298 
3299 		case L2CAP_CONF_EFS:
3300 			remote_efs = 1;
3301 			if (olen == sizeof(efs))
3302 				memcpy(&efs, (void *) val, olen);
3303 			break;
3304 
3305 		case L2CAP_CONF_EWS:
3306 			if (!chan->conn->hs_enabled)
3307 				return -ECONNREFUSED;
3308 
3309 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3310 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3311 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3312 			chan->remote_tx_win = val;
3313 			break;
3314 
3315 		default:
3316 			if (hint)
3317 				break;
3318 
3319 			result = L2CAP_CONF_UNKNOWN;
3320 			*((u8 *) ptr++) = type;
3321 			break;
3322 		}
3323 	}
3324 
3325 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3326 		goto done;
3327 
3328 	switch (chan->mode) {
3329 	case L2CAP_MODE_STREAMING:
3330 	case L2CAP_MODE_ERTM:
3331 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3332 			chan->mode = l2cap_select_mode(rfc.mode,
3333 						       chan->conn->feat_mask);
3334 			break;
3335 		}
3336 
3337 		if (remote_efs) {
3338 			if (__l2cap_efs_supported(chan->conn))
3339 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3340 			else
3341 				return -ECONNREFUSED;
3342 		}
3343 
3344 		if (chan->mode != rfc.mode)
3345 			return -ECONNREFUSED;
3346 
3347 		break;
3348 	}
3349 
3350 done:
3351 	if (chan->mode != rfc.mode) {
3352 		result = L2CAP_CONF_UNACCEPT;
3353 		rfc.mode = chan->mode;
3354 
3355 		if (chan->num_conf_rsp == 1)
3356 			return -ECONNREFUSED;
3357 
3358 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3359 				   (unsigned long) &rfc);
3360 	}
3361 
3362 	if (result == L2CAP_CONF_SUCCESS) {
3363 		/* Configure output options and let the other side know
3364 		 * which ones we don't like. */
3365 
3366 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3367 			result = L2CAP_CONF_UNACCEPT;
3368 		else {
3369 			chan->omtu = mtu;
3370 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3371 		}
3372 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3373 
3374 		if (remote_efs) {
3375 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3376 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3377 			    efs.stype != chan->local_stype) {
3378 
3379 				result = L2CAP_CONF_UNACCEPT;
3380 
3381 				if (chan->num_conf_req >= 1)
3382 					return -ECONNREFUSED;
3383 
3384 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3385 						   sizeof(efs),
3386 						   (unsigned long) &efs);
3387 			} else {
3388 				/* Send PENDING Conf Rsp */
3389 				result = L2CAP_CONF_PENDING;
3390 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3391 			}
3392 		}
3393 
3394 		switch (rfc.mode) {
3395 		case L2CAP_MODE_BASIC:
3396 			chan->fcs = L2CAP_FCS_NONE;
3397 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3398 			break;
3399 
3400 		case L2CAP_MODE_ERTM:
3401 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3402 				chan->remote_tx_win = rfc.txwin_size;
3403 			else
3404 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3405 
3406 			chan->remote_max_tx = rfc.max_transmit;
3407 
3408 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3409 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3410 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3411 			rfc.max_pdu_size = cpu_to_le16(size);
3412 			chan->remote_mps = size;
3413 
3414 			__l2cap_set_ertm_timeouts(chan, &rfc);
3415 
3416 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3417 
3418 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3419 					   sizeof(rfc), (unsigned long) &rfc);
3420 
3421 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3422 				chan->remote_id = efs.id;
3423 				chan->remote_stype = efs.stype;
3424 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3425 				chan->remote_flush_to =
3426 					le32_to_cpu(efs.flush_to);
3427 				chan->remote_acc_lat =
3428 					le32_to_cpu(efs.acc_lat);
3429 				chan->remote_sdu_itime =
3430 					le32_to_cpu(efs.sdu_itime);
3431 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3432 						   sizeof(efs),
3433 						   (unsigned long) &efs);
3434 			}
3435 			break;
3436 
3437 		case L2CAP_MODE_STREAMING:
3438 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3439 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3440 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3441 			rfc.max_pdu_size = cpu_to_le16(size);
3442 			chan->remote_mps = size;
3443 
3444 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3445 
3446 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3447 					   (unsigned long) &rfc);
3448 
3449 			break;
3450 
3451 		default:
3452 			result = L2CAP_CONF_UNACCEPT;
3453 
3454 			memset(&rfc, 0, sizeof(rfc));
3455 			rfc.mode = chan->mode;
3456 		}
3457 
3458 		if (result == L2CAP_CONF_SUCCESS)
3459 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3460 	}
3461 	rsp->scid   = cpu_to_le16(chan->dcid);
3462 	rsp->result = cpu_to_le16(result);
3463 	rsp->flags  = __constant_cpu_to_le16(0);
3464 
3465 	return ptr - data;
3466 }
3467 
3468 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3469 				void *data, u16 *result)
3470 {
3471 	struct l2cap_conf_req *req = data;
3472 	void *ptr = req->data;
3473 	int type, olen;
3474 	unsigned long val;
3475 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3476 	struct l2cap_conf_efs efs;
3477 
3478 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3479 
3480 	while (len >= L2CAP_CONF_OPT_SIZE) {
3481 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3482 
3483 		switch (type) {
3484 		case L2CAP_CONF_MTU:
3485 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3486 				*result = L2CAP_CONF_UNACCEPT;
3487 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3488 			} else
3489 				chan->imtu = val;
3490 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3491 			break;
3492 
3493 		case L2CAP_CONF_FLUSH_TO:
3494 			chan->flush_to = val;
3495 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3496 					   2, chan->flush_to);
3497 			break;
3498 
3499 		case L2CAP_CONF_RFC:
3500 			if (olen == sizeof(rfc))
3501 				memcpy(&rfc, (void *)val, olen);
3502 
3503 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3504 			    rfc.mode != chan->mode)
3505 				return -ECONNREFUSED;
3506 
3507 			chan->fcs = 0;
3508 
3509 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3510 					   sizeof(rfc), (unsigned long) &rfc);
3511 			break;
3512 
3513 		case L2CAP_CONF_EWS:
3514 			chan->ack_win = min_t(u16, val, chan->ack_win);
3515 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3516 					   chan->tx_win);
3517 			break;
3518 
3519 		case L2CAP_CONF_EFS:
3520 			if (olen == sizeof(efs))
3521 				memcpy(&efs, (void *)val, olen);
3522 
3523 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3524 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3525 			    efs.stype != chan->local_stype)
3526 				return -ECONNREFUSED;
3527 
3528 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3529 					   (unsigned long) &efs);
3530 			break;
3531 
3532 		case L2CAP_CONF_FCS:
3533 			if (*result == L2CAP_CONF_PENDING)
3534 				if (val == L2CAP_FCS_NONE)
3535 					set_bit(CONF_RECV_NO_FCS,
3536 						&chan->conf_state);
3537 			break;
3538 		}
3539 	}
3540 
3541 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3542 		return -ECONNREFUSED;
3543 
3544 	chan->mode = rfc.mode;
3545 
3546 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3547 		switch (rfc.mode) {
3548 		case L2CAP_MODE_ERTM:
3549 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3550 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3551 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3552 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3553 				chan->ack_win = min_t(u16, chan->ack_win,
3554 						      rfc.txwin_size);
3555 
3556 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3557 				chan->local_msdu = le16_to_cpu(efs.msdu);
3558 				chan->local_sdu_itime =
3559 					le32_to_cpu(efs.sdu_itime);
3560 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3561 				chan->local_flush_to =
3562 					le32_to_cpu(efs.flush_to);
3563 			}
3564 			break;
3565 
3566 		case L2CAP_MODE_STREAMING:
3567 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3568 		}
3569 	}
3570 
3571 	req->dcid   = cpu_to_le16(chan->dcid);
3572 	req->flags  = __constant_cpu_to_le16(0);
3573 
3574 	return ptr - data;
3575 }
3576 
3577 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3578 				u16 result, u16 flags)
3579 {
3580 	struct l2cap_conf_rsp *rsp = data;
3581 	void *ptr = rsp->data;
3582 
3583 	BT_DBG("chan %p", chan);
3584 
3585 	rsp->scid   = cpu_to_le16(chan->dcid);
3586 	rsp->result = cpu_to_le16(result);
3587 	rsp->flags  = cpu_to_le16(flags);
3588 
3589 	return ptr - data;
3590 }
3591 
3592 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3593 {
3594 	struct l2cap_conn_rsp rsp;
3595 	struct l2cap_conn *conn = chan->conn;
3596 	u8 buf[128];
3597 	u8 rsp_code;
3598 
3599 	rsp.scid   = cpu_to_le16(chan->dcid);
3600 	rsp.dcid   = cpu_to_le16(chan->scid);
3601 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3602 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3603 
3604 	if (chan->hs_hcon)
3605 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3606 	else
3607 		rsp_code = L2CAP_CONN_RSP;
3608 
3609 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3610 
3611 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3612 
3613 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3614 		return;
3615 
3616 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3617 		       l2cap_build_conf_req(chan, buf), buf);
3618 	chan->num_conf_req++;
3619 }
3620 
3621 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3622 {
3623 	int type, olen;
3624 	unsigned long val;
3625 	/* Use sane default values in case a misbehaving remote device
3626 	 * did not send an RFC or extended window size option.
3627 	 */
3628 	u16 txwin_ext = chan->ack_win;
3629 	struct l2cap_conf_rfc rfc = {
3630 		.mode = chan->mode,
3631 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3632 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3633 		.max_pdu_size = cpu_to_le16(chan->imtu),
3634 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3635 	};
3636 
3637 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3638 
3639 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3640 		return;
3641 
3642 	while (len >= L2CAP_CONF_OPT_SIZE) {
3643 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3644 
3645 		switch (type) {
3646 		case L2CAP_CONF_RFC:
3647 			if (olen == sizeof(rfc))
3648 				memcpy(&rfc, (void *)val, olen);
3649 			break;
3650 		case L2CAP_CONF_EWS:
3651 			txwin_ext = val;
3652 			break;
3653 		}
3654 	}
3655 
3656 	switch (rfc.mode) {
3657 	case L2CAP_MODE_ERTM:
3658 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3659 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3660 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3661 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3662 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3663 		else
3664 			chan->ack_win = min_t(u16, chan->ack_win,
3665 					      rfc.txwin_size);
3666 		break;
3667 	case L2CAP_MODE_STREAMING:
3668 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3669 	}
3670 }
3671 
3672 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3673 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3674 				    u8 *data)
3675 {
3676 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3677 
3678 	if (cmd_len < sizeof(*rej))
3679 		return -EPROTO;
3680 
3681 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3682 		return 0;
3683 
3684 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3685 	    cmd->ident == conn->info_ident) {
3686 		cancel_delayed_work(&conn->info_timer);
3687 
3688 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3689 		conn->info_ident = 0;
3690 
3691 		l2cap_conn_start(conn);
3692 	}
3693 
3694 	return 0;
3695 }
3696 
3697 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3698 					struct l2cap_cmd_hdr *cmd,
3699 					u8 *data, u8 rsp_code, u8 amp_id)
3700 {
3701 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3702 	struct l2cap_conn_rsp rsp;
3703 	struct l2cap_chan *chan = NULL, *pchan;
3704 	int result, status = L2CAP_CS_NO_INFO;
3705 
3706 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3707 	__le16 psm = req->psm;
3708 
3709 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3710 
3711 	/* Check if we have a socket listening on this PSM */
3712 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3713 					 &conn->hcon->dst);
3714 	if (!pchan) {
3715 		result = L2CAP_CR_BAD_PSM;
3716 		goto sendresp;
3717 	}
3718 
3719 	mutex_lock(&conn->chan_lock);
3720 	l2cap_chan_lock(pchan);
3721 
3722 	/* Check if the ACL is secure enough (if not SDP) */
3723 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3724 	    !hci_conn_check_link_mode(conn->hcon)) {
3725 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3726 		result = L2CAP_CR_SEC_BLOCK;
3727 		goto response;
3728 	}
3729 
3730 	result = L2CAP_CR_NO_MEM;
3731 
3732 	/* Check if we already have a channel with that dcid */
3733 	if (__l2cap_get_chan_by_dcid(conn, scid))
3734 		goto response;
3735 
3736 	chan = pchan->ops->new_connection(pchan);
3737 	if (!chan)
3738 		goto response;
3739 
3740 	/* For certain devices (ex: HID mouse), support for authentication,
3741 	 * pairing and bonding is optional. For such devices, in order to avoid
3742 	 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3743 	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3744 	 */
3745 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3746 
3747 	bacpy(&chan->src, &conn->hcon->src);
3748 	bacpy(&chan->dst, &conn->hcon->dst);
3749 	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3750 	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3751 	chan->psm  = psm;
3752 	chan->dcid = scid;
3753 	chan->local_amp_id = amp_id;
3754 
3755 	__l2cap_chan_add(conn, chan);
3756 
3757 	dcid = chan->scid;
3758 
3759 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3760 
3761 	chan->ident = cmd->ident;
3762 
3763 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3764 		if (l2cap_chan_check_security(chan)) {
3765 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3766 				l2cap_state_change(chan, BT_CONNECT2);
3767 				result = L2CAP_CR_PEND;
3768 				status = L2CAP_CS_AUTHOR_PEND;
3769 				chan->ops->defer(chan);
3770 			} else {
3771 				/* Force pending result for AMP controllers.
3772 				 * The connection will succeed after the
3773 				 * physical link is up.
3774 				 */
3775 				if (amp_id == AMP_ID_BREDR) {
3776 					l2cap_state_change(chan, BT_CONFIG);
3777 					result = L2CAP_CR_SUCCESS;
3778 				} else {
3779 					l2cap_state_change(chan, BT_CONNECT2);
3780 					result = L2CAP_CR_PEND;
3781 				}
3782 				status = L2CAP_CS_NO_INFO;
3783 			}
3784 		} else {
3785 			l2cap_state_change(chan, BT_CONNECT2);
3786 			result = L2CAP_CR_PEND;
3787 			status = L2CAP_CS_AUTHEN_PEND;
3788 		}
3789 	} else {
3790 		l2cap_state_change(chan, BT_CONNECT2);
3791 		result = L2CAP_CR_PEND;
3792 		status = L2CAP_CS_NO_INFO;
3793 	}
3794 
3795 response:
3796 	l2cap_chan_unlock(pchan);
3797 	mutex_unlock(&conn->chan_lock);
3798 
3799 sendresp:
3800 	rsp.scid   = cpu_to_le16(scid);
3801 	rsp.dcid   = cpu_to_le16(dcid);
3802 	rsp.result = cpu_to_le16(result);
3803 	rsp.status = cpu_to_le16(status);
3804 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3805 
3806 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3807 		struct l2cap_info_req info;
3808 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3809 
3810 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3811 		conn->info_ident = l2cap_get_ident(conn);
3812 
3813 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3814 
3815 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3816 			       sizeof(info), &info);
3817 	}
3818 
3819 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3820 	    result == L2CAP_CR_SUCCESS) {
3821 		u8 buf[128];
3822 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3823 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3824 			       l2cap_build_conf_req(chan, buf), buf);
3825 		chan->num_conf_req++;
3826 	}
3827 
3828 	return chan;
3829 }
3830 
3831 static int l2cap_connect_req(struct l2cap_conn *conn,
3832 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3833 {
3834 	struct hci_dev *hdev = conn->hcon->hdev;
3835 	struct hci_conn *hcon = conn->hcon;
3836 
3837 	if (cmd_len < sizeof(struct l2cap_conn_req))
3838 		return -EPROTO;
3839 
3840 	hci_dev_lock(hdev);
3841 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3842 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3843 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3844 				      hcon->dst_type, 0, NULL, 0,
3845 				      hcon->dev_class);
3846 	hci_dev_unlock(hdev);
3847 
3848 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3849 	return 0;
3850 }
3851 
3852 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3853 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3854 				    u8 *data)
3855 {
3856 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3857 	u16 scid, dcid, result, status;
3858 	struct l2cap_chan *chan;
3859 	u8 req[128];
3860 	int err;
3861 
3862 	if (cmd_len < sizeof(*rsp))
3863 		return -EPROTO;
3864 
3865 	scid   = __le16_to_cpu(rsp->scid);
3866 	dcid   = __le16_to_cpu(rsp->dcid);
3867 	result = __le16_to_cpu(rsp->result);
3868 	status = __le16_to_cpu(rsp->status);
3869 
3870 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3871 	       dcid, scid, result, status);
3872 
3873 	mutex_lock(&conn->chan_lock);
3874 
3875 	if (scid) {
3876 		chan = __l2cap_get_chan_by_scid(conn, scid);
3877 		if (!chan) {
3878 			err = -EBADSLT;
3879 			goto unlock;
3880 		}
3881 	} else {
3882 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3883 		if (!chan) {
3884 			err = -EBADSLT;
3885 			goto unlock;
3886 		}
3887 	}
3888 
3889 	err = 0;
3890 
3891 	l2cap_chan_lock(chan);
3892 
3893 	switch (result) {
3894 	case L2CAP_CR_SUCCESS:
3895 		l2cap_state_change(chan, BT_CONFIG);
3896 		chan->ident = 0;
3897 		chan->dcid = dcid;
3898 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3899 
3900 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3901 			break;
3902 
3903 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3904 			       l2cap_build_conf_req(chan, req), req);
3905 		chan->num_conf_req++;
3906 		break;
3907 
3908 	case L2CAP_CR_PEND:
3909 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3910 		break;
3911 
3912 	default:
3913 		l2cap_chan_del(chan, ECONNREFUSED);
3914 		break;
3915 	}
3916 
3917 	l2cap_chan_unlock(chan);
3918 
3919 unlock:
3920 	mutex_unlock(&conn->chan_lock);
3921 
3922 	return err;
3923 }
3924 
3925 static inline void set_default_fcs(struct l2cap_chan *chan)
3926 {
3927 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3928 	 * sides request it.
3929 	 */
3930 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3931 		chan->fcs = L2CAP_FCS_NONE;
3932 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3933 		chan->fcs = L2CAP_FCS_CRC16;
3934 }
3935 
3936 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3937 				    u8 ident, u16 flags)
3938 {
3939 	struct l2cap_conn *conn = chan->conn;
3940 
3941 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3942 	       flags);
3943 
3944 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3945 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3946 
3947 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3948 		       l2cap_build_conf_rsp(chan, data,
3949 					    L2CAP_CONF_SUCCESS, flags), data);
3950 }
3951 
3952 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3953 				   u16 scid, u16 dcid)
3954 {
3955 	struct l2cap_cmd_rej_cid rej;
3956 
3957 	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3958 	rej.scid = __cpu_to_le16(scid);
3959 	rej.dcid = __cpu_to_le16(dcid);
3960 
3961 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3962 }
3963 
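/* Handle a Configure Request.  Option data may be split across several
 * requests when the continuation flag is set; fragments are accumulated
 * in chan->conf_req and only parsed once the final request arrives.
 */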
3964 static inline int l2cap_config_req(struct l2cap_conn *conn,
3965 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3966 				   u8 *data)
3967 {
3968 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3969 	u16 dcid, flags;
3970 	u8 rsp[64];
3971 	struct l2cap_chan *chan;
3972 	int len, err = 0;
3973 
3974 	if (cmd_len < sizeof(*req))
3975 		return -EPROTO;
3976 
3977 	dcid  = __le16_to_cpu(req->dcid);
3978 	flags = __le16_to_cpu(req->flags);
3979 
3980 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3981 
3982 	chan = l2cap_get_chan_by_scid(conn, dcid);
3983 	if (!chan) {
3984 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
3985 		return 0;
3986 	}
3987 
3988 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3989 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
3990 				       chan->dcid);
3991 		goto unlock;
3992 	}
3993 
3994 	/* Reject if config buffer is too small. */
3995 	len = cmd_len - sizeof(*req);
3996 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
3997 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3998 			       l2cap_build_conf_rsp(chan, rsp,
3999 			       L2CAP_CONF_REJECT, flags), rsp);
4000 		goto unlock;
4001 	}
4002 
4003 	/* Store config. */
4004 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4005 	chan->conf_len += len;
4006 
4007 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4008 		/* Incomplete config. Send empty response. */
4009 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4010 			       l2cap_build_conf_rsp(chan, rsp,
4011 			       L2CAP_CONF_SUCCESS, flags), rsp);
4012 		goto unlock;
4013 	}
4014 
4015 	/* Complete config. */
4016 	len = l2cap_parse_conf_req(chan, rsp);
4017 	if (len < 0) {
4018 		l2cap_send_disconn_req(chan, ECONNRESET);
4019 		goto unlock;
4020 	}
4021 
4022 	chan->ident = cmd->ident;
4023 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4024 	chan->num_conf_rsp++;
4025 
4026 	/* Reset config buffer. */
4027 	chan->conf_len = 0;
4028 
4029 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4030 		goto unlock;
4031 
4032 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4033 		set_default_fcs(chan);
4034 
4035 		if (chan->mode == L2CAP_MODE_ERTM ||
4036 		    chan->mode == L2CAP_MODE_STREAMING)
4037 			err = l2cap_ertm_init(chan);
4038 
4039 		if (err < 0)
4040 			l2cap_send_disconn_req(chan, -err);
4041 		else
4042 			l2cap_chan_ready(chan);
4043 
4044 		goto unlock;
4045 	}
4046 
4047 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4048 		u8 buf[64];
4049 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4050 			       l2cap_build_conf_req(chan, buf), buf);
4051 		chan->num_conf_req++;
4052 	}
4053 
4054 	/* Got Conf Rsp PENDING from remote side and assume we sent
4055 	 * Conf Rsp PENDING in the code above */
4056 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4057 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4058 
4059 		/* check compatibility */
4060 
4061 		/* Send rsp for BR/EDR channel */
4062 		if (!chan->hs_hcon)
4063 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4064 		else
4065 			chan->ident = cmd->ident;
4066 	}
4067 
4068 unlock:
4069 	l2cap_chan_unlock(chan);
4070 	return err;
4071 }
4072 
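/* Handle a Configure Response.  Success and pending results are folded
 * into the channel configuration; an unacceptable-parameters result
 * triggers a new Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP),
 * and any other result disconnects the channel.
 */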
4073 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4074 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4075 				   u8 *data)
4076 {
4077 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4078 	u16 scid, flags, result;
4079 	struct l2cap_chan *chan;
4080 	int len = cmd_len - sizeof(*rsp);
4081 	int err = 0;
4082 
4083 	if (cmd_len < sizeof(*rsp))
4084 		return -EPROTO;
4085 
4086 	scid   = __le16_to_cpu(rsp->scid);
4087 	flags  = __le16_to_cpu(rsp->flags);
4088 	result = __le16_to_cpu(rsp->result);
4089 
4090 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4091 	       result, len);
4092 
4093 	chan = l2cap_get_chan_by_scid(conn, scid);
4094 	if (!chan)
4095 		return 0;
4096 
4097 	switch (result) {
4098 	case L2CAP_CONF_SUCCESS:
4099 		l2cap_conf_rfc_get(chan, rsp->data, len);
4100 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4101 		break;
4102 
4103 	case L2CAP_CONF_PENDING:
4104 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4105 
4106 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4107 			char buf[64];
4108 
4109 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4110 						   buf, &result);
4111 			if (len < 0) {
4112 				l2cap_send_disconn_req(chan, ECONNRESET);
4113 				goto done;
4114 			}
4115 
4116 			if (!chan->hs_hcon) {
4117 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4118 							0);
4119 			} else {
4120 				if (l2cap_check_efs(chan)) {
4121 					amp_create_logical_link(chan);
4122 					chan->ident = cmd->ident;
4123 				}
4124 			}
4125 		}
4126 		goto done;
4127 
4128 	case L2CAP_CONF_UNACCEPT:
4129 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4130 			char req[64];
4131 
4132 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4133 				l2cap_send_disconn_req(chan, ECONNRESET);
4134 				goto done;
4135 			}
4136 
4137 			/* throw out any old stored conf requests */
4138 			result = L2CAP_CONF_SUCCESS;
4139 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4140 						   req, &result);
4141 			if (len < 0) {
4142 				l2cap_send_disconn_req(chan, ECONNRESET);
4143 				goto done;
4144 			}
4145 
4146 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4147 				       L2CAP_CONF_REQ, len, req);
4148 			chan->num_conf_req++;
4149 			if (result != L2CAP_CONF_SUCCESS)
4150 				goto done;
4151 			break;
4152 		}
4153 
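		/* Negotiation limit reached, fall through and disconnect */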
4154 	default:
4155 		l2cap_chan_set_err(chan, ECONNRESET);
4156 
4157 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4158 		l2cap_send_disconn_req(chan, ECONNRESET);
4159 		goto done;
4160 	}
4161 
4162 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4163 		goto done;
4164 
4165 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4166 
4167 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4168 		set_default_fcs(chan);
4169 
4170 		if (chan->mode == L2CAP_MODE_ERTM ||
4171 		    chan->mode == L2CAP_MODE_STREAMING)
4172 			err = l2cap_ertm_init(chan);
4173 
4174 		if (err < 0)
4175 			l2cap_send_disconn_req(chan, -err);
4176 		else
4177 			l2cap_chan_ready(chan);
4178 	}
4179 
4180 done:
4181 	l2cap_chan_unlock(chan);
4182 	return err;
4183 }
4184 
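/* Handle a Disconnect Request: acknowledge it with a Disconnect Response
 * and then shut down and free the matching channel.
 */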
4185 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4186 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4187 				       u8 *data)
4188 {
4189 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4190 	struct l2cap_disconn_rsp rsp;
4191 	u16 dcid, scid;
4192 	struct l2cap_chan *chan;
4193 
4194 	if (cmd_len != sizeof(*req))
4195 		return -EPROTO;
4196 
4197 	scid = __le16_to_cpu(req->scid);
4198 	dcid = __le16_to_cpu(req->dcid);
4199 
4200 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4201 
4202 	mutex_lock(&conn->chan_lock);
4203 
4204 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4205 	if (!chan) {
4206 		mutex_unlock(&conn->chan_lock);
4207 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4208 		return 0;
4209 	}
4210 
4211 	l2cap_chan_lock(chan);
4212 
4213 	rsp.dcid = cpu_to_le16(chan->scid);
4214 	rsp.scid = cpu_to_le16(chan->dcid);
4215 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4216 
4217 	chan->ops->set_shutdown(chan);
4218 
4219 	l2cap_chan_hold(chan);
4220 	l2cap_chan_del(chan, ECONNRESET);
4221 
4222 	l2cap_chan_unlock(chan);
4223 
4224 	chan->ops->close(chan);
4225 	l2cap_chan_put(chan);
4226 
4227 	mutex_unlock(&conn->chan_lock);
4228 
4229 	return 0;
4230 }
4231 
4232 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4233 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4234 				       u8 *data)
4235 {
4236 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4237 	u16 dcid, scid;
4238 	struct l2cap_chan *chan;
4239 
4240 	if (cmd_len != sizeof(*rsp))
4241 		return -EPROTO;
4242 
4243 	scid = __le16_to_cpu(rsp->scid);
4244 	dcid = __le16_to_cpu(rsp->dcid);
4245 
4246 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4247 
4248 	mutex_lock(&conn->chan_lock);
4249 
4250 	chan = __l2cap_get_chan_by_scid(conn, scid);
4251 	if (!chan) {
4252 		mutex_unlock(&conn->chan_lock);
4253 		return 0;
4254 	}
4255 
4256 	l2cap_chan_lock(chan);
4257 
4258 	l2cap_chan_hold(chan);
4259 	l2cap_chan_del(chan, 0);
4260 
4261 	l2cap_chan_unlock(chan);
4262 
4263 	chan->ops->close(chan);
4264 	l2cap_chan_put(chan);
4265 
4266 	mutex_unlock(&conn->chan_lock);
4267 
4268 	return 0;
4269 }
4270 
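/* Handle an Information Request by returning the extended feature mask,
 * the fixed channel map, or a "not supported" result for unknown types.
 */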
4271 static inline int l2cap_information_req(struct l2cap_conn *conn,
4272 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4273 					u8 *data)
4274 {
4275 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4276 	u16 type;
4277 
4278 	if (cmd_len != sizeof(*req))
4279 		return -EPROTO;
4280 
4281 	type = __le16_to_cpu(req->type);
4282 
4283 	BT_DBG("type 0x%4.4x", type);
4284 
4285 	if (type == L2CAP_IT_FEAT_MASK) {
4286 		u8 buf[8];
4287 		u32 feat_mask = l2cap_feat_mask;
4288 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4289 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4290 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4291 		if (!disable_ertm)
4292 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4293 				| L2CAP_FEAT_FCS;
4294 		if (conn->hs_enabled)
4295 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4296 				| L2CAP_FEAT_EXT_WINDOW;
4297 
4298 		put_unaligned_le32(feat_mask, rsp->data);
4299 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4300 			       buf);
4301 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4302 		u8 buf[12];
4303 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4304 
4305 		if (conn->hs_enabled)
4306 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4307 		else
4308 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4309 
4310 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4311 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4312 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4313 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4314 			       buf);
4315 	} else {
4316 		struct l2cap_info_rsp rsp;
4317 		rsp.type   = cpu_to_le16(type);
4318 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4319 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4320 			       &rsp);
4321 	}
4322 
4323 	return 0;
4324 }
4325 
4326 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4327 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4328 					u8 *data)
4329 {
4330 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4331 	u16 type, result;
4332 
4333 	if (cmd_len < sizeof(*rsp))
4334 		return -EPROTO;
4335 
4336 	type   = __le16_to_cpu(rsp->type);
4337 	result = __le16_to_cpu(rsp->result);
4338 
4339 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4340 
4341 	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4342 	if (cmd->ident != conn->info_ident ||
4343 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4344 		return 0;
4345 
4346 	cancel_delayed_work(&conn->info_timer);
4347 
4348 	if (result != L2CAP_IR_SUCCESS) {
4349 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4350 		conn->info_ident = 0;
4351 
4352 		l2cap_conn_start(conn);
4353 
4354 		return 0;
4355 	}
4356 
4357 	switch (type) {
4358 	case L2CAP_IT_FEAT_MASK:
4359 		conn->feat_mask = get_unaligned_le32(rsp->data);
4360 
4361 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4362 			struct l2cap_info_req req;
4363 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4364 
4365 			conn->info_ident = l2cap_get_ident(conn);
4366 
4367 			l2cap_send_cmd(conn, conn->info_ident,
4368 				       L2CAP_INFO_REQ, sizeof(req), &req);
4369 		} else {
4370 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4371 			conn->info_ident = 0;
4372 
4373 			l2cap_conn_start(conn);
4374 		}
4375 		break;
4376 
4377 	case L2CAP_IT_FIXED_CHAN:
4378 		conn->fixed_chan_mask = rsp->data[0];
4379 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4380 		conn->info_ident = 0;
4381 
4382 		l2cap_conn_start(conn);
4383 		break;
4384 	}
4385 
4386 	return 0;
4387 }
4388 
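/* Handle a Create Channel Request (AMP).  Controller id 0 is treated as
 * a plain BR/EDR connect; any other id must name a powered-up AMP
 * controller, otherwise a "bad AMP id" response is returned.
 */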
4389 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4390 				    struct l2cap_cmd_hdr *cmd,
4391 				    u16 cmd_len, void *data)
4392 {
4393 	struct l2cap_create_chan_req *req = data;
4394 	struct l2cap_create_chan_rsp rsp;
4395 	struct l2cap_chan *chan;
4396 	struct hci_dev *hdev;
4397 	u16 psm, scid;
4398 
4399 	if (cmd_len != sizeof(*req))
4400 		return -EPROTO;
4401 
4402 	if (!conn->hs_enabled)
4403 		return -EINVAL;
4404 
4405 	psm = le16_to_cpu(req->psm);
4406 	scid = le16_to_cpu(req->scid);
4407 
4408 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4409 
4410 	/* For controller id 0 make BR/EDR connection */
4411 	if (req->amp_id == AMP_ID_BREDR) {
4412 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4413 			      req->amp_id);
4414 		return 0;
4415 	}
4416 
4417 	/* Validate AMP controller id */
4418 	hdev = hci_dev_get(req->amp_id);
4419 	if (!hdev)
4420 		goto error;
4421 
4422 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4423 		hci_dev_put(hdev);
4424 		goto error;
4425 	}
4426 
4427 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4428 			     req->amp_id);
4429 	if (chan) {
4430 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4431 		struct hci_conn *hs_hcon;
4432 
4433 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4434 						  &conn->hcon->dst);
4435 		if (!hs_hcon) {
4436 			hci_dev_put(hdev);
4437 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4438 					       chan->dcid);
4439 			return 0;
4440 		}
4441 
4442 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4443 
4444 		mgr->bredr_chan = chan;
4445 		chan->hs_hcon = hs_hcon;
4446 		chan->fcs = L2CAP_FCS_NONE;
4447 		conn->mtu = hdev->block_mtu;
4448 	}
4449 
4450 	hci_dev_put(hdev);
4451 
4452 	return 0;
4453 
4454 error:
4455 	rsp.dcid = 0;
4456 	rsp.scid = cpu_to_le16(scid);
4457 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4458 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4459 
4460 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4461 		       sizeof(rsp), &rsp);
4462 
4463 	return 0;
4464 }
4465 
4466 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4467 {
4468 	struct l2cap_move_chan_req req;
4469 	u8 ident;
4470 
4471 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4472 
4473 	ident = l2cap_get_ident(chan->conn);
4474 	chan->ident = ident;
4475 
4476 	req.icid = cpu_to_le16(chan->scid);
4477 	req.dest_amp_id = dest_amp_id;
4478 
4479 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4480 		       &req);
4481 
4482 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4483 }
4484 
4485 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4486 {
4487 	struct l2cap_move_chan_rsp rsp;
4488 
4489 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4490 
4491 	rsp.icid = cpu_to_le16(chan->dcid);
4492 	rsp.result = cpu_to_le16(result);
4493 
4494 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4495 		       sizeof(rsp), &rsp);
4496 }
4497 
4498 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4499 {
4500 	struct l2cap_move_chan_cfm cfm;
4501 
4502 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4503 
4504 	chan->ident = l2cap_get_ident(chan->conn);
4505 
4506 	cfm.icid = cpu_to_le16(chan->scid);
4507 	cfm.result = cpu_to_le16(result);
4508 
4509 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4510 		       sizeof(cfm), &cfm);
4511 
4512 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4513 }
4514 
4515 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4516 {
4517 	struct l2cap_move_chan_cfm cfm;
4518 
4519 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4520 
4521 	cfm.icid = cpu_to_le16(icid);
4522 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4523 
4524 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4525 		       sizeof(cfm), &cfm);
4526 }
4527 
4528 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4529 					 u16 icid)
4530 {
4531 	struct l2cap_move_chan_cfm_rsp rsp;
4532 
4533 	BT_DBG("icid 0x%4.4x", icid);
4534 
4535 	rsp.icid = cpu_to_le16(icid);
4536 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4537 }
4538 
4539 static void __release_logical_link(struct l2cap_chan *chan)
4540 {
4541 	chan->hs_hchan = NULL;
4542 	chan->hs_hcon = NULL;
4543 
4544 	/* Placeholder - release the logical link */
4545 }
4546 
4547 static void l2cap_logical_fail(struct l2cap_chan *chan)
4548 {
4549 	/* Logical link setup failed */
4550 	if (chan->state != BT_CONNECTED) {
4551 		/* Create channel failure, disconnect */
4552 		l2cap_send_disconn_req(chan, ECONNRESET);
4553 		return;
4554 	}
4555 
4556 	switch (chan->move_role) {
4557 	case L2CAP_MOVE_ROLE_RESPONDER:
4558 		l2cap_move_done(chan);
4559 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4560 		break;
4561 	case L2CAP_MOVE_ROLE_INITIATOR:
4562 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4563 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4564 			/* Remote has only sent pending or
4565 			 * success responses, clean up
4566 			 */
4567 			l2cap_move_done(chan);
4568 		}
4569 
4570 		/* Other amp move states imply that the move
4571 		 * has already aborted
4572 		 */
4573 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4574 		break;
4575 	}
4576 }
4577 
4578 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4579 					struct hci_chan *hchan)
4580 {
4581 	struct l2cap_conf_rsp rsp;
4582 
4583 	chan->hs_hchan = hchan;
4584 	chan->hs_hcon->l2cap_data = chan->conn;
4585 
4586 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4587 
4588 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4589 		int err;
4590 
4591 		set_default_fcs(chan);
4592 
4593 		err = l2cap_ertm_init(chan);
4594 		if (err < 0)
4595 			l2cap_send_disconn_req(chan, -err);
4596 		else
4597 			l2cap_chan_ready(chan);
4598 	}
4599 }
4600 
4601 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4602 				      struct hci_chan *hchan)
4603 {
4604 	chan->hs_hcon = hchan->conn;
4605 	chan->hs_hcon->l2cap_data = chan->conn;
4606 
4607 	BT_DBG("move_state %d", chan->move_state);
4608 
4609 	switch (chan->move_state) {
4610 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4611 		/* Move confirm will be sent after a success
4612 		 * response is received
4613 		 */
4614 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4615 		break;
4616 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4617 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4618 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4619 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4620 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4621 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4622 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4623 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4624 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4625 		}
4626 		break;
4627 	default:
4628 		/* Move was not in expected state, free the channel */
4629 		__release_logical_link(chan);
4630 
4631 		chan->move_state = L2CAP_MOVE_STABLE;
4632 	}
4633 }
4634 
4635 /* Call with chan locked */
4636 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4637 		       u8 status)
4638 {
4639 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4640 
4641 	if (status) {
4642 		l2cap_logical_fail(chan);
4643 		__release_logical_link(chan);
4644 		return;
4645 	}
4646 
4647 	if (chan->state != BT_CONNECTED) {
4648 		/* Ignore logical link if channel is on BR/EDR */
4649 		if (chan->local_amp_id != AMP_ID_BREDR)
4650 			l2cap_logical_finish_create(chan, hchan);
4651 	} else {
4652 		l2cap_logical_finish_move(chan, hchan);
4653 	}
4654 }
4655 
4656 void l2cap_move_start(struct l2cap_chan *chan)
4657 {
4658 	BT_DBG("chan %p", chan);
4659 
4660 	if (chan->local_amp_id == AMP_ID_BREDR) {
4661 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4662 			return;
4663 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4664 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4665 		/* Placeholder - start physical link setup */
4666 	} else {
4667 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4668 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4669 		chan->move_id = 0;
4670 		l2cap_move_setup(chan);
4671 		l2cap_send_move_chan_req(chan, 0);
4672 	}
4673 }
4674 
4675 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4676 			    u8 local_amp_id, u8 remote_amp_id)
4677 {
4678 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4679 	       local_amp_id, remote_amp_id);
4680 
4681 	chan->fcs = L2CAP_FCS_NONE;
4682 
4683 	/* Outgoing channel on AMP */
4684 	if (chan->state == BT_CONNECT) {
4685 		if (result == L2CAP_CR_SUCCESS) {
4686 			chan->local_amp_id = local_amp_id;
4687 			l2cap_send_create_chan_req(chan, remote_amp_id);
4688 		} else {
4689 			/* Revert to BR/EDR connect */
4690 			l2cap_send_conn_req(chan);
4691 		}
4692 
4693 		return;
4694 	}
4695 
4696 	/* Incoming channel on AMP */
4697 	if (__l2cap_no_conn_pending(chan)) {
4698 		struct l2cap_conn_rsp rsp;
4699 		char buf[128];
4700 		rsp.scid = cpu_to_le16(chan->dcid);
4701 		rsp.dcid = cpu_to_le16(chan->scid);
4702 
4703 		if (result == L2CAP_CR_SUCCESS) {
4704 			/* Send successful response */
4705 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4706 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4707 		} else {
4708 			/* Send negative response */
4709 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4710 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4711 		}
4712 
4713 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4714 			       sizeof(rsp), &rsp);
4715 
4716 		if (result == L2CAP_CR_SUCCESS) {
4717 			l2cap_state_change(chan, BT_CONFIG);
4718 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4719 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4720 				       L2CAP_CONF_REQ,
4721 				       l2cap_build_conf_req(chan, buf), buf);
4722 			chan->num_conf_req++;
4723 		}
4724 	}
4725 }
4726 
4727 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4728 				   u8 remote_amp_id)
4729 {
4730 	l2cap_move_setup(chan);
4731 	chan->move_id = local_amp_id;
4732 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4733 
4734 	l2cap_send_move_chan_req(chan, remote_amp_id);
4735 }
4736 
4737 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4738 {
4739 	struct hci_chan *hchan = NULL;
4740 
4741 	/* Placeholder - get hci_chan for logical link */
4742 
4743 	if (hchan) {
4744 		if (hchan->state == BT_CONNECTED) {
4745 			/* Logical link is ready to go */
4746 			chan->hs_hcon = hchan->conn;
4747 			chan->hs_hcon->l2cap_data = chan->conn;
4748 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4749 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4750 
4751 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4752 		} else {
4753 			/* Wait for logical link to be ready */
4754 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4755 		}
4756 	} else {
4757 		/* Logical link not available */
4758 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4759 	}
4760 }
4761 
4762 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4763 {
4764 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4765 		u8 rsp_result;
4766 		if (result == -EINVAL)
4767 			rsp_result = L2CAP_MR_BAD_ID;
4768 		else
4769 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4770 
4771 		l2cap_send_move_chan_rsp(chan, rsp_result);
4772 	}
4773 
4774 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4775 	chan->move_state = L2CAP_MOVE_STABLE;
4776 
4777 	/* Restart data transmission */
4778 	l2cap_ertm_send(chan);
4779 }
4780 
4781 /* Invoke with locked chan */
4782 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4783 {
4784 	u8 local_amp_id = chan->local_amp_id;
4785 	u8 remote_amp_id = chan->remote_amp_id;
4786 
4787 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4788 	       chan, result, local_amp_id, remote_amp_id);
4789 
4790 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4791 		l2cap_chan_unlock(chan);
4792 		return;
4793 	}
4794 
4795 	if (chan->state != BT_CONNECTED) {
4796 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4797 	} else if (result != L2CAP_MR_SUCCESS) {
4798 		l2cap_do_move_cancel(chan, result);
4799 	} else {
4800 		switch (chan->move_role) {
4801 		case L2CAP_MOVE_ROLE_INITIATOR:
4802 			l2cap_do_move_initiate(chan, local_amp_id,
4803 					       remote_amp_id);
4804 			break;
4805 		case L2CAP_MOVE_ROLE_RESPONDER:
4806 			l2cap_do_move_respond(chan, result);
4807 			break;
4808 		default:
4809 			l2cap_do_move_cancel(chan, result);
4810 			break;
4811 		}
4812 	}
4813 }
4814 
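/* Handle a Move Channel Request.  The channel and destination controller
 * are validated, move collisions are resolved by comparing BD_ADDRs, and
 * a Move Channel Response with the resulting status is sent back.
 */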
4815 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4816 					 struct l2cap_cmd_hdr *cmd,
4817 					 u16 cmd_len, void *data)
4818 {
4819 	struct l2cap_move_chan_req *req = data;
4820 	struct l2cap_move_chan_rsp rsp;
4821 	struct l2cap_chan *chan;
4822 	u16 icid = 0;
4823 	u16 result = L2CAP_MR_NOT_ALLOWED;
4824 
4825 	if (cmd_len != sizeof(*req))
4826 		return -EPROTO;
4827 
4828 	icid = le16_to_cpu(req->icid);
4829 
4830 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4831 
4832 	if (!conn->hs_enabled)
4833 		return -EINVAL;
4834 
4835 	chan = l2cap_get_chan_by_dcid(conn, icid);
4836 	if (!chan) {
4837 		rsp.icid = cpu_to_le16(icid);
4838 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4839 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4840 			       sizeof(rsp), &rsp);
4841 		return 0;
4842 	}
4843 
4844 	chan->ident = cmd->ident;
4845 
4846 	if (chan->scid < L2CAP_CID_DYN_START ||
4847 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4848 	    (chan->mode != L2CAP_MODE_ERTM &&
4849 	     chan->mode != L2CAP_MODE_STREAMING)) {
4850 		result = L2CAP_MR_NOT_ALLOWED;
4851 		goto send_move_response;
4852 	}
4853 
4854 	if (chan->local_amp_id == req->dest_amp_id) {
4855 		result = L2CAP_MR_SAME_ID;
4856 		goto send_move_response;
4857 	}
4858 
4859 	if (req->dest_amp_id != AMP_ID_BREDR) {
4860 		struct hci_dev *hdev;
4861 		hdev = hci_dev_get(req->dest_amp_id);
4862 		if (!hdev || hdev->dev_type != HCI_AMP ||
4863 		    !test_bit(HCI_UP, &hdev->flags)) {
4864 			if (hdev)
4865 				hci_dev_put(hdev);
4866 
4867 			result = L2CAP_MR_BAD_ID;
4868 			goto send_move_response;
4869 		}
4870 		hci_dev_put(hdev);
4871 	}
4872 
4873 	/* Detect a move collision.  Only send a collision response
4874 	 * if this side has "lost", otherwise proceed with the move.
4875 	 * The winner has the larger bd_addr.
4876 	 */
4877 	if ((__chan_is_moving(chan) ||
4878 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4879 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4880 		result = L2CAP_MR_COLLISION;
4881 		goto send_move_response;
4882 	}
4883 
4884 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4885 	l2cap_move_setup(chan);
4886 	chan->move_id = req->dest_amp_id;
4887 	icid = chan->dcid;
4888 
4889 	if (req->dest_amp_id == AMP_ID_BREDR) {
4890 		/* Moving to BR/EDR */
4891 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4892 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4893 			result = L2CAP_MR_PEND;
4894 		} else {
4895 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4896 			result = L2CAP_MR_SUCCESS;
4897 		}
4898 	} else {
4899 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4900 		/* Placeholder - uncomment when amp functions are available */
4901 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4902 		result = L2CAP_MR_PEND;
4903 	}
4904 
4905 send_move_response:
4906 	l2cap_send_move_chan_rsp(chan, result);
4907 
4908 	l2cap_chan_unlock(chan);
4909 
4910 	return 0;
4911 }
4912 
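/* Continue an in-progress move after a success or pending Move Channel
 * Response, advancing the move state machine and sending a confirmation
 * once both the response and the logical link are ready.
 */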
4913 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4914 {
4915 	struct l2cap_chan *chan;
4916 	struct hci_chan *hchan = NULL;
4917 
4918 	chan = l2cap_get_chan_by_scid(conn, icid);
4919 	if (!chan) {
4920 		l2cap_send_move_chan_cfm_icid(conn, icid);
4921 		return;
4922 	}
4923 
4924 	__clear_chan_timer(chan);
4925 	if (result == L2CAP_MR_PEND)
4926 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4927 
4928 	switch (chan->move_state) {
4929 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4930 		/* Move confirm will be sent when logical link
4931 		 * is complete.
4932 		 */
4933 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4934 		break;
4935 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4936 		if (result == L2CAP_MR_PEND) {
4937 			break;
4938 		} else if (test_bit(CONN_LOCAL_BUSY,
4939 				    &chan->conn_state)) {
4940 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4941 		} else {
4942 			/* Logical link is up or moving to BR/EDR,
4943 			 * proceed with move
4944 			 */
4945 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4946 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4947 		}
4948 		break;
4949 	case L2CAP_MOVE_WAIT_RSP:
4950 		/* Moving to AMP */
4951 		if (result == L2CAP_MR_SUCCESS) {
4952 			/* Remote is ready, send confirm immediately
4953 			 * after logical link is ready
4954 			 */
4955 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4956 		} else {
4957 			/* Both logical link and move success
4958 			 * are required to confirm
4959 			 */
4960 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4961 		}
4962 
4963 		/* Placeholder - get hci_chan for logical link */
4964 		if (!hchan) {
4965 			/* Logical link not available */
4966 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4967 			break;
4968 		}
4969 
4970 		/* If the logical link is not yet connected, do not
4971 		 * send confirmation.
4972 		 */
4973 		if (hchan->state != BT_CONNECTED)
4974 			break;
4975 
4976 		/* Logical link is already ready to go */
4977 
4978 		chan->hs_hcon = hchan->conn;
4979 		chan->hs_hcon->l2cap_data = chan->conn;
4980 
4981 		if (result == L2CAP_MR_SUCCESS) {
4982 			/* Can confirm now */
4983 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4984 		} else {
4985 			/* Now only need move success
4986 			 * to confirm
4987 			 */
4988 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4989 		}
4990 
4991 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4992 		break;
4993 	default:
4994 		/* Any other amp move state means the move failed. */
4995 		chan->move_id = chan->local_amp_id;
4996 		l2cap_move_done(chan);
4997 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4998 	}
4999 
5000 	l2cap_chan_unlock(chan);
5001 }
5002 
5003 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5004 			    u16 result)
5005 {
5006 	struct l2cap_chan *chan;
5007 
5008 	chan = l2cap_get_chan_by_ident(conn, ident);
5009 	if (!chan) {
5010 		/* Could not locate channel, icid is best guess */
5011 		l2cap_send_move_chan_cfm_icid(conn, icid);
5012 		return;
5013 	}
5014 
5015 	__clear_chan_timer(chan);
5016 
5017 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5018 		if (result == L2CAP_MR_COLLISION) {
5019 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5020 		} else {
5021 			/* Cleanup - cancel move */
5022 			chan->move_id = chan->local_amp_id;
5023 			l2cap_move_done(chan);
5024 		}
5025 	}
5026 
5027 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5028 
5029 	l2cap_chan_unlock(chan);
5030 }
5031 
5032 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5033 				  struct l2cap_cmd_hdr *cmd,
5034 				  u16 cmd_len, void *data)
5035 {
5036 	struct l2cap_move_chan_rsp *rsp = data;
5037 	u16 icid, result;
5038 
5039 	if (cmd_len != sizeof(*rsp))
5040 		return -EPROTO;
5041 
5042 	icid = le16_to_cpu(rsp->icid);
5043 	result = le16_to_cpu(rsp->result);
5044 
5045 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5046 
5047 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5048 		l2cap_move_continue(conn, icid, result);
5049 	else
5050 		l2cap_move_fail(conn, cmd->ident, icid, result);
5051 
5052 	return 0;
5053 }
5054 
5055 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5056 				      struct l2cap_cmd_hdr *cmd,
5057 				      u16 cmd_len, void *data)
5058 {
5059 	struct l2cap_move_chan_cfm *cfm = data;
5060 	struct l2cap_chan *chan;
5061 	u16 icid, result;
5062 
5063 	if (cmd_len != sizeof(*cfm))
5064 		return -EPROTO;
5065 
5066 	icid = le16_to_cpu(cfm->icid);
5067 	result = le16_to_cpu(cfm->result);
5068 
5069 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5070 
5071 	chan = l2cap_get_chan_by_dcid(conn, icid);
5072 	if (!chan) {
5073 		/* Spec requires a response even if the icid was not found */
5074 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5075 		return 0;
5076 	}
5077 
5078 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5079 		if (result == L2CAP_MC_CONFIRMED) {
5080 			chan->local_amp_id = chan->move_id;
5081 			if (chan->local_amp_id == AMP_ID_BREDR)
5082 				__release_logical_link(chan);
5083 		} else {
5084 			chan->move_id = chan->local_amp_id;
5085 		}
5086 
5087 		l2cap_move_done(chan);
5088 	}
5089 
5090 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5091 
5092 	l2cap_chan_unlock(chan);
5093 
5094 	return 0;
5095 }
5096 
5097 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5098 						 struct l2cap_cmd_hdr *cmd,
5099 						 u16 cmd_len, void *data)
5100 {
5101 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5102 	struct l2cap_chan *chan;
5103 	u16 icid;
5104 
5105 	if (cmd_len != sizeof(*rsp))
5106 		return -EPROTO;
5107 
5108 	icid = le16_to_cpu(rsp->icid);
5109 
5110 	BT_DBG("icid 0x%4.4x", icid);
5111 
5112 	chan = l2cap_get_chan_by_scid(conn, icid);
5113 	if (!chan)
5114 		return 0;
5115 
5116 	__clear_chan_timer(chan);
5117 
5118 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5119 		chan->local_amp_id = chan->move_id;
5120 
5121 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5122 			__release_logical_link(chan);
5123 
5124 		l2cap_move_done(chan);
5125 	}
5126 
5127 	l2cap_chan_unlock(chan);
5128 
5129 	return 0;
5130 }
5131 
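/* Validate requested LE connection parameters.  min/max are in 1.25 ms
 * units and to_multiplier (the supervision timeout) is in 10 ms units,
 * so "max >= to_multiplier * 8" rejects any interval that does not fit
 * within the timeout, and (to_multiplier * 8 / max) - 1 is the largest
 * slave latency that still allows the timeout to be met.
 */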
5132 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5133 					 u16 to_multiplier)
5134 {
5135 	u16 max_latency;
5136 
5137 	if (min > max || min < 6 || max > 3200)
5138 		return -EINVAL;
5139 
5140 	if (to_multiplier < 10 || to_multiplier > 3200)
5141 		return -EINVAL;
5142 
5143 	if (max >= to_multiplier * 8)
5144 		return -EINVAL;
5145 
5146 	max_latency = (to_multiplier * 8 / max) - 1;
5147 	if (latency > 499 || latency > max_latency)
5148 		return -EINVAL;
5149 
5150 	return 0;
5151 }
5152 
5153 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5154 					      struct l2cap_cmd_hdr *cmd,
5155 					      u8 *data)
5156 {
5157 	struct hci_conn *hcon = conn->hcon;
5158 	struct l2cap_conn_param_update_req *req;
5159 	struct l2cap_conn_param_update_rsp rsp;
5160 	u16 min, max, latency, to_multiplier, cmd_len;
5161 	int err;
5162 
5163 	if (!(hcon->link_mode & HCI_LM_MASTER))
5164 		return -EINVAL;
5165 
5166 	cmd_len = __le16_to_cpu(cmd->len);
5167 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5168 		return -EPROTO;
5169 
5170 	req = (struct l2cap_conn_param_update_req *) data;
5171 	min		= __le16_to_cpu(req->min);
5172 	max		= __le16_to_cpu(req->max);
5173 	latency		= __le16_to_cpu(req->latency);
5174 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5175 
5176 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5177 	       min, max, latency, to_multiplier);
5178 
5179 	memset(&rsp, 0, sizeof(rsp));
5180 
5181 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5182 	if (err)
5183 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5184 	else
5185 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5186 
5187 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5188 		       sizeof(rsp), &rsp);
5189 
5190 	if (!err)
5191 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5192 
5193 	return 0;
5194 }
5195 
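/* Dispatch a single BR/EDR signaling command to its handler. */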
5196 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5197 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5198 				      u8 *data)
5199 {
5200 	int err = 0;
5201 
5202 	switch (cmd->code) {
5203 	case L2CAP_COMMAND_REJ:
5204 		l2cap_command_rej(conn, cmd, cmd_len, data);
5205 		break;
5206 
5207 	case L2CAP_CONN_REQ:
5208 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5209 		break;
5210 
5211 	case L2CAP_CONN_RSP:
5212 	case L2CAP_CREATE_CHAN_RSP:
5213 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5214 		break;
5215 
5216 	case L2CAP_CONF_REQ:
5217 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5218 		break;
5219 
5220 	case L2CAP_CONF_RSP:
5221 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5222 		break;
5223 
5224 	case L2CAP_DISCONN_REQ:
5225 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5226 		break;
5227 
5228 	case L2CAP_DISCONN_RSP:
5229 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5230 		break;
5231 
5232 	case L2CAP_ECHO_REQ:
5233 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5234 		break;
5235 
5236 	case L2CAP_ECHO_RSP:
5237 		break;
5238 
5239 	case L2CAP_INFO_REQ:
5240 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5241 		break;
5242 
5243 	case L2CAP_INFO_RSP:
5244 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5245 		break;
5246 
5247 	case L2CAP_CREATE_CHAN_REQ:
5248 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5249 		break;
5250 
5251 	case L2CAP_MOVE_CHAN_REQ:
5252 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5253 		break;
5254 
5255 	case L2CAP_MOVE_CHAN_RSP:
5256 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5257 		break;
5258 
5259 	case L2CAP_MOVE_CHAN_CFM:
5260 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5261 		break;
5262 
5263 	case L2CAP_MOVE_CHAN_CFM_RSP:
5264 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5265 		break;
5266 
5267 	default:
5268 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5269 		err = -EINVAL;
5270 		break;
5271 	}
5272 
5273 	return err;
5274 }
5275 
5276 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5277 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5278 {
5279 	switch (cmd->code) {
5280 	case L2CAP_COMMAND_REJ:
5281 		return 0;
5282 
5283 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5284 		return l2cap_conn_param_update_req(conn, cmd, data);
5285 
5286 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5287 		return 0;
5288 
5289 	default:
5290 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5291 		return -EINVAL;
5292 	}
5293 }
5294 
5295 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5296 					struct sk_buff *skb)
5297 {
5298 	struct hci_conn *hcon = conn->hcon;
5299 	struct l2cap_cmd_hdr *cmd;
5300 	u16 len;
5301 	int err;
5302 
5303 	if (hcon->type != LE_LINK)
5304 		goto drop;
5305 
5306 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5307 		goto drop;
5308 
5309 	cmd = (void *) skb->data;
5310 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5311 
5312 	len = le16_to_cpu(cmd->len);
5313 
5314 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5315 
5316 	if (len != skb->len || !cmd->ident) {
5317 		BT_DBG("corrupted command");
5318 		goto drop;
5319 	}
5320 
5321 	err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5322 	if (err) {
5323 		struct l2cap_cmd_rej_unk rej;
5324 
5325 		BT_ERR("Wrong link type (%d)", err);
5326 
5327 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5328 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5329 			       sizeof(rej), &rej);
5330 	}
5331 
5332 drop:
5333 	kfree_skb(skb);
5334 }
5335 
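/* Process the BR/EDR signaling channel: iterate over every command in
 * the frame, dispatch it, and answer failures with a "not understood"
 * Command Reject.
 */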
5336 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5337 				     struct sk_buff *skb)
5338 {
5339 	struct hci_conn *hcon = conn->hcon;
5340 	u8 *data = skb->data;
5341 	int len = skb->len;
5342 	struct l2cap_cmd_hdr cmd;
5343 	int err;
5344 
5345 	l2cap_raw_recv(conn, skb);
5346 
5347 	if (hcon->type != ACL_LINK)
5348 		goto drop;
5349 
5350 	while (len >= L2CAP_CMD_HDR_SIZE) {
5351 		u16 cmd_len;
5352 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5353 		data += L2CAP_CMD_HDR_SIZE;
5354 		len  -= L2CAP_CMD_HDR_SIZE;
5355 
5356 		cmd_len = le16_to_cpu(cmd.len);
5357 
5358 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5359 		       cmd.ident);
5360 
5361 		if (cmd_len > len || !cmd.ident) {
5362 			BT_DBG("corrupted command");
5363 			break;
5364 		}
5365 
5366 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5367 		if (err) {
5368 			struct l2cap_cmd_rej_unk rej;
5369 
5370 			BT_ERR("Wrong link type (%d)", err);
5371 
5372 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5373 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5374 				       sizeof(rej), &rej);
5375 		}
5376 
5377 		data += cmd_len;
5378 		len  -= cmd_len;
5379 	}
5380 
5381 drop:
5382 	kfree_skb(skb);
5383 }
5384 
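/* When FCS is in use on the channel, strip the trailing CRC16 from the
 * frame and verify it against a locally computed checksum.
 */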
5385 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5386 {
5387 	u16 our_fcs, rcv_fcs;
5388 	int hdr_size;
5389 
5390 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5391 		hdr_size = L2CAP_EXT_HDR_SIZE;
5392 	else
5393 		hdr_size = L2CAP_ENH_HDR_SIZE;
5394 
5395 	if (chan->fcs == L2CAP_FCS_CRC16) {
5396 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5397 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5398 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5399 
5400 		if (our_fcs != rcv_fcs)
5401 			return -EBADMSG;
5402 	}
5403 	return 0;
5404 }
5405 
5406 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5407 {
5408 	struct l2cap_ctrl control;
5409 
5410 	BT_DBG("chan %p", chan);
5411 
5412 	memset(&control, 0, sizeof(control));
5413 	control.sframe = 1;
5414 	control.final = 1;
5415 	control.reqseq = chan->buffer_seq;
5416 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5417 
5418 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5419 		control.super = L2CAP_SUPER_RNR;
5420 		l2cap_send_sframe(chan, &control);
5421 	}
5422 
5423 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5424 	    chan->unacked_frames > 0)
5425 		__set_retrans_timer(chan);
5426 
5427 	/* Send pending iframes */
5428 	l2cap_ertm_send(chan);
5429 
5430 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5431 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5432 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5433 		 * send it now.
5434 		 */
5435 		control.super = L2CAP_SUPER_RR;
5436 		l2cap_send_sframe(chan, &control);
5437 	}
5438 }
5439 
5440 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5441 			    struct sk_buff **last_frag)
5442 {
5443 	/* skb->len reflects data in skb as well as all fragments
5444 	 * skb->data_len reflects only data in fragments
5445 	 */
5446 	if (!skb_has_frag_list(skb))
5447 		skb_shinfo(skb)->frag_list = new_frag;
5448 
5449 	new_frag->next = NULL;
5450 
5451 	(*last_frag)->next = new_frag;
5452 	*last_frag = new_frag;
5453 
5454 	skb->len += new_frag->len;
5455 	skb->data_len += new_frag->len;
5456 	skb->truesize += new_frag->truesize;
5457 }
5458 
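/* Reassemble an SDU according to the SAR field: unsegmented frames are
 * delivered directly, start/continuation fragments are collected in
 * chan->sdu, and the completed SDU is pushed up on the end fragment.
 * Any error discards the partially reassembled SDU.
 */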
5459 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5460 				struct l2cap_ctrl *control)
5461 {
5462 	int err = -EINVAL;
5463 
5464 	switch (control->sar) {
5465 	case L2CAP_SAR_UNSEGMENTED:
5466 		if (chan->sdu)
5467 			break;
5468 
5469 		err = chan->ops->recv(chan, skb);
5470 		break;
5471 
5472 	case L2CAP_SAR_START:
5473 		if (chan->sdu)
5474 			break;
5475 
5476 		chan->sdu_len = get_unaligned_le16(skb->data);
5477 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5478 
5479 		if (chan->sdu_len > chan->imtu) {
5480 			err = -EMSGSIZE;
5481 			break;
5482 		}
5483 
5484 		if (skb->len >= chan->sdu_len)
5485 			break;
5486 
5487 		chan->sdu = skb;
5488 		chan->sdu_last_frag = skb;
5489 
5490 		skb = NULL;
5491 		err = 0;
5492 		break;
5493 
5494 	case L2CAP_SAR_CONTINUE:
5495 		if (!chan->sdu)
5496 			break;
5497 
5498 		append_skb_frag(chan->sdu, skb,
5499 				&chan->sdu_last_frag);
5500 		skb = NULL;
5501 
5502 		if (chan->sdu->len >= chan->sdu_len)
5503 			break;
5504 
5505 		err = 0;
5506 		break;
5507 
5508 	case L2CAP_SAR_END:
5509 		if (!chan->sdu)
5510 			break;
5511 
5512 		append_skb_frag(chan->sdu, skb,
5513 				&chan->sdu_last_frag);
5514 		skb = NULL;
5515 
5516 		if (chan->sdu->len != chan->sdu_len)
5517 			break;
5518 
5519 		err = chan->ops->recv(chan, chan->sdu);
5520 
5521 		if (!err) {
5522 			/* Reassembly complete */
5523 			chan->sdu = NULL;
5524 			chan->sdu_last_frag = NULL;
5525 			chan->sdu_len = 0;
5526 		}
5527 		break;
5528 	}
5529 
5530 	if (err) {
5531 		kfree_skb(skb);
5532 		kfree_skb(chan->sdu);
5533 		chan->sdu = NULL;
5534 		chan->sdu_last_frag = NULL;
5535 		chan->sdu_len = 0;
5536 	}
5537 
5538 	return err;
5539 }
5540 
5541 static int l2cap_resegment(struct l2cap_chan *chan)
5542 {
5543 	/* Placeholder */
5544 	return 0;
5545 }
5546 
5547 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5548 {
5549 	u8 event;
5550 
5551 	if (chan->mode != L2CAP_MODE_ERTM)
5552 		return;
5553 
5554 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5555 	l2cap_tx(chan, NULL, NULL, event);
5556 }
5557 
5558 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5559 {
5560 	int err = 0;
5561 	/* Pass sequential frames to l2cap_reassemble_sdu()
5562 	 * until a gap is encountered.
5563 	 */
5564 
5565 	BT_DBG("chan %p", chan);
5566 
5567 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5568 		struct sk_buff *skb;
5569 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5570 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5571 
5572 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5573 
5574 		if (!skb)
5575 			break;
5576 
5577 		skb_unlink(skb, &chan->srej_q);
5578 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5579 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5580 		if (err)
5581 			break;
5582 	}
5583 
5584 	if (skb_queue_empty(&chan->srej_q)) {
5585 		chan->rx_state = L2CAP_RX_STATE_RECV;
5586 		l2cap_send_ack(chan);
5587 	}
5588 
5589 	return err;
5590 }
5591 
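/* Handle a received SREJ: retransmit the single requested I-frame unless
 * the request is invalid or the retry limit was hit, in which case the
 * channel is disconnected.
 */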
5592 static void l2cap_handle_srej(struct l2cap_chan *chan,
5593 			      struct l2cap_ctrl *control)
5594 {
5595 	struct sk_buff *skb;
5596 
5597 	BT_DBG("chan %p, control %p", chan, control);
5598 
5599 	if (control->reqseq == chan->next_tx_seq) {
5600 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5601 		l2cap_send_disconn_req(chan, ECONNRESET);
5602 		return;
5603 	}
5604 
5605 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5606 
5607 	if (skb == NULL) {
5608 		BT_DBG("Seq %d not available for retransmission",
5609 		       control->reqseq);
5610 		return;
5611 	}
5612 
5613 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5614 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5615 		l2cap_send_disconn_req(chan, ECONNRESET);
5616 		return;
5617 	}
5618 
5619 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5620 
5621 	if (control->poll) {
5622 		l2cap_pass_to_tx(chan, control);
5623 
5624 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5625 		l2cap_retransmit(chan, control);
5626 		l2cap_ertm_send(chan);
5627 
5628 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5629 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5630 			chan->srej_save_reqseq = control->reqseq;
5631 		}
5632 	} else {
5633 		l2cap_pass_to_tx_fbit(chan, control);
5634 
5635 		if (control->final) {
5636 			if (chan->srej_save_reqseq != control->reqseq ||
5637 			    !test_and_clear_bit(CONN_SREJ_ACT,
5638 						&chan->conn_state))
5639 				l2cap_retransmit(chan, control);
5640 		} else {
5641 			l2cap_retransmit(chan, control);
5642 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5643 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5644 				chan->srej_save_reqseq = control->reqseq;
5645 			}
5646 		}
5647 	}
5648 }
5649 
5650 static void l2cap_handle_rej(struct l2cap_chan *chan,
5651 			     struct l2cap_ctrl *control)
5652 {
5653 	struct sk_buff *skb;
5654 
5655 	BT_DBG("chan %p, control %p", chan, control);
5656 
5657 	if (control->reqseq == chan->next_tx_seq) {
5658 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5659 		l2cap_send_disconn_req(chan, ECONNRESET);
5660 		return;
5661 	}
5662 
5663 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5664 
5665 	if (chan->max_tx && skb &&
5666 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5667 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5668 		l2cap_send_disconn_req(chan, ECONNRESET);
5669 		return;
5670 	}
5671 
5672 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5673 
5674 	l2cap_pass_to_tx(chan, control);
5675 
5676 	if (control->final) {
5677 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5678 			l2cap_retransmit_all(chan, control);
5679 	} else {
5680 		l2cap_retransmit_all(chan, control);
5681 		l2cap_ertm_send(chan);
5682 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5683 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5684 	}
5685 }
5686 
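/* Classify the sequence number of a received I-frame relative to the
 * current receive state: expected, duplicate, unexpected (a gap that
 * needs an SREJ), a reply to an earlier SREJ, or invalid.
 */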
5687 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5688 {
5689 	BT_DBG("chan %p, txseq %d", chan, txseq);
5690 
5691 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5692 	       chan->expected_tx_seq);
5693 
5694 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5695 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5696 		    chan->tx_win) {
5697 			/* See notes below regarding "double poll" and
5698 			 * invalid packets.
5699 			 */
5700 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5701 				BT_DBG("Invalid/Ignore - after SREJ");
5702 				return L2CAP_TXSEQ_INVALID_IGNORE;
5703 			} else {
5704 				BT_DBG("Invalid - in window after SREJ sent");
5705 				return L2CAP_TXSEQ_INVALID;
5706 			}
5707 		}
5708 
5709 		if (chan->srej_list.head == txseq) {
5710 			BT_DBG("Expected SREJ");
5711 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5712 		}
5713 
5714 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5715 			BT_DBG("Duplicate SREJ - txseq already stored");
5716 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5717 		}
5718 
5719 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5720 			BT_DBG("Unexpected SREJ - not requested");
5721 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5722 		}
5723 	}
5724 
5725 	if (chan->expected_tx_seq == txseq) {
5726 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5727 		    chan->tx_win) {
5728 			BT_DBG("Invalid - txseq outside tx window");
5729 			return L2CAP_TXSEQ_INVALID;
5730 		} else {
5731 			BT_DBG("Expected");
5732 			return L2CAP_TXSEQ_EXPECTED;
5733 		}
5734 	}
5735 
5736 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5737 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5738 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5739 		return L2CAP_TXSEQ_DUPLICATE;
5740 	}
5741 
5742 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5743 		/* A source of invalid packets is a "double poll" condition,
5744 		 * where delays cause us to send multiple poll packets.  If
5745 		 * the remote stack receives and processes both polls,
5746 		 * sequence numbers can wrap around in such a way that a
5747 		 * resent frame has a sequence number that looks like new data
5748 		 * with a sequence gap.  This would trigger an erroneous SREJ
5749 		 * request.
5750 		 *
5751 		 * Fortunately, this is impossible with a tx window that's
5752 		 * no more than half of the sequence-number space, which allows
5753 		 * invalid frames to be safely ignored.
5754 		 *
5755 		 * With a tx window larger than half of the sequence-number
5756 		 * space, the frame is invalid and cannot be ignored.  This
5757 		 * causes a disconnect.
5758 		 */
5759 
5760 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5761 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5762 			return L2CAP_TXSEQ_INVALID_IGNORE;
5763 		} else {
5764 			BT_DBG("Invalid - txseq outside tx window");
5765 			return L2CAP_TXSEQ_INVALID;
5766 		}
5767 	} else {
5768 		BT_DBG("Unexpected - txseq indicates missing frames");
5769 		return L2CAP_TXSEQ_UNEXPECTED;
5770 	}
5771 }
5772 
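/* ERTM receive handler for the default RECV state: deliver in-sequence
 * I-frames, start SREJ recovery when a gap is detected, and react to
 * RR/RNR/REJ/SREJ supervisory frames.
 */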
5773 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5774 			       struct l2cap_ctrl *control,
5775 			       struct sk_buff *skb, u8 event)
5776 {
5777 	int err = 0;
5778 	bool skb_in_use = false;
5779 
5780 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5781 	       event);
5782 
5783 	switch (event) {
5784 	case L2CAP_EV_RECV_IFRAME:
5785 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5786 		case L2CAP_TXSEQ_EXPECTED:
5787 			l2cap_pass_to_tx(chan, control);
5788 
5789 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5790 				BT_DBG("Busy, discarding expected seq %d",
5791 				       control->txseq);
5792 				break;
5793 			}
5794 
5795 			chan->expected_tx_seq = __next_seq(chan,
5796 							   control->txseq);
5797 
5798 			chan->buffer_seq = chan->expected_tx_seq;
5799 			skb_in_use = true;
5800 
5801 			err = l2cap_reassemble_sdu(chan, skb, control);
5802 			if (err)
5803 				break;
5804 
5805 			if (control->final) {
5806 				if (!test_and_clear_bit(CONN_REJ_ACT,
5807 							&chan->conn_state)) {
5808 					control->final = 0;
5809 					l2cap_retransmit_all(chan, control);
5810 					l2cap_ertm_send(chan);
5811 				}
5812 			}
5813 
5814 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5815 				l2cap_send_ack(chan);
5816 			break;
5817 		case L2CAP_TXSEQ_UNEXPECTED:
5818 			l2cap_pass_to_tx(chan, control);
5819 
5820 			/* Can't issue SREJ frames in the local busy state.
5821 			 * Drop this frame, it will be seen as missing
5822 			 * when local busy is exited.
5823 			 */
5824 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5825 				BT_DBG("Busy, discarding unexpected seq %d",
5826 				       control->txseq);
5827 				break;
5828 			}
5829 
5830 			/* There was a gap in the sequence, so an SREJ
5831 			 * must be sent for each missing frame.  The
5832 			 * current frame is stored for later use.
5833 			 */
5834 			skb_queue_tail(&chan->srej_q, skb);
5835 			skb_in_use = true;
5836 			BT_DBG("Queued %p (queue len %d)", skb,
5837 			       skb_queue_len(&chan->srej_q));
5838 
5839 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5840 			l2cap_seq_list_clear(&chan->srej_list);
5841 			l2cap_send_srej(chan, control->txseq);
5842 
5843 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5844 			break;
5845 		case L2CAP_TXSEQ_DUPLICATE:
5846 			l2cap_pass_to_tx(chan, control);
5847 			break;
5848 		case L2CAP_TXSEQ_INVALID_IGNORE:
5849 			break;
5850 		case L2CAP_TXSEQ_INVALID:
5851 		default:
5852 			l2cap_send_disconn_req(chan, ECONNRESET);
5853 			break;
5854 		}
5855 		break;
5856 	case L2CAP_EV_RECV_RR:
5857 		l2cap_pass_to_tx(chan, control);
5858 		if (control->final) {
5859 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5860 
5861 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5862 			    !__chan_is_moving(chan)) {
5863 				control->final = 0;
5864 				l2cap_retransmit_all(chan, control);
5865 			}
5866 
5867 			l2cap_ertm_send(chan);
5868 		} else if (control->poll) {
5869 			l2cap_send_i_or_rr_or_rnr(chan);
5870 		} else {
5871 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5872 					       &chan->conn_state) &&
5873 			    chan->unacked_frames)
5874 				__set_retrans_timer(chan);
5875 
5876 			l2cap_ertm_send(chan);
5877 		}
5878 		break;
5879 	case L2CAP_EV_RECV_RNR:
5880 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5881 		l2cap_pass_to_tx(chan, control);
5882 		if (control && control->poll) {
5883 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5884 			l2cap_send_rr_or_rnr(chan, 0);
5885 		}
5886 		__clear_retrans_timer(chan);
5887 		l2cap_seq_list_clear(&chan->retrans_list);
5888 		break;
5889 	case L2CAP_EV_RECV_REJ:
5890 		l2cap_handle_rej(chan, control);
5891 		break;
5892 	case L2CAP_EV_RECV_SREJ:
5893 		l2cap_handle_srej(chan, control);
5894 		break;
5895 	default:
5896 		break;
5897 	}
5898 
5899 	if (skb && !skb_in_use) {
5900 		BT_DBG("Freeing %p", skb);
5901 		kfree_skb(skb);
5902 	}
5903 
5904 	return err;
5905 }
5906 
5907 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5908 				    struct l2cap_ctrl *control,
5909 				    struct sk_buff *skb, u8 event)
5910 {
5911 	int err = 0;
5912 	u16 txseq = control->txseq;
5913 	bool skb_in_use = false;
5914 
5915 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5916 	       event);
5917 
5918 	switch (event) {
5919 	case L2CAP_EV_RECV_IFRAME:
5920 		switch (l2cap_classify_txseq(chan, txseq)) {
5921 		case L2CAP_TXSEQ_EXPECTED:
5922 			/* Keep frame for reassembly later */
5923 			l2cap_pass_to_tx(chan, control);
5924 			skb_queue_tail(&chan->srej_q, skb);
5925 			skb_in_use = true;
5926 			BT_DBG("Queued %p (queue len %d)", skb,
5927 			       skb_queue_len(&chan->srej_q));
5928 
5929 			chan->expected_tx_seq = __next_seq(chan, txseq);
5930 			break;
5931 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5932 			l2cap_seq_list_pop(&chan->srej_list);
5933 
5934 			l2cap_pass_to_tx(chan, control);
5935 			skb_queue_tail(&chan->srej_q, skb);
5936 			skb_in_use = true;
5937 			BT_DBG("Queued %p (queue len %d)", skb,
5938 			       skb_queue_len(&chan->srej_q));
5939 
5940 			err = l2cap_rx_queued_iframes(chan);
5941 			if (err)
5942 				break;
5943 
5944 			break;
5945 		case L2CAP_TXSEQ_UNEXPECTED:
5946 			/* Got a frame that can't be reassembled yet.
5947 			 * Save it for later, and send SREJs to cover
5948 			 * the missing frames.
5949 			 */
5950 			skb_queue_tail(&chan->srej_q, skb);
5951 			skb_in_use = true;
5952 			BT_DBG("Queued %p (queue len %d)", skb,
5953 			       skb_queue_len(&chan->srej_q));
5954 
5955 			l2cap_pass_to_tx(chan, control);
5956 			l2cap_send_srej(chan, control->txseq);
5957 			break;
5958 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5959 			/* This frame was requested with an SREJ, but
5960 			 * some expected retransmitted frames are
5961 			 * missing.  Request retransmission of missing
5962 			 * SREJ'd frames.
5963 			 */
5964 			skb_queue_tail(&chan->srej_q, skb);
5965 			skb_in_use = true;
5966 			BT_DBG("Queued %p (queue len %d)", skb,
5967 			       skb_queue_len(&chan->srej_q));
5968 
5969 			l2cap_pass_to_tx(chan, control);
5970 			l2cap_send_srej_list(chan, control->txseq);
5971 			break;
5972 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5973 			/* We've already queued this frame.  Drop this copy. */
5974 			l2cap_pass_to_tx(chan, control);
5975 			break;
5976 		case L2CAP_TXSEQ_DUPLICATE:
5977 			/* Expecting a later sequence number, so this frame
5978 			 * was already received.  Ignore it completely.
5979 			 */
5980 			break;
5981 		case L2CAP_TXSEQ_INVALID_IGNORE:
5982 			break;
5983 		case L2CAP_TXSEQ_INVALID:
5984 		default:
5985 			l2cap_send_disconn_req(chan, ECONNRESET);
5986 			break;
5987 		}
5988 		break;
5989 	case L2CAP_EV_RECV_RR:
5990 		l2cap_pass_to_tx(chan, control);
5991 		if (control->final) {
5992 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5993 
5994 			if (!test_and_clear_bit(CONN_REJ_ACT,
5995 						&chan->conn_state)) {
5996 				control->final = 0;
5997 				l2cap_retransmit_all(chan, control);
5998 			}
5999 
6000 			l2cap_ertm_send(chan);
6001 		} else if (control->poll) {
6002 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6003 					       &chan->conn_state) &&
6004 			    chan->unacked_frames) {
6005 				__set_retrans_timer(chan);
6006 			}
6007 
6008 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6009 			l2cap_send_srej_tail(chan);
6010 		} else {
6011 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6012 					       &chan->conn_state) &&
6013 			    chan->unacked_frames)
6014 				__set_retrans_timer(chan);
6015 
6016 			l2cap_send_ack(chan);
6017 		}
6018 		break;
6019 	case L2CAP_EV_RECV_RNR:
6020 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6021 		l2cap_pass_to_tx(chan, control);
6022 		if (control->poll) {
6023 			l2cap_send_srej_tail(chan);
6024 		} else {
6025 			struct l2cap_ctrl rr_control;
6026 			memset(&rr_control, 0, sizeof(rr_control));
6027 			rr_control.sframe = 1;
6028 			rr_control.super = L2CAP_SUPER_RR;
6029 			rr_control.reqseq = chan->buffer_seq;
6030 			l2cap_send_sframe(chan, &rr_control);
6031 		}
6032 
6033 		break;
6034 	case L2CAP_EV_RECV_REJ:
6035 		l2cap_handle_rej(chan, control);
6036 		break;
6037 	case L2CAP_EV_RECV_SREJ:
6038 		l2cap_handle_srej(chan, control);
6039 		break;
6040 	}
6041 
6042 	if (skb && !skb_in_use) {
6043 		BT_DBG("Freeing %p", skb);
6044 		kfree_skb(skb);
6045 	}
6046 
6047 	return err;
6048 }
6049 
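/* Complete a channel move on the receive side: return to the RECV
 * state, adopt the MTU of the controller now carrying the channel,
 * and finish up by resegmenting via l2cap_resegment().
 */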
6050 static int l2cap_finish_move(struct l2cap_chan *chan)
6051 {
6052 	BT_DBG("chan %p", chan);
6053 
6054 	chan->rx_state = L2CAP_RX_STATE_RECV;
6055 
6056 	if (chan->hs_hcon)
6057 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6058 	else
6059 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6060 
6061 	return l2cap_resegment(chan);
6062 }
6063 
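/* WAIT_P state handler for a channel move: wait for a frame with the
 * poll bit set, acknowledge outstanding frames, rewind the transmit
 * sequence to what the remote expects, finish the move and answer
 * with the final bit set.  Anything other than an S-frame with P=1
 * is a protocol error.
 */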
6064 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6065 				 struct l2cap_ctrl *control,
6066 				 struct sk_buff *skb, u8 event)
6067 {
6068 	int err;
6069 
6070 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6071 	       event);
6072 
6073 	if (!control->poll)
6074 		return -EPROTO;
6075 
6076 	l2cap_process_reqseq(chan, control->reqseq);
6077 
6078 	if (!skb_queue_empty(&chan->tx_q))
6079 		chan->tx_send_head = skb_peek(&chan->tx_q);
6080 	else
6081 		chan->tx_send_head = NULL;
6082 
6083 	/* Rewind next_tx_seq to the point expected
6084 	 * by the receiver.
6085 	 */
6086 	chan->next_tx_seq = control->reqseq;
6087 	chan->unacked_frames = 0;
6088 
6089 	err = l2cap_finish_move(chan);
6090 	if (err)
6091 		return err;
6092 
6093 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6094 	l2cap_send_i_or_rr_or_rnr(chan);
6095 
6096 	if (event == L2CAP_EV_RECV_IFRAME)
6097 		return -EPROTO;
6098 
6099 	return l2cap_rx_state_recv(chan, control, NULL, event);
6100 }
6101 
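/* WAIT_F state handler for a channel move: wait for a frame with the
 * final bit set, return to the RECV state, rewind the transmit
 * sequence, adopt the MTU of the controller now carrying the channel,
 * and then process the received frame through the normal RECV handler.
 */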
6102 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6103 				 struct l2cap_ctrl *control,
6104 				 struct sk_buff *skb, u8 event)
6105 {
6106 	int err;
6107 
6108 	if (!control->final)
6109 		return -EPROTO;
6110 
6111 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6112 
6113 	chan->rx_state = L2CAP_RX_STATE_RECV;
6114 	l2cap_process_reqseq(chan, control->reqseq);
6115 
6116 	if (!skb_queue_empty(&chan->tx_q))
6117 		chan->tx_send_head = skb_peek(&chan->tx_q);
6118 	else
6119 		chan->tx_send_head = NULL;
6120 
6121 	/* Rewind next_tx_seq to the point expected
6122 	 * by the receiver.
6123 	 */
6124 	chan->next_tx_seq = control->reqseq;
6125 	chan->unacked_frames = 0;
6126 
6127 	if (chan->hs_hcon)
6128 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6129 	else
6130 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6131 
6132 	err = l2cap_resegment(chan);
6133 
6134 	if (!err)
6135 		err = l2cap_rx_state_recv(chan, control, skb, event);
6136 
6137 	return err;
6138 }
6139 
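/* Worked example (assuming a modulo-64 sequence space): with
 * expected_ack_seq == 10 and next_tx_seq == 14, frames 10..13 are
 * still unacknowledged, so any reqseq from 10 up to and including 14
 * is accepted and everything else is rejected.
 */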
6140 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6141 {
6142 	/* Make sure reqseq is for a packet that has been sent but not acked */
6143 	u16 unacked;
6144 
6145 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6146 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6147 }
6148 
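/* Top-level ERTM receive entry point: validate the acknowledgement
 * sequence number and dispatch the event to the handler for the
 * current receive state.  An invalid reqseq triggers a disconnect
 * of the channel.
 */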
6149 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6150 		    struct sk_buff *skb, u8 event)
6151 {
6152 	int err = 0;
6153 
6154 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6155 	       control, skb, event, chan->rx_state);
6156 
6157 	if (__valid_reqseq(chan, control->reqseq)) {
6158 		switch (chan->rx_state) {
6159 		case L2CAP_RX_STATE_RECV:
6160 			err = l2cap_rx_state_recv(chan, control, skb, event);
6161 			break;
6162 		case L2CAP_RX_STATE_SREJ_SENT:
6163 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6164 						       event);
6165 			break;
6166 		case L2CAP_RX_STATE_WAIT_P:
6167 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6168 			break;
6169 		case L2CAP_RX_STATE_WAIT_F:
6170 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6171 			break;
6172 		default:
6173 			/* shut it down */
6174 			break;
6175 		}
6176 	} else {
6177 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6178 		       control->reqseq, chan->next_tx_seq,
6179 		       chan->expected_ack_seq);
6180 		l2cap_send_disconn_req(chan, ECONNRESET);
6181 	}
6182 
6183 	return err;
6184 }
6185 
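/* Streaming mode receive path: in-sequence I-frames are reassembled
 * into SDUs, while any gap in the sequence discards the partially
 * reassembled SDU and the out-of-sequence frame, since streaming
 * mode never retransmits.
 */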
6186 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6187 			   struct sk_buff *skb)
6188 {
6189 	int err = 0;
6190 
6191 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6192 	       chan->rx_state);
6193 
6194 	if (l2cap_classify_txseq(chan, control->txseq) ==
6195 	    L2CAP_TXSEQ_EXPECTED) {
6196 		l2cap_pass_to_tx(chan, control);
6197 
6198 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6199 		       __next_seq(chan, chan->buffer_seq));
6200 
6201 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6202 
6203 		l2cap_reassemble_sdu(chan, skb, control);
6204 	} else {
6205 		if (chan->sdu) {
6206 			kfree_skb(chan->sdu);
6207 			chan->sdu = NULL;
6208 		}
6209 		chan->sdu_last_frag = NULL;
6210 		chan->sdu_len = 0;
6211 
6212 		if (skb) {
6213 			BT_DBG("Freeing %p", skb);
6214 			kfree_skb(skb);
6215 		}
6216 	}
6217 
6218 	chan->last_acked_seq = control->txseq;
6219 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6220 
6221 	return err;
6222 }
6223 
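/* Common receive path for ERTM and streaming channels: unpack the
 * control field, verify the FCS and the payload length against the
 * MPS, then route I-frames and S-frames to the appropriate state
 * machine.  Protocol violations disconnect the channel.
 */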
6224 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6225 {
6226 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6227 	u16 len;
6228 	u8 event;
6229 
6230 	__unpack_control(chan, skb);
6231 
6232 	len = skb->len;
6233 
6234 	/*
6235 	 * The receive state machine will see it as missing, start
6236 	 * the normal recovery procedure and ask for retransmission.
6237 	 * procedures and ask for retransmission.
6238 	 */
6239 	if (l2cap_check_fcs(chan, skb))
6240 		goto drop;
6241 
6242 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6243 		len -= L2CAP_SDULEN_SIZE;
6244 
6245 	if (chan->fcs == L2CAP_FCS_CRC16)
6246 		len -= L2CAP_FCS_SIZE;
6247 
6248 	if (len > chan->mps) {
6249 		l2cap_send_disconn_req(chan, ECONNRESET);
6250 		goto drop;
6251 	}
6252 
6253 	if (!control->sframe) {
6254 		int err;
6255 
6256 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6257 		       control->sar, control->reqseq, control->final,
6258 		       control->txseq);
6259 
6260 		/* Validate F-bit - F=0 always valid, F=1 only
6261 		 * valid in TX WAIT_F
6262 		 */
6263 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6264 			goto drop;
6265 
6266 		if (chan->mode != L2CAP_MODE_STREAMING) {
6267 			event = L2CAP_EV_RECV_IFRAME;
6268 			err = l2cap_rx(chan, control, skb, event);
6269 		} else {
6270 			err = l2cap_stream_rx(chan, control, skb);
6271 		}
6272 
6273 		if (err)
6274 			l2cap_send_disconn_req(chan, ECONNRESET);
6275 	} else {
6276 		const u8 rx_func_to_event[4] = {
6277 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6278 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6279 		};
6280 
6281 		/* Only I-frames are expected in streaming mode */
6282 		if (chan->mode == L2CAP_MODE_STREAMING)
6283 			goto drop;
6284 
6285 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6286 		       control->reqseq, control->final, control->poll,
6287 		       control->super);
6288 
6289 		if (len != 0) {
6290 			BT_ERR("Trailing bytes: %d in sframe", len);
6291 			l2cap_send_disconn_req(chan, ECONNRESET);
6292 			goto drop;
6293 		}
6294 
6295 		/* Validate F and P bits */
6296 		if (control->final && (control->poll ||
6297 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6298 			goto drop;
6299 
6300 		event = rx_func_to_event[control->super];
6301 		if (l2cap_rx(chan, control, skb, event))
6302 			l2cap_send_disconn_req(chan, ECONNRESET);
6303 	}
6304 
6305 	return 0;
6306 
6307 drop:
6308 	kfree_skb(skb);
6309 	return 0;
6310 }
6311 
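/* Deliver an incoming frame to the channel identified by its CID.
 * Unknown CIDs are dropped, except for the A2MP CID, which may create
 * its channel on demand.  Basic mode data goes straight to the
 * channel's recv callback; ERTM and streaming frames go through
 * l2cap_data_rcv().
 */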
6312 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6313 			       struct sk_buff *skb)
6314 {
6315 	struct l2cap_chan *chan;
6316 
6317 	chan = l2cap_get_chan_by_scid(conn, cid);
6318 	if (!chan) {
6319 		if (cid == L2CAP_CID_A2MP) {
6320 			chan = a2mp_channel_create(conn, skb);
6321 			if (!chan) {
6322 				kfree_skb(skb);
6323 				return;
6324 			}
6325 
6326 			l2cap_chan_lock(chan);
6327 		} else {
6328 			BT_DBG("unknown cid 0x%4.4x", cid);
6329 			/* Drop packet and return */
6330 			kfree_skb(skb);
6331 			return;
6332 		}
6333 	}
6334 
6335 	BT_DBG("chan %p, len %d", chan, skb->len);
6336 
6337 	if (chan->state != BT_CONNECTED)
6338 		goto drop;
6339 
6340 	switch (chan->mode) {
6341 	case L2CAP_MODE_BASIC:
6342 		/* If the socket receive buffer overflows we drop data here,
6343 		 * which is *bad* because L2CAP has to be reliable.
6344 		 * But there is no other choice: L2CAP in basic mode
6345 		 * provides no flow control mechanism. */
6346 
6347 		if (chan->imtu < skb->len)
6348 			goto drop;
6349 
6350 		if (!chan->ops->recv(chan, skb))
6351 			goto done;
6352 		break;
6353 
6354 	case L2CAP_MODE_ERTM:
6355 	case L2CAP_MODE_STREAMING:
6356 		l2cap_data_rcv(chan, skb);
6357 		goto done;
6358 
6359 	default:
6360 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6361 		break;
6362 	}
6363 
6364 drop:
6365 	kfree_skb(skb);
6366 
6367 done:
6368 	l2cap_chan_unlock(chan);
6369 }
6370 
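/* Handle a connectionless data packet received on a BR/EDR link:
 * look up the channel registered for the PSM, check its state and
 * MTU, and hand the payload to the channel's recv callback.
 */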
6371 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6372 				  struct sk_buff *skb)
6373 {
6374 	struct hci_conn *hcon = conn->hcon;
6375 	struct l2cap_chan *chan;
6376 
6377 	if (hcon->type != ACL_LINK)
6378 		goto drop;
6379 
6380 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
6381 	if (!chan)
6382 		goto drop;
6383 
6384 	BT_DBG("chan %p, len %d", chan, skb->len);
6385 
6386 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6387 		goto drop;
6388 
6389 	if (chan->imtu < skb->len)
6390 		goto drop;
6391 
6392 	/* Store remote BD_ADDR and PSM for msg_name */
6393 	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6394 	bt_cb(skb)->psm = psm;
6395 
6396 	if (!chan->ops->recv(chan, skb))
6397 		return;
6398 
6399 drop:
6400 	kfree_skb(skb);
6401 }
6402 
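/* Deliver an ATT fixed-channel packet received over LE: find the
 * matching channel by source/destination address, reject blacklisted
 * remote devices, and pass the payload to the channel's recv callback.
 */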
6403 static void l2cap_att_channel(struct l2cap_conn *conn,
6404 			      struct sk_buff *skb)
6405 {
6406 	struct hci_conn *hcon = conn->hcon;
6407 	struct l2cap_chan *chan;
6408 
6409 	if (hcon->type != LE_LINK)
6410 		goto drop;
6411 
6412 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6413 					 &hcon->src, &hcon->dst);
6414 	if (!chan)
6415 		goto drop;
6416 
6417 	BT_DBG("chan %p, len %d", chan, skb->len);
6418 
6419 	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6420 		goto drop;
6421 
6422 	if (chan->imtu < skb->len)
6423 		goto drop;
6424 
6425 	if (!chan->ops->recv(chan, skb))
6426 		return;
6427 
6428 drop:
6429 	kfree_skb(skb);
6430 }
6431 
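/* Parse the basic L2CAP header of a complete frame and dispatch it by
 * CID: signaling, connectionless, ATT, LE signaling and SMP have
 * dedicated handlers; everything else is treated as channel data.
 */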
6432 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6433 {
6434 	struct l2cap_hdr *lh = (void *) skb->data;
6435 	u16 cid, len;
6436 	__le16 psm;
6437 
6438 	skb_pull(skb, L2CAP_HDR_SIZE);
6439 	cid = __le16_to_cpu(lh->cid);
6440 	len = __le16_to_cpu(lh->len);
6441 
6442 	if (len != skb->len) {
6443 		kfree_skb(skb);
6444 		return;
6445 	}
6446 
6447 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6448 
6449 	switch (cid) {
6450 	case L2CAP_CID_SIGNALING:
6451 		l2cap_sig_channel(conn, skb);
6452 		break;
6453 
6454 	case L2CAP_CID_CONN_LESS:
6455 		psm = get_unaligned((__le16 *) skb->data);
6456 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6457 		l2cap_conless_channel(conn, psm, skb);
6458 		break;
6459 
6460 	case L2CAP_CID_ATT:
6461 		l2cap_att_channel(conn, skb);
6462 		break;
6463 
6464 	case L2CAP_CID_LE_SIGNALING:
6465 		l2cap_le_sig_channel(conn, skb);
6466 		break;
6467 
6468 	case L2CAP_CID_SMP:
6469 		if (smp_sig_channel(conn, skb))
6470 			l2cap_conn_del(conn->hcon, EACCES);
6471 		break;
6472 
6473 	default:
6474 		l2cap_data_channel(conn, cid, skb);
6475 		break;
6476 	}
6477 }
6478 
6479 /* ---- L2CAP interface with lower layer (HCI) ---- */
6480 
6481 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6482 {
6483 	int exact = 0, lm1 = 0, lm2 = 0;
6484 	struct l2cap_chan *c;
6485 
6486 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6487 
6488 	/* Find listening sockets and check their link_mode */
6489 	read_lock(&chan_list_lock);
6490 	list_for_each_entry(c, &chan_list, global_l) {
6491 		if (c->state != BT_LISTEN)
6492 			continue;
6493 
6494 		if (!bacmp(&c->src, &hdev->bdaddr)) {
6495 			lm1 |= HCI_LM_ACCEPT;
6496 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6497 				lm1 |= HCI_LM_MASTER;
6498 			exact++;
6499 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
6500 			lm2 |= HCI_LM_ACCEPT;
6501 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6502 				lm2 |= HCI_LM_MASTER;
6503 		}
6504 	}
6505 	read_unlock(&chan_list_lock);
6506 
6507 	return exact ? lm1 : lm2;
6508 }
6509 
6510 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6511 {
6512 	struct l2cap_conn *conn;
6513 
6514 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6515 
6516 	if (!status) {
6517 		conn = l2cap_conn_add(hcon);
6518 		if (conn)
6519 			l2cap_conn_ready(conn);
6520 	} else {
6521 		l2cap_conn_del(hcon, bt_to_errno(status));
6522 	}
6523 }
6524 
6525 int l2cap_disconn_ind(struct hci_conn *hcon)
6526 {
6527 	struct l2cap_conn *conn = hcon->l2cap_data;
6528 
6529 	BT_DBG("hcon %p", hcon);
6530 
6531 	if (!conn)
6532 		return HCI_ERROR_REMOTE_USER_TERM;
6533 	return conn->disc_reason;
6534 }
6535 
6536 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6537 {
6538 	BT_DBG("hcon %p reason %d", hcon, reason);
6539 
6540 	l2cap_conn_del(hcon, bt_to_errno(reason));
6541 }
6542 
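/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timer for medium security and closes the
 * channel outright for high security, while regaining it clears the
 * pending timer.
 */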
6543 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6544 {
6545 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6546 		return;
6547 
6548 	if (encrypt == 0x00) {
6549 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
6550 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6551 		} else if (chan->sec_level == BT_SECURITY_HIGH)
6552 			l2cap_chan_close(chan, ECONNREFUSED);
6553 	} else {
6554 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6555 			__clear_chan_timer(chan);
6556 	}
6557 }
6558 
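/* HCI security (authentication/encryption) result callback.  Walk all
 * channels on the connection and either resume data, continue the
 * connection procedure, or answer a pending connect request according
 * to each channel's state and the reported status.  On LE links a
 * successful encryption also kicks off SMP key distribution.
 */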
6559 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6560 {
6561 	struct l2cap_conn *conn = hcon->l2cap_data;
6562 	struct l2cap_chan *chan;
6563 
6564 	if (!conn)
6565 		return 0;
6566 
6567 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6568 
6569 	if (hcon->type == LE_LINK) {
6570 		if (!status && encrypt)
6571 			smp_distribute_keys(conn, 0);
6572 		cancel_delayed_work(&conn->security_timer);
6573 	}
6574 
6575 	mutex_lock(&conn->chan_lock);
6576 
6577 	list_for_each_entry(chan, &conn->chan_l, list) {
6578 		l2cap_chan_lock(chan);
6579 
6580 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6581 		       state_to_string(chan->state));
6582 
6583 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6584 			l2cap_chan_unlock(chan);
6585 			continue;
6586 		}
6587 
6588 		if (chan->scid == L2CAP_CID_ATT) {
6589 			if (!status && encrypt) {
6590 				chan->sec_level = hcon->sec_level;
6591 				l2cap_chan_ready(chan);
6592 			}
6593 
6594 			l2cap_chan_unlock(chan);
6595 			continue;
6596 		}
6597 
6598 		if (!__l2cap_no_conn_pending(chan)) {
6599 			l2cap_chan_unlock(chan);
6600 			continue;
6601 		}
6602 
6603 		if (!status && (chan->state == BT_CONNECTED ||
6604 				chan->state == BT_CONFIG)) {
6605 			chan->ops->resume(chan);
6606 			l2cap_check_encryption(chan, encrypt);
6607 			l2cap_chan_unlock(chan);
6608 			continue;
6609 		}
6610 
6611 		if (chan->state == BT_CONNECT) {
6612 			if (!status) {
6613 				l2cap_start_connection(chan);
6614 			} else {
6615 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6616 			}
6617 		} else if (chan->state == BT_CONNECT2) {
6618 			struct l2cap_conn_rsp rsp;
6619 			__u16 res, stat;
6620 
6621 			if (!status) {
6622 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6623 					res = L2CAP_CR_PEND;
6624 					stat = L2CAP_CS_AUTHOR_PEND;
6625 					chan->ops->defer(chan);
6626 				} else {
6627 					l2cap_state_change(chan, BT_CONFIG);
6628 					res = L2CAP_CR_SUCCESS;
6629 					stat = L2CAP_CS_NO_INFO;
6630 				}
6631 			} else {
6632 				l2cap_state_change(chan, BT_DISCONN);
6633 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6634 				res = L2CAP_CR_SEC_BLOCK;
6635 				stat = L2CAP_CS_NO_INFO;
6636 			}
6637 
6638 			rsp.scid   = cpu_to_le16(chan->dcid);
6639 			rsp.dcid   = cpu_to_le16(chan->scid);
6640 			rsp.result = cpu_to_le16(res);
6641 			rsp.status = cpu_to_le16(stat);
6642 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6643 				       sizeof(rsp), &rsp);
6644 
6645 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6646 			    res == L2CAP_CR_SUCCESS) {
6647 				char buf[128];
6648 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6649 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6650 					       L2CAP_CONF_REQ,
6651 					       l2cap_build_conf_req(chan, buf),
6652 					       buf);
6653 				chan->num_conf_req++;
6654 			}
6655 		}
6656 
6657 		l2cap_chan_unlock(chan);
6658 	}
6659 
6660 	mutex_unlock(&conn->chan_lock);
6661 
6662 	return 0;
6663 }
6664 
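/* Reassemble ACL data into complete L2CAP frames.  A start fragment
 * carries the basic header and the total length; continuation
 * fragments are appended until the frame is complete and can be
 * passed to l2cap_recv_frame().  Malformed fragments mark the
 * connection unreliable and are dropped.
 */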
6665 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6666 {
6667 	struct l2cap_conn *conn = hcon->l2cap_data;
6668 	struct l2cap_hdr *hdr;
6669 	int len;
6670 
6671 	/* For an AMP controller, do not create an L2CAP connection */
6672 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6673 		goto drop;
6674 
6675 	if (!conn)
6676 		conn = l2cap_conn_add(hcon);
6677 
6678 	if (!conn)
6679 		goto drop;
6680 
6681 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6682 
6683 	switch (flags) {
6684 	case ACL_START:
6685 	case ACL_START_NO_FLUSH:
6686 	case ACL_COMPLETE:
6687 		if (conn->rx_len) {
6688 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6689 			kfree_skb(conn->rx_skb);
6690 			conn->rx_skb = NULL;
6691 			conn->rx_len = 0;
6692 			l2cap_conn_unreliable(conn, ECOMM);
6693 		}
6694 
6695 		/* A start fragment always begins with the basic L2CAP header */
6696 		if (skb->len < L2CAP_HDR_SIZE) {
6697 			BT_ERR("Frame is too short (len %d)", skb->len);
6698 			l2cap_conn_unreliable(conn, ECOMM);
6699 			goto drop;
6700 		}
6701 
6702 		hdr = (struct l2cap_hdr *) skb->data;
6703 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6704 
6705 		if (len == skb->len) {
6706 			/* Complete frame received */
6707 			l2cap_recv_frame(conn, skb);
6708 			return 0;
6709 		}
6710 
6711 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6712 
6713 		if (skb->len > len) {
6714 			BT_ERR("Frame is too long (len %d, expected len %d)",
6715 			       skb->len, len);
6716 			l2cap_conn_unreliable(conn, ECOMM);
6717 			goto drop;
6718 		}
6719 
6720 		/* Allocate skb for the complete frame (with header) */
6721 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6722 		if (!conn->rx_skb)
6723 			goto drop;
6724 
6725 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6726 					  skb->len);
6727 		conn->rx_len = len - skb->len;
6728 		break;
6729 
6730 	case ACL_CONT:
6731 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6732 
6733 		if (!conn->rx_len) {
6734 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6735 			l2cap_conn_unreliable(conn, ECOMM);
6736 			goto drop;
6737 		}
6738 
6739 		if (skb->len > conn->rx_len) {
6740 			BT_ERR("Fragment is too long (len %d, expected %d)",
6741 			       skb->len, conn->rx_len);
6742 			kfree_skb(conn->rx_skb);
6743 			conn->rx_skb = NULL;
6744 			conn->rx_len = 0;
6745 			l2cap_conn_unreliable(conn, ECOMM);
6746 			goto drop;
6747 		}
6748 
6749 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6750 					  skb->len);
6751 		conn->rx_len -= skb->len;
6752 
6753 		if (!conn->rx_len) {
6754 			/* Complete frame received. l2cap_recv_frame
6755 			 * takes ownership of the skb, so clear the
6756 			 * per-connection rx_skb pointer first.
6757 			 */
6758 			struct sk_buff *rx_skb = conn->rx_skb;
6759 			conn->rx_skb = NULL;
6760 			l2cap_recv_frame(conn, rx_skb);
6761 		}
6762 		break;
6763 	}
6764 
6765 drop:
6766 	kfree_skb(skb);
6767 	return 0;
6768 }
6769 
6770 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6771 {
6772 	struct l2cap_chan *c;
6773 
6774 	read_lock(&chan_list_lock);
6775 
6776 	list_for_each_entry(c, &chan_list, global_l) {
6777 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6778 			   &c->src, &c->dst,
6779 			   c->state, __le16_to_cpu(c->psm),
6780 			   c->scid, c->dcid, c->imtu, c->omtu,
6781 			   c->sec_level, c->mode);
6782 	}
6783 
6784 	read_unlock(&chan_list_lock);
6785 
6786 	return 0;
6787 }
6788 
6789 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6790 {
6791 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6792 }
6793 
6794 static const struct file_operations l2cap_debugfs_fops = {
6795 	.open		= l2cap_debugfs_open,
6796 	.read		= seq_read,
6797 	.llseek		= seq_lseek,
6798 	.release	= single_release,
6799 };
6800 
6801 static struct dentry *l2cap_debugfs;
6802 
6803 int __init l2cap_init(void)
6804 {
6805 	int err;
6806 
6807 	err = l2cap_init_sockets();
6808 	if (err < 0)
6809 		return err;
6810 
6811 	if (IS_ERR_OR_NULL(bt_debugfs))
6812 		return 0;
6813 
6814 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6815 					    NULL, &l2cap_debugfs_fops);
6816 
6817 	return 0;
6818 }
6819 
6820 void l2cap_exit(void)
6821 {
6822 	debugfs_remove(l2cap_debugfs);
6823 	l2cap_cleanup_sockets();
6824 }
6825 
6826 module_param(disable_ertm, bool, 0644);
6827 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6828